diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml new file mode 100644 index 00000000..2e6a0ef9 --- /dev/null +++ b/.github/workflows/pylint.yml @@ -0,0 +1,27 @@ +name: Pylint + +env: + FAIL_UNDER: "9.0" + +on: [push, pull_request] + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.9"] + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pylint + pip install coverage + - name: Analysing the code with pylint + run: | + pylint --fail-under=${FAIL_UNDER} basicts scripts diff --git a/.gitignore b/.gitignore index 7773dafc..aeef3178 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,3 @@ - - # dir __pycache__/ .vscode/ diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 00000000..3bc96073 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,435 @@ +# This Pylint rcfile contains a best-effort configuration to uphold the +# best-practices and style described in the Google Python style guide: +# https://google.github.io/styleguide/pyguide.html +# +# Its canonical open-source location is: +# https://google.github.io/styleguide/pylintrc + +[MASTER] + +# Files or directories to be skipped. They should be base names, not paths. +ignore=third_party + +# Files or directories matching the regex patterns are skipped. The regex +# matches against base names, not paths. +ignore-patterns= + +# Pickle collected data for later comparisons. +persistent=no + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Use multiple processes to speed up Pylint. +jobs=4 + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +#enable= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". 
If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=abstract-method, + apply-builtin, + arguments-differ, + attribute-defined-outside-init, + backtick, + bad-option-value, + basestring-builtin, + buffer-builtin, + c-extension-no-member, + consider-using-enumerate, + consider-using-f-string, + cmp-builtin, + cmp-method, + coerce-builtin, + coerce-method, + delslice-method, + div-method, + duplicate-code, + eq-without-hash, + execfile-builtin, + file-builtin, + filter-builtin-not-iterating, + fixme, + getslice-method, + global-statement, + hex-method, + idiv-method, + implicit-str-concat-in-sequence, + import-error, + import-self, + import-star-module-level, + inconsistent-return-statements, + input-builtin, + intern-builtin, + invalid-str-codec, + locally-disabled, + logging-format-interpolation, + logging-fstring-interpolation, + long-builtin, + long-suffix, + map-builtin-not-iterating, + misplaced-comparison-constant, + missing-function-docstring, + missing-module-docstring, + metaclass-assignment, + next-method-called, + next-method-defined, + no-absolute-import, + no-else-break, + no-else-continue, + no-else-raise, + no-else-return, + no-init, # added + no-member, + no-name-in-module, + no-self-use, + nonzero-method, + oct-method, + old-division, + old-ne-operator, + old-octal-literal, + old-raise-syntax, + parameter-unpacking, + print-statement, + protected-access, + raising-string, + range-builtin-not-iterating, + raw_input-builtin, + rdiv-method, + reduce-builtin, + relative-import, + reload-builtin, + round-builtin, + setslice-method, + signature-differs, + standarderror-builtin, + suppressed-message, + sys-max-int, + too-few-public-methods, + too-many-ancestors, + too-many-arguments, + too-many-boolean-expressions, + too-many-branches, + too-many-instance-attributes, + too-many-locals, + too-many-nested-blocks, + too-many-public-methods, + too-many-return-statements, + too-many-statements, + trailing-newlines, + unichr-builtin, + unicode-builtin, + unnecessary-pass, + unpacking-in-except, + unspecified-encoding, + useless-else-on-loop, + useless-object-inheritance, + useless-suppression, + using-cmp-argument, + wrong-import-order, + xrange-builtin, + zip-builtin-not-iterating, + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html. You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages +reports=no + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= + + +[BASIC] + +# Good variable names which should always be accepted, separated by a comma +good-names=main,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. 
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+property-classes=abc.abstractproperty,cached_property.cached_property,cached_property.threaded_cached_property,cached_property.cached_property_with_ttl,cached_property.threaded_cached_property_with_ttl
+
+# Regular expression matching correct function names
+function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_?[a-z][a-z0-9_]*))$
+
+# Regular expression matching correct variable names
+variable-rgx=^[a-z][a-z0-9_]*$
+
+# Regular expression matching correct constant names
+const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
+
+# Regular expression matching correct attribute names
+attr-rgx=^_{0,2}[a-z][a-z0-9_]*$
+
+# Regular expression matching correct argument names
+argument-rgx=^[a-z][a-z0-9_]*$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=^[a-z][a-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=^_?[A-Z][a-zA-Z0-9]*$
+
+# Regular expression matching correct module names
+module-rgx=^(_?[a-z][a-z0-9_]*|__init__)$
+
+# Regular expression matching correct method names
+method-rgx=(?x)^(?:(?P<exempt>_[a-z0-9_]+__|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass|(test|assert)_*[A-Z0-9][a-zA-Z0-9_]*|next)|(?P<camel_case>_{0,2}[A-Z][a-zA-Z0-9_]*)|(?P<snake_case>_{0,2}[a-z][a-z0-9_]*))$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=(__.*__|main|test.*|.*test|.*Test)$
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=10
+
+
+[TYPECHECK]
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis. It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=150
+
+# TODO(https://github.com/PyCQA/pylint/issues/3352): Direct pylint to exempt
+# lines made too long by directives to pytype.
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=(?x)(
+  ^\s*(\#\ )?<?https?://\S+>?$|
+  ^\s*(from\s+\S+\s+)?import\s+.+$)
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=yes
+
+# Maximum number of lines in a module
+max-module-lines=99999
+
+# String used as indentation unit. The internal Google style guide mandates 2
+# spaces. Google's externally-published style guide says 4, consistent with
+# PEP 8. Here, we use 2 spaces, for conformity with many open-sourced Google
+# projects (like TensorFlow).
+indent-string='  '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=TODO
+
+
+[STRING]
+
+# This flag controls whether inconsistent-quotes generates a warning when the
+# character used as a quote delimiter is used inconsistently within a module.
+check-quote-consistency=yes
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_)
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six,six.moves,past.builtins,future.builtins,functools
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging,absl.logging,tensorflow.io.logging
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,
+                   TERMIOS,
+                   Bastion,
+                   rexec,
+                   sets
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant, absl + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls, + class_ + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=StandardError, + Exception, + BaseException \ No newline at end of file diff --git a/README.md b/README.md index 98f714c0..49abf2ec 100644 --- a/README.md +++ b/README.md @@ -1,128 +1,155 @@ -# BasicTS +#
BasicTS: A Time Series Benchmark and Toolkit
+ +
[![EasyTorch](https://img.shields.io/badge/Developing%20with-EasyTorch-2077ff.svg)](https://github.com/cnstark/easytorch) [![LICENSE](https://img.shields.io/github/license/zezhishao/BasicTS.svg)](https://github.com/zezhishao/BasicTS/blob/master/LICENSE) +[![PyTorch](https://img.shields.io/badge/PyTorch-1.10.0-orange)](https://pytorch.org/) +[![python lint](https://github.com/zezhishao/BasicTS/actions/workflows/pylint.yml/badge.svg)](https://github.com/zezhishao/BasicTS/blob/master/.github/workflows/pylint.yml) + +
+
+BasicTS (**Basic** **T**ime **S**eries) is a PyTorch-based benchmark and toolbox for **time series forecasting** (TSF).
+
+On the one hand, BasicTS utilizes a ***unified and standard pipeline*** to give a fair and exhaustive reproduction and comparison of popular deep learning-based TSF models based on rich datasets. BasicTS now has a wealth of methods built in and provides the results of their comparison.
+
+On the other hand, BasicTS provides users with ***easy-to-use and extensible interfaces*** to facilitate the quick design and evaluation of new models. At a minimum, users only need to define the model architecture; all other details can be configured in a configuration file.
+
+## ✨ Highlighted Features
+
+BasicTS is developed based on [EasyTorch](https://github.com/cnstark/easytorch)[1], an easy-to-use and powerful open-source neural network training framework.
+Thanks to EasyTorch, BasicTS has the following highlighted features:
-## 0. What is BasicTS
+### 😼 Fair Performance Review
-BasicTS (**Basic** **T**ime **S**eries) is an open-source PyTorch-based benchmark and toolbox **for time series** .
-At present, it only focuses on time series forecasting, and may add time series classification, anomaly detection, etc., in the future.
+- 🛡**Rich Datasets**. BasicTS supports rich datasets to perform an exhaustive evaluation of a given model based on a unified pipeline. More datasets will be added in the future.
-BasicTS provides users with a ***unified, standard pipeline***, which provides ***reproduction and fair comparision*** of popular deep learning-based time series models to inspire new innovations.
+- ⚔️**Rich Baselines**. BasicTS has a wealth of built-in methods, such as Spatial-Temporal Graph Neural Network-based (STGNN) methods and Transformer-based methods (under construction👷).
-BasicTS is developed based on [EasyTorch](https://github.com/cnstark/easytorch) [2], an easy-to-use and powerful open source neural network training framework.
+### 👨‍💻 Developing with BasicTS
-If this repository helps your research or work, I hope you could give me a ⭐, and I will keep updating it. If you need more features about BasicTS (e.g. more datasets or methods) or have any questions/suggestions, please feel free to let me know~
+- 🔧**Everything Based on Config**. Users can control all the details of the pipeline through a config file, such as the hyperparameters of dataloaders, optimization, and other tricks (*e.g.*, curriculum learning). A hypothetical sketch of such a config file follows this feature list.
-## 1. Supported Models and Datasets
+- 💻**Minimum Code**. Users only need to implement key code such as the model architecture and data pre/post-processing to build their own deep learning projects.
-### 1.1 Short-term Time Series Forecasting
+- 📃**Save Training Log**. BasicTS supports the `logging` system and `Tensorboard`, encapsulated as a unified interface; users can save customized training logs by calling simple interfaces.
+
+- 🔦**Support All Devices**. BasicTS supports CPU, GPU and GPU distributed training (both single node with multiple GPUs and multiple nodes) thanks to using EasyTorch as the backend. Users can enable these by setting parameters, without modifying any code.
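To make the config-driven workflow above concrete, here is a minimal, hypothetical sketch of what such a configuration file might look like. The key names (`CFG.DATASET_NAME`, `CFG.MODEL`, `CFG.TRAIN`, and so on) are illustrative assumptions, not the verified BasicTS schema; see `examples/MLP/MLP_METR-LA.py` in the repository for the real format.

```python
# Minimal sketch of a config-style .py file (key names are assumptions,
# not the verified BasicTS schema). EasyDict allows attribute-style access.
from easydict import EasyDict

CFG = EasyDict()
CFG.DATASET_NAME = "METR-LA"          # which pre-processed dataset to load (hypothetical key)
CFG.GPU_NUM = 1                       # number of GPUs to train on (hypothetical key)

CFG.MODEL = EasyDict()
CFG.MODEL.NAME = "MLP"                # architecture to instantiate
CFG.MODEL.PARAM = {"hidden_dim": 64}  # hyperparameters forwarded to the model

CFG.TRAIN = EasyDict()
CFG.TRAIN.NUM_EPOCHS = 100
CFG.TRAIN.OPTIM = EasyDict({"TYPE": "Adam", "PARAM": {"lr": 0.002}})
```

Because the config is plain Python, anything from optimizer settings to curriculum-learning tricks can live in one importable object, which is what makes the "everything based on config" claim workable.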
-
-| Model\Dataset | METR-LA | PEMS-BAY | PEMS04 | PEMS08 | PEMS03 | PEMS07 | Other Datasets |
-|:-------------:|:-------:|:--------:|:------:|:------:|:------:|:------:|:--------------:|
-| AR | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| VAR | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| HI | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| Graph WaveNet | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| DCRNN | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| STGCN | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| StemGNN | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| MTGNN | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| GTS | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| DGCRN | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| GMAN | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| AGCRN | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| STNorm | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| STID | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| D2STGNN | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | |
-| Other Models | | | | | | | |
-Although we have tried our best to tune the hyperparameters in `basicts/options` for every model and every dataset, there is no guarantee that they are optimal.
-Thus, any PRs for better hyper-parameters are welcomed to make BasicTS fairer.
+## 💿 Dependencies
-### 1.2 Long-term Time Series Forecasting
+### OS
-🕐
+We recommend using BasicTS on Linux systems (*e.g.*, Ubuntu and CentOS).
+Other systems (*e.g.*, Windows and macOS) have not been tested.
-## 2. Installing Dependencies
+### Python
-We recommend that you install all dependencies by:
+Python >= 3.6 (recommended >= 3.9).
-```shell
+[Miniconda](https://docs.conda.io/en/latest/miniconda.html) or [Anaconda](https://www.anaconda.com/) are recommended to create a virtual Python environment.
+
+### Installing
+
+```bash
pip install -r requirements.txt
```
-## 3. Codebase Designs and Conventions
+### Warning
-🕐
+BasicTS is built on PyTorch 1.9.1 or 1.10.0; other versions have not been tested.
-## 4. Usage
-`git clone https://github.com/zezhishao/BasicTS.git`
+## 🎯 Getting Started: Developing with BasicTS
-### 4.1 Data Preparation and Preprocessing
+### Preparing Data
-#### 4.1.1 Data Preparation
+- **Clone BasicTS**
-You can download the raw datasets at [Google Drive](https://drive.google.com/drive/folders/14EJVODCU48fGK0FkyeVom_9lETh80Yjp) or [Baidu Yun](https://pan.baidu.com/s/18qonT9l1_NbvyAgpD4381g)(password: 0lrk), and unzip them to `datasets/raw_data/`.
+  ```bash
+  cd /path/to/your/project
+  git clone https://github.com/zezhishao/BasicTS.git
+  ```
-#### 4.1.2 Data Preprocessing
+- **Download Raw Data**
-```bash
-cd /path/to/project
-python scripts/data_preparation/$DATASET_NAME/generate_training_data.py
-```
+  You can download all the raw datasets at [Google Drive](https://drive.google.com/drive/folders/14EJVODCU48fGK0FkyeVom_9lETh80Yjp) or [Baidu Yun](https://pan.baidu.com/s/18qonT9l1_NbvyAgpD4381g) (password: 0lrk), and unzip them to `datasets/raw_data/`.
-Replace `$DATASET_NAME` with one of `METR-LA`, `PEMS-BAY`, `PEMS03`, `PEMS04`, `PEMS07`, `PEMS08`, or any other supported dataset.
+- **Pre-process Data**
-The processed data will be placed in `datasets/$DATASET_NAME`.
+  ```bash
+  cd /path/to/your/project
+  python scripts/data_preparation/${DATASET_NAME}/generate_training_data.py
+  ```
-Details of preprocessing can be found in `docs/DataPreparation_CN.md`~(Chinese).
+  Replace `${DATASET_NAME}` with one of `METR-LA`, `PEMS-BAY`, `PEMS03`, `PEMS04`, `PEMS07`, `PEMS08`, or any other supported dataset. The processed data will be placed in `datasets/${DATASET_NAME}`.
+
+  Or you can pre-process all datasets by:
+
+  ```bash
+  cd /path/to/your/project
+  bash scripts/data_preparation/all.sh
+  ```
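As a rough illustration of what the pre-processing step produces conceptually, the raw series is cut into (history, future) window pairs matching the `[B, L, N, C]` convention used throughout BasicTS. The sketch below is a hedged, simplified stand-in, not the actual `generate_training_data.py`.

```python
# Conceptual sketch of sliding-window sample generation; NOT the actual
# generate_training_data.py. `data` has shape [T, N, C] (time, nodes, channels).
import numpy as np

def sliding_windows(data: np.ndarray, l_in: int = 12, l_out: int = 12):
    """Return history windows [num, l_in, N, C] and future windows [num, l_out, N, C]."""
    history, future = [], []
    for t in range(l_in, data.shape[0] - l_out + 1):
        history.append(data[t - l_in:t])   # past l_in steps as model input
        future.append(data[t:t + l_out])   # next l_out steps as the target
    return np.stack(history), np.stack(future)

# Example: one day of 5-minute data for 207 sensors with 2 channels (METR-LA-like).
x, y = sliding_windows(np.random.rand(288, 207, 2))
print(x.shape, y.shape)  # (265, 12, 207, 2) (265, 12, 207, 2)
```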
-### 4.2 Run a Time Series Forecasting Model
-We recommend running a time series model with the following command:
-```bash
-cd /path/to/project
-easytrain -c basicts/options/$METHOD_NAME/$METHOD_NAME_$DATASET_NAME.py --gpus '0'
-```
+### 3 Steps to Evaluate Your Model
-Replace the `$METHOD_NAME` and `$DATASET_NAME` with any supported method and dataset. For example,
+- **Define Your Model Architecture**
+
+  The `forward` function needs to follow the conventions of BasicTS. You can find an example of the Multi-Layer Perceptron (`MLP`) model in [examples/MLP/mlp_arch.py](examples/MLP/mlp_arch.py); an illustrative sketch of this convention also appears after this README diff.
+
+- **Define Your Runner for Your Model** (Optional)
+
+  BasicTS provides a unified and standard pipeline in `basicts.runners.BaseTimeSeriesForecastingRunner`.
+  Nevertheless, you still need to define the specific forward process (the `forward` function in the **runner**).
+  Fortunately, BasicTS also provides such an implementation in `basicts.runners.SimpleTimeSeriesForecastingRunner`, which covers most situations.
+  [The runner](examples/MLP/mlp_runner.py) for the `MLP` model can also use this built-in runner.
+  You can also find more runners in `basicts.runners.runner_zoo` to learn more about the runner design.
+
+- **Configure Your Configuration File**
+
+  You can configure all the details of the pipeline and hyperparameters in a configuration file, *i.e.*, **everything is based on config**.
+  The configuration file is a `.py` file, in which you can import your model and runner and set all the options. BasicTS uses `EasyDict` as a parameter container, which is extensible and flexible to use.
+  An example of the configuration file for the `MLP` model on the `METR-LA` dataset can be found in [examples/MLP/MLP_METR-LA.py](examples/MLP/MLP_METR-LA.py).
+
+### Run It!
+
+An example of a start script can be found in [examples/run.py](examples/run.py).
+You can run your model with the following command:
```bash
-easytrain -c basicts/options/GraphWaveNet/GraphWaveNet_METR-LA.py --gpus '0'
+python examples/run.py -c /path/to/your/config/file.py --gpus '0'
```
-If you need to debug, you could run the `basicts/run.py` file.
+## 📌 Examples
-### 4.3 Train a Custom Model
+### Reproducing Built-in Models
-🕐
+BasicTS provides a wealth of built-in models. You can find all the built-in models and their corresponding runners in [`basicts/archs/arch_zoo`](basicts/archs/arch_zoo/) and [`basicts/runners/runner_zoo`](basicts/runners/runner_zoo/), respectively. You can reproduce these models by running the following command:
-## 5. Detailed Docs
+```bash
+python examples/run.py -c examples/${MODEL_NAME}/${MODEL_NAME}_${DATASET_NAME}.py --gpus '0'
+```
+
+Replace `${MODEL_NAME}` and `${DATASET_NAME}` with any supported model and dataset. For example, you can run Graph WaveNet[2] on the METR-LA dataset by:
+
+```bash
+python examples/run.py -c examples/GWNet/GWNet_METR-LA.py --gpus '0'
```
-- data preparation: [data_preparation_CN.md](docs/DataFormat_CN.md)
+### Customize Your Own Model
-🕐
+- [Multi-Layer Perceptron (MLP)](examples/MLP)
+- More...
-## 6. Main Results
+## 📉 Main Results
-![Main results.](results/result.png)
+![Main results.](results/results.png)
-## 7. TODO
+## 🔗 Acknowledgement
-- [ ] : add the result of STID.
-- [ ] : revise the data preprocessing of PEMS07, which only contains weekdays.
-- [ ] : Add detailed documentation and a demo about data preprocessing.
-- [ ] : Add more multivariate time series datasets: Traffic, Electricity, Exchange-Rate, ETTh1, ETTh2, ETTm1, Weather, Solar-Energy.
-- [ ] : Different from the existing traffic datasets (PEMS0X, PEMS-BAY, METR-LA), these datasets have multiple usages in the existing datasets, and the baselines that need to be compared in different contexts are different. Therefore, it is necessary to add statistics for all datasets and describe their typical settings case by case.
-- [ ] : Add statistical information of these dataset, and descibe their typical settings.
-- [ ] : Support models like ASTGCN, ASTGNN, which take multi-periodicities data as input.
-- [ ] : Add detailed docs about 4.2, e.g., the usage of gpu.
-- [ ] : Update D2STGNN arch.
-- [ ] : Modularize the procedures inside train_iters, val_iters, and test_iters. Otherwise, as with GTS, as soon as a model is slightly special (e.g., an extra return value or a different loss), the whole of train_iters, val_iters, and test_iters must be rewritten.
+BasicTS is developed based on [EasyTorch](https://github.com/cnstark/easytorch)[1], an easy-to-use and powerful open-source neural network training framework.
-## References
+## 📜 References
-[1] Yuhao Wang. EasyTorch. <https://github.com/cnstark/easytorch>, 2020.
+- [1] Yuhao Wang. EasyTorch. <https://github.com/cnstark/easytorch>, 2020.
+- [2] Wu Z, Pan S, Long G, et al. Graph WaveNet for Deep Spatial-Temporal Graph Modeling[C]//The 28th International Joint Conference on Artificial Intelligence (IJCAI). International Joint Conferences on Artificial Intelligence Organization, 2019.
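To illustrate the "Define Your Model Architecture" step above: a model only has to expose a `forward` that maps history of shape `[B, L, N, C]` to a prediction of shape `[B, L, N, C]`. The sketch below is a hypothetical toy model, not the repository's `examples/MLP/mlp_arch.py`; the layer sizes and the single-channel assumption are illustrative only.

```python
# Hypothetical toy architecture following the stated forward convention:
# history_data [B, L, N, C] -> prediction [B, horizon, N, 1]. Illustrative only.
import torch
from torch import nn

class TinyMLP(nn.Module):
    def __init__(self, history_len: int = 12, horizon: int = 12, hidden_dim: int = 64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(history_len, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, horizon),
        )

    def forward(self, history_data: torch.Tensor) -> torch.Tensor:
        # Use only the first (target) channel: [B, L, N, C] -> [B, N, L]
        x = history_data[..., 0].transpose(1, 2)
        x = self.net(x)                          # [B, N, horizon]
        return x.transpose(1, 2).unsqueeze(-1)   # [B, horizon, N, 1]

# Smoke test with a METR-LA-shaped batch: 32 samples, 12 steps, 207 nodes, 3 channels.
out = TinyMLP()(torch.randn(32, 12, 207, 3))
print(out.shape)  # torch.Size([32, 12, 207, 1])
```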
diff --git a/TODO.md b/TODO.md
new file mode 100644
index 00000000..9d572fcc
--- /dev/null
+++ b/TODO.md
@@ -0,0 +1,13 @@
+# 📆 TODO
+
+- Add Transformer-based methods, such as Informer, Autoformer, FEDformer, and Pyraformer.
+- Add more datasets, such as electricity, ETT, exchange rate, weather, traffic, and illness.
+- Publish to PyPI
+- Add more docs:
+  - Documentation of the Pipeline
+  - Built-in Time Series Forecasting Dataset
+  - Architecture Interfaces
+  - Runner Interfaces
+  - Configuration Document
+  - Customized Loss
+  - Utils
diff --git a/basicts/__init__.py b/basicts/__init__.py
new file mode 100644
index 00000000..7cc01675
--- /dev/null
+++ b/basicts/__init__.py
@@ -0,0 +1,5 @@
+from .launcher import launch_training
+
+__version__ = "0.1.0"
+
+__all__ = ["__version__", "launch_training"]
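The new `basicts/__init__.py` above exposes `launch_training`. Assuming it accepts a config path and a GPU string, matching the CLI shown in the README (`python examples/run.py -c ... --gpus '0'`), a start script might look roughly like the sketch below; the `(cfg, gpus)` argument pair is an assumption, not a documented signature.

```python
# Hypothetical start script built on the launch_training export above.
# The (cfg, gpus) arguments are assumed from the README's CLI, not verified.
import argparse

from basicts import launch_training

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Launch a BasicTS training run.")
    parser.add_argument("-c", "--cfg", required=True, help="path to the config .py file")
    parser.add_argument("--gpus", default="0", help="visible GPU ids, e.g. '0' or '0,1'")
    args = parser.parse_args()
    launch_training(args.cfg, args.gpus)
```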
diff --git a/basicts/archs/AGCRN_arch/AGCN.py b/basicts/archs/AGCRN_arch/AGCN.py
deleted file mode 100644
index e059fd7f..00000000
--- a/basicts/archs/AGCRN_arch/AGCN.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import torch
-import torch.nn.functional as F
-import torch.nn as nn
-
-class AVWGCN(nn.Module):
-    def __init__(self, dim_in, dim_out, cheb_k, embed_dim):
-        super(AVWGCN, self).__init__()
-        self.cheb_k = cheb_k
-        self.weights_pool = nn.Parameter(torch.FloatTensor(embed_dim, cheb_k, dim_in, dim_out))
-        self.bias_pool = nn.Parameter(torch.FloatTensor(embed_dim, dim_out))
-
-    def forward(self, x, node_embeddings):
-        #x shaped[B, N, C], node_embeddings shaped [N, D] -> supports shaped [N, N]
-        #output shape [B, N, C]
-        node_num = node_embeddings.shape[0]
-        supports = F.softmax(F.relu(torch.mm(node_embeddings, node_embeddings.transpose(0, 1))), dim=1)
-        support_set = [torch.eye(node_num).to(supports.device), supports]
-        #default cheb_k = 3
-        for k in range(2, self.cheb_k):
-            support_set.append(torch.matmul(2 * supports, support_set[-1]) - support_set[-2])
-        supports = torch.stack(support_set, dim=0)
-        weights = torch.einsum('nd,dkio->nkio', node_embeddings, self.weights_pool) #N, cheb_k, dim_in, dim_out
-        bias = torch.matmul(node_embeddings, self.bias_pool) #N, dim_out
-        x_g = torch.einsum("knm,bmc->bknc", supports, x) #B, cheb_k, N, dim_in
-        x_g = x_g.permute(0, 2, 1, 3) # B, N, cheb_k, dim_in
-        x_gconv = torch.einsum('bnki,nkio->bno', x_g, weights) + bias #b, N, dim_out
-        return x_gconv
diff --git a/basicts/archs/AGCRN_arch/AGCRN_arch.py b/basicts/archs/AGCRN_arch/AGCRN_arch.py
deleted file mode 100644
index 1872f72d..00000000
--- a/basicts/archs/AGCRN_arch/AGCRN_arch.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import torch
-import torch.nn as nn
-from basicts.archs.AGCRN_arch.AGCRNCell import AGCRNCell
-from basicts.archs.registry import ARCH_REGISTRY
-
-
-"""
-    Paper: Adaptive Graph Convolutional Recurrent Network for Traffic Forecasting
-    Official Code: https://github.com/LeiBAI/AGCRN
-"""
-
-class AVWDCRNN(nn.Module):
-    def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim, num_layers=1):
-        super(AVWDCRNN, self).__init__()
-        assert num_layers >= 1, 'At least one DCRNN layer in the Encoder.'
- self.node_num = node_num - self.input_dim = dim_in - self.num_layers = num_layers - self.dcrnn_cells = nn.ModuleList() - self.dcrnn_cells.append(AGCRNCell(node_num, dim_in, dim_out, cheb_k, embed_dim)) - for _ in range(1, num_layers): - self.dcrnn_cells.append(AGCRNCell(node_num, dim_out, dim_out, cheb_k, embed_dim)) - - def forward(self, x, init_state, node_embeddings): - #shape of x: (B, T, N, D) - #shape of init_state: (num_layers, B, N, hidden_dim) - assert x.shape[2] == self.node_num and x.shape[3] == self.input_dim - seq_length = x.shape[1] - current_inputs = x - output_hidden = [] - for i in range(self.num_layers): - state = init_state[i] - inner_states = [] - for t in range(seq_length): - state = self.dcrnn_cells[i](current_inputs[:, t, :, :], state, node_embeddings) - inner_states.append(state) - output_hidden.append(state) - current_inputs = torch.stack(inner_states, dim=1) - #current_inputs: the outputs of last layer: (B, T, N, hidden_dim) - #output_hidden: the last state for each layer: (num_layers, B, N, hidden_dim) - #last_state: (B, N, hidden_dim) - return current_inputs, output_hidden - - def init_hidden(self, batch_size): - init_states = [] - for i in range(self.num_layers): - init_states.append(self.dcrnn_cells[i].init_hidden_state(batch_size)) - return torch.stack(init_states, dim=0) #(num_layers, B, N, hidden_dim) - - -@ARCH_REGISTRY.register() -class AGCRN(nn.Module): - def __init__(self, num_nodes, input_dim, rnn_units, output_dim, horizon, num_layers, default_graph, embed_dim, cheb_k): - super(AGCRN, self).__init__() - self.num_node = num_nodes - self.input_dim = input_dim - self.hidden_dim = rnn_units - self.output_dim = output_dim - self.horizon = horizon - self.num_layers = num_layers - - self.default_graph = default_graph - self.node_embeddings = nn.Parameter(torch.randn(self.num_node, embed_dim), requires_grad=True) - - self.encoder = AVWDCRNN(num_nodes, input_dim, rnn_units, cheb_k, - embed_dim, num_layers) - - #predictor - self.end_conv = nn.Conv2d(1, horizon * self.output_dim, kernel_size=(1, self.hidden_dim), bias=True) - - self.init_param() - - def init_param(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - else: - nn.init.uniform_(p) - # print('*****************Model Parameter*****************') - # only_num = False - # if not only_num: - # for name, param in self.named_parameters(): - # print(name, param.shape, param.requires_grad) - # total_num = sum([param.nelement() for param in self.parameters()]) - # print('Total params num: {}'.format(total_num)) - # print('*****************Finish Parameter****************') - - def forward(self, history_data: torch.Tensor) -> torch.Tensor: - """feedforward function of AGCRN. 
- - Args: - source (torch.Tensor): inputs with shape [B, L, N, C] - - Returns: - torch.Tensor: outputs with shape [B, L, N, C] - """ - init_state = self.encoder.init_hidden(history_data.shape[0]) - output, _ = self.encoder(history_data, init_state, self.node_embeddings) #B, T, N, hidden - output = output[:, -1:, :, :] #B, 1, N, hidden - - #CNN based predictor - output = self.end_conv((output)) #B, T*C, N, 1 - output = output.squeeze(-1).reshape(-1, self.horizon, self.output_dim, self.num_node) - output = output.permute(0, 1, 3, 2) #B, T, N, C - - return output diff --git a/basicts/archs/AGCRN_arch/__init__.py b/basicts/archs/AGCRN_arch/__init__.py deleted file mode 100644 index 294b7809..00000000 --- a/basicts/archs/AGCRN_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.AGCRN_arch.AGCRN_arch import AGCRN \ No newline at end of file diff --git a/basicts/archs/D2STGNN_arch/DiffusionBlock/__init__.py b/basicts/archs/D2STGNN_arch/DiffusionBlock/__init__.py deleted file mode 100644 index 04bcc638..00000000 --- a/basicts/archs/D2STGNN_arch/DiffusionBlock/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.D2STGNN_arch.DiffusionBlock.dif_block import DifBlock \ No newline at end of file diff --git a/basicts/archs/D2STGNN_arch/DiffusionBlock/dif_block.py b/basicts/archs/D2STGNN_arch/DiffusionBlock/dif_block.py deleted file mode 100644 index c002bf24..00000000 --- a/basicts/archs/D2STGNN_arch/DiffusionBlock/dif_block.py +++ /dev/null @@ -1,31 +0,0 @@ -import torch.nn as nn -from basicts.archs.D2STGNN_arch.DiffusionBlock.forecast import Forecast -from basicts.archs.D2STGNN_arch.Decouple.residual_decomp import ResidualDecomp -from basicts.archs.D2STGNN_arch.DiffusionBlock.dif_model import STLocalizedConv - -class DifBlock(nn.Module): - def __init__(self, hidden_dim, fk_dim=256, use_pre=None, dy_graph=None, sta_graph=None, **model_args): - super().__init__() - self.pre_defined_graph = model_args['adjs'] - - self.localized_st_conv = STLocalizedConv(hidden_dim, pre_defined_graph=self.pre_defined_graph, use_pre=use_pre, dy_graph=dy_graph, sta_graph=sta_graph, **model_args) - - # sub and norm - self.residual_decompose = ResidualDecomp([-1, -1, -1, hidden_dim]) - # forecast - self.forecast_branch = Forecast(hidden_dim, fk_dim=fk_dim, **model_args) - # backcast - self.backcast_branch = nn.Linear(hidden_dim, hidden_dim) - - def forward(self, X, X_spa, dynamic_graph, static_graph): - Z = self.localized_st_conv(X_spa, dynamic_graph, static_graph) - # forecast branch - forecast_hidden = self.forecast_branch(X_spa, Z, self.localized_st_conv, dynamic_graph, static_graph) - # backcast branch - backcast_seq = self.backcast_branch(Z) - # Residual Decomposition - backcast_seq = backcast_seq - X = X[:, -backcast_seq.shape[1]:, :, :] - backcast_seq_res= self.residual_decompose(X, backcast_seq) - - return backcast_seq_res, forecast_hidden diff --git a/basicts/archs/D2STGNN_arch/DiffusionBlock/dif_model.py b/basicts/archs/D2STGNN_arch/DiffusionBlock/dif_model.py deleted file mode 100644 index 13c9a27b..00000000 --- a/basicts/archs/D2STGNN_arch/DiffusionBlock/dif_model.py +++ /dev/null @@ -1,87 +0,0 @@ -import torch -import torch.nn as nn - -class STLocalizedConv(nn.Module): - def __init__(self, hidden_dim, pre_defined_graph=None, use_pre=None, dy_graph=None, sta_graph=None, **model_args): - super().__init__() - # gated temporal conv - self.k_s = model_args['k_s'] - self.k_t = model_args['k_t'] - self.hidden_dim = hidden_dim - - # graph conv - self.pre_defined_graph = pre_defined_graph - 
self.use_predefined_graph = use_pre - self.use_dynamic_hidden_graph = dy_graph - self.use_static__hidden_graph = sta_graph - - self.support_len = len(self.pre_defined_graph) + int(dy_graph) + int(sta_graph) - self.num_matric = (int(use_pre) * len(self.pre_defined_graph) + len(self.pre_defined_graph) * int(dy_graph) + int(sta_graph)) * self.k_s + 1 - self.dropout = nn.Dropout(model_args['dropout']) - self.pre_defined_graph = self.get_graph(self.pre_defined_graph) - - self.fc_list_updt = nn.Linear(self.k_t * hidden_dim, self.k_t * hidden_dim, bias=False) - self.gcn_updt = nn.Linear(self.hidden_dim*self.num_matric, self.hidden_dim) - - # others - self.bn = nn.BatchNorm2d(self.hidden_dim) - self.activation = nn.ReLU() - - def gconv(self, support, X_k, X_0): - out = [X_0] - for graph in support: - if len(graph.shape) == 2: # staitic or predefined graph - pass - else: - graph = graph.unsqueeze(1) - H_k = torch.matmul(graph, X_k) - out.append(H_k) - out = torch.cat(out, dim=-1) - out = self.gcn_updt(out) - out = self.dropout(out) - return out - - def get_graph(self, support): - # Only used in static including static hidden graph and predefined graph, but not used for dynamic graph. - graph_ordered = [] - mask = 1 - torch.eye(support[0].shape[0]).to(support[0].device) - for graph in support: - k_1_order = graph # 1 order - graph_ordered.append(k_1_order * mask) - for k in range(2, self.k_s+1): # e.g., order = 3, k=[2, 3]; order = 2, k=[2] - k_1_order = torch.matmul(graph, k_1_order) - graph_ordered.append(k_1_order * mask) - # get st localed graph - st_local_graph = [] - for graph in graph_ordered: - graph = graph.unsqueeze(-2).expand(-1, self.k_t, -1) - graph = graph.reshape(graph.shape[0], graph.shape[1] * graph.shape[2]) - st_local_graph.append(graph) # [num_nodes, kernel_size x num_nodes] - return st_local_graph # [order, num_nodes, kernel_size x num_nodes] - - def forward(self, X, dynamic_graph, static_graph): - # X: [bs, seq, nodes, feat] - X = X.unfold(1, self.k_t, 1).permute(0, 1, 2, 4, 3) # [bs, seq, num_nodes, ks, num_feat] - batch_size, seq_len, num_nodes, kernel_size, num_feat = X.shape # seq_len is changing - - # support - support = [] - ## predefined graph - if self.use_predefined_graph: - support = support + self.pre_defined_graph - ## dynamic graph - if self.use_dynamic_hidden_graph: - support = support + dynamic_graph # k_order is caled in dynamic_graph_constructor component - ## predefined graphs and static hidden graphs - if self.use_static__hidden_graph: - support = support + self.get_graph(static_graph) - - # parallelize - X = X.reshape(batch_size, seq_len, num_nodes, kernel_size * num_feat) - out = self.fc_list_updt(X) # batch_size, seq_len, num_nodes, kernel_size * hidden_dim - out = self.activation(out) - out = out.view(batch_size, seq_len, num_nodes, kernel_size, num_feat) - X_0 = torch.mean(out, dim=-2) - X_k = out.transpose(-3, -2).reshape(batch_size, seq_len, kernel_size*num_nodes, num_feat) # batch_size, seq_len, kernel_size x num_nodes, hidden_dim - hidden = self.gconv(support, X_k, X_0) # Nx3N 3NxD -> NxD: batch_size, seq_len, num_nodes, hidden_dim - return hidden diff --git a/basicts/archs/D2STGNN_arch/DynamicGraphConv/Utils/__init__.py b/basicts/archs/D2STGNN_arch/DynamicGraphConv/Utils/__init__.py deleted file mode 100644 index 1585c38e..00000000 --- a/basicts/archs/D2STGNN_arch/DynamicGraphConv/Utils/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from basicts.archs.D2STGNN_arch.DynamicGraphConv.Utils.mask import * -from 
basicts.archs.D2STGNN_arch.DynamicGraphConv.Utils.normalizer import * -from basicts.archs.D2STGNN_arch.DynamicGraphConv.Utils.distance import * diff --git a/basicts/archs/D2STGNN_arch/InherentBlock/__init__.py b/basicts/archs/D2STGNN_arch/InherentBlock/__init__.py deleted file mode 100644 index 18a29126..00000000 --- a/basicts/archs/D2STGNN_arch/InherentBlock/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.D2STGNN_arch.InherentBlock.inh_block import InhBlock diff --git a/basicts/archs/D2STGNN_arch/InherentBlock/inh_block.py b/basicts/archs/D2STGNN_arch/InherentBlock/inh_block.py deleted file mode 100644 index f206fb28..00000000 --- a/basicts/archs/D2STGNN_arch/InherentBlock/inh_block.py +++ /dev/null @@ -1,65 +0,0 @@ -import math -import torch -import torch.nn as nn -from basicts.archs.D2STGNN_arch.Decouple.residual_decomp import ResidualDecomp -from basicts.archs.D2STGNN_arch.InherentBlock.inh_model import RNNLayer, TransformerLayer -from basicts.archs.D2STGNN_arch.InherentBlock.forecast import Forecast - -class PositionalEncoding(nn.Module): - def __init__(self, d_model, dropout=None, max_len: int = 5000): - super().__init__() - self.dropout = nn.Dropout(p=dropout) - position = torch.arange(max_len).unsqueeze(1) - div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model)) - pe = torch.zeros(max_len, 1, d_model) - pe[:, 0, 0::2] = torch.sin(position * div_term) - pe[:, 0, 1::2] = torch.cos(position * div_term) - self.register_buffer('pe', pe) - - def forward(self, X): - X = X + self.pe[:X.size(0)] - X = self.dropout(X) - return X - -class InhBlock(nn.Module): - def __init__(self, hidden_dim, num_heads=4, bias=True, fk_dim=256, first=None, **model_args): - super().__init__() - self.num_feat = hidden_dim - self.hidden_dim = hidden_dim - - if first: - self.pos_encoder = PositionalEncoding(hidden_dim, model_args['dropout']) - else: - self.pos_encoder = None - self.rnn_layer = RNNLayer(hidden_dim, model_args['dropout']) - self.transformer_layer = TransformerLayer(hidden_dim, num_heads, model_args['dropout'], bias) - # forecast - self.forecast_block = Forecast(hidden_dim, fk_dim, **model_args) - # backcast - self.backcast_fc = nn.Linear(hidden_dim, hidden_dim) - # sub residual - self.sub_and_norm = ResidualDecomp([-1, -1, -1, hidden_dim]) - - def forward(self, X): - [batch_size, seq_len, num_nodes, num_feat] = X.shape - # Temporal Model - ## RNN - RNN_H_raw = self.rnn_layer(X) - ## Positional Encoding - if self.pos_encoder is not None: - RNN_H = self.pos_encoder(RNN_H_raw) - else: - RNN_H = RNN_H_raw - ## MultiHead Self Attention - Z = self.transformer_layer(RNN_H, RNN_H, RNN_H) - - # forecast branch - forecast_hidden = self.forecast_block(X, RNN_H_raw, Z, self.transformer_layer, self.rnn_layer, self.pos_encoder) - - # backcast branch - Z = Z.reshape(seq_len, batch_size, num_nodes, num_feat) - Z = Z.transpose(0, 1) - backcast_seq = self.backcast_fc(Z) - backcast_seq_res= self.sub_and_norm(X, backcast_seq) - - return backcast_seq_res, forecast_hidden diff --git a/basicts/archs/D2STGNN_arch/InherentBlock/inh_model.py b/basicts/archs/D2STGNN_arch/InherentBlock/inh_model.py deleted file mode 100644 index 46e08ff9..00000000 --- a/basicts/archs/D2STGNN_arch/InherentBlock/inh_model.py +++ /dev/null @@ -1,33 +0,0 @@ -import torch as th -import torch.nn as nn -from torch.nn import MultiheadAttention - -class RNNLayer(nn.Module): - def __init__(self, hidden_dim, dropout=None): - super().__init__() - self.hidden_dim = hidden_dim - self.gru_cell = nn.GRUCell(hidden_dim, 
hidden_dim) - self.dropout = nn.Dropout(dropout) - - def forward(self, X): - [batch_size, seq_len, num_nodes, hidden_dim] = X.shape - X = X.transpose(1, 2).reshape(batch_size * num_nodes, seq_len, hidden_dim) - hx = th.zeros_like(X[:, 0, :]) - output = [] - for _ in range(X.shape[1]): - hx = self.gru_cell(X[:, _, :], hx) - output.append(hx) - output = th.stack(output, dim=0) - output = self.dropout(output) - return output - -class TransformerLayer(nn.Module): - def __init__(self, hidden_dim, num_heads=4, dropout=None, bias=True): - super().__init__() - self.multi_head_self_attention = MultiheadAttention(hidden_dim, num_heads, dropout=dropout, bias=bias) - self.dropout = nn.Dropout(dropout) - - def forward(self, X, K, V): - Z = self.multi_head_self_attention(X, K, V)[0] - Z = self.dropout(Z) - return Z diff --git a/basicts/archs/D2STGNN_arch/__init__.py b/basicts/archs/D2STGNN_arch/__init__.py deleted file mode 100644 index 8b9a71b4..00000000 --- a/basicts/archs/D2STGNN_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.D2STGNN_arch.D2STGNN_arch import D2STGNN \ No newline at end of file diff --git a/basicts/archs/DCRNN_arch/__init__.py b/basicts/archs/DCRNN_arch/__init__.py deleted file mode 100644 index 0ae64edd..00000000 --- a/basicts/archs/DCRNN_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.DCRNN_arch.DCRNN_arch import DCRNN \ No newline at end of file diff --git a/basicts/archs/DGCRN_arch/__init__.py b/basicts/archs/DGCRN_arch/__init__.py deleted file mode 100644 index b118f2f3..00000000 --- a/basicts/archs/DGCRN_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.DGCRN_arch.DGCRN_arch import DGCRN \ No newline at end of file diff --git a/basicts/archs/GMAN_arch/GMAN_arch.py b/basicts/archs/GMAN_arch/GMAN_arch.py deleted file mode 100644 index 1be659cc..00000000 --- a/basicts/archs/GMAN_arch/GMAN_arch.py +++ /dev/null @@ -1,380 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import math -from basicts.archs.registry import ARCH_REGISTRY - -""" - Paper: GMAN: A Graph Multi-Attention Network for Traffic Prediction - Ref Code: https://github.com/VincLee8188/GMAN-PyTorch/blob/master/model/model_.py - Official Code (TensorFlow): https://github.com/zhengchuanpan/GMAN - TODO: Compared to the official tensorflow version code, this pytorch implementation can achieve similar MAE performance. But somehow, RMSE and MAPE seem to be abnormally high. 
-""" - -class conv2d_(nn.Module): - def __init__(self, input_dims, output_dims, kernel_size, stride=(1, 1), - padding='SAME', use_bias=True, activation=F.relu, - bn_decay=None): - super(conv2d_, self).__init__() - self.activation = activation - if padding == 'SAME': - self.padding_size = math.ceil(kernel_size) - else: - self.padding_size = [0, 0] - self.conv = nn.Conv2d(input_dims, output_dims, kernel_size, stride=stride, - padding=0, bias=use_bias) - self.batch_norm = nn.BatchNorm2d(output_dims, momentum=bn_decay) - - def forward(self, x): - x = x.permute(0, 3, 2, 1) - x = F.pad(x, ([self.padding_size[1], self.padding_size[1], self.padding_size[0], self.padding_size[0]])) - x = self.conv(x) - x = self.batch_norm(x) - if self.activation is not None: - x = F.relu_(x) - return x.permute(0, 3, 2, 1) - - -class FC(nn.Module): - def __init__(self, input_dims, units, activations, bn_decay, use_bias=True): - super(FC, self).__init__() - if isinstance(units, int): - units = [units] - input_dims = [input_dims] - activations = [activations] - elif isinstance(units, tuple): - units = list(units) - input_dims = list(input_dims) - activations = list(activations) - assert type(units) == list - self.convs = nn.ModuleList([conv2d_( - input_dims=input_dim, output_dims=num_unit, kernel_size=[1, 1], stride=[1, 1], - padding='VALID', use_bias=use_bias, activation=activation, - bn_decay=bn_decay) for input_dim, num_unit, activation in - zip(input_dims, units, activations)]) - - def forward(self, x): - for conv in self.convs: - x = conv(x) - return x - - -class STEmbedding(nn.Module): - ''' - spatio-temporal embedding - SE: [num_vertex, D] - TE: [batch_size, num_his + num_pred, 2] (dayofweek, timeofday) - T: num of time steps in one day - D: output dims - retrun: [batch_size, num_his + num_pred, num_vertex, D] - ''' - - def __init__(self, D, bn_decay): - super(STEmbedding, self).__init__() - self.FC_se = FC( - input_dims=[D, D], units=[D, D], activations=[F.relu, None], - bn_decay=bn_decay) - - self.FC_te = FC( - input_dims=[295, D], units=[D, D], activations=[F.relu, None], - bn_decay=bn_decay) # input_dims = time step per day + days per week=288+7=295 - - def forward(self, SE, TE, T=288): - # spatial embedding - SE = SE.unsqueeze(0).unsqueeze(0) - SE = self.FC_se(SE) - # temporal embedding - dayofweek = torch.empty(TE.shape[0], TE.shape[1], 7).to(SE.device) - timeofday = torch.empty(TE.shape[0], TE.shape[1], T).to(SE.device) - for i in range(TE.shape[0]): - dayofweek[i] = F.one_hot(TE[..., 1][i].to(torch.int64) % 7, 7) - for j in range(TE.shape[0]): - timeofday[j] = F.one_hot(TE[..., 0][j].to(torch.int64) % 288, T) - TE = torch.cat((dayofweek, timeofday), dim=-1) - TE = TE.unsqueeze(dim=2) - TE = self.FC_te(TE) - del dayofweek, timeofday - return SE + TE - - -class spatialAttention(nn.Module): - ''' - spatial attention mechanism - X: [batch_size, num_step, num_vertex, D] - STE: [batch_size, num_step, num_vertex, D] - K: number of attention heads - d: dimension of each attention outputs - return: [batch_size, num_step, num_vertex, D] - ''' - - def __init__(self, K, d, bn_decay): - super(spatialAttention, self).__init__() - D = K * d - self.d = d - self.K = K - self.FC_q = FC(input_dims=2 * D, units=D, activations=F.relu, - bn_decay=bn_decay) - self.FC_k = FC(input_dims=2 * D, units=D, activations=F.relu, - bn_decay=bn_decay) - self.FC_v = FC(input_dims=2 * D, units=D, activations=F.relu, - bn_decay=bn_decay) - self.FC = FC(input_dims=D, units=D, activations=F.relu, - bn_decay=bn_decay) - - def forward(self, X, 
STE): - batch_size = X.shape[0] - X = torch.cat((X, STE), dim=-1) - # [batch_size, num_step, num_vertex, K * d] - query = self.FC_q(X) # [B, L, N, K*d] - key = self.FC_k(X) - value = self.FC_v(X) - # [K * batch_size, num_step, num_vertex, d] - query = torch.cat(torch.split(query, self.d, dim=-1), dim=0) # see https://github.com/VincLee8188/GMAN-PyTorch/issues/3 - key = torch.cat(torch.split(key, self.d, dim=-1), dim=0) - value = torch.cat(torch.split(value, self.d, dim=-1), dim=0) - # [K * batch_size, num_step, num_vertex, num_vertex] - attention = torch.matmul(query, key.transpose(2, 3)) - attention /= (self.d ** 0.5) - attention = F.softmax(attention, dim=-1) - # [batch_size, num_step, num_vertex, D] - X = torch.matmul(attention, value) - X = torch.cat(torch.split(X, batch_size, dim=0), dim=-1) # orginal K, change to batch_size - X = self.FC(X) - del query, key, value, attention - return X - - -class temporalAttention(nn.Module): - ''' - temporal attention mechanism - X: [batch_size, num_step, num_vertex, D] - STE: [batch_size, num_step, num_vertex, D] - K: number of attention heads - d: dimension of each attention outputs - return: [batch_size, num_step, num_vertex, D] - ''' - - def __init__(self, K, d, bn_decay, mask=True): - super(temporalAttention, self).__init__() - D = K * d - self.d = d - self.K = K - self.mask = mask - self.FC_q = FC(input_dims=2 * D, units=D, activations=F.relu, - bn_decay=bn_decay) - self.FC_k = FC(input_dims=2 * D, units=D, activations=F.relu, - bn_decay=bn_decay) - self.FC_v = FC(input_dims=2 * D, units=D, activations=F.relu, - bn_decay=bn_decay) - self.FC = FC(input_dims=D, units=D, activations=F.relu, - bn_decay=bn_decay) - - def forward(self, X, STE): - batch_size_ = X.shape[0] - X = torch.cat((X, STE), dim=-1) - # [batch_size, num_step, num_vertex, K * d] - query = self.FC_q(X) - key = self.FC_k(X) - value = self.FC_v(X) - # [K * batch_size, num_step, num_vertex, d] - query = torch.cat(torch.split(query, self.K, dim=-1), dim=0) - key = torch.cat(torch.split(key, self.K, dim=-1), dim=0) - value = torch.cat(torch.split(value, self.K, dim=-1), dim=0) - # query: [K * batch_size, num_vertex, num_step, d] - # key: [K * batch_size, num_vertex, d, num_step] - # value: [K * batch_size, num_vertex, num_step, d] - query = query.permute(0, 2, 1, 3) - key = key.permute(0, 2, 3, 1) - value = value.permute(0, 2, 1, 3) - # [K * batch_size, num_vertex, num_step, num_step] - attention = torch.matmul(query, key) - attention /= (self.d ** 0.5) - # mask attention score - if self.mask: - batch_size = X.shape[0] - num_step = X.shape[1] - num_vertex = X.shape[2] - mask = torch.ones(num_step, num_step) - mask = torch.tril(mask) - mask = torch.unsqueeze(torch.unsqueeze(mask, dim=0), dim=0) - mask = mask.repeat(self.K * batch_size, num_vertex, 1, 1) - mask = mask.to(torch.bool) - attention = torch.where(mask, attention, -2 ** 15 + 1) - # softmax - attention = F.softmax(attention, dim=-1) - # [batch_size, num_step, num_vertex, D] - X = torch.matmul(attention, value) - X = X.permute(0, 2, 1, 3) - X = torch.cat(torch.split(X, batch_size_, dim=0), dim=-1) # orginal K, change to batch_size - X = self.FC(X) - del query, key, value, attention - return X - - -class gatedFusion(nn.Module): - ''' - gated fusion - HS: [batch_size, num_step, num_vertex, D] - HT: [batch_size, num_step, num_vertex, D] - D: output dims - return: [batch_size, num_step, num_vertex, D] - ''' - - def __init__(self, D, bn_decay): - super(gatedFusion, self).__init__() - self.FC_xs = FC(input_dims=D, units=D, 
activations=None, - bn_decay=bn_decay, use_bias=False) - self.FC_xt = FC(input_dims=D, units=D, activations=None, - bn_decay=bn_decay, use_bias=True) - self.FC_h = FC(input_dims=[D, D], units=[D, D], activations=[F.relu, None], - bn_decay=bn_decay) - - def forward(self, HS, HT): - XS = self.FC_xs(HS) - XT = self.FC_xt(HT) - z = torch.sigmoid(torch.add(XS, XT)) - H = torch.add(torch.mul(z, HS), torch.mul(1 - z, HT)) - H = self.FC_h(H) - del XS, XT, z - return H - - -class STAttBlock(nn.Module): - def __init__(self, K, d, bn_decay, mask=False): - super(STAttBlock, self).__init__() - self.spatialAttention = spatialAttention(K, d, bn_decay) - self.temporalAttention = temporalAttention(K, d, bn_decay, mask=mask) - self.gatedFusion = gatedFusion(K * d, bn_decay) - - def forward(self, X, STE): - HS = self.spatialAttention(X, STE) - HT = self.temporalAttention(X, STE) - H = self.gatedFusion(HS, HT) - del HS, HT - return torch.add(X, H) - - -class transformAttention(nn.Module): - ''' - transform attention mechanism - X: [batch_size, num_his, num_vertex, D] - STE_his: [batch_size, num_his, num_vertex, D] - STE_pred: [batch_size, num_pred, num_vertex, D] - K: number of attention heads - d: dimension of each attention outputs - return: [batch_size, num_pred, num_vertex, D] - ''' - - def __init__(self, K, d, bn_decay): - super(transformAttention, self).__init__() - D = K * d - self.K = K - self.d = d - self.FC_q = FC(input_dims=D, units=D, activations=F.relu, - bn_decay=bn_decay) - self.FC_k = FC(input_dims=D, units=D, activations=F.relu, - bn_decay=bn_decay) - self.FC_v = FC(input_dims=D, units=D, activations=F.relu, - bn_decay=bn_decay) - self.FC = FC(input_dims=D, units=D, activations=F.relu, - bn_decay=bn_decay) - - def forward(self, X, STE_his, STE_pred): - batch_size = X.shape[0] - # [batch_size, num_step, num_vertex, K * d] - query = self.FC_q(STE_pred) - key = self.FC_k(STE_his) - value = self.FC_v(X) - # [K * batch_size, num_step, num_vertex, d] - query = torch.cat(torch.split(query, self.K, dim=-1), dim=0) - key = torch.cat(torch.split(key, self.K, dim=-1), dim=0) - value = torch.cat(torch.split(value, self.K, dim=-1), dim=0) - # query: [K * batch_size, num_vertex, num_pred, d] - # key: [K * batch_size, num_vertex, d, num_his] - # value: [K * batch_size, num_vertex, num_his, d] - query = query.permute(0, 2, 1, 3) - key = key.permute(0, 2, 3, 1) - value = value.permute(0, 2, 1, 3) - # [K * batch_size, num_vertex, num_pred, num_his] - attention = torch.matmul(query, key) - attention /= (self.d ** 0.5) - attention = F.softmax(attention, dim=-1) - # [batch_size, num_pred, num_vertex, D] - X = torch.matmul(attention, value) - X = X.permute(0, 2, 1, 3) - X = torch.cat(torch.split(X, batch_size, dim=0), dim=-1) - X = self.FC(X) - del query, key, value, attention - return X - - -@ARCH_REGISTRY.register() -class GMAN(nn.Module): - ''' - GMAN - X: [batch_size, num_his, num_vertx] - TE: [batch_size, num_his + num_pred, 2] (time-of-day, day-of-week) - SE: [num_vertex, K * d] - num_his: number of history steps - num_pred:number of prediction steps - T: one day is divided into T steps - L: number of STAtt blocks in the encoder/decoder - K: number of attention heads - d: dimension of each attention head outputs - return: [batch_size, num_pred, num_vertex] - ''' - - def __init__(self, SE, L, K, d, num_his, bn_decay): - super(GMAN, self).__init__() - D = K * d - self.num_his = num_his - self.SE = nn.Parameter(SE) - self.STEmbedding = STEmbedding(D, bn_decay) - self.STAttBlock_1 = nn.ModuleList([STAttBlock(K, 
d, bn_decay) for _ in range(L)]) - self.STAttBlock_2 = nn.ModuleList([STAttBlock(K, d, bn_decay) for _ in range(L)]) - self.transformAttention = transformAttention(K, d, bn_decay) - self.FC_1 = FC(input_dims=[1, D], units=[D, D], activations=[F.relu, None], - bn_decay=bn_decay) - self.FC_2 = FC(input_dims=[D, D], units=[D, 1], activations=[F.relu, None], - bn_decay=bn_decay) - - def forward(self, history_data: torch.Tensor, future_data: torch.Tensor) -> torch.Tensor: - """feedforward function of GMAN. - - Args: - X (torch.Tensor): Historical data with shape [B, L, N, C]. X[..., 1:3] is the 'time in day' and 'day in week' feature. - Y (torch.Tensor): Future data with shape [B, L, N, C]. Y[..., 1:3] is the 'time in day' and 'day in week' feature. - - Returns: - torch.Tensor: Predictions with shape [B, L, N, 1] - """ - - # prepare data - history_data[..., 1] = (history_data[..., 1] * 288).type(torch.LongTensor) - future_data[..., 1] = (future_data[..., 1] * 288).type(torch.LongTensor) - - TE_X = history_data[..., 0, 1:3] - TE_Y = future_data[..., 0, 1:3] - TE = torch.cat([TE_X, TE_Y], dim=1) - - X = history_data[..., [0]] - - # feed forward - # input - X = self.FC_1(X) - # STE - STE = self.STEmbedding(self.SE, TE) - STE_his = STE[:, :self.num_his] - STE_pred = STE[:, self.num_his:] - # encoder - for net in self.STAttBlock_1: - X = net(X, STE_his) - # transAtt - X = self.transformAttention(X, STE_his, STE_pred) - # decoder - for net in self.STAttBlock_2: - X = net(X, STE_pred) - # output - pred = self.FC_2(X) - del STE, STE_his, STE_pred - return pred diff --git a/basicts/archs/GMAN_arch/__init__.py b/basicts/archs/GMAN_arch/__init__.py deleted file mode 100644 index 5ad9e0ae..00000000 --- a/basicts/archs/GMAN_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.GMAN_arch.GMAN_arch import GMAN \ No newline at end of file diff --git a/basicts/archs/GTS_arch/__init__.py b/basicts/archs/GTS_arch/__init__.py deleted file mode 100644 index e2d25a76..00000000 --- a/basicts/archs/GTS_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.GTS_arch.GTS_arch import GTS \ No newline at end of file diff --git a/basicts/archs/GraphWaveNet_arch/__init__.py b/basicts/archs/GraphWaveNet_arch/__init__.py deleted file mode 100644 index f5ab9975..00000000 --- a/basicts/archs/GraphWaveNet_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.GraphWaveNet_arch.GraphWaveNet_arch import GraphWaveNet \ No newline at end of file diff --git a/basicts/archs/HI_arch/HI_arch.py b/basicts/archs/HI_arch/HI_arch.py deleted file mode 100644 index 31a24c2b..00000000 --- a/basicts/archs/HI_arch/HI_arch.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from basicts.archs.registry import ARCH_REGISTRY - -""" - Paper: Historical Inertia: A Neglected but Powerful Baseline for Long Sequence Time-series Forecasting -""" - -class HINetwork(nn.Module): - def __init__(self, input_length: int, output_length: int, channel=None, reverse=False): - """we use HI[1] as the baseline model for the pipline. - [1] Historical Inertia: A Neglected but Powerful Baseline for Long Sequence Time-series Forecasting - - Args: - input_length (int): input time series length - output_length (int): prediction time series length - channel (list, optional): selected channels. Defaults to None. - reverse (bool, optional): if reverse the prediction of HI. Defaults to False. 
- """ - super(HINetwork, self).__init__() - assert input_length >= output_length, "HI model requires input length > output length" - self.input_length = input_length - self.output_length = output_length - self.channel = channel - self.reverse = reverse - self.fake_param = nn.Linear(1, 1) - - def forward(self, history_data: torch.Tensor, **kwargs) -> torch.Tensor: - """feedforward function of HI. - - Args: - history_data (torch.Tensor): shape = [B, L_in, N, C] - - Returns: - torch.Tensor: model prediction [B, L_out, N, C]. - """ - B, L_in, N, C = history_data.shape - assert self.input_length == L_in, 'error input length' - if self.channel is not None: - history_data = history_data[..., self.channel] - prediction = history_data[:, -self.output_length:, :, :] - if self.reverse: - prediction = prediction.flip(dims=[1]) - return prediction diff --git a/basicts/archs/HI_arch/__init__.py b/basicts/archs/HI_arch/__init__.py deleted file mode 100644 index 0d611c53..00000000 --- a/basicts/archs/HI_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.HI_arch.HI_arch import HINetwork \ No newline at end of file diff --git a/basicts/archs/LSTM_arch/LSTM_arch.py b/basicts/archs/LSTM_arch/LSTM_arch.py deleted file mode 100644 index 49b57d75..00000000 --- a/basicts/archs/LSTM_arch/LSTM_arch.py +++ /dev/null @@ -1,44 +0,0 @@ -import torch -import torch.nn as nn -from basicts.archs.registry import ARCH_REGISTRY - - -@ARCH_REGISTRY.register() -class FCLSTM(nn.Module): - def __init__(self, input_dim, rnn_units, output_dim, horizon, num_layers, dropout=0.1): - super(FCLSTM, self).__init__() - self.input_dim = input_dim - self.hidden_dim = rnn_units - self.output_dim = output_dim - self.horizon = horizon - self.num_layers = num_layers - - self.encoder = nn.LSTM(input_size=self.input_dim, hidden_size=self.hidden_dim, num_layers=self.num_layers, batch_first=True, dropout=dropout) - - #predictor - self.end_fc = nn.Linear(self.num_layers*self.hidden_dim, self.horizon*self.output_dim) - - def forward(self, history_data: torch.Tensor) -> torch.Tensor: - """feedforward function of LSTM. 
- - Args: - history_data (torch.Tensor): inputs with shape [B, L, N, C] - - Returns: - torch.Tensor: outputs with shape [B, L, N, C] - """ - B, L, N, C = history_data.shape - # shared LSTM - history_data = history_data.transpose(1, 2) # [B, N, L, C] - inputs = history_data.reshape(B*N, L, C) - output, (h_n, c_n) = self.encoder(inputs) - h_n = h_n.transpose(0, 1) # [B*N, num_layers, hidden] - h_n = h_n.reshape(B*N, -1) # [B*N, num_layers * hidden] - h_n = h_n.view(B, N, -1) # [B, N, num_layers * hidden] - - # prediction - output = self.end_fc(h_n) # [B, N, self.horizon*self.output_dim] - output = output.view(B, N, self.horizon, self.output_dim) - output = output.transpose(1, 2) # [B, L, N, C] - - return output diff --git a/basicts/archs/LSTM_arch/__init__.py b/basicts/archs/LSTM_arch/__init__.py deleted file mode 100644 index 55b13776..00000000 --- a/basicts/archs/LSTM_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.LSTM_arch.LSTM_arch import FCLSTM as LSTM \ No newline at end of file diff --git a/basicts/archs/MTGNN_arch/MTGNN_arch.py b/basicts/archs/MTGNN_arch/MTGNN_arch.py deleted file mode 100644 index 0f4ed6df..00000000 --- a/basicts/archs/MTGNN_arch/MTGNN_arch.py +++ /dev/null @@ -1,140 +0,0 @@ -from basicts.archs.MTGNN_arch.MTGNN_layers import * -from basicts.archs.registry import ARCH_REGISTRY - -""" - Paper: Connecting the Dots: Multivariate Time Series Forecasting with Graph Neural Networks - Ref Official Code: https://github.com/nnzhan/MTGNN -""" - -@ARCH_REGISTRY.register() -class MTGNN(nn.Module): - def __init__(self, gcn_true, buildA_true, gcn_depth, num_nodes, predefined_A=None, static_feat=None, dropout=0.3, subgraph_size=20, node_dim=40, dilation_exponential=1, conv_channels=32, residual_channels=32, skip_channels=64, end_channels=128, seq_length=12, in_dim=2, out_dim=12, layers=3, propalpha=0.05, tanhalpha=3, layer_norm_affline=True): - super(MTGNN, self).__init__() - self.gcn_true = gcn_true - self.buildA_true = buildA_true - self.num_nodes = num_nodes - self.dropout = dropout - self.predefined_A = predefined_A - self.filter_convs = nn.ModuleList() - self.gate_convs = nn.ModuleList() - self.residual_convs = nn.ModuleList() - self.skip_convs = nn.ModuleList() - self.gconv1 = nn.ModuleList() - self.gconv2 = nn.ModuleList() - self.norm = nn.ModuleList() - self.start_conv = nn.Conv2d(in_channels=in_dim, out_channels=residual_channels, kernel_size=(1, 1)) - self.gc = graph_constructor(num_nodes, subgraph_size, node_dim, alpha=tanhalpha, static_feat=static_feat) - - self.seq_length = seq_length - kernel_size = 7 - if dilation_exponential>1: - self.receptive_field = int(1+(kernel_size-1)*(dilation_exponential**layers-1)/(dilation_exponential-1)) - else: - self.receptive_field = layers*(kernel_size-1) + 1 - - for i in range(1): - if dilation_exponential>1: - rf_size_i = int(1 + i*(kernel_size-1)*(dilation_exponential**layers-1)/(dilation_exponential-1)) - else: - rf_size_i = i*layers*(kernel_size-1)+1 - new_dilation = 1 - for j in range(1,layers+1): - if dilation_exponential > 1: - rf_size_j = int(rf_size_i + (kernel_size-1)*(dilation_exponential**j-1)/(dilation_exponential-1)) - else: - rf_size_j = rf_size_i+j*(kernel_size-1) - - self.filter_convs.append(dilated_inception(residual_channels, conv_channels, dilation_factor=new_dilation)) - self.gate_convs.append(dilated_inception(residual_channels, conv_channels, dilation_factor=new_dilation)) - self.residual_convs.append(nn.Conv2d(in_channels=conv_channels, out_channels=residual_channels, kernel_size=(1, 1))) - if
self.seq_length>self.receptive_field: - self.skip_convs.append(nn.Conv2d(in_channels=conv_channels, out_channels=skip_channels, kernel_size=(1, self.seq_length-rf_size_j+1))) - else: - self.skip_convs.append(nn.Conv2d(in_channels=conv_channels, out_channels=skip_channels, kernel_size=(1, self.receptive_field-rf_size_j+1))) - - if self.gcn_true: - self.gconv1.append(mixprop(conv_channels, residual_channels, gcn_depth, dropout, propalpha)) - self.gconv2.append(mixprop(conv_channels, residual_channels, gcn_depth, dropout, propalpha)) - - if self.seq_length>self.receptive_field: - self.norm.append(LayerNorm((residual_channels, num_nodes, self.seq_length - rf_size_j + 1),elementwise_affine=layer_norm_affline)) - else: - self.norm.append(LayerNorm((residual_channels, num_nodes, self.receptive_field - rf_size_j + 1),elementwise_affine=layer_norm_affline)) - - new_dilation *= dilation_exponential - - self.layers = layers - self.end_conv_1 = nn.Conv2d(in_channels=skip_channels, out_channels=end_channels, kernel_size=(1,1), bias=True) - self.end_conv_2 = nn.Conv2d(in_channels=end_channels, out_channels=out_dim, kernel_size=(1,1), bias=True) - if self.seq_length > self.receptive_field: - self.skip0 = nn.Conv2d(in_channels=in_dim, out_channels=skip_channels, kernel_size=(1, self.seq_length), bias=True) - self.skipE = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(1, self.seq_length-self.receptive_field+1), bias=True) - - else: - self.skip0 = nn.Conv2d(in_channels=in_dim, out_channels=skip_channels, kernel_size=(1, self.receptive_field), bias=True) - self.skipE = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(1, 1), bias=True) - - - self.idx = torch.arange(self.num_nodes) - - - def forward(self, history_data: torch.Tensor, idx: int = None, **kwargs) -> torch.Tensor: - """feedforward function of MTGNN. - - Args: - history_data (torch.Tensor): history data with shape [B, L, N, C] - idx (int, optional): Graph Learning Hyperparameter. Defaults to None. - - Returns: - torch.Tensor: prediction - """ - # select feature - history_data = history_data.transpose(1, 3).contiguous() - seq_len = history_data.size(3) - assert seq_len==self.seq_length, 'input sequence length not equal to preset sequence length' - - if self.seq_length None: - super().__init__() - self.fc1 = nn.Conv2d(in_channels=input_dim, out_channels=hidden_dim, kernel_size=(1,1), bias=True) - self.fc2 = nn.Conv2d(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=(1,1), bias=True) - self.act = nn.ReLU() - self.drop = nn.Dropout(p=0.15) - - def forward(self, input_data:torch.Tensor) -> torch.Tensor: - """feed forward of MLP. 
- - Args: - input_data (torch.Tensor): input data with shape [B, D, N, 1] - - Returns: - torch.Tensor: latent representation - """ - B, D, N, _ = input_data.shape - hidden = self.fc2(self.drop(self.act(self.fc1(input_data)))) # MLP - hidden = hidden + input_data # residual - return hidden diff --git a/basicts/archs/STID_arch/STID_arch.py b/basicts/archs/STID_arch/STID_arch.py deleted file mode 100644 index 22ff583e..00000000 --- a/basicts/archs/STID_arch/STID_arch.py +++ /dev/null @@ -1,97 +0,0 @@ -import torch -import torch.nn as nn -from basicts.archs.STID_arch.MLP import MLP_res -from basicts.archs.registry import ARCH_REGISTRY - - -@ARCH_REGISTRY.register() -class STID(nn.Module): - def __init__(self, **model_args): - super().__init__() - # attributes - self.num_nodes = model_args['num_nodes'] - self.node_dim = model_args['node_dim'] - self.input_len = model_args['input_len'] - self.input_dim = model_args['input_dim'] - self.embed_dim = model_args['embed_dim'] - self.output_len = model_args['output_len'] - self.num_layer = model_args['num_layer'] - self.temp_dim_tid = model_args['temp_dim_tid'] - self.temp_dim_diw = model_args['temp_dim_diw'] - - self.if_T_i_D = model_args['if_T_i_D'] - self.if_D_i_W = model_args['if_D_i_W'] - self.if_node = model_args['if_node'] - - # spatial embeddings - if self.if_node: - self.node_emb = nn.Parameter(torch.empty(self.num_nodes, self.node_dim)) - nn.init.xavier_uniform_(self.node_emb) - # temporal embeddings - if self.if_T_i_D: - self.T_i_D_emb = nn.Parameter(torch.empty(288, self.temp_dim_tid)) - nn.init.xavier_uniform_(self.T_i_D_emb) - if self.if_D_i_W: - self.D_i_W_emb = nn.Parameter(torch.empty(7, self.temp_dim_diw)) - nn.init.xavier_uniform_(self.D_i_W_emb) - - # embedding layer - self.time_series_emb_layer = nn.Conv2d(in_channels=self.input_dim * self.input_len, out_channels=self.embed_dim, kernel_size=(1, 1), bias=True) - - # encoding - self.hidden_dim = self.embed_dim+self.node_dim*int(self.if_node)+self.temp_dim_tid*int(self.if_D_i_W) + self.temp_dim_diw*int(self.if_T_i_D) - self.encoder = nn.Sequential(*[MLP_res(self.hidden_dim, self.hidden_dim) for _ in range(self.num_layer)]) - - # regression - self.regression_layer = nn.Conv2d(in_channels=self.hidden_dim, out_channels=self.output_len, kernel_size=(1,1), bias=True) - - def forward(self, history_data: torch.Tensor, **kwargs) -> torch.Tensor: - """feed forward.
- - Args: - history_data (torch.Tensor): history data with shape [B, L, N, C] - - Returns: - torch.Tensor: prediction with shape [B, L, N, C] - """ - # prepare data - X = history_data[..., range(self.input_dim)] - t_i_d_data = history_data[..., 1] - d_i_w_data = history_data[..., 2] - - if self.if_T_i_D: - T_i_D_emb = self.T_i_D_emb[(t_i_d_data[:, -1, :] * 288).type(torch.LongTensor)] # [B, N, D] - else: - T_i_D_emb = None - if self.if_D_i_W: - D_i_W_emb = self.D_i_W_emb[(d_i_w_data[:, -1, :]).type(torch.LongTensor)] # [B, N, D] - else: - D_i_W_emb = None - - # time series embedding - B, L, N, _ = X.shape - X = X.transpose(1, 2).contiguous() # B, N, L, 1 - X = X.view(B, N, -1).transpose(1, 2).unsqueeze(-1) # B, D, N, 1 - time_series_emb = self.time_series_emb_layer(X) # B, D, N, 1 - - node_emb = [] - if self.if_node: - # expand node embeddings - node_emb.append(self.node_emb.unsqueeze(0).expand(B, -1, -1).transpose(1, 2).unsqueeze(-1)) # B, D, N, 1 - # temporal embeddings - tem_emb = [] - if T_i_D_emb is not None: - tem_emb.append(T_i_D_emb.transpose(1, 2).unsqueeze(-1)) # B, D, N, 1 - if D_i_W_emb is not None: - tem_emb.append(D_i_W_emb.transpose(1, 2).unsqueeze(-1)) # B, D, N, 1 - - # concatenate all embeddings - hidden = torch.cat([time_series_emb] + node_emb + tem_emb, dim=1) - - # encoding - hidden = self.encoder(hidden) - - # regression - prediction = self.regression_layer(hidden) - - return prediction diff --git a/basicts/archs/STID_arch/__init__.py b/basicts/archs/STID_arch/__init__.py deleted file mode 100644 index e86e9c6e..00000000 --- a/basicts/archs/STID_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.STID_arch.STID_arch import STID \ No newline at end of file diff --git a/basicts/archs/STNorm_arch/__init__.py b/basicts/archs/STNorm_arch/__init__.py deleted file mode 100644 index 8bff3176..00000000 --- a/basicts/archs/STNorm_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.STNorm_arch.STNorm_arch import STNorm \ No newline at end of file diff --git a/basicts/archs/Stat_arch/Stat_arch.py b/basicts/archs/Stat_arch/Stat_arch.py deleted file mode 100644 index 2511942c..00000000 --- a/basicts/archs/Stat_arch/Stat_arch.py +++ /dev/null @@ -1,153 +0,0 @@ -""" - Statistical models, including MA (Moving Average), AR (Auto Regression), VAR (Vector Auto Regression), and ARIMA (Autoregressive Integrated Moving Average). - All random noise terms are omitted.
- Ref Code: https://github.com/doowb/sma -""" -import torch -import torch.nn as nn -import copy -from basicts.archs.registry import ARCH_REGISTRY - - -@ARCH_REGISTRY.register() -class SimpleMovingAverage(nn.Module): - def __init__(self, q: int, input_length: int, output_length: int): - """simple moving average as prediction - - Args: - q (int): sliding window size - input_length (int): length of input history data - output_length (int): length of prediction - """ - super(SimpleMovingAverage, self).__init__() - assert input_length >= q, "Error: window size > input data length" - self.q = q - self.output_length = output_length - self.input_length = input_length - - def forward(self, history_data: torch.Tensor, **kwargs) -> torch.Tensor: - """feed forward of MA: https://github.com/doowb/sma - forward([1, 2, 3, 4, 5, 6, 7, 8, 9]) | q=4; - //=> [ '2.50', '3.50', '4.50', '5.50', '6.50', '7.50' ] - //=> │ │ │ │ │ └─(6+7+8+9)/4 - //=> │ │ │ │ └─(5+6+7+8)/4 - //=> │ │ │ └─(4+5+6+7)/4 - //=> │ │ └─(3+4+5+6)/4 - //=> │ └─(2+3+4+5)/4 - //=> └─(1+2+3+4)/4 - - Args: - history_data (torch.Tensor): history data with shape [B, L, N, C] - - Returns: - torch.Tensor: MA prediction - """ - [B, L, N, C] = history_data.shape - assert L == self.input_length, "unexpected input data length" - data_full = copy.copy(history_data) - for i in range(self.output_length): - data_in_window = data_full[:, -self.q:, :, :] - simple_avg = torch.mean(data_in_window, dim=1) # [B, N, C] - data_full = torch.cat([data_full, simple_avg.unsqueeze(1)], dim=1) - prediction = data_full[:, -self.output_length:, :, :] - return prediction - - -@ARCH_REGISTRY.register() -class AutoRegressive(nn.Module): - def __init__(self, p: int, input_length: int, output_length: int): - """Auto Regressive (AR) model - - Args: - p (int): sliding window size - input_length (int): length of input history data - output_length (int): length of prediction - """ - super(AutoRegressive, self).__init__() - assert input_length >= p, "Error: window size > input data length" - self.p = p - self.output_length = output_length - self.input_length = input_length - self.weight = nn.Parameter(torch.empty(p, 1)) - print("Notes: the weights of the AR model are unnormalized.") - self.c = nn.Parameter(torch.empty(1)) - nn.init.uniform_(self.weight) - nn.init.zeros_(self.c) - - def forward(self, history_data: torch.Tensor, **kwargs) -> torch.Tensor: - """feed forward of autoregressive model: https://en.wikipedia.org/wiki/Autoregressive_model - - Args: - history_data (torch.Tensor): history data with shape [B, L, N, C] - - Returns: - torch.Tensor: AR prediction - """ - [B, L, N, C] = history_data.shape - assert L == self.input_length, "unexpected input data length" - data_full = copy.copy(history_data) - for i in range(self.output_length): - data_in_window = data_full[:, -self.p:, :, :] # [B, p, N, C] - data_in_window = data_in_window.permute(0, 2, 3, 1) # [B, N, C, p] - weight_avg = torch.matmul(data_in_window, self.weight).permute(0, 3, 1, 2) # [B, 1, N, C] - weight_avg = weight_avg + self.c # the noise term is omitted - data_full = torch.cat([data_full, weight_avg], dim=1) - prediction = data_full[:, -self.output_length:, :, :] - return prediction - - -@ARCH_REGISTRY.register() -class VectorAutoRegression(nn.Module): - def __init__(self, p: int, input_length: int, output_length: int, num_time_series: int): - """vector auto regressive model for multivariate time series forecasting - - Args: - p (int): sliding window size - input_length (int): length of input history data - output_length
(int): length of prediction - num_time_series (int): number of time series - """ - super(VectorAutoRegression, self).__init__() - self.p = p - self.output_length = output_length - self.input_length = input_length - self.N = num_time_series - self.weight = nn.Parameter(torch.empty(p, self.N, self.N)) # [p, N, N] - print("Notes: the weights of VAR model are unnormalized.") - self.c = nn.Parameter(torch.empty(self.N, 1)) - nn.init.xavier_uniform_(self.weight) - nn.init.zeros_(self.c) - - def forward(self, history_data: torch.Tensor, **kwargs) -> torch.Tensor: - """feed forward of VAR: https://en.wikipedia.org/wiki/Vector_autoregression - - Args: - history_data (torch.Tensor): history data with shape [B, L, N, C] - - Returns: - torch.Tensor: VAR prediction - """ - [B, L, N, C] = history_data.shape - assert L == self.input_length, "error input data length" - data_full = copy.copy(history_data) - for i in range(self.output_length): - data_in_window = data_full[:, -self.p:, :, :] # [B, p, N, C] - data_in_window = data_in_window.permute(0, 3, 1, 2).unsqueeze(-1) # [B, C, p, N, 1] - weighted_data = torch.matmul(self.weight, data_in_window).squeeze(-1) # [B, C, p, N] - weight_avg = torch.mean(weighted_data, dim=-2).permute(0, 2, 1).unsqueeze(1) # [B, 1, N, C] - weight_avg = weight_avg + self.c # error term is omitted - data_full = torch.cat([data_full, weight_avg], dim=1) - prediction = data_full[:, -self.output_length:, :, :] - return prediction - - -@ARCH_REGISTRY.register() -class ARIMA(nn.Module): - def __init__(self): - super(ARIMA, self).__init__() - """TODO: ARIMA model requires unnormalized data to add N(0, 1) noise. - """ - pass - - def forward(self, history_data: torch.Tensor, **kwargs) -> torch.Tensor: - pass diff --git a/basicts/archs/Stat_arch/__init__.py b/basicts/archs/Stat_arch/__init__.py deleted file mode 100644 index 3c89f178..00000000 --- a/basicts/archs/Stat_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.Stat_arch.Stat_arch import * \ No newline at end of file diff --git a/basicts/archs/StemGNN_arch/__init__.py b/basicts/archs/StemGNN_arch/__init__.py deleted file mode 100644 index af90b08b..00000000 --- a/basicts/archs/StemGNN_arch/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from basicts.archs.StemGNN_arch.StemGNN_arch import StemGNN \ No newline at end of file diff --git a/basicts/archs/__init__.py b/basicts/archs/__init__.py index b306c9ed..d455f735 100644 --- a/basicts/archs/__init__.py +++ b/basicts/archs/__init__.py @@ -1,8 +1,18 @@ -import os +from .arch_zoo.stid_arch import STID +from .arch_zoo.gwnet_arch import GraphWaveNet +from .arch_zoo.dcrnn_arch import DCRNN +from .arch_zoo.d2stgnn_arch import D2STGNN +from .arch_zoo.stgcn_arch import STGCN +from .arch_zoo.mtgnn_arch import MTGNN +from .arch_zoo.stnorm_arch import STNorm +from .arch_zoo.agcrn_arch import AGCRN +from .arch_zoo.stemgnn_arch import StemGNN +from .arch_zoo.gts_arch import GTS +from .arch_zoo.dgcrn_arch import DGCRN +from .arch_zoo.linear_arch import Linear, DLinear, NLinear -from .registry import ARCH_REGISTRY -from easytorch.utils.registry import scan_modules - -__all__ = ['ARCH_REGISTRY'] - -scan_modules(os.getcwd(), __file__, ['__init__.py', 'builder.py']) +__all__ = ["STID", "GraphWaveNet", "DCRNN", + "D2STGNN", "STGCN", "MTGNN", + "STNorm", "AGCRN", "StemGNN", + "GTS", "DGCRN", "Linear", + "DLinear", "NLinear"] diff --git a/basicts/archs/arch_zoo/agcrn_arch/__init__.py b/basicts/archs/arch_zoo/agcrn_arch/__init__.py new file mode 100644 index 00000000..01b4cc8d --- /dev/null +++ 
b/basicts/archs/arch_zoo/agcrn_arch/__init__.py @@ -0,0 +1,3 @@ +from .agcrn_arch import AGCRN + +__all__ = ["AGCRN"] diff --git a/basicts/archs/arch_zoo/agcrn_arch/agcn.py b/basicts/archs/arch_zoo/agcrn_arch/agcn.py new file mode 100644 index 00000000..1cea52ee --- /dev/null +++ b/basicts/archs/arch_zoo/agcrn_arch/agcn.py @@ -0,0 +1,35 @@ +import torch +import torch.nn.functional as F +import torch.nn as nn + + +class AVWGCN(nn.Module): + def __init__(self, dim_in, dim_out, cheb_k, embed_dim): + super(AVWGCN, self).__init__() + self.cheb_k = cheb_k + self.weights_pool = nn.Parameter( + torch.FloatTensor(embed_dim, cheb_k, dim_in, dim_out)) + self.bias_pool = nn.Parameter(torch.FloatTensor(embed_dim, dim_out)) + + def forward(self, x, node_embeddings): + # x shaped [B, N, C], node_embeddings shaped [N, D] -> supports shaped [N, N] + # output shape [B, N, C] + node_num = node_embeddings.shape[0] + supports = F.softmax( + F.relu(torch.mm(node_embeddings, node_embeddings.transpose(0, 1))), dim=1) + support_set = [torch.eye(node_num).to(supports.device), supports] + # Chebyshev polynomial recursion (default cheb_k = 3) + for k in range(2, self.cheb_k): + support_set.append(torch.matmul( + 2 * supports, support_set[-1]) - support_set[-2]) + supports = torch.stack(support_set, dim=0) + # N, cheb_k, dim_in, dim_out + weights = torch.einsum( + 'nd,dkio->nkio', node_embeddings, self.weights_pool) + bias = torch.matmul(node_embeddings, self.bias_pool) # N, dim_out + x_g = torch.einsum("knm,bmc->bknc", supports, + x) # B, cheb_k, N, dim_in + x_g = x_g.permute(0, 2, 1, 3) # B, N, cheb_k, dim_in + x_gconv = torch.einsum('bnki,nkio->bno', x_g, + weights) + bias # b, N, dim_out + return x_gconv diff --git a/basicts/archs/arch_zoo/agcrn_arch/agcrn_arch.py b/basicts/archs/arch_zoo/agcrn_arch/agcrn_arch.py new file mode 100644 index 00000000..a64a1c8d --- /dev/null +++ b/basicts/archs/arch_zoo/agcrn_arch/agcrn_arch.py @@ -0,0 +1,108 @@ +import torch +import torch.nn as nn + +from .agcrn_cell import AGCRNCell + + +class AVWDCRNN(nn.Module): + def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim, num_layers=1): + super(AVWDCRNN, self).__init__() + assert num_layers >= 1, 'At least one DCRNN layer in the Encoder.'
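+ # Stack the AGCRN cells: the first cell maps dim_in -> dim_out, and every deeper cell maps dim_out -> dim_out.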
+ self.node_num = node_num + self.input_dim = dim_in + self.num_layers = num_layers + self.dcrnn_cells = nn.ModuleList() + self.dcrnn_cells.append( + AGCRNCell(node_num, dim_in, dim_out, cheb_k, embed_dim)) + for _ in range(1, num_layers): + self.dcrnn_cells.append( + AGCRNCell(node_num, dim_out, dim_out, cheb_k, embed_dim)) + + def forward(self, x, init_state, node_embeddings): + # shape of x: (B, T, N, D) + # shape of init_state: (num_layers, B, N, hidden_dim) + assert x.shape[2] == self.node_num and x.shape[3] == self.input_dim + seq_length = x.shape[1] + current_inputs = x + output_hidden = [] + for i in range(self.num_layers): + state = init_state[i] + inner_states = [] + for t in range(seq_length): + state = self.dcrnn_cells[i]( + current_inputs[:, t, :, :], state, node_embeddings) + inner_states.append(state) + output_hidden.append(state) + current_inputs = torch.stack(inner_states, dim=1) + # current_inputs: the outputs of the last layer: (B, T, N, hidden_dim) + # output_hidden: the last state for each layer: (num_layers, B, N, hidden_dim) + # last_state: (B, N, hidden_dim) + return current_inputs, output_hidden + + def init_hidden(self, batch_size): + init_states = [] + for i in range(self.num_layers): + init_states.append( + self.dcrnn_cells[i].init_hidden_state(batch_size)) + # (num_layers, B, N, hidden_dim) + return torch.stack(init_states, dim=0) + + +class AGCRN(nn.Module): + """ + Paper: Adaptive Graph Convolutional Recurrent Network for Traffic Forecasting + Official Code: https://github.com/LeiBAI/AGCRN + Link: https://arxiv.org/abs/2007.02842 + """ + + def __init__(self, num_nodes, input_dim, rnn_units, output_dim, horizon, num_layers, default_graph, embed_dim, cheb_k): + super(AGCRN, self).__init__() + self.num_node = num_nodes + self.input_dim = input_dim + self.hidden_dim = rnn_units + self.output_dim = output_dim + self.horizon = horizon + self.num_layers = num_layers + + self.default_graph = default_graph + self.node_embeddings = nn.Parameter(torch.randn( + self.num_node, embed_dim), requires_grad=True) + + self.encoder = AVWDCRNN(num_nodes, input_dim, rnn_units, cheb_k, + embed_dim, num_layers) + + # predictor + self.end_conv = nn.Conv2d( + 1, horizon * self.output_dim, kernel_size=(1, self.hidden_dim), bias=True) + + self.init_param() + + def init_param(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + else: + nn.init.uniform_(p) + + def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, batch_seen: int, epoch: int, train: bool, **kwargs) -> torch.Tensor: + """Feedforward function of AGCRN. + + Args: + history_data (torch.Tensor): inputs with shape [B, L, N, C].
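+ future_data (torch.Tensor), batch_seen (int), epoch (int), train (bool): accepted but unused here; they appear to be kept so all models expose a unified forward interface.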
+ + Returns: + torch.Tensor: outputs with shape [B, L, N, C] + """ + + init_state = self.encoder.init_hidden(history_data.shape[0]) + output, _ = self.encoder( + history_data, init_state, self.node_embeddings) # B, T, N, hidden + output = output[:, -1:, :, :] # B, 1, N, hidden + + # CNN based predictor + output = self.end_conv((output)) # B, T*C, N, 1 + output = output.squeeze(-1).reshape(-1, self.horizon, + self.output_dim, self.num_node) + output = output.permute(0, 1, 3, 2) # B, T, N, C + + return output diff --git a/basicts/archs/AGCRN_arch/AGCRNCell.py b/basicts/archs/arch_zoo/agcrn_arch/agcrn_cell.py similarity index 68% rename from basicts/archs/AGCRN_arch/AGCRNCell.py rename to basicts/archs/arch_zoo/agcrn_arch/agcrn_cell.py index b751a3b9..d11c6d59 100644 --- a/basicts/archs/AGCRN_arch/AGCRNCell.py +++ b/basicts/archs/arch_zoo/agcrn_arch/agcrn_cell.py @@ -1,18 +1,22 @@ import torch import torch.nn as nn -from basicts.archs.AGCRN_arch.AGCN import AVWGCN + +from .agcn import AVWGCN + class AGCRNCell(nn.Module): def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim): super(AGCRNCell, self).__init__() self.node_num = node_num self.hidden_dim = dim_out - self.gate = AVWGCN(dim_in+self.hidden_dim, 2*dim_out, cheb_k, embed_dim) - self.update = AVWGCN(dim_in+self.hidden_dim, dim_out, cheb_k, embed_dim) + self.gate = AVWGCN(dim_in+self.hidden_dim, 2 * + dim_out, cheb_k, embed_dim) + self.update = AVWGCN(dim_in+self.hidden_dim, + dim_out, cheb_k, embed_dim) def forward(self, x, state, node_embeddings): - #x: B, num_nodes, input_dim - #state: B, num_nodes, hidden_dim + # x: B, num_nodes, input_dim + # state: B, num_nodes, hidden_dim state = state.to(x.device) input_and_state = torch.cat((x, state), dim=-1) z_r = torch.sigmoid(self.gate(input_and_state, node_embeddings)) @@ -23,4 +27,4 @@ def forward(self, x, state, node_embeddings): return h def init_hidden_state(self, batch_size): - return torch.zeros(batch_size, self.node_num, self.hidden_dim) \ No newline at end of file + return torch.zeros(batch_size, self.node_num, self.hidden_dim) diff --git a/basicts/archs/arch_zoo/d2stgnn_arch/__init__.py b/basicts/archs/arch_zoo/d2stgnn_arch/__init__.py new file mode 100644 index 00000000..50fbf7f3 --- /dev/null +++ b/basicts/archs/arch_zoo/d2stgnn_arch/__init__.py @@ -0,0 +1,3 @@ +from .d2stgnn_arch import D2STGNN + +__all__ = ["D2STGNN"] diff --git a/basicts/archs/D2STGNN_arch/D2STGNN_arch.py b/basicts/archs/arch_zoo/d2stgnn_arch/d2stgnn_arch.py similarity index 53% rename from basicts/archs/D2STGNN_arch/D2STGNN_arch.py rename to basicts/archs/arch_zoo/d2stgnn_arch/d2stgnn_arch.py index 593f258a..91a4633e 100644 --- a/basicts/archs/D2STGNN_arch/D2STGNN_arch.py +++ b/basicts/archs/arch_zoo/d2stgnn_arch/d2stgnn_arch.py @@ -1,24 +1,21 @@ import torch import torch.nn as nn import torch.nn.functional as F -""" - Paper: Decoupled Dynamic Spatial-Temporal Graph Neural Network for Traffic Forecasting - Official Code: https://github.com/zezhishao/D2STGNN -""" -from basicts.archs.D2STGNN_arch.DiffusionBlock import DifBlock -from basicts.archs.D2STGNN_arch.InherentBlock import InhBlock -from basicts.archs.D2STGNN_arch.DynamicGraphConv.DyGraphCons import DynamicGraphConstructor -from basicts.archs.D2STGNN_arch.Decouple.estimation_gate import EstimationGate -from basicts.archs.registry import ARCH_REGISTRY +from .difusion_block import DifBlock +from .inherent_block import InhBlock +from .dynamic_graph_conv.dy_graph_conv import DynamicGraphConstructor +from .decouple.estimation_gate import 
EstimationGate class DecoupleLayer(nn.Module): def __init__(self, hidden_dim, fk_dim=256, first=False, **model_args): super().__init__() - self.spatial_gate = EstimationGate(model_args['node_hidden'], model_args['time_emb_dim'], 64, model_args['seq_length']) - self.dif_layer = DifBlock(hidden_dim, fk_dim=fk_dim, **model_args) - self.inh_layer = InhBlock(hidden_dim, fk_dim=fk_dim, first=first, **model_args) + self.spatial_gate = EstimationGate( + model_args['node_hidden'], model_args['time_emb_dim'], 64, model_args['seq_length']) + self.dif_layer = DifBlock(hidden_dim, fk_dim=fk_dim, **model_args) + self.inh_layer = InhBlock( + hidden_dim, fk_dim=fk_dim, first=first, **model_args) def forward(self, X: torch.Tensor, dynamic_graph: torch.Tensor, static_graph, E_u, E_d, T_D, D_W): """decouple layer @@ -37,57 +34,71 @@ def forward(self, X: torch.Tensor, dynamic_graph: torch.Tensor, static_graph, E_ torch.Tensor: the output of the forecast branch of Diffusion Block with shape (B, L'', N, D), where L''=output_seq_len / model_args['gap'] to avoid error accumulation in auto-regression. torch.Tensor: the output of the forecast branch of Inherent Block with shape (B, L'', N, D), where L''=output_seq_len / model_args['gap'] to avoid error accumulation in auto-regression. """ - X_spa = self.spatial_gate(E_u, E_d, T_D, D_W, X) - dif_backcast_seq_res, dif_forecast_hidden = self.dif_layer(X=X, X_spa=X_spa, dynamic_graph=dynamic_graph, static_graph=static_graph) - inh_backcast_seq_res, inh_forecast_hidden = self.inh_layer(dif_backcast_seq_res) + X_spa = self.spatial_gate(E_u, E_d, T_D, D_W, X) + dif_backcast_seq_res, dif_forecast_hidden = self.dif_layer( + X=X, X_spa=X_spa, dynamic_graph=dynamic_graph, static_graph=static_graph) + inh_backcast_seq_res, inh_forecast_hidden = self.inh_layer( + dif_backcast_seq_res) return inh_backcast_seq_res, dif_forecast_hidden, inh_forecast_hidden -@ARCH_REGISTRY.register() + class D2STGNN(nn.Module): + """ + Paper: Decoupled Dynamic Spatial-Temporal Graph Neural Network for Traffic Forecasting + Link: https://arxiv.org/abs/2206.09112 + Official Code: https://github.com/zezhishao/D2STGNN + """ def __init__(self, **model_args): super().__init__() # attributes - self._in_feat = model_args['num_feat'] - self._hidden_dim = model_args['num_hidden'] - self._node_dim = model_args['node_hidden'] - self._forecast_dim = 256 + self._in_feat = model_args['num_feat'] + self._hidden_dim = model_args['num_hidden'] + self._node_dim = model_args['node_hidden'] + self._forecast_dim = 256 self._output_hidden = 512 - self._output_dim = model_args['seq_length'] + self._output_dim = model_args['seq_length'] - self._num_nodes = model_args['num_nodes'] - self._k_s = model_args['k_s'] - self._k_t = model_args['k_t'] - self._num_layers = 5 + self._num_nodes = model_args['num_nodes'] + self._k_s = model_args['k_s'] + self._k_t = model_args['k_t'] + self._num_layers = 5 - model_args['use_pre'] = False - model_args['dy_graph'] = True + model_args['use_pre'] = False + model_args['dy_graph'] = True model_args['sta_graph'] = True - self._model_args = model_args + self._model_args = model_args # start embedding layer - self.embedding = nn.Linear(self._in_feat, self._hidden_dim) + self.embedding = nn.Linear(self._in_feat, self._hidden_dim) # time embedding - self.T_i_D_emb = nn.Parameter(torch.empty(288, model_args['time_emb_dim'])) - self.D_i_W_emb = nn.Parameter(torch.empty(7, model_args['time_emb_dim'])) + self.T_i_D_emb = nn.Parameter( + torch.empty(288, model_args['time_emb_dim'])) + self.D_i_W_emb = 
nn.Parameter( + torch.empty(7, model_args['time_emb_dim'])) # Decoupled Spatial Temporal Layer - self.layers = nn.ModuleList([DecoupleLayer(self._hidden_dim, fk_dim=self._forecast_dim, first=True, **model_args)]) + self.layers = nn.ModuleList([DecoupleLayer( + self._hidden_dim, fk_dim=self._forecast_dim, first=True, **model_args)]) for _ in range(self._num_layers - 1): - self.layers.append(DecoupleLayer(self._hidden_dim, fk_dim=self._forecast_dim, **model_args)) + self.layers.append(DecoupleLayer( + self._hidden_dim, fk_dim=self._forecast_dim, **model_args)) # dynamic and static hidden graph constructor if model_args['dy_graph']: - self.dynamic_graph_constructor = DynamicGraphConstructor(**model_args) - + self.dynamic_graph_constructor = DynamicGraphConstructor( + **model_args) + # node embeddings - self.node_emb_u = nn.Parameter(torch.empty(self._num_nodes, self._node_dim)) - self.node_emb_d = nn.Parameter(torch.empty(self._num_nodes, self._node_dim)) + self.node_emb_u = nn.Parameter( + torch.empty(self._num_nodes, self._node_dim)) + self.node_emb_d = nn.Parameter( + torch.empty(self._num_nodes, self._node_dim)) # output layer - self.out_fc_1 = nn.Linear(self._forecast_dim, self._output_hidden) - self.out_fc_2 = nn.Linear(self._output_hidden, model_args['gap']) + self.out_fc_1 = nn.Linear(self._forecast_dim, self._output_hidden) + self.out_fc_2 = nn.Linear(self._output_hidden, model_args['gap']) self.reset_parameter() @@ -105,59 +116,68 @@ def _graph_constructor(self, **inputs): else: static_graph = [] if self._model_args['dy_graph']: - dynamic_graph = self.dynamic_graph_constructor(**inputs) + dynamic_graph = self.dynamic_graph_constructor(**inputs) else: - dynamic_graph = [] + dynamic_graph = [] return static_graph, dynamic_graph def _prepare_inputs(self, X): - num_feat = self._model_args['num_feat'] + num_feat = self._model_args['num_feat'] # node embeddings - node_emb_u = self.node_emb_u # [N, d] - node_emb_d = self.node_emb_d # [N, d] + node_emb_u = self.node_emb_u # [N, d] + node_emb_d = self.node_emb_d # [N, d] # time slot embedding - T_i_D = self.T_i_D_emb[(X[:, :, :, num_feat] * 288).type(torch.LongTensor)] # [B, L, N, d] - D_i_W = self.D_i_W_emb[(X[:, :, :, num_feat+1]).type(torch.LongTensor)] # [B, L, N, d] + # [B, L, N, d] + T_i_D = self.T_i_D_emb[(X[:, :, :, num_feat] * + 288).type(torch.LongTensor)] + # [B, L, N, d] + D_i_W = self.D_i_W_emb[(X[:, :, :, num_feat+1]).type(torch.LongTensor)] # traffic signals X = X[:, :, :, :num_feat] return X, node_emb_u, node_emb_d, T_i_D, D_i_W - def forward(self, history_data, **kwargs): - r""" + def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, batch_seen: int, epoch: int, train: bool, **kwargs) -> torch.Tensor: + """ Args: - X (Tensor): Input data with shape: [B, L, N, C] + history_data (Tensor): Input data with shape: [B, L, N, C] + Returns: - + torch.Tensor: outputs with shape [B, L, N, C] """ + X = history_data # ==================== Prepare Input Data ==================== # - X, E_u, E_d, T_D, D_W = self._prepare_inputs(X) + X, E_u, E_d, T_D, D_W = self._prepare_inputs(X) # ========================= Construct Graphs ========================== # - static_graph, dynamic_graph = self._graph_constructor(E_u=E_u, E_d=E_d, X=X, T_D=T_D, D_W=D_W) + static_graph, dynamic_graph = self._graph_constructor( + E_u=E_u, E_d=E_d, X=X, T_D=T_D, D_W=D_W) # Start embedding layer - X = self.embedding(X) + X = self.embedding(X) spa_forecast_hidden_list = [] tem_forecast_hidden_list = [] tem_backcast_seq_res = X for index, layer in 
enumerate(self.layers): - tem_backcast_seq_res, spa_forecast_hidden, tem_forecast_hidden = layer(tem_backcast_seq_res, dynamic_graph, static_graph, E_u, E_d, T_D, D_W) + tem_backcast_seq_res, spa_forecast_hidden, tem_forecast_hidden = layer( + tem_backcast_seq_res, dynamic_graph, static_graph, E_u, E_d, T_D, D_W) spa_forecast_hidden_list.append(spa_forecast_hidden) tem_forecast_hidden_list.append(tem_forecast_hidden) # Output Layer spa_forecast_hidden = sum(spa_forecast_hidden_list) tem_forecast_hidden = sum(tem_forecast_hidden_list) - forecast_hidden = spa_forecast_hidden + tem_forecast_hidden - + forecast_hidden = spa_forecast_hidden + tem_forecast_hidden + # regression layer - forecast = self.out_fc_2(F.relu(self.out_fc_1(F.relu(forecast_hidden)))) - forecast = forecast.transpose(1,2).contiguous().view(forecast.shape[0], forecast.shape[2], -1) + forecast = self.out_fc_2( + F.relu(self.out_fc_1(F.relu(forecast_hidden)))) + forecast = forecast.transpose(1, 2).contiguous().view( + forecast.shape[0], forecast.shape[2], -1) # reshape forecast = forecast.transpose(1, 2).unsqueeze(-1) diff --git a/basicts/archs/D2STGNN_arch/Decouple/estimation_gate.py b/basicts/archs/arch_zoo/d2stgnn_arch/decouple/estimation_gate.py similarity index 100% rename from basicts/archs/D2STGNN_arch/Decouple/estimation_gate.py rename to basicts/archs/arch_zoo/d2stgnn_arch/decouple/estimation_gate.py diff --git a/basicts/archs/D2STGNN_arch/Decouple/residual_decomp.py b/basicts/archs/arch_zoo/d2stgnn_arch/decouple/residual_decomp.py similarity index 100% rename from basicts/archs/D2STGNN_arch/Decouple/residual_decomp.py rename to basicts/archs/arch_zoo/d2stgnn_arch/decouple/residual_decomp.py diff --git a/basicts/archs/arch_zoo/d2stgnn_arch/difusion_block/__init__.py b/basicts/archs/arch_zoo/d2stgnn_arch/difusion_block/__init__.py new file mode 100644 index 00000000..94000120 --- /dev/null +++ b/basicts/archs/arch_zoo/d2stgnn_arch/difusion_block/__init__.py @@ -0,0 +1 @@ +from ..difusion_block.dif_block import DifBlock diff --git a/basicts/archs/arch_zoo/d2stgnn_arch/difusion_block/dif_block.py b/basicts/archs/arch_zoo/d2stgnn_arch/difusion_block/dif_block.py new file mode 100644 index 00000000..c360f3c6 --- /dev/null +++ b/basicts/archs/arch_zoo/d2stgnn_arch/difusion_block/dif_block.py @@ -0,0 +1,34 @@ +import torch.nn as nn + +from ..decouple.residual_decomp import ResidualDecomp +from .forecast import Forecast +from .dif_model import STLocalizedConv + + +class DifBlock(nn.Module): + def __init__(self, hidden_dim, fk_dim=256, use_pre=None, dy_graph=None, sta_graph=None, **model_args): + super().__init__() + self.pre_defined_graph = model_args['adjs'] + self.localized_st_conv = STLocalizedConv(hidden_dim, pre_defined_graph=self.pre_defined_graph, \ + use_pre=use_pre, dy_graph=dy_graph, sta_graph=sta_graph, **model_args) + # sub and norm + self.residual_decompose = ResidualDecomp([-1, -1, -1, hidden_dim]) + # forecast + self.forecast_branch = Forecast( + hidden_dim, fk_dim=fk_dim, **model_args) + # backcast + self.backcast_branch = nn.Linear(hidden_dim, hidden_dim) + + def forward(self, X, X_spa, dynamic_graph, static_graph): + Z = self.localized_st_conv(X_spa, dynamic_graph, static_graph) + # forecast branch + forecast_hidden = self.forecast_branch( + X_spa, Z, self.localized_st_conv, dynamic_graph, static_graph) + # backcast branch + backcast_seq = self.backcast_branch(Z) + # Residual Decomposition + backcast_seq = backcast_seq + X = X[:, -backcast_seq.shape[1]:, :, :] + backcast_seq_res = self.residual_decompose(X, 
backcast_seq) + + return backcast_seq_res, forecast_hidden diff --git a/basicts/archs/arch_zoo/d2stgnn_arch/difusion_block/dif_model.py b/basicts/archs/arch_zoo/d2stgnn_arch/difusion_block/dif_model.py new file mode 100644 index 00000000..dff8b707 --- /dev/null +++ b/basicts/archs/arch_zoo/d2stgnn_arch/difusion_block/dif_model.py @@ -0,0 +1,103 @@ +import torch +import torch.nn as nn + + +class STLocalizedConv(nn.Module): + def __init__(self, hidden_dim, pre_defined_graph=None, use_pre=None, dy_graph=None, sta_graph=None, **model_args): + super().__init__() + # gated temporal conv + self.k_s = model_args['k_s'] + self.k_t = model_args['k_t'] + self.hidden_dim = hidden_dim + + # graph conv + self.pre_defined_graph = pre_defined_graph + self.use_predefined_graph = use_pre + self.use_dynamic_hidden_graph = dy_graph + self.use_static__hidden_graph = sta_graph + + self.support_len = len(self.pre_defined_graph) + \ + int(dy_graph) + int(sta_graph) + self.num_matric = (int(use_pre) * len(self.pre_defined_graph) + len( + self.pre_defined_graph) * int(dy_graph) + int(sta_graph)) * self.k_s + 1 + self.dropout = nn.Dropout(model_args['dropout']) + self.pre_defined_graph = self.get_graph(self.pre_defined_graph) + + self.fc_list_updt = nn.Linear( + self.k_t * hidden_dim, self.k_t * hidden_dim, bias=False) + self.gcn_updt = nn.Linear( + self.hidden_dim*self.num_matric, self.hidden_dim) + + # others + self.bn = nn.BatchNorm2d(self.hidden_dim) + self.activation = nn.ReLU() + + def gconv(self, support, X_k, X_0): + out = [X_0] + for graph in support: + if len(graph.shape) == 2: # static or predefined graph + pass + else: + graph = graph.unsqueeze(1) + H_k = torch.matmul(graph, X_k) + out.append(H_k) + out = torch.cat(out, dim=-1) + out = self.gcn_updt(out) + out = self.dropout(out) + return out + + def get_graph(self, support): + # Only used for static graphs (the static hidden graph and the predefined graph); dynamic graphs are handled elsewhere.
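+ # Example (assuming k_s = 2 and a single support graph A): graph_ordered becomes + # [A * mask, (A @ A) * mask], i.e. the 1-hop and 2-hop matrices with self-loops masked out.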
+ graph_ordered = [] + mask = 1 - torch.eye(support[0].shape[0]).to(support[0].device) + for graph in support: + k_1_order = graph # 1 order + graph_ordered.append(k_1_order * mask) + # e.g., order = 3, k=[2, 3]; order = 2, k=[2] + for k in range(2, self.k_s+1): + k_1_order = torch.matmul(graph, k_1_order) + graph_ordered.append(k_1_order * mask) + # get st-localized graph + st_local_graph = [] + for graph in graph_ordered: + graph = graph.unsqueeze(-2).expand(-1, self.k_t, -1) + graph = graph.reshape( + graph.shape[0], graph.shape[1] * graph.shape[2]) + # [num_nodes, kernel_size x num_nodes] + st_local_graph.append(graph) + # [order, num_nodes, kernel_size x num_nodes] + return st_local_graph + + def forward(self, X, dynamic_graph, static_graph): + # X: [bs, seq, nodes, feat] + # [bs, seq, num_nodes, ks, num_feat] + X = X.unfold(1, self.k_t, 1).permute(0, 1, 2, 4, 3) + # note that seq_len changes after unfolding + batch_size, seq_len, num_nodes, kernel_size, num_feat = X.shape + + # support + support = [] + # predefined graph + if self.use_predefined_graph: + support = support + self.pre_defined_graph + # dynamic graph + if self.use_dynamic_hidden_graph: + # the k-order expansion is already calculated in the dynamic_graph_constructor component + support = support + dynamic_graph + # predefined graphs and static hidden graphs + if self.use_static__hidden_graph: + support = support + self.get_graph(static_graph) + + # parallelize + X = X.reshape(batch_size, seq_len, num_nodes, kernel_size * num_feat) + # batch_size, seq_len, num_nodes, kernel_size * hidden_dim + out = self.fc_list_updt(X) + out = self.activation(out) + out = out.view(batch_size, seq_len, num_nodes, kernel_size, num_feat) + X_0 = torch.mean(out, dim=-2) + # batch_size, seq_len, kernel_size x num_nodes, hidden_dim + X_k = out.transpose(-3, -2).reshape(batch_size, + seq_len, kernel_size*num_nodes, num_feat) + # Nx3N 3NxD -> NxD: batch_size, seq_len, num_nodes, hidden_dim + hidden = self.gconv(support, X_k, X_0) + return hidden diff --git a/basicts/archs/D2STGNN_arch/DiffusionBlock/forecast.py b/basicts/archs/arch_zoo/d2stgnn_arch/difusion_block/forecast.py similarity index 74% rename from basicts/archs/D2STGNN_arch/DiffusionBlock/forecast.py rename to basicts/archs/arch_zoo/d2stgnn_arch/difusion_block/forecast.py index 05286356..fa05190f 100644 --- a/basicts/archs/D2STGNN_arch/DiffusionBlock/forecast.py +++ b/basicts/archs/arch_zoo/d2stgnn_arch/difusion_block/forecast.py @@ -1,17 +1,18 @@ import torch import torch.nn as nn + class Forecast(nn.Module): def __init__(self, hidden_dim, fk_dim=None, **model_args): super().__init__() self.k_t = model_args['k_t'] self.output_seq_len = model_args['seq_length'] - self.forecast_fc = nn.Linear(hidden_dim, fk_dim) - self.model_args = model_args + self.forecast_fc = nn.Linear(hidden_dim, fk_dim) + self.model_args = model_args def forward(self, X, H, st_l_conv, dynamic_graph, static_graph): [B, seq_len_remain, B, D] = H.shape - [B, seq_len_input , B, D] = X.shape + [B, seq_len_input, B, D] = X.shape predict = [] history = X @@ -20,10 +21,10 @@ def forward(self, X, H, st_l_conv, dynamic_graph, static_graph): _1 = predict[-self.k_t:] if len(_1) < self.k_t: sub = self.k_t - len(_1) - _2 = history[:, -sub:, :, :] - _1 = torch.cat([_2] + _1, dim=1) + _2 = history[:, -sub:, :, :] + _1 = torch.cat([_2] + _1, dim=1) else: - _1 = torch.cat(_1, dim=1) + _1 = torch.cat(_1, dim=1) predict.append(st_l_conv(_1, dynamic_graph, static_graph)) predict = torch.cat(predict, dim=1) predict = self.forecast_fc(predict) diff --git
a/basicts/archs/D2STGNN_arch/DynamicGraphConv/DyGraphCons.py b/basicts/archs/arch_zoo/d2stgnn_arch/dynamic_graph_conv/dy_graph_conv.py similarity index 56% rename from basicts/archs/D2STGNN_arch/DynamicGraphConv/DyGraphCons.py rename to basicts/archs/arch_zoo/d2stgnn_arch/dynamic_graph_conv/dy_graph_conv.py index 675ae44d..fe7c585e 100644 --- a/basicts/archs/D2STGNN_arch/DynamicGraphConv/DyGraphCons.py +++ b/basicts/archs/arch_zoo/d2stgnn_arch/dynamic_graph_conv/dy_graph_conv.py @@ -1,5 +1,7 @@ import torch.nn as nn -from basicts.archs.D2STGNN_arch.DynamicGraphConv.Utils import * + +from .utils import * + class DynamicGraphConstructor(nn.Module): def __init__(self, **model_args): @@ -7,25 +9,29 @@ def __init__(self, **model_args): # model args self.k_s = model_args['k_s'] # spatial order self.k_t = model_args['k_t'] # temporal kernel size - self.hidden_dim = model_args['num_hidden'] # hidden dimension of - self.node_dim = model_args['node_hidden'] # trainable node embedding dimension + # hidden dimension of + self.hidden_dim = model_args['num_hidden'] + # trainable node embedding dimension + self.node_dim = model_args['node_hidden'] - self.distance_function = DistanceFunction(**model_args) - self.mask = Mask(**model_args) - self.normalizer = Normalizer() - self.multi_order = MultiOrder(order=self.k_s) + self.distance_function = DistanceFunction(**model_args) + self.mask = Mask(**model_args) + self.normalizer = Normalizer() + self.multi_order = MultiOrder(order=self.k_s) def st_localization(self, graph_ordered): st_local_graph = [] for modality_i in graph_ordered: for k_order_graph in modality_i: - k_order_graph = k_order_graph.unsqueeze(-2).expand(-1, -1, self.k_t, -1) - k_order_graph = k_order_graph.reshape(k_order_graph.shape[0], k_order_graph.shape[1], k_order_graph.shape[2] * k_order_graph.shape[3]) + k_order_graph = k_order_graph.unsqueeze( + -2).expand(-1, -1, self.k_t, -1) + k_order_graph = k_order_graph.reshape( + k_order_graph.shape[0], k_order_graph.shape[1], k_order_graph.shape[2] * k_order_graph.shape[3]) st_local_graph.append(k_order_graph) return st_local_graph def forward(self, **inputs): - X = inputs['X'] + X = inputs['X'] E_d = inputs['E_d'] E_u = inputs['E_u'] T_D = inputs['T_D'] @@ -37,8 +43,8 @@ def forward(self, **inputs): # normalization dist_mx = self.normalizer(dist_mx) # multi order - mul_mx = self.multi_order(dist_mx) + mul_mx = self.multi_order(dist_mx) # spatial temporal localization dynamic_graphs = self.st_localization(mul_mx) - + return dynamic_graphs diff --git a/basicts/archs/arch_zoo/d2stgnn_arch/dynamic_graph_conv/utils/__init__.py b/basicts/archs/arch_zoo/d2stgnn_arch/dynamic_graph_conv/utils/__init__.py new file mode 100644 index 00000000..f3c93494 --- /dev/null +++ b/basicts/archs/arch_zoo/d2stgnn_arch/dynamic_graph_conv/utils/__init__.py @@ -0,0 +1,3 @@ +from .mask import * +from .normalizer import * +from .distance import * diff --git a/basicts/archs/D2STGNN_arch/DynamicGraphConv/Utils/distance.py b/basicts/archs/arch_zoo/d2stgnn_arch/dynamic_graph_conv/utils/distance.py similarity index 100% rename from basicts/archs/D2STGNN_arch/DynamicGraphConv/Utils/distance.py rename to basicts/archs/arch_zoo/d2stgnn_arch/dynamic_graph_conv/utils/distance.py diff --git a/basicts/archs/D2STGNN_arch/DynamicGraphConv/Utils/mask.py b/basicts/archs/arch_zoo/d2stgnn_arch/dynamic_graph_conv/utils/mask.py similarity index 100% rename from basicts/archs/D2STGNN_arch/DynamicGraphConv/Utils/mask.py rename to 
basicts/archs/arch_zoo/d2stgnn_arch/dynamic_graph_conv/utils/mask.py diff --git a/basicts/archs/D2STGNN_arch/DynamicGraphConv/Utils/normalizer.py b/basicts/archs/arch_zoo/d2stgnn_arch/dynamic_graph_conv/utils/normalizer.py similarity index 100% rename from basicts/archs/D2STGNN_arch/DynamicGraphConv/Utils/normalizer.py rename to basicts/archs/arch_zoo/d2stgnn_arch/dynamic_graph_conv/utils/normalizer.py diff --git a/basicts/archs/arch_zoo/d2stgnn_arch/inherent_block/__init__.py b/basicts/archs/arch_zoo/d2stgnn_arch/inherent_block/__init__.py new file mode 100644 index 00000000..521c8c90 --- /dev/null +++ b/basicts/archs/arch_zoo/d2stgnn_arch/inherent_block/__init__.py @@ -0,0 +1 @@ +from .inh_block import InhBlock diff --git a/basicts/archs/D2STGNN_arch/InherentBlock/forecast.py b/basicts/archs/arch_zoo/d2stgnn_arch/inherent_block/forecast.py similarity index 65% rename from basicts/archs/D2STGNN_arch/InherentBlock/forecast.py rename to basicts/archs/arch_zoo/d2stgnn_arch/inherent_block/forecast.py index 4f7fcf1d..e9224a46 100644 --- a/basicts/archs/D2STGNN_arch/InherentBlock/forecast.py +++ b/basicts/archs/arch_zoo/d2stgnn_arch/inherent_block/forecast.py @@ -1,31 +1,32 @@ import torch import torch.nn as nn + class Forecast(nn.Module): def __init__(self, hidden_dim, fk_dim, **model_args): super().__init__() self.output_seq_len = model_args['seq_length'] - self.model_args = model_args + self.model_args = model_args - self.forecast_fc = nn.Linear(hidden_dim, fk_dim) + self.forecast_fc = nn.Linear(hidden_dim, fk_dim) def forward(self, X, RNN_H, Z, transformer_layer, rnn_layer, pe): - [B, L, N, D] = X.shape - [L, B_N, D] = RNN_H.shape - [L, B_N, D] = Z.shape + [B, L, N, D] = X.shape + [L, B_N, D] = RNN_H.shape + [L, B_N, D] = Z.shape predict = [Z[-1, :, :].unsqueeze(0)] for _ in range(int(self.output_seq_len / self.model_args['gap'])-1): # RNN - _gru = rnn_layer.gru_cell(predict[-1][0], RNN_H[-1]).unsqueeze(0) - RNN_H = torch.cat([RNN_H, _gru], dim=0) + _gru = rnn_layer.gru_cell(predict[-1][0], RNN_H[-1]).unsqueeze(0) + RNN_H = torch.cat([RNN_H, _gru], dim=0) # Positional Encoding if pe is not None: RNN_H = pe(RNN_H) # Transformer - _Z = transformer_layer(_gru, K=RNN_H, V=RNN_H) + _Z = transformer_layer(_gru, K=RNN_H, V=RNN_H) predict.append(_Z) - + predict = torch.cat(predict, dim=0) predict = predict.reshape(-1, B, N, D) predict = predict.transpose(0, 1) diff --git a/basicts/archs/arch_zoo/d2stgnn_arch/inherent_block/inh_block.py b/basicts/archs/arch_zoo/d2stgnn_arch/inherent_block/inh_block.py new file mode 100644 index 00000000..431e1e24 --- /dev/null +++ b/basicts/archs/arch_zoo/d2stgnn_arch/inherent_block/inh_block.py @@ -0,0 +1,72 @@ +import math +import torch +import torch.nn as nn + +from ..decouple.residual_decomp import ResidualDecomp +from .inh_model import RNNLayer, TransformerLayer +from .forecast import Forecast + + +class PositionalEncoding(nn.Module): + def __init__(self, d_model, dropout=None, max_len: int = 5000): + super().__init__() + self.dropout = nn.Dropout(p=dropout) + position = torch.arange(max_len).unsqueeze(1) + div_term = torch.exp(torch.arange(0, d_model, 2) + * (-math.log(10000.0) / d_model)) + pe = torch.zeros(max_len, 1, d_model) + pe[:, 0, 0::2] = torch.sin(position * div_term) + pe[:, 0, 1::2] = torch.cos(position * div_term) + self.register_buffer('pe', pe) + + def forward(self, X): + X = X + self.pe[:X.size(0)] + X = self.dropout(X) + return X + + +class InhBlock(nn.Module): + def __init__(self, hidden_dim, num_heads=4, bias=True, fk_dim=256, first=None, 
**model_args): + super().__init__() + self.num_feat = hidden_dim + self.hidden_dim = hidden_dim + + if first: + self.pos_encoder = PositionalEncoding( + hidden_dim, model_args['dropout']) + else: + self.pos_encoder = None + self.rnn_layer = RNNLayer(hidden_dim, model_args['dropout']) + self.transformer_layer = TransformerLayer( + hidden_dim, num_heads, model_args['dropout'], bias) + # forecast + self.forecast_block = Forecast(hidden_dim, fk_dim, **model_args) + # backcast + self.backcast_fc = nn.Linear(hidden_dim, hidden_dim) + # sub residual + self.sub_and_norm = ResidualDecomp([-1, -1, -1, hidden_dim]) + + def forward(self, X): + [batch_size, seq_len, num_nodes, num_feat] = X.shape + # Temporal Model + # RNN + RNN_H_raw = self.rnn_layer(X) + # Positional Encoding + if self.pos_encoder is not None: + RNN_H = self.pos_encoder(RNN_H_raw) + else: + RNN_H = RNN_H_raw + # MultiHead Self Attention + Z = self.transformer_layer(RNN_H, RNN_H, RNN_H) + + # forecast branch + forecast_hidden = self.forecast_block( + X, RNN_H_raw, Z, self.transformer_layer, self.rnn_layer, self.pos_encoder) + + # backcast branch + Z = Z.reshape(seq_len, batch_size, num_nodes, num_feat) + Z = Z.transpose(0, 1) + backcast_seq = self.backcast_fc(Z) + backcast_seq_res = self.sub_and_norm(X, backcast_seq) + + return backcast_seq_res, forecast_hidden diff --git a/basicts/archs/arch_zoo/d2stgnn_arch/inherent_block/inh_model.py b/basicts/archs/arch_zoo/d2stgnn_arch/inherent_block/inh_model.py new file mode 100644 index 00000000..ce8e2799 --- /dev/null +++ b/basicts/archs/arch_zoo/d2stgnn_arch/inherent_block/inh_model.py @@ -0,0 +1,37 @@ +import torch as th +import torch.nn as nn +from torch.nn import MultiheadAttention + + +class RNNLayer(nn.Module): + def __init__(self, hidden_dim, dropout=None): + super().__init__() + self.hidden_dim = hidden_dim + self.gru_cell = nn.GRUCell(hidden_dim, hidden_dim) + self.dropout = nn.Dropout(dropout) + + def forward(self, X): + [batch_size, seq_len, num_nodes, hidden_dim] = X.shape + X = X.transpose(1, 2).reshape( + batch_size * num_nodes, seq_len, hidden_dim) + hx = th.zeros_like(X[:, 0, :]) + output = [] + for _ in range(X.shape[1]): + hx = self.gru_cell(X[:, _, :], hx) + output.append(hx) + output = th.stack(output, dim=0) + output = self.dropout(output) + return output + + +class TransformerLayer(nn.Module): + def __init__(self, hidden_dim, num_heads=4, dropout=None, bias=True): + super().__init__() + self.multi_head_self_attention = MultiheadAttention( + hidden_dim, num_heads, dropout=dropout, bias=bias) + self.dropout = nn.Dropout(dropout) + + def forward(self, X, K, V): + Z = self.multi_head_self_attention(X, K, V)[0] + Z = self.dropout(Z) + return Z diff --git a/basicts/archs/arch_zoo/dcrnn_arch/__init__.py b/basicts/archs/arch_zoo/dcrnn_arch/__init__.py new file mode 100644 index 00000000..0307d9f8 --- /dev/null +++ b/basicts/archs/arch_zoo/dcrnn_arch/__init__.py @@ -0,0 +1,3 @@ +from .dcrnn_arch import DCRNN + +__all__ = ['DCRNN'] diff --git a/basicts/archs/DCRNN_arch/DCRNN_arch.py b/basicts/archs/arch_zoo/dcrnn_arch/dcrnn_arch.py similarity index 56% rename from basicts/archs/DCRNN_arch/DCRNN_arch.py rename to basicts/archs/arch_zoo/dcrnn_arch/dcrnn_arch.py index d1ec8029..759be734 100644 --- a/basicts/archs/DCRNN_arch/DCRNN_arch.py +++ b/basicts/archs/arch_zoo/dcrnn_arch/dcrnn_arch.py @@ -1,53 +1,41 @@ import torch +from torch import nn import numpy as np -import torch.nn as nn -from basicts.archs.DCRNN_arch.DCRNN_cell import DCGRUCell -from basicts.archs.registry import 
ARCH_REGISTRY -""" - Paper: Diffusion Convolutional Recurrent Neural Network: Data-Driven Traffic Forecasting - Ref Official Code: - https://github.com/chnsh/DCRNN_PyTorch/blob/pytorch_scratch/model/pytorch/dcrnn_cell.py, - https://github.com/chnsh/DCRNN_PyTorch/blob/pytorch_scratch/model/pytorch/dcrnn_model.py -""" +from .dcrnn_cell import DCGRUCell + def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) + class Seq2SeqAttrs: def __init__(self, adj_mx, **model_kwargs): self.adj_mx = adj_mx - self.max_diffusion_step = int(model_kwargs.get('max_diffusion_step', 2)) - self.cl_decay_steps = int(model_kwargs.get('cl_decay_steps', 1000)) - self.filter_type = model_kwargs.get('filter_type', 'laplacian') - self.num_nodes = int(model_kwargs.get('num_nodes', 1)) - self.num_rnn_layers = int(model_kwargs.get('num_rnn_layers', 1)) - self.rnn_units = int(model_kwargs.get('rnn_units')) + self.max_diffusion_step = int( + model_kwargs.get("max_diffusion_step", 2)) + self.cl_decay_steps = int(model_kwargs.get("cl_decay_steps", 1000)) + self.filter_type = model_kwargs.get("filter_type", "laplacian") + self.num_nodes = int(model_kwargs.get("num_nodes", 1)) + self.num_rnn_layers = int(model_kwargs.get("num_rnn_layers", 1)) + self.rnn_units = int(model_kwargs.get("rnn_units")) self.hidden_state_size = self.num_nodes * self.rnn_units + class EncoderModel(nn.Module, Seq2SeqAttrs): def __init__(self, adj_mx, **model_kwargs): nn.Module.__init__(self) Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs) - self.input_dim = int(model_kwargs.get('input_dim', 1)) - self.seq_len = int(model_kwargs.get('seq_len')) # for the encoder + self.input_dim = int(model_kwargs.get("input_dim", 1)) + self.seq_len = int(model_kwargs.get("seq_len")) # for the encoder self.dcgru_layers = nn.ModuleList( [DCGRUCell(self.rnn_units, adj_mx, self.max_diffusion_step, self.num_nodes) for _ in range(self.num_rnn_layers)]) - + def forward(self, inputs, hidden_state=None): - """ - Encoder forward pass. 
- - :param inputs: shape (batch_size, self.num_nodes * self.input_dim) - :param hidden_state: (num_layers, batch_size, self.hidden_state_size) - optional, zeros if not provided - :return: output: # shape (batch_size, self.hidden_state_size) - hidden_state # shape (num_layers, batch_size, self.hidden_state_size) - (lower indices mean lower layers) - """ batch_size, _ = inputs.size() if hidden_state is None: - hidden_state = torch.zeros((self.num_rnn_layers, batch_size, self.hidden_state_size)).to(inputs.device) + hidden_state = torch.zeros( + (self.num_rnn_layers, batch_size, self.hidden_state_size)).to(inputs.device) hidden_states = [] output = inputs for layer_num, dcgru_layer in enumerate(self.dcgru_layers): @@ -55,7 +43,8 @@ def forward(self, inputs, hidden_state=None): hidden_states.append(next_hidden_state) output = next_hidden_state - return output, torch.stack(hidden_states) # runs in O(num_layers) so not too slow + # runs in O(num_layers) so not too slow + return output, torch.stack(hidden_states) class DecoderModel(nn.Module, Seq2SeqAttrs): @@ -63,23 +52,13 @@ def __init__(self, adj_mx, **model_kwargs): # super().__init__(is_training, adj_mx, **model_kwargs) nn.Module.__init__(self) Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs) - self.output_dim = int(model_kwargs.get('output_dim', 1)) - self.horizon = int(model_kwargs.get('horizon', 1)) # for the decoder + self.output_dim = int(model_kwargs.get("output_dim", 1)) + self.horizon = int(model_kwargs.get("horizon", 1)) # for the decoder self.projection_layer = nn.Linear(self.rnn_units, self.output_dim) self.dcgru_layers = nn.ModuleList( [DCGRUCell(self.rnn_units, adj_mx, self.max_diffusion_step, self.num_nodes) for _ in range(self.num_rnn_layers)]) - + def forward(self, inputs, hidden_state=None): - """ - Decoder forward pass. 
- - :param inputs: shape (batch_size, self.num_nodes * self.output_dim) - :param hidden_state: (num_layers, batch_size, self.hidden_state_size) - optional, zeros if not provided - :return: output: # shape (batch_size, self.num_nodes * self.output_dim) - hidden_state # shape (num_layers, batch_size, self.hidden_state_size) - (lower indices mean lower layers) - """ hidden_states = [] output = inputs for layer_num, dcgru_layer in enumerate(self.dcgru_layers): @@ -93,49 +72,48 @@ def forward(self, inputs, hidden_state=None): return output, torch.stack(hidden_states) -@ARCH_REGISTRY.register() class DCRNN(nn.Module, Seq2SeqAttrs): + """ + Paper: Diffusion Convolutional Recurrent Neural Network: Data-Driven Traffic Forecasting + Link: https://arxiv.org/abs/1707.01926 + Codes are modified from the official repo: + https://github.com/chnsh/DCRNN_PyTorch/blob/pytorch_scratch/model/pytorch/dcrnn_cell.py, + https://github.com/chnsh/DCRNN_PyTorch/blob/pytorch_scratch/model/pytorch/dcrnn_model.py + """ + def __init__(self, adj_mx, **model_kwargs): super().__init__() Seq2SeqAttrs.__init__(self, adj_mx, **model_kwargs) self.encoder_model = EncoderModel(adj_mx, **model_kwargs) self.decoder_model = DecoderModel(adj_mx, **model_kwargs) - self.cl_decay_steps = int(model_kwargs.get('cl_decay_steps', 2000)) - self.use_curriculum_learning = bool(model_kwargs.get('use_curriculum_learning', False)) + self.cl_decay_steps = int(model_kwargs.get("cl_decay_steps", 2000)) + self.use_curriculum_learning = bool( + model_kwargs.get("use_curriculum_learning", False)) def _compute_sampling_threshold(self, batches_seen): return self.cl_decay_steps / ( - self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps)) + self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps)) def encoder(self, inputs): - """ - encoder forward pass on t time steps - :param inputs: shape (seq_len, batch_size, num_sensor * input_dim) - :return: encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size) - """ encoder_hidden_state = None for t in range(self.encoder_model.seq_len): - _, encoder_hidden_state = self.encoder_model(inputs[t], encoder_hidden_state) + _, encoder_hidden_state = self.encoder_model( + inputs[t], encoder_hidden_state) return encoder_hidden_state def decoder(self, encoder_hidden_state, labels=None, batches_seen=None): - """ - Decoder forward pass - :param encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size) - :param labels: (self.horizon, batch_size, self.num_nodes * self.output_dim) [optional, not exist for inference] - :param batches_seen: global step [optional, not exist for inference] - :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim) - """ batch_size = encoder_hidden_state.size(1) - go_symbol = torch.zeros((batch_size, self.num_nodes * self.decoder_model.output_dim)).to(encoder_hidden_state.device) + go_symbol = torch.zeros( + (batch_size, self.num_nodes * self.decoder_model.output_dim)).to(encoder_hidden_state.device) decoder_hidden_state = encoder_hidden_state decoder_input = go_symbol outputs = [] for t in range(self.decoder_model.horizon): - decoder_output, decoder_hidden_state = self.decoder_model(decoder_input, decoder_hidden_state) + decoder_output, decoder_hidden_state = self.decoder_model( + decoder_input, decoder_hidden_state) decoder_input = decoder_output outputs.append(decoder_output) if self.training and self.use_curriculum_learning: @@ -146,7 +124,7 @@ def decoder(self, encoder_hidden_state, labels=None, batches_seen=None): return 
outputs

     def forward(self, history_data: torch.Tensor, future_data: torch.Tensor = None, batch_seen: int = None, **kwargs) -> torch.Tensor:
-        """feedforward function of DCRNN.
+        """Feedforward function of DCRNN.
 
         Args:
             history_data (torch.Tensor): history data with shape [L, B, N*C]
@@ -157,19 +135,32 @@ def forward(self, history_data: torch.Tensor, future_data: torch.Tensor = None,
             torch.Tensor: prediction with shape [L, B, N*C_out]
         """
 
+        # reshape data
+        batch_size, length, num_nodes, channels = history_data.shape
+        history_data = history_data.reshape(batch_size, length, num_nodes * channels)      # [B, L, N*C]
+        history_data = history_data.transpose(0, 1)         # [L, B, N*C]
+
+        if future_data is not None:
+            batch_size, length, num_nodes, channels = future_data.shape
+            future_data = future_data.reshape(batch_size, length, num_nodes * channels)      # [B, L, N*C]
+            future_data = future_data.transpose(0, 1)         # [L, B, N*C]
+
+        # DCRNN
         encoder_hidden_state = self.encoder(history_data)
-        outputs = self.decoder(encoder_hidden_state, future_data, batches_seen=batch_seen)      # [L, B, N*C_out]
+        outputs = self.decoder(encoder_hidden_state, future_data,
+                               batches_seen=batch_seen)      # [L, B, N*C_out]
 
         # reshape to B, L, N, C
         L, B, _ = outputs.shape
-        outputs = outputs.transpose(0, 1)  #[B, L, N*C_out]
-        outputs = outputs.view(B, L, self.num_nodes, self.decoder_model.output_dim)
+        outputs = outputs.transpose(0, 1)  # [B, L, N*C_out]
+        outputs = outputs.view(B, L, self.num_nodes,
+                               self.decoder_model.output_dim)
 
         if not self.training:
-            assert future_data == None, 'Future data should not be visible when validating/testing.'
+            assert future_data is None, "Future data should not be visible when validating/testing."
         else:
             pass
-
+
         if batch_seen == 0:
             print("Warning: decoder only takes the first dimension as groundtruth.")
             print("Parameter Number: {0}".format(count_parameters(self)))
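For intuition, the _compute_sampling_threshold above implements an inverse-sigmoid decay for scheduled sampling; a minimal, self-contained sketch (the step values are illustrative, cl_decay_steps=2000 matches the default used above):

# Scheduled-sampling threshold: starts near 1.0 (decoder is fed ground truth)
# and decays toward 0.0 (decoder is fed its own previous prediction).
import numpy as np

def sampling_threshold(batches_seen: int, cl_decay_steps: int = 2000) -> float:
    return cl_decay_steps / (cl_decay_steps + np.exp(batches_seen / cl_decay_steps))

for step in (0, 5000, 10000, 20000):
    print(step, round(sampling_threshold(step), 4))  # ~1.0 at step 0, ~0.08 by step 20000
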
-""" class LayerParams: + """Layer parameters.""" + def __init__(self, rnn_network: torch.nn.Module, layer_type: str): self._rnn_network = rnn_network self._params_dict = {} @@ -22,8 +16,8 @@ def get_weights(self, shape): torch.nn.init.xavier_normal_(nn_param) self._params_dict[shape] = nn_param self._rnn_network.register_parameter( - '{}_weight_{}'.format(self._type, str(shape)), - nn_param) + '{}_weight_{}'.format(self._type, str(shape)), + nn_param) return self._params_dict[shape] def get_biases(self, length, bias_start=0.0): @@ -31,23 +25,24 @@ def get_biases(self, length, bias_start=0.0): biases = torch.nn.Parameter(torch.empty(length)) torch.nn.init.constant_(biases, bias_start) self._biases_dict[length] = biases - self._rnn_network.register_parameter('{}_biases_{}'.format(self._type, str(length)), biases) + self._rnn_network.register_parameter( + '{}_biases_{}'.format(self._type, str(length)), biases) return self._biases_dict[length] -class DCGRUCell(torch.nn.Module): - def __init__(self, num_units, adj_mx, max_diffusion_step, num_nodes, nonlinearity='tanh', use_gc_for_ru=True): - """ - :param num_units: - :param adj_mx: - :param max_diffusion_step: - :param num_nodes: - :param nonlinearity: - :param filter_type: "laplacian", "random_walk", "dual_random_walk". - :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates. - """ +class DCGRUCell(torch.nn.Module): + """ + Paper: Diffusion Convolutional Recurrent Neural Network: Data-Driven Traffic Forecasting + Link: https://arxiv.org/abs/1707.01926 + Codes are modified from the official repo: + https://github.com/chnsh/DCRNN_PyTorch/blob/pytorch_scratch/model/pytorch/dcrnn_cell.py, + https://github.com/chnsh/DCRNN_PyTorch/blob/pytorch_scratch/model/pytorch/dcrnn_model.py + Watch out the input groundtruth of decoder, which may cause bugs when you try to extend this code. + In order to train the model on multi-GPU, we send the parameter to different gpus in the feedforward process, which might hurt the efficiency. + """ + def __init__(self, num_units, adj_mx, max_diffusion_step, num_nodes, nonlinearity='tanh', use_gc_for_ru=True): super().__init__() self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu # support other nonlinearities up here? @@ -56,20 +51,13 @@ def __init__(self, num_units, adj_mx, max_diffusion_step, num_nodes, nonlinearit self._max_diffusion_step = max_diffusion_step self._use_gc_for_ru = use_gc_for_ru # for support in supports: - # self._supports.append(self._build_sparse_matrix(support)) + # self._supports.append(self._build_sparse_matrix(support)) self._supports = adj_mx self._fc_params = LayerParams(self, 'fc') self._gconv_params = LayerParams(self, 'gconv') - - def forward(self, inputs, hx): - """Gated recurrent unit (GRU) with Graph Convolution. - :param inputs: (B, num_nodes * input_dim) - :param hx: (B, num_nodes * rnn_units) - :return - - Output: A `2-D` tensor with shape `(B, num_nodes * rnn_units)`. 
- """ + def forward(self, inputs, hx): output_size = 2 * self._num_units if self._use_gc_for_ru: fn = self._gconv @@ -77,7 +65,8 @@ def forward(self, inputs, hx): fn = self._fc value = torch.sigmoid(fn(inputs, hx, output_size, bias_start=1.0)) value = torch.reshape(value, (-1, self._num_nodes, output_size)) - r, u = torch.split(tensor=value, split_size_or_sections=self._num_units, dim=-1) + r, u = torch.split( + tensor=value, split_size_or_sections=self._num_units, dim=-1) r = torch.reshape(r, (-1, self._num_nodes * self._num_units)) u = torch.reshape(u, (-1, self._num_nodes * self._num_units)) @@ -92,19 +81,20 @@ def forward(self, inputs, hx): def _concat(x, x_): x_ = x_.unsqueeze(0) return torch.cat([x, x_], dim=0) - + def _fc(self, inputs, state, output_size, bias_start=0.0): batch_size = inputs.shape[0] inputs = torch.reshape(inputs, (batch_size * self._num_nodes, -1)) state = torch.reshape(state, (batch_size * self._num_nodes, -1)) inputs_and_state = torch.cat([inputs, state], dim=-1) input_size = inputs_and_state.shape[-1] - weights = self._fc_params.get_weights((input_size, output_size)).to(inputs_and_state.device) + weights = self._fc_params.get_weights( + (input_size, output_size)).to(inputs_and_state.device) value = torch.sigmoid(torch.matmul(inputs_and_state, weights)) biases = self._fc_params.get_biases(output_size, bias_start) value += biases.to(inputs_and_state.device) return value - + def _gconv(self, inputs, state, output_size, bias_start=0.0): # Reshape input and state to (batch_size, num_nodes, input_dim/state_dim) batch_size = inputs.shape[0] @@ -115,7 +105,8 @@ def _gconv(self, inputs, state, output_size, bias_start=0.0): x = inputs_and_state x0 = x.permute(1, 2, 0) # (num_nodes, total_arg_size, batch_size) - x0 = torch.reshape(x0, shape=[self._num_nodes, input_size * batch_size]) + x0 = torch.reshape( + x0, shape=[self._num_nodes, input_size * batch_size]) x = torch.unsqueeze(x0, 0) if self._max_diffusion_step == 0: @@ -130,14 +121,20 @@ def _gconv(self, inputs, state, output_size, bias_start=0.0): x = self._concat(x, x2) x1, x0 = x2, x1 - num_matrices = len(self._supports) * self._max_diffusion_step + 1 # Adds for x itself. - x = torch.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size]) + # Adds for x itself. 
+ num_matrices = len(self._supports) * self._max_diffusion_step + 1 + x = torch.reshape( + x, shape=[num_matrices, self._num_nodes, input_size, batch_size]) x = x.permute(3, 1, 2, 0) # (batch_size, num_nodes, input_size, order) - x = torch.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices]) - weights = self._gconv_params.get_weights((input_size * num_matrices, output_size)).to(x.device) - x = torch.matmul(x, weights) # (batch_size * self._num_nodes, output_size) - - biases = self._gconv_params.get_biases(output_size, bias_start).to(x.device) + x = torch.reshape( + x, shape=[batch_size * self._num_nodes, input_size * num_matrices]) + weights = self._gconv_params.get_weights( + (input_size * num_matrices, output_size)).to(x.device) + # (batch_size * self._num_nodes, output_size) + x = torch.matmul(x, weights) + + biases = self._gconv_params.get_biases( + output_size, bias_start).to(x.device) x += biases # Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim) return torch.reshape(x, [batch_size, self._num_nodes * output_size]) diff --git a/basicts/archs/arch_zoo/dgcrn_arch/__init__.py b/basicts/archs/arch_zoo/dgcrn_arch/__init__.py new file mode 100644 index 00000000..8303eb46 --- /dev/null +++ b/basicts/archs/arch_zoo/dgcrn_arch/__init__.py @@ -0,0 +1,3 @@ +from .dgcrn_arch import DGCRN + +__all__ = ["DGCRN"] diff --git a/basicts/archs/DGCRN_arch/DGCRN_arch.py b/basicts/archs/arch_zoo/dgcrn_arch/dgcrn_arch.py similarity index 60% rename from basicts/archs/DGCRN_arch/DGCRN_arch.py rename to basicts/archs/arch_zoo/dgcrn_arch/dgcrn_arch.py index 470ce7a2..34ae93f4 100644 --- a/basicts/archs/DGCRN_arch/DGCRN_arch.py +++ b/basicts/archs/arch_zoo/dgcrn_arch/dgcrn_arch.py @@ -1,14 +1,14 @@ -import torch.nn.functional as F +import sys + +import numpy as np import torch import torch.nn as nn +import torch.nn.functional as F from torch.autograd import Variable -import numpy as np -from basicts.archs.DGCRN_arch.DGCRN_layer import * -from basicts.archs.registry import ARCH_REGISTRY -import sys + +from .dgcrn_layer import * -@ARCH_REGISTRY.register() class DGCRN(nn.Module): def __init__(self, gcn_depth, num_nodes, predefined_A=None, dropout=0.3, subgraph_size=20, node_dim=40, middle_dim=2, seq_length=12, in_dim=2, list_weight=[0.05, 0.95, 0.95], tanhalpha=3, cl_decay_steps=4000, rnn_size=64, hyperGNN_dim=16): super(DGCRN, self).__init__() @@ -32,23 +32,32 @@ def __init__(self, gcn_depth, num_nodes, predefined_A=None, dropout=0.3, subgrap self.hidden_size = self.rnn_size - dims_hyper = [self.hidden_size + in_dim, hyperGNN_dim, middle_dim, node_dim] + dims_hyper = [self.hidden_size + in_dim, + hyperGNN_dim, middle_dim, node_dim] - self.GCN1_tg = gcn(dims_hyper, gcn_depth, dropout, *list_weight, 'hyper') + self.GCN1_tg = gcn(dims_hyper, gcn_depth, + dropout, *list_weight, 'hyper') - self.GCN2_tg = gcn(dims_hyper, gcn_depth, dropout, *list_weight, 'hyper') + self.GCN2_tg = gcn(dims_hyper, gcn_depth, + dropout, *list_weight, 'hyper') - self.GCN1_tg_de = gcn(dims_hyper, gcn_depth, dropout, *list_weight, 'hyper') + self.GCN1_tg_de = gcn(dims_hyper, gcn_depth, + dropout, *list_weight, 'hyper') - self.GCN2_tg_de = gcn(dims_hyper, gcn_depth, dropout, *list_weight, 'hyper') + self.GCN2_tg_de = gcn(dims_hyper, gcn_depth, + dropout, *list_weight, 'hyper') - self.GCN1_tg_1 = gcn(dims_hyper, gcn_depth, dropout, *list_weight, 'hyper') + self.GCN1_tg_1 = gcn(dims_hyper, gcn_depth, + dropout, *list_weight, 'hyper') - self.GCN2_tg_1 = gcn(dims_hyper, 
gcn_depth, dropout, *list_weight, 'hyper') + self.GCN2_tg_1 = gcn(dims_hyper, gcn_depth, + dropout, *list_weight, 'hyper') - self.GCN1_tg_de_1 = gcn(dims_hyper, gcn_depth, dropout, *list_weight, 'hyper') + self.GCN1_tg_de_1 = gcn(dims_hyper, gcn_depth, + dropout, *list_weight, 'hyper') - self.GCN2_tg_de_1 = gcn(dims_hyper, gcn_depth, dropout, *list_weight, 'hyper') + self.GCN2_tg_de_1 = gcn(dims_hyper, gcn_depth, + dropout, *list_weight, 'hyper') self.fc_final = nn.Linear(self.hidden_size, self.output_dim) @@ -86,20 +95,26 @@ def step(self, input, Hidden_State, Cell_State, predefined_A, type='encoder', i= nodevec1 = self.emb1(self.idx) nodevec2 = self.emb2(self.idx) - hyper_input = torch.cat((x, Hidden_State.view(-1, self.num_nodes, self.hidden_size)), 2) + hyper_input = torch.cat( + (x, Hidden_State.view(-1, self.num_nodes, self.hidden_size)), 2) if type == 'encoder': - filter1 = self.GCN1_tg(hyper_input, predefined_A[0]) + self.GCN1_tg_1( hyper_input, predefined_A[1]) - filter2 = self.GCN2_tg(hyper_input, predefined_A[0]) + self.GCN2_tg_1( hyper_input, predefined_A[1]) + filter1 = self.GCN1_tg( + hyper_input, predefined_A[0]) + self.GCN1_tg_1(hyper_input, predefined_A[1]) + filter2 = self.GCN2_tg( + hyper_input, predefined_A[0]) + self.GCN2_tg_1(hyper_input, predefined_A[1]) if type == 'decoder': - filter1 = self.GCN1_tg_de(hyper_input, predefined_A[0]) + self.GCN1_tg_de_1( hyper_input, predefined_A[1]) - filter2 = self.GCN2_tg_de(hyper_input, predefined_A[0]) + self.GCN2_tg_de_1( hyper_input, predefined_A[1]) + filter1 = self.GCN1_tg_de( + hyper_input, predefined_A[0]) + self.GCN1_tg_de_1(hyper_input, predefined_A[1]) + filter2 = self.GCN2_tg_de( + hyper_input, predefined_A[0]) + self.GCN2_tg_de_1(hyper_input, predefined_A[1]) nodevec1 = torch.tanh(self.alpha * torch.mul(nodevec1, filter1)) nodevec2 = torch.tanh(self.alpha * torch.mul(nodevec2, filter2)) - a = torch.matmul(nodevec1, nodevec2.transpose(2, 1)) - torch.matmul(nodevec2, nodevec1.transpose(2, 1)) + a = torch.matmul(nodevec1, nodevec2.transpose(2, 1)) - \ + torch.matmul(nodevec2, nodevec1.transpose(2, 1)) adj = F.relu(torch.tanh(self.alpha * a)) @@ -112,36 +127,43 @@ def step(self, input, Hidden_State, Cell_State, predefined_A, type='encoder', i= combined = torch.cat((x, Hidden_State), -1) if type == 'encoder': - z = torch.sigmoid(self.gz1(combined, adp) + self.gz2(combined, adpT)) - r = torch.sigmoid(self.gr1(combined, adp) + self.gr2(combined, adpT)) + z = torch.sigmoid(self.gz1(combined, adp) + + self.gz2(combined, adpT)) + r = torch.sigmoid(self.gr1(combined, adp) + + self.gr2(combined, adpT)) temp = torch.cat((x, torch.mul(r, Hidden_State)), -1) Cell_State = torch.tanh(self.gc1(temp, adp) + self.gc2(temp, adpT)) elif type == 'decoder': - z = torch.sigmoid(self.gz1_de(combined, adp) + self.gz2_de(combined, adpT)) - r = torch.sigmoid(self.gr1_de(combined, adp) + self.gr2_de(combined, adpT)) + z = torch.sigmoid(self.gz1_de(combined, adp) + + self.gz2_de(combined, adpT)) + r = torch.sigmoid(self.gr1_de(combined, adp) + + self.gr2_de(combined, adpT)) temp = torch.cat((x, torch.mul(r, Hidden_State)), -1) - Cell_State = torch.tanh(self.gc1_de(temp, adp) + self.gc2_de(temp, adpT)) + Cell_State = torch.tanh(self.gc1_de( + temp, adp) + self.gc2_de(temp, adpT)) - Hidden_State = torch.mul(z, Hidden_State) + torch.mul(1 - z, Cell_State) + Hidden_State = torch.mul(z, Hidden_State) + \ + torch.mul(1 - z, Cell_State) return Hidden_State.view(-1, self.hidden_size), Cell_State.view(-1, self.hidden_size) - def forward(self, 
history_data:torch.Tensor, future_data:torch.Tensor=None, batch_seen:int=None, task_level:int=12, **kwargs) -> torch.Tensor:
-        """feedforward function of DGCRN.
+    def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, batch_seen: int, epoch: int, train: bool, **kwargs) -> torch.Tensor:
+        """Feedforward function of DGCRN.
 
         Args:
             history_data (torch.Tensor): historical data with shape [B, L, N, C].
             future_data (torch.Tensor, optional): ground truth. Defaults to None.
-            batches_seen (int, optional): batch num. Defaults to None.
+            batch_seen (int, optional): batch num. Defaults to None.
             task_level (int, optional): curriculum learning level. Defaults to 12.
 
         Returns:
             torch.Tensor: prediction with shape [B, L, N, 1]
         """
+        task_level = kwargs["task_level"]
 
         input = history_data.transpose(1, 3)
-        ycl = future_data.transpose(1, 3)
+        ycl = future_data.transpose(1, 3)
 
         self.idx = self.idx.to(input.device)
 
@@ -149,20 +171,23 @@ def forward(self, history_data:torch.Tensor, future_data:torch.Tensor=None, batc
         x = input
         batch_size = x.size(0)
-        Hidden_State, Cell_State = self.initHidden(batch_size * self.num_nodes, self.hidden_size)
+        Hidden_State, Cell_State = self.initHidden(
+            batch_size * self.num_nodes, self.hidden_size)
         Hidden_State = Hidden_State.to(input.device)
-        Cell_State = Cell_State.to(input.device)
+        Cell_State = Cell_State.to(input.device)
 
         outputs = None
         for i in range(self.seq_length):
-            Hidden_State, Cell_State = self.step(x[..., i].squeeze(-1), Hidden_State, Cell_State, predefined_A, 'encoder', i)
+            Hidden_State, Cell_State = self.step(
+                x[..., i].squeeze(-1), Hidden_State, Cell_State, predefined_A, 'encoder', i)
 
             if outputs is None:
                 outputs = Hidden_State.unsqueeze(1)
             else:
                 outputs = torch.cat((outputs, Hidden_State.unsqueeze(1)), 1)
 
-        go_symbol = torch.zeros((batch_size, self.output_dim, self.num_nodes)).to(input.device)
+        go_symbol = torch.zeros(
+            (batch_size, self.output_dim, self.num_nodes)).to(input.device)
 
         timeofday = ycl[:, [1], :, :]
 
         decoder_input = go_symbol
@@ -171,15 +196,18 @@ def forward(self, history_data:torch.Tensor, future_data:torch.Tensor=None, batc
         for i in range(task_level):
             try:
-                decoder_input = torch.cat([decoder_input, timeofday[..., i]], dim=1)
+                decoder_input = torch.cat(
+                    [decoder_input, timeofday[..., i]], dim=1)
             except:
                 print(decoder_input.shape, timeofday.shape)
                 sys.exit(0)
-            Hidden_State, Cell_State = self.step(decoder_input, Hidden_State, Cell_State, predefined_A, 'decoder', None)
+            Hidden_State, Cell_State = self.step(
+                decoder_input, Hidden_State, Cell_State, predefined_A, 'decoder', None)
             decoder_output = self.fc_final(Hidden_State)
-            decoder_input = decoder_output.view(batch_size, self.num_nodes, self.output_dim).transpose(1, 2)
+            decoder_input = decoder_output.view(
+                batch_size, self.num_nodes, self.output_dim).transpose(1, 2)
             outputs_final.append(decoder_output)
             if self.training and self.use_curriculum_learning:
                 c = np.random.uniform(0, 1)
@@ -188,11 +216,13 @@ def forward(self, history_data:torch.Tensor, future_data:torch.Tensor=None, batc
 
         outputs_final = torch.stack(outputs_final, dim=1)
 
-        outputs_final = outputs_final.view(batch_size, self.num_nodes, task_level, self.output_dim).transpose(1, 2)
+        outputs_final = outputs_final.view(
+            batch_size, self.num_nodes, task_level, self.output_dim).transpose(1, 2)
 
-        ramdom_predict= torch.zeros(batch_size, self.seq_length - task_level, self.num_nodes, self.output_dim).to(outputs_final.device)
+        ramdom_predict = torch.zeros(batch_size, self.seq_length - task_level,
                                      self.num_nodes, self.output_dim).to(outputs_final.device)
         outputs_final = torch.cat([outputs_final, ramdom_predict], dim=1)
-
+
         return outputs_final
 
     def initHidden(self, batch_size, hidden_size):
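For clarity, the curriculum-learning branch above decodes only task_level steps and zero-pads the remaining horizon, so the returned tensor always covers the full prediction length; a minimal sketch with illustrative sizes:

# DGCRN-style horizon padding during curriculum learning (shapes illustrative).
import torch

batch_size, seq_length, task_level, num_nodes, output_dim = 2, 12, 3, 5, 1
decoded = torch.randn(batch_size, task_level, num_nodes, output_dim)   # decoder outputs so far
padding = torch.zeros(batch_size, seq_length - task_level, num_nodes, output_dim)
full_horizon = torch.cat([decoded, padding], dim=1)
assert full_horizon.shape == (batch_size, seq_length, num_nodes, output_dim)  # [B, 12, N, 1]
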
diff --git a/basicts/archs/DGCRN_arch/DGCRN_layer.py b/basicts/archs/arch_zoo/dgcrn_arch/dgcrn_layer.py
similarity index 96%
rename from basicts/archs/DGCRN_arch/DGCRN_layer.py
rename to basicts/archs/arch_zoo/dgcrn_arch/dgcrn_layer.py
index 7ad9fa7b..29ecb31f 100644
--- a/basicts/archs/DGCRN_arch/DGCRN_layer.py
+++ b/basicts/archs/arch_zoo/dgcrn_arch/dgcrn_layer.py
@@ -1,10 +1,8 @@
-from __future__ import division
+from collections import OrderedDict
+
 import torch
 import torch.nn as nn
-from torch.nn import init
-import numbers
 import torch.nn.functional as F
-from collections import OrderedDict
 
 
 class gconv_RNN(nn.Module):
diff --git a/basicts/archs/arch_zoo/gts_arch/__init__.py b/basicts/archs/arch_zoo/gts_arch/__init__.py
new file mode 100644
index 00000000..8c79b7d2
--- /dev/null
+++ b/basicts/archs/arch_zoo/gts_arch/__init__.py
@@ -0,0 +1,3 @@
+from .gts_arch import GTS
+
+__all__ = ["GTS"]
diff --git a/basicts/archs/GTS_arch/GTS_arch.py b/basicts/archs/arch_zoo/gts_arch/gts_arch.py
similarity index 92%
rename from basicts/archs/GTS_arch/GTS_arch.py
rename to basicts/archs/arch_zoo/gts_arch/gts_arch.py
index 28c5314a..f255526f 100644
--- a/basicts/archs/GTS_arch/GTS_arch.py
+++ b/basicts/archs/arch_zoo/gts_arch/gts_arch.py
@@ -1,20 +1,10 @@
+import numpy as np
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from basicts.archs.GTS_arch.GTS_cell import DCGRUCell
-from basicts.archs.registry import ARCH_REGISTRY
-import numpy as np
-"""
-    Paper:
-        Discrete Graph Structure Learning for Forecasting Multiple Time Series, ICLR 2021.
-    Note:
-        Kindly note that the results of GTS may have some gaps with the original paper,
-        because it calculates the evaluation metrics in a slightly different manner.
-        Some details can be found in the appendix in the original paper and similar issues in its official repository: https://github.com/chaoshangcs/GTS/issues
-    Ref Official Code:
-        https://github.com/chaoshangcs/GTS
-"""
+from .gts_cell import DCGRUCell
+
 
 def count_parameters(model):
     return sum(p.numel() for p in model.parameters() if p.requires_grad)
@@ -126,8 +116,20 @@ def forward(self, inputs, adj, hidden_state=None):
         return output, torch.stack(hidden_states)
 
 
-@ARCH_REGISTRY.register()
 class GTS(nn.Module, Seq2SeqAttrs):
+    """
+    Paper:
+        Discrete Graph Structure Learning for Forecasting Multiple Time Series, ICLR 2021.
+    Link: https://arxiv.org/abs/2101.06861
+    Ref Official Code:
+        https://github.com/chaoshangcs/GTS
+    Note:
+        Kindly note that the results of GTS may have some gaps compared with the original paper,
+        because it calculates the evaluation metrics in a slightly different manner.
+ Some details can be found in the appendix in the original paper and + similar issues in its official repository: https://github.com/chaoshangcs/GTS/issues + """ + def __init__(self, **model_kwargs): """init GTS @@ -200,6 +202,7 @@ def encoder(self, inputs, adj): :param inputs: shape (seq_len, batch_size, num_sensor * input_dim) :return: encoder_hidden_state: (num_layers, batch_size, self.hidden_state_size) """ + encoder_hidden_state = None for t in range(self.encoder_model.seq_len): _, encoder_hidden_state = self.encoder_model(inputs[t], adj, encoder_hidden_state) @@ -214,6 +217,7 @@ def decoder(self, encoder_hidden_state, adj, labels=None, batches_seen=None): :param batches_seen: global step [optional, not exist for inference] :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim) """ + batch_size = encoder_hidden_state.size(1) go_symbol = torch.zeros((batch_size, self.num_nodes * self.decoder_model.output_dim)).to(encoder_hidden_state.device) decoder_hidden_state = encoder_hidden_state @@ -232,13 +236,25 @@ def decoder(self, encoder_hidden_state, adj, labels=None, batches_seen=None): outputs = torch.stack(outputs) return outputs - def forward(self, history_data, future_data=None, batch_seen=None, epoch=None): + def forward(self, history_data, future_data=None, batch_seen=None, epoch=None, **kwargs): """ - :param inputs: shape (seq_len, batch_size, num_sensor * input_dim) - :param labels: shape (horizon, batch_size, num_sensor * output) - :param batches_seen: batches seen till now + :param history_data: shape (seq_len, batch_size, num_sensor * input_dim) + :param future_data: shape (horizon, batch_size, num_sensor * output) + :param batch_seen: batches seen till now :return: output: (self.horizon, batch_size, self.num_nodes * self.output_dim) """ + + # reshape data + batch_size, length, num_nodes, channels = history_data.shape + history_data = history_data.reshape(batch_size, length, num_nodes * channels) # [B, L, N*C] + history_data = history_data.transpose(0, 1) # [L, B, N*C] + + if future_data is not None: + batch_size, length, num_nodes, channels = future_data.shape + future_data = future_data.reshape(batch_size, length, num_nodes * channels) # [B, L, N*C] + future_data = future_data.transpose(0, 1) # [L, B, N*C] + + # GTS inputs = history_data labels = future_data diff --git a/basicts/archs/GTS_arch/GTS_cell.py b/basicts/archs/arch_zoo/gts_arch/gts_cell.py similarity index 67% rename from basicts/archs/GTS_arch/GTS_cell.py rename to basicts/archs/arch_zoo/gts_arch/gts_cell.py index f6e28e93..528dee02 100644 --- a/basicts/archs/GTS_arch/GTS_cell.py +++ b/basicts/archs/arch_zoo/gts_arch/gts_cell.py @@ -1,5 +1,6 @@ -import numpy as np import torch +import numpy as np + class LayerParams: def __init__(self, rnn_network: torch.nn.Module, layer_type: str): @@ -13,7 +14,8 @@ def get_weights(self, shape): nn_param = torch.nn.Parameter(torch.empty(*shape)) torch.nn.init.xavier_normal_(nn_param) self._params_dict[shape] = nn_param - self._rnn_network.register_parameter('{}_weight_{}'.format(self._type, str(shape)), nn_param) + self._rnn_network.register_parameter( + '{}_weight_{}'.format(self._type, str(shape)), nn_param) return self._params_dict[shape] def get_biases(self, length, bias_start=0.0): @@ -21,24 +23,14 @@ def get_biases(self, length, bias_start=0.0): biases = torch.nn.Parameter(torch.empty(length)) torch.nn.init.constant_(biases, bias_start) self._biases_dict[length] = biases - self._rnn_network.register_parameter('{}_biases_{}'.format(self._type, 
str(length)), biases) + self._rnn_network.register_parameter( + '{}_biases_{}'.format(self._type, str(length)), biases) return self._biases_dict[length] class DCGRUCell(torch.nn.Module): def __init__(self, num_units, max_diffusion_step, num_nodes, nonlinearity='tanh', filter_type="laplacian", use_gc_for_ru=True): - """ - - :param num_units: - :param adj_mx: - :param max_diffusion_step: - :param num_nodes: - :param nonlinearity: - :param filter_type: "laplacian", "random_walk", "dual_random_walk". - :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates. - """ - super().__init__() self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu # support other nonlinearities up here? @@ -47,22 +39,6 @@ def __init__(self, num_units, max_diffusion_step, num_nodes, nonlinearity='tanh' self._max_diffusion_step = max_diffusion_step self._supports = [] self._use_gc_for_ru = use_gc_for_ru - - ''' - Option: - if filter_type == "laplacian": - supports.append(utils.calculate_scaled_laplacian(adj_mx, lambda_max=None)) - elif filter_type == "random_walk": - supports.append(utils.calculate_random_walk_matrix(adj_mx).T) - elif filter_type == "dual_random_walk": - supports.append(utils.calculate_random_walk_matrix(adj_mx).T) - supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T) - else: - supports.append(utils.calculate_scaled_laplacian(adj_mx)) - for support in supports: - self._supports.append(self._build_sparse_matrix(support)) - ''' - self._fc_params = LayerParams(self, 'fc') self._gconv_params = LayerParams(self, 'gconv') @@ -82,7 +58,8 @@ def _calculate_random_walk_matrix(self, adj_mx): adj_mx = adj_mx + torch.eye(int(adj_mx.shape[0])).to(adj_mx.device) d = torch.sum(adj_mx, 1) d_inv = 1. / d - d_inv = torch.where(torch.isinf(d_inv), torch.zeros(d_inv.shape).to(d_inv.device), d_inv) + d_inv = torch.where(torch.isinf(d_inv), torch.zeros( + d_inv.shape).to(d_inv.device), d_inv) d_mat_inv = torch.diag(d_inv) random_walk_mx = torch.mm(d_mat_inv, adj_mx) return random_walk_mx @@ -101,9 +78,11 @@ def forward(self, inputs, hx, adj): fn = self._gconv else: fn = self._fc - value = torch.sigmoid(fn(inputs, adj_mx, hx, output_size, bias_start=1.0)) + value = torch.sigmoid( + fn(inputs, adj_mx, hx, output_size, bias_start=1.0)) value = torch.reshape(value, (-1, self._num_nodes, output_size)) - r, u = torch.split(tensor=value, split_size_or_sections=self._num_units, dim=-1) + r, u = torch.split( + tensor=value, split_size_or_sections=self._num_units, dim=-1) r = torch.reshape(r, (-1, self._num_nodes * self._num_units)) u = torch.reshape(u, (-1, self._num_nodes * self._num_units)) @@ -141,7 +120,8 @@ def _gconv(self, inputs, adj_mx, state, output_size, bias_start=0.0): x = inputs_and_state x0 = x.permute(1, 2, 0) # (num_nodes, total_arg_size, batch_size) - x0 = torch.reshape(x0, shape=[self._num_nodes, input_size * batch_size]) + x0 = torch.reshape( + x0, shape=[self._num_nodes, input_size * batch_size]) x = torch.unsqueeze(x0, 0) if self._max_diffusion_step == 0: @@ -154,26 +134,20 @@ def _gconv(self, inputs, adj_mx, state, output_size, bias_start=0.0): x2 = 2 * torch.mm(adj_mx, x1) - x0 x = self._concat(x, x2) x1, x0 = x2, x1 - ''' - Option: - for support in self._supports: - x1 = torch.sparse.mm(support, x0) - x = self._concat(x, x1) - - for k in range(2, self._max_diffusion_step + 1): - x2 = 2 * torch.sparse.mm(support, x1) - x0 - x = self._concat(x, x2) - x1, x0 = x2, x1 - ''' num_matrices = self._max_diffusion_step + 1 # Adds for x itself. 
- x = torch.reshape(x, shape=[num_matrices, self._num_nodes, input_size, batch_size]) + x = torch.reshape( + x, shape=[num_matrices, self._num_nodes, input_size, batch_size]) x = x.permute(3, 1, 2, 0) # (batch_size, num_nodes, input_size, order) - x = torch.reshape(x, shape=[batch_size * self._num_nodes, input_size * num_matrices]) + x = torch.reshape( + x, shape=[batch_size * self._num_nodes, input_size * num_matrices]) - weights = self._gconv_params.get_weights((input_size * num_matrices, output_size)).to(x.device) - x = torch.matmul(x, weights) # (batch_size * self._num_nodes, output_size) + weights = self._gconv_params.get_weights( + (input_size * num_matrices, output_size)).to(x.device) + # (batch_size * self._num_nodes, output_size) + x = torch.matmul(x, weights) - biases = self._gconv_params.get_biases(output_size, bias_start).to(x.device) + biases = self._gconv_params.get_biases( + output_size, bias_start).to(x.device) x += biases # Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim) return torch.reshape(x, [batch_size, self._num_nodes * output_size]) diff --git a/basicts/archs/arch_zoo/gwnet_arch/__init__.py b/basicts/archs/arch_zoo/gwnet_arch/__init__.py new file mode 100644 index 00000000..505bf98f --- /dev/null +++ b/basicts/archs/arch_zoo/gwnet_arch/__init__.py @@ -0,0 +1,3 @@ +from .gwnet_arch import GraphWaveNet + +__all__ = ["GraphWaveNet"] diff --git a/basicts/archs/GraphWaveNet_arch/GraphWaveNet_arch.py b/basicts/archs/arch_zoo/gwnet_arch/gwnet_arch.py similarity index 71% rename from basicts/archs/GraphWaveNet_arch/GraphWaveNet_arch.py rename to basicts/archs/arch_zoo/gwnet_arch/gwnet_arch.py index 2ad65990..0665ad8e 100644 --- a/basicts/archs/GraphWaveNet_arch/GraphWaveNet_arch.py +++ b/basicts/archs/arch_zoo/gwnet_arch/gwnet_arch.py @@ -1,59 +1,70 @@ import torch -import torch.nn as nn +from torch import nn import torch.nn.functional as F -from torch.autograd import Variable -import sys -from basicts.archs.registry import ARCH_REGISTRY -""" - Paper: Graph WaveNet for Deep Spatial-Temporal Graph Modeling - Ref Official Code: https://github.com/nnzhan/Graph-WaveNet/blob/master/model.py -""" class nconv(nn.Module): + """Graph conv operation.""" + def __init__(self): - super(nconv,self).__init__() + super(nconv, self).__init__() - def forward(self,x, A): - x = torch.einsum('ncvl,vw->ncwl',(x,A)) + def forward(self, x, A): + x = torch.einsum('ncvl,vw->ncwl', (x, A)) return x.contiguous() + class linear(nn.Module): - def __init__(self,c_in,c_out): - super(linear,self).__init__() - self.mlp = torch.nn.Conv2d(c_in, c_out, kernel_size=(1, 1), padding=(0,0), stride=(1,1), bias=True) + """Linear layer.""" + + def __init__(self, c_in, c_out): + super(linear, self).__init__() + self.mlp = torch.nn.Conv2d(c_in, c_out, kernel_size=( + 1, 1), padding=(0, 0), stride=(1, 1), bias=True) - def forward(self,x): + def forward(self, x): return self.mlp(x) + class gcn(nn.Module): - def __init__(self,c_in,c_out,dropout,support_len=3,order=2): - super(gcn,self).__init__() + """Graph convolution network.""" + + def __init__(self, c_in, c_out, dropout, support_len=3, order=2): + super(gcn, self).__init__() self.nconv = nconv() c_in = (order*support_len+1)*c_in - self.mlp = linear(c_in,c_out) + self.mlp = linear(c_in, c_out) self.dropout = dropout self.order = order - def forward(self,x,support): + def forward(self, x, support): out = [x] for a in support: - x1 = self.nconv(x,a.to(x.device)) + x1 = self.nconv(x, a.to(x.device)) out.append(x1) for k in 
range(2, self.order + 1): - x2 = self.nconv(x1,a.to(x.device)) + x2 = self.nconv(x1, a.to(x.device)) out.append(x2) x1 = x2 - h = torch.cat(out,dim=1) + h = torch.cat(out, dim=1) h = self.mlp(h) h = F.dropout(h, self.dropout, training=self.training) return h -@ARCH_REGISTRY.register() class GraphWaveNet(nn.Module): - def __init__(self, num_nodes, dropout=0.3, supports=None, gcn_bool=True, addaptadj=True, aptinit=None, in_dim=2,out_dim=12,residual_channels=32,dilation_channels=32,skip_channels=256,end_channels=512,kernel_size=2,blocks=4,layers=2): + """ + Paper: Graph WaveNet for Deep Spatial-Temporal Graph Modeling + Link: https://arxiv.org/abs/1906.00121 + Ref Official Code: https://github.com/nnzhan/Graph-WaveNet/blob/master/model.py + """ + + def __init__(self, num_nodes, dropout=0.3, supports=None, + gcn_bool=True, addaptadj=True, aptinit=None, + in_dim=2, out_dim=12, residual_channels=32, + dilation_channels=32, skip_channels=256, end_channels=512, + kernel_size=2, blocks=4, layers=2): super(GraphWaveNet, self).__init__() self.dropout = dropout self.blocks = blocks @@ -70,7 +81,7 @@ def __init__(self, num_nodes, dropout=0.3, supports=None, gcn_bool=True, addapta self.start_conv = nn.Conv2d(in_channels=in_dim, out_channels=residual_channels, - kernel_size=(1,1)) + kernel_size=(1, 1)) self.supports = supports receptive_field = 1 @@ -83,9 +94,11 @@ def __init__(self, num_nodes, dropout=0.3, supports=None, gcn_bool=True, addapta if aptinit is None: if supports is None: self.supports = [] - self.nodevec1 = nn.Parameter(torch.randn(num_nodes, 10), requires_grad=True) - self.nodevec2 = nn.Parameter(torch.randn(10, num_nodes), requires_grad=True) - self.supports_len +=1 + self.nodevec1 = nn.Parameter( + torch.randn(num_nodes, 10), requires_grad=True) + self.nodevec2 = nn.Parameter( + torch.randn(10, num_nodes), requires_grad=True) + self.supports_len += 1 else: if supports is None: self.supports = [] @@ -96,9 +109,6 @@ def __init__(self, num_nodes, dropout=0.3, supports=None, gcn_bool=True, addapta self.nodevec2 = nn.Parameter(initemb2, requires_grad=True) self.supports_len += 1 - - - for b in range(blocks): additional_scope = kernel_size - 1 new_dilation = 1 @@ -106,7 +116,7 @@ def __init__(self, num_nodes, dropout=0.3, supports=None, gcn_bool=True, addapta # dilated convolutions self.filter_convs.append(nn.Conv2d(in_channels=residual_channels, out_channels=dilation_channels, - kernel_size=(1,kernel_size),dilation=new_dilation)) + kernel_size=(1, kernel_size), dilation=new_dilation)) self.gate_convs.append(nn.Conv1d(in_channels=residual_channels, out_channels=dilation_channels, @@ -122,28 +132,27 @@ def __init__(self, num_nodes, dropout=0.3, supports=None, gcn_bool=True, addapta out_channels=skip_channels, kernel_size=(1, 1))) self.bn.append(nn.BatchNorm2d(residual_channels)) - new_dilation *=2 + new_dilation *= 2 receptive_field += additional_scope additional_scope *= 2 if self.gcn_bool: - self.gconv.append(gcn(dilation_channels,residual_channels,dropout,support_len=self.supports_len)) - - + self.gconv.append( + gcn(dilation_channels, residual_channels, dropout, support_len=self.supports_len)) self.end_conv_1 = nn.Conv2d(in_channels=skip_channels, - out_channels=end_channels, - kernel_size=(1,1), - bias=True) + out_channels=end_channels, + kernel_size=(1, 1), + bias=True) self.end_conv_2 = nn.Conv2d(in_channels=end_channels, out_channels=out_dim, - kernel_size=(1,1), + kernel_size=(1, 1), bias=True) self.receptive_field = receptive_field - def forward(self, history_data: torch.Tensor, 
**kwargs) -> torch.Tensor:
-        """feedforward function of Graph WaveNet.
+    def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, batch_seen: int, epoch: int, train: bool, **kwargs) -> torch.Tensor:
+        """Feedforward function of Graph WaveNet.
 
         Args:
             history_data (torch.Tensor): shape [B, L, N, C]
@@ -151,10 +160,12 @@ def forward(self, history_data: torch.Tensor, **kwargs) -> torch.Tensor:
         Returns:
             torch.Tensor: [B, L, N, 1]
         """
+
         input = history_data.transpose(1, 3).contiguous()
         in_len = input.size(3)
-        if in_len < self.receptive_field:
-            x = nn.functional.pad(input, (self.receptive_field-in_len, 0, 0, 0))
+        if in_len < self.receptive_field:
+            x = nn.functional.pad(
+                input, (self.receptive_field-in_len, 0, 0, 0))
         else:
             x = input
         x = self.start_conv(x)
         skip = 0
 
         # calculate the current adaptive adj matrix once per iteration
         new_supports = None
         if self.gcn_bool and self.addaptadj and self.supports is not None:
-            adp = F.softmax(F.relu(torch.mm(self.nodevec1, self.nodevec2)), dim=1)
+            adp = F.softmax(
+                F.relu(torch.mm(self.nodevec1, self.nodevec2)), dim=1)
             new_supports = self.supports + [adp]
 
         # WaveNet layers
@@ -199,18 +211,16 @@ def forward(self, history_data: torch.Tensor, **kwargs) -> torch.Tensor:
                 skip = 0
             skip = s + skip
-
             if self.gcn_bool and self.supports is not None:
                 if self.addaptadj:
                     x = self.gconv[i](x, new_supports)
                 else:
-                    x = self.gconv[i](x,self.supports)
+                    x = self.gconv[i](x, self.supports)
             else:
                 x = self.residual_convs[i](x)
 
             x = x + residual[:, :, :, -x.size(3):]
-
             x = self.bn[i](x)
 
         x = F.relu(skip)
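As a sanity check on the padding above, the receptive field grows with the doubling dilations; a small standalone sketch, assuming the constructor defaults (kernel_size=2, blocks=4, layers=2):

# Receptive-field arithmetic for the dilated temporal convolutions.
kernel_size, blocks, layers = 2, 4, 2
receptive_field = 1
for _ in range(blocks):
    additional_scope = kernel_size - 1
    for _ in range(layers):
        receptive_field += additional_scope   # each layer widens the field
        additional_scope *= 2                 # dilation doubles per layer
print(receptive_field)  # 13, so a 12-step input is left-padded by one step
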
diff --git a/basicts/archs/arch_zoo/linear_arch/__init__.py b/basicts/archs/arch_zoo/linear_arch/__init__.py
new file mode 100644
index 00000000..923f2f77
--- /dev/null
+++ b/basicts/archs/arch_zoo/linear_arch/__init__.py
@@ -0,0 +1,5 @@
+from .linear import Linear
+from .dlinear import DLinear
+from .nlinear import NLinear
+
+__all__ = ["Linear", "DLinear", "NLinear"]
diff --git a/basicts/archs/arch_zoo/linear_arch/dlinear.py b/basicts/archs/arch_zoo/linear_arch/dlinear.py
new file mode 100644
index 00000000..ca3135bf
--- /dev/null
+++ b/basicts/archs/arch_zoo/linear_arch/dlinear.py
@@ -0,0 +1,98 @@
+import torch
+import torch.nn as nn
+
+
+class moving_avg(nn.Module):
+    """Moving average block to highlight the trend of time series"""
+
+    def __init__(self, kernel_size, stride):
+        super(moving_avg, self).__init__()
+        self.kernel_size = kernel_size
+        self.avg = nn.AvgPool1d(kernel_size=kernel_size,
+                                stride=stride, padding=0)
+
+    def forward(self, x):
+        # padding on both ends of the time series
+        front = x[:, 0:1, :].repeat(1, (self.kernel_size - 1) // 2, 1)
+        end = x[:, -1:, :].repeat(1, (self.kernel_size - 1) // 2, 1)
+        x = torch.cat([front, x, end], dim=1)
+        x = self.avg(x.permute(0, 2, 1))
+        x = x.permute(0, 2, 1)
+        return x
+
+
+class series_decomp(nn.Module):
+    """Series decomposition block"""
+
+    def __init__(self, kernel_size):
+        super(series_decomp, self).__init__()
+        self.moving_avg = moving_avg(kernel_size, stride=1)
+
+    def forward(self, x):
+        moving_mean = self.moving_avg(x)
+        res = x - moving_mean
+        return res, moving_mean
+
+
+class DLinear(nn.Module):
+    """
+    The implementation of the decomposition-linear model in the paper "Are Transformers Effective for Time Series Forecasting?"
+    Link: https://arxiv.org/abs/2205.13504
+    """
+
+    def __init__(self, **model_args):
+        super(DLinear, self).__init__()
+        self.seq_len = model_args["seq_len"]
+        self.pred_len = model_args["pred_len"]
+
+        # decomposition kernel size
+        kernel_size = 25
+        self.decomposition = series_decomp(kernel_size)
+        self.individual = model_args["individual"]
+        self.channels = model_args["enc_in"]
+
+        if self.individual:
+            self.Linear_Seasonal = nn.ModuleList()
+            self.Linear_Trend = nn.ModuleList()
+
+            for i in range(self.channels):
+                self.Linear_Seasonal.append(
+                    nn.Linear(self.seq_len, self.pred_len))
+                self.Linear_Trend.append(
+                    nn.Linear(self.seq_len, self.pred_len))
+
+        else:
+            self.Linear_Seasonal = nn.Linear(self.seq_len, self.pred_len)
+            self.Linear_Trend = nn.Linear(self.seq_len, self.pred_len)
+
+    def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, batch_seen: int, epoch: int, train: bool, **kwargs) -> torch.Tensor:
+        """Feed forward of DLinear.
+
+        Args:
+            history_data (torch.Tensor): history data with shape [B, L, N, C]
+
+        Returns:
+            torch.Tensor: prediction with shape [B, L, N, C]
+        """
+
+        assert history_data.shape[-1] == 1      # only use the target feature
+        x = history_data[..., 0]                # B, L, N
+        seasonal_init, trend_init = self.decomposition(x)
+        seasonal_init, trend_init = seasonal_init.permute(
+            0, 2, 1), trend_init.permute(0, 2, 1)
+        if self.individual:
+            seasonal_output = torch.zeros([seasonal_init.size(0), seasonal_init.size(
+                1), self.pred_len], dtype=seasonal_init.dtype).to(seasonal_init.device)
+            trend_output = torch.zeros([trend_init.size(0), trend_init.size(
+                1), self.pred_len], dtype=trend_init.dtype).to(trend_init.device)
+            for i in range(self.channels):
+                seasonal_output[:, i, :] = self.Linear_Seasonal[i](
+                    seasonal_init[:, i, :])
+                trend_output[:, i, :] = self.Linear_Trend[i](
+                    trend_init[:, i, :])
+        else:
+            seasonal_output = self.Linear_Seasonal(seasonal_init)
+            trend_output = self.Linear_Trend(trend_init)
+
+        prediction = seasonal_output + trend_output
+        return prediction.permute(0, 2, 1).unsqueeze(-1)    # [B, L, N, 1]
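Because series_decomp simply subtracts the moving average, the two branches always sum back to the input; a quick sketch (to be run in the context of dlinear.py above, shapes illustrative):

# The trend/seasonal split is lossless: seasonal + trend == input.
import torch

series = torch.randn(2, 96, 7)              # [batch, length, variables]
decomp = series_decomp(kernel_size=25)      # class defined above
seasonal, trend = decomp(series)
assert torch.allclose(seasonal + trend, series, atol=1e-6)
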
diff --git a/basicts/archs/arch_zoo/linear_arch/linear.py b/basicts/archs/arch_zoo/linear_arch/linear.py
new file mode 100644
index 00000000..ff4387fc
--- /dev/null
+++ b/basicts/archs/arch_zoo/linear_arch/linear.py
@@ -0,0 +1,29 @@
+import torch
+import torch.nn as nn
+
+class Linear(nn.Module):
+    """
+    The implementation of the linear model in the paper "Are Transformers Effective for Time Series Forecasting?"
+    Link: https://arxiv.org/abs/2205.13504
+    """
+
+    def __init__(self, **model_args):
+        super(Linear, self).__init__()
+        self.seq_len = model_args["seq_len"]
+        self.pred_len = model_args["pred_len"]
+        self.Linear = nn.Linear(self.seq_len, self.pred_len)
+
+    def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, batch_seen: int, epoch: int, train: bool, **kwargs) -> torch.Tensor:
+        """Feed forward of Linear.
+
+        Args:
+            history_data (torch.Tensor): history data with shape [B, L, N, C]
+
+        Returns:
+            torch.Tensor: prediction with shape [B, L, N, C]
+        """
+
+        assert history_data.shape[-1] == 1      # only use the target feature
+        history_data = history_data[..., 0]     # B, L, N
+        prediction = self.Linear(history_data.permute(0, 2, 1)).permute(0, 2, 1).unsqueeze(-1)     # B, L, N, 1
+        return prediction
diff --git a/basicts/archs/arch_zoo/linear_arch/nlinear.py b/basicts/archs/arch_zoo/linear_arch/nlinear.py
new file mode 100644
index 00000000..53834c8c
--- /dev/null
+++ b/basicts/archs/arch_zoo/linear_arch/nlinear.py
@@ -0,0 +1,32 @@
+import torch
+import torch.nn as nn
+
+class NLinear(nn.Module):
+    """
+    The implementation of the normalization-linear model in the paper "Are Transformers Effective for Time Series Forecasting?"
+    Link: https://arxiv.org/abs/2205.13504
+    """
+
+    def __init__(self, **model_args):
+        super(NLinear, self).__init__()
+        self.seq_len = model_args["seq_len"]
+        self.pred_len = model_args["pred_len"]
+        self.Linear = nn.Linear(self.seq_len, self.pred_len)
+
+    def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, batch_seen: int, epoch: int, train: bool, **kwargs) -> torch.Tensor:
+        """Feed forward of NLinear.
+
+        Args:
+            history_data (torch.Tensor): history data with shape [B, L, N, C]
+
+        Returns:
+            torch.Tensor: prediction with shape [B, L, N, C]
+        """
+        assert history_data.shape[-1] == 1      # only use the target feature
+        x = history_data[..., 0]                # B, L, N
+        # x: [Batch, Input length, Channel]
+        seq_last = x[:, -1:, :].detach()
+        x = x - seq_last
+        x = self.Linear(x.permute(0, 2, 1)).permute(0, 2, 1)
+        prediction = x + seq_last
+        return prediction.unsqueeze(-1)
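The one-line normalization above is what distinguishes NLinear from the plain Linear model: subtracting the last observed value before the linear map and adding it back afterwards makes the model robust to level shifts between the history and prediction windows. A standalone sketch with illustrative sizes:

# NLinear-style last-value normalization around a linear projection.
import torch
import torch.nn as nn

seq_len, pred_len = 96, 24
x = torch.randn(2, seq_len, 7) + 100.0      # series with a large constant offset
seq_last = x[:, -1:, :].detach()            # [B, 1, N]
proj = nn.Linear(seq_len, pred_len)
y = proj((x - seq_last).permute(0, 2, 1)).permute(0, 2, 1) + seq_last
print(y.shape)                              # torch.Size([2, 24, 7])
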
diff --git a/basicts/archs/arch_zoo/mtgnn_arch/__init__.py b/basicts/archs/arch_zoo/mtgnn_arch/__init__.py
new file mode 100644
index 00000000..3d2c7d9b
--- /dev/null
+++ b/basicts/archs/arch_zoo/mtgnn_arch/__init__.py
@@ -0,0 +1,3 @@
+from .mtgnn_arch import MTGNN
+
+__all__ = ["MTGNN"]
diff --git a/basicts/archs/arch_zoo/mtgnn_arch/mtgnn_arch.py b/basicts/archs/arch_zoo/mtgnn_arch/mtgnn_arch.py
new file mode 100644
index 00000000..23b714b3
--- /dev/null
+++ b/basicts/archs/arch_zoo/mtgnn_arch/mtgnn_arch.py
@@ -0,0 +1,165 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .mtgnn_layers import graph_constructor, dilated_inception, mixprop, LayerNorm
+
+
+class MTGNN(nn.Module):
+    """
+    Paper: Connecting the Dots: Multivariate Time Series Forecasting with Graph Neural Networks
+    Ref Official Code: https://github.com/nnzhan/MTGNN
+    Link: https://arxiv.org/abs/2005.11650
+    """
+
+    def __init__(self, gcn_true, buildA_true, gcn_depth, num_nodes, predefined_A=None, static_feat=None, dropout=0.3, subgraph_size=20, node_dim=40, dilation_exponential=1, conv_channels=32, residual_channels=32, skip_channels=64, end_channels=128, seq_length=12, in_dim=2, out_dim=12, layers=3, propalpha=0.05, tanhalpha=3, layer_norm_affline=True):
+        super(MTGNN, self).__init__()
+        self.gcn_true = gcn_true
+        self.buildA_true = buildA_true
+        self.num_nodes = num_nodes
+        self.dropout = dropout
+        self.predefined_A = predefined_A
+        self.filter_convs = nn.ModuleList()
+        self.gate_convs = nn.ModuleList()
+        self.residual_convs = nn.ModuleList()
+        self.skip_convs = nn.ModuleList()
+        self.gconv1 = nn.ModuleList()
+        self.gconv2 = nn.ModuleList()
+        self.norm = nn.ModuleList()
+        self.start_conv = nn.Conv2d(
+            in_channels=in_dim, out_channels=residual_channels, kernel_size=(1, 1))
+        self.gc = graph_constructor(
+            num_nodes, subgraph_size, node_dim, alpha=tanhalpha, static_feat=static_feat)
+
+        self.seq_length = seq_length
+        kernel_size = 7
+        if dilation_exponential > 1:
+            self.receptive_field = int(
+                1+(kernel_size-1)*(dilation_exponential**layers-1)/(dilation_exponential-1))
+        else:
+            self.receptive_field = layers*(kernel_size-1) + 1
+
+        for i in range(1):
+            if dilation_exponential > 1:
+                rf_size_i = int(
+                    1 + i*(kernel_size-1)*(dilation_exponential**layers-1)/(dilation_exponential-1))
+            else:
+                rf_size_i = i*layers*(kernel_size-1)+1
+            new_dilation = 1
+            for j in range(1, layers+1):
+                if dilation_exponential > 1:
+                    rf_size_j = int(
+                        rf_size_i + (kernel_size-1)*(dilation_exponential**j-1)/(dilation_exponential-1))
+                else:
+                    rf_size_j = rf_size_i+j*(kernel_size-1)
+
+                self.filter_convs.append(dilated_inception(
+                    residual_channels, conv_channels, dilation_factor=new_dilation))
+                self.gate_convs.append(dilated_inception(
+                    residual_channels, conv_channels, dilation_factor=new_dilation))
+                self.residual_convs.append(nn.Conv2d(
+                    in_channels=conv_channels, out_channels=residual_channels, kernel_size=(1, 1)))
+                if self.seq_length > self.receptive_field:
+                    self.skip_convs.append(nn.Conv2d(
+                        in_channels=conv_channels, out_channels=skip_channels, kernel_size=(1, self.seq_length-rf_size_j+1)))
+                else:
+                    self.skip_convs.append(nn.Conv2d(in_channels=conv_channels, out_channels=skip_channels, kernel_size=(
+                        1, self.receptive_field-rf_size_j+1)))
+
+                if self.gcn_true:
+                    self.gconv1.append(
+                        mixprop(conv_channels, residual_channels, gcn_depth, dropout, propalpha))
+                    self.gconv2.append(
+                        mixprop(conv_channels, residual_channels, gcn_depth, dropout, propalpha))
+
+                if self.seq_length > self.receptive_field:
+                    self.norm.append(LayerNorm(
+                        (residual_channels, num_nodes, self.seq_length - rf_size_j + 1), elementwise_affine=layer_norm_affline))
+                else:
+                    self.norm.append(LayerNorm(
+                        (residual_channels, num_nodes, self.receptive_field - rf_size_j + 1), elementwise_affine=layer_norm_affline))
+
+                new_dilation *= dilation_exponential
+
+        self.layers = layers
+        self.end_conv_1 = nn.Conv2d(
+            in_channels=skip_channels, out_channels=end_channels, kernel_size=(1, 1), bias=True)
+        self.end_conv_2 = nn.Conv2d(
+            in_channels=end_channels, out_channels=out_dim, kernel_size=(1, 1), bias=True)
+        if self.seq_length > self.receptive_field:
+            self.skip0 = nn.Conv2d(in_channels=in_dim, out_channels=skip_channels, kernel_size=(
+                1, self.seq_length), bias=True)
+            self.skipE = nn.Conv2d(in_channels=residual_channels, out_channels=skip_channels, kernel_size=(
+                1, self.seq_length-self.receptive_field+1), bias=True)
+
+        else:
+            self.skip0 = nn.Conv2d(in_channels=in_dim, out_channels=skip_channels, kernel_size=(
+                1, self.receptive_field), bias=True)
+            self.skipE = nn.Conv2d(in_channels=residual_channels,
+                                   out_channels=skip_channels, kernel_size=(1, 1), bias=True)
+
+        self.idx = torch.arange(self.num_nodes)
+
+    def forward(self, history_data: torch.Tensor, idx: int = None, **kwargs) -> torch.Tensor:
+        """Feedforward function of MTGNN.
+
+        Args:
+            history_data (torch.Tensor): history data with shape [B, L, N, C]
+            idx (int, optional): Graph Learning Hyperparameter. Defaults to None.
+
+        Returns:
+            torch.Tensor: prediction
+        """
+        # select feature
+        history_data = history_data.transpose(1, 3).contiguous()
+        seq_len = history_data.size(3)
+        assert seq_len == self.seq_length, 'input sequence length not equal to preset sequence length'
+
+        if self.seq_length < self.receptive_field:
+            history_data = nn.functional.pad(
+                history_data, (self.receptive_field-self.seq_length, 0, 0, 0))
+
+        if self.gcn_true:
+            if self.buildA_true:
+                if idx is None:
+                    adp = self.gc(self.idx)
+                else:
+                    adp = self.gc(idx)
+            else:
+                adp = self.predefined_A
+
+        x = self.start_conv(history_data)
+        skip = self.skip0(
+            F.dropout(history_data, self.dropout, training=self.training))
+        for i in range(self.layers):
+            residual = x
+            filter = self.filter_convs[i](x)
+            filter = torch.tanh(filter)
+            gate = self.gate_convs[i](x)
+            gate = torch.sigmoid(gate)
+            x = filter * gate
+            x = F.dropout(x, self.dropout, training=self.training)
+            s = x
+            s = self.skip_convs[i](s)
+            skip = s + skip
+            if self.gcn_true:
+                x = self.gconv1[i](x, adp)+self.gconv2[i](x,
+                                                          adp.transpose(1, 0))
+            else:
+                x = self.residual_convs[i](x)
+
+            x = x + residual[:, :, :, -x.size(3):]
+            if idx is None:
+                x = self.norm[i](x, self.idx)
+            else:
+                x = self.norm[i](x, idx)
+        skip = self.skipE(x) + skip
+        x = F.relu(skip)
+        x = F.relu(self.end_conv_1(x))
+        x = self.end_conv_2(x)
+
+        return x
diff --git a/basicts/archs/MTGNN_arch/MTGNN_layers.py b/basicts/archs/arch_zoo/mtgnn_arch/mtgnn_layers.py
similarity index 72%
rename from basicts/archs/MTGNN_arch/MTGNN_layers.py
rename to basicts/archs/arch_zoo/mtgnn_arch/mtgnn_layers.py
index c2693801..93efea8f 100644
--- a/basicts/archs/MTGNN_arch/MTGNN_layers.py
+++ b/basicts/archs/arch_zoo/mtgnn_arch/mtgnn_layers.py
@@ -1,111 +1,113 @@
+import numbers
+
 import torch
 import torch.nn as nn
 from torch.nn import init
-import numbers
 import torch.nn.functional as F
 
 
 class nconv(nn.Module):
     def __init__(self):
-        super(nconv,self).__init__()
+        super(nconv, self).__init__()
 
-    def forward(self,x, A):
-        x = torch.einsum('ncvl,vw->ncwl',(x,A))
+    def forward(self, x, A):
+        x = torch.einsum('ncvl,vw->ncwl', (x, A))
         return x.contiguous()
 
+
 class dy_nconv(nn.Module):
     def __init__(self):
-        super(dy_nconv,self).__init__()
+        super(dy_nconv, self).__init__()
 
-    def forward(self,x, A):
-        x = torch.einsum('ncvl,nvwl->ncwl',(x,A))
+    def forward(self, x, A):
+        x = torch.einsum('ncvl,nvwl->ncwl', (x, A))
         return x.contiguous()
 
+
 class linear(nn.Module):
-    def __init__(self,c_in,c_out,bias=True):
-        super(linear,self).__init__()
-        self.mlp = torch.nn.Conv2d(c_in, c_out, kernel_size=(1, 1), padding=(0,0), stride=(1,1), bias=bias)
+    def __init__(self, c_in, c_out, bias=True):
+        super(linear, self).__init__()
+        self.mlp = torch.nn.Conv2d(c_in, c_out, kernel_size=(
+            1, 1), padding=(0, 0), stride=(1, 1), bias=bias)
 
-    def forward(self,x):
+    def forward(self, x):
         return self.mlp(x)
 
 
 class prop(nn.Module):
-    def __init__(self,c_in,c_out,gdep,dropout,alpha):
+    def __init__(self, c_in, c_out, gdep, dropout, alpha):
         super(prop, self).__init__()
         self.nconv = nconv()
-        self.mlp = linear(c_in,c_out)
+        self.mlp = linear(c_in, c_out)
         self.gdep = gdep
         self.dropout = dropout
         self.alpha = alpha
 
-    def forward(self,x,adj):
+    def forward(self, x, adj):
         adj = adj + torch.eye(adj.size(0)).to(x.device)
         d = adj.sum(1)
         h = x
         dv = d
         a = adj / dv.view(-1, 1)
         for i in range(self.gdep):
-            h = self.alpha*x + (1-self.alpha)*self.nconv(h,a)
+            h = self.alpha*x +
(1-self.alpha)*self.nconv(h, a) ho = self.mlp(h) return ho class mixprop(nn.Module): - def __init__(self,c_in,c_out,gdep,dropout,alpha): + def __init__(self, c_in, c_out, gdep, dropout, alpha): super(mixprop, self).__init__() self.nconv = nconv() - self.mlp = linear((gdep+1)*c_in,c_out) + self.mlp = linear((gdep+1)*c_in, c_out) self.gdep = gdep self.dropout = dropout self.alpha = alpha - - def forward(self,x,adj): + def forward(self, x, adj): adj = adj + torch.eye(adj.size(0)).to(x.device) d = adj.sum(1) h = x out = [h] a = adj / d.view(-1, 1) for i in range(self.gdep): - h = self.alpha*x + (1-self.alpha)*self.nconv(h,a) + h = self.alpha*x + (1-self.alpha)*self.nconv(h, a) out.append(h) - ho = torch.cat(out,dim=1) + ho = torch.cat(out, dim=1) ho = self.mlp(ho) return ho + class dy_mixprop(nn.Module): - def __init__(self,c_in,c_out,gdep,dropout,alpha): + def __init__(self, c_in, c_out, gdep, dropout, alpha): super(dy_mixprop, self).__init__() self.nconv = dy_nconv() - self.mlp1 = linear((gdep+1)*c_in,c_out) - self.mlp2 = linear((gdep+1)*c_in,c_out) + self.mlp1 = linear((gdep+1)*c_in, c_out) + self.mlp2 = linear((gdep+1)*c_in, c_out) self.gdep = gdep self.dropout = dropout self.alpha = alpha - self.lin1 = linear(c_in,c_in) - self.lin2 = linear(c_in,c_in) - + self.lin1 = linear(c_in, c_in) + self.lin2 = linear(c_in, c_in) - def forward(self,x): + def forward(self, x): #adj = adj + torch.eye(adj.size(0)).to(x.device) #d = adj.sum(1) x1 = torch.tanh(self.lin1(x)) x2 = torch.tanh(self.lin2(x)) - adj = self.nconv(x1.transpose(2,1),x2) + adj = self.nconv(x1.transpose(2, 1), x2) adj0 = torch.softmax(adj, dim=2) - adj1 = torch.softmax(adj.transpose(2,1), dim=2) + adj1 = torch.softmax(adj.transpose(2, 1), dim=2) h = x out = [h] for i in range(self.gdep): - h = self.alpha*x + (1-self.alpha)*self.nconv(h,adj0) + h = self.alpha*x + (1-self.alpha)*self.nconv(h, adj0) out.append(h) - ho = torch.cat(out,dim=1) + ho = torch.cat(out, dim=1) ho1 = self.mlp1(ho) - h = x out = [h] for i in range(self.gdep): @@ -117,34 +119,36 @@ def forward(self,x): return ho1+ho2 - class dilated_1D(nn.Module): def __init__(self, cin, cout, dilation_factor=2): super(dilated_1D, self).__init__() self.tconv = nn.ModuleList() - self.kernel_set = [2,3,6,7] - self.tconv = nn.Conv2d(cin,cout,(1,7),dilation=(1,dilation_factor)) + self.kernel_set = [2, 3, 6, 7] + self.tconv = nn.Conv2d( + cin, cout, (1, 7), dilation=(1, dilation_factor)) - def forward(self,input): + def forward(self, input): x = self.tconv(input) return x + class dilated_inception(nn.Module): def __init__(self, cin, cout, dilation_factor=2): super(dilated_inception, self).__init__() self.tconv = nn.ModuleList() - self.kernel_set = [2,3,6,7] + self.kernel_set = [2, 3, 6, 7] cout = int(cout/len(self.kernel_set)) for kern in self.kernel_set: - self.tconv.append(nn.Conv2d(cin,cout,(1,kern),dilation=(1,dilation_factor))) + self.tconv.append(nn.Conv2d(cin, cout, (1, kern), + dilation=(1, dilation_factor))) - def forward(self,input): + def forward(self, input): x = [] for i in range(len(self.kernel_set)): x.append(self.tconv[i](input)) for i in range(len(self.kernel_set)): - x[i] = x[i][...,-x[-1].size(3):] - x = torch.cat(x,dim=1) + x[i] = x[i][..., -x[-1].size(3):] + x = torch.cat(x, dim=1) return x @@ -159,8 +163,8 @@ def __init__(self, nnodes, k, dim, alpha=3, static_feat=None): else: self.emb1 = nn.Embedding(nnodes, dim) self.emb2 = nn.Embedding(nnodes, dim) - self.lin1 = nn.Linear(dim,dim) - self.lin2 = nn.Linear(dim,dim) + self.lin1 = nn.Linear(dim, dim) + self.lin2 = 
nn.Linear(dim, dim) self.k = k self.dim = dim @@ -174,18 +178,19 @@ def forward(self, idx): nodevec2 = self.emb2(idx) else: idx = idx.to(self.static_feat.device) - nodevec1 = self.static_feat[idx,:] + nodevec1 = self.static_feat[idx, :] nodevec2 = nodevec1 nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1)) nodevec2 = torch.tanh(self.alpha*self.lin2(nodevec2)) - a = torch.mm(nodevec1, nodevec2.transpose(1,0))-torch.mm(nodevec2, nodevec1.transpose(1,0)) + a = torch.mm(nodevec1, nodevec2.transpose(1, 0)) - \ + torch.mm(nodevec2, nodevec1.transpose(1, 0)) adj = F.relu(torch.tanh(self.alpha*a)) mask = torch.zeros(idx.size(0), idx.size(0)).to(adj.device) mask.fill_(float('0')) - s1,t1 = adj.topk(self.k,1) - mask.scatter_(1,t1,s1.fill_(1)) + s1, t1 = adj.topk(self.k, 1) + mask.scatter_(1, t1, s1.fill_(1)) adj = adj*mask return adj @@ -194,16 +199,18 @@ def fullA(self, idx): nodevec1 = self.emb1(idx) nodevec2 = self.emb2(idx) else: - nodevec1 = self.static_feat[idx,:] + nodevec1 = self.static_feat[idx, :] nodevec2 = nodevec1 nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1)) nodevec2 = torch.tanh(self.alpha*self.lin2(nodevec2)) - a = torch.mm(nodevec1, nodevec2.transpose(1,0))-torch.mm(nodevec2, nodevec1.transpose(1,0)) + a = torch.mm(nodevec1, nodevec2.transpose(1, 0)) - \ + torch.mm(nodevec2, nodevec1.transpose(1, 0)) adj = F.relu(torch.tanh(self.alpha*a)) return adj + class graph_global(nn.Module): def __init__(self, nnodes, k, dim, alpha=3, static_feat=None): super(graph_global, self).__init__() @@ -223,7 +230,7 @@ def __init__(self, nnodes, k, dim, alpha=3, static_feat=None): self.lin1 = nn.Linear(xd, dim) else: self.emb1 = nn.Embedding(nnodes, dim) - self.lin1 = nn.Linear(dim,dim) + self.lin1 = nn.Linear(dim, dim) self.k = k self.dim = dim @@ -235,23 +242,22 @@ def forward(self, idx): nodevec1 = self.emb1(idx) nodevec2 = self.emb1(idx) else: - nodevec1 = self.static_feat[idx,:] + nodevec1 = self.static_feat[idx, :] nodevec2 = nodevec1 nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1)) nodevec2 = torch.tanh(self.alpha*self.lin1(nodevec2)) - a = torch.mm(nodevec1, nodevec2.transpose(1,0)) + a = torch.mm(nodevec1, nodevec2.transpose(1, 0)) adj = F.relu(torch.tanh(self.alpha*a)) mask = torch.zeros(idx.size(0), idx.size(0)) mask.fill_(float('0')) - s1,t1 = adj.topk(self.k,1) - mask.scatter_(1,t1,s1.fill_(1)) + s1, t1 = adj.topk(self.k, 1) + mask.scatter_(1, t1, s1.fill_(1)) adj = adj*mask return adj - class graph_directed(nn.Module): def __init__(self, nnodes, k, dim, alpha=3, static_feat=None): super(graph_directed, self).__init__() @@ -263,8 +269,8 @@ def __init__(self, nnodes, k, dim, alpha=3, static_feat=None): else: self.emb1 = nn.Embedding(nnodes, dim) self.emb2 = nn.Embedding(nnodes, dim) - self.lin1 = nn.Linear(dim,dim) - self.lin2 = nn.Linear(dim,dim) + self.lin1 = nn.Linear(dim, dim) + self.lin2 = nn.Linear(dim, dim) self.k = k self.dim = dim @@ -276,24 +282,26 @@ def forward(self, idx): nodevec1 = self.emb1(idx) nodevec2 = self.emb2(idx) else: - nodevec1 = self.static_feat[idx,:] + nodevec1 = self.static_feat[idx, :] nodevec2 = nodevec1 nodevec1 = torch.tanh(self.alpha*self.lin1(nodevec1)) nodevec2 = torch.tanh(self.alpha*self.lin2(nodevec2)) - a = torch.mm(nodevec1, nodevec2.transpose(1,0)) + a = torch.mm(nodevec1, nodevec2.transpose(1, 0)) adj = F.relu(torch.tanh(self.alpha*a)) mask = torch.zeros(idx.size(0), idx.size(0)) mask.fill_(float('0')) - s1,t1 = adj.topk(self.k,1) - mask.scatter_(1,t1,s1.fill_(1)) + s1, t1 = adj.topk(self.k, 1) + mask.scatter_(1, t1, s1.fill_(1)) 
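+        # scatter_ writes 1 at each node's top-k column indices, so the multiply below
+        # keeps only the k strongest candidate edges per node and zeroes out the rest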
adj = adj*mask return adj class LayerNorm(nn.Module): - __constants__ = ['normalized_shape', 'weight', 'bias', 'eps', 'elementwise_affine'] + __constants__ = ['normalized_shape', 'weight', + 'bias', 'eps', 'elementwise_affine'] + def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True): super(LayerNorm, self).__init__() if isinstance(normalized_shape, numbers.Integral): @@ -309,7 +317,6 @@ def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True): self.register_parameter('bias', None) self.reset_parameters() - def reset_parameters(self): if self.elementwise_affine: init.ones_(self.weight) @@ -317,10 +324,10 @@ def reset_parameters(self): def forward(self, input, idx): if self.elementwise_affine: - return F.layer_norm(input, tuple(input.shape[1:]), self.weight[:,idx,:], self.bias[:,idx,:], self.eps) + return F.layer_norm(input, tuple(input.shape[1:]), self.weight[:, idx, :], self.bias[:, idx, :], self.eps) else: return F.layer_norm(input, tuple(input.shape[1:]), self.weight, self.bias, self.eps) def extra_repr(self): return '{normalized_shape}, eps={eps}, ' \ - 'elementwise_affine={elementwise_affine}'.format(**self.__dict__) \ No newline at end of file + 'elementwise_affine={elementwise_affine}'.format(**self.__dict__) diff --git a/basicts/archs/arch_zoo/stemgnn_arch/__init__.py b/basicts/archs/arch_zoo/stemgnn_arch/__init__.py new file mode 100644 index 00000000..2154d27c --- /dev/null +++ b/basicts/archs/arch_zoo/stemgnn_arch/__init__.py @@ -0,0 +1,3 @@ +from .stemgnn_arch import StemGNN + +__all__ = ["StemGNN"] diff --git a/basicts/archs/StemGNN_arch/StemGNN_arch.py b/basicts/archs/arch_zoo/stemgnn_arch/stemgnn_arch.py similarity index 71% rename from basicts/archs/StemGNN_arch/StemGNN_arch.py rename to basicts/archs/arch_zoo/stemgnn_arch/stemgnn_arch.py index a9028abc..c8a6533e 100644 --- a/basicts/archs/StemGNN_arch/StemGNN_arch.py +++ b/basicts/archs/arch_zoo/stemgnn_arch/stemgnn_arch.py @@ -1,22 +1,7 @@ import torch import torch.nn as nn import torch.nn.functional as F -from basicts.archs.registry import ARCH_REGISTRY -""" - Paper: Spectral Temporal Graph Neural Network for Multivariate Time-series Forecasting - Ref Official Code: https://github.com/microsoft/StemGNN - Note: - There are some difference in implementation described in the paper as well as the source code. - Details can be found in [here](https://github.com/microsoft/StemGNN/issues/12) - We adopt the implementation of the code. - Details of difference: - - No reconstruction loss. - - No 1DConv. - - Use chebyshev polynomials to reduce time complexity. - - There is no the output layer composed of GLU and fully-connected (FC) sublayers as described in third paragraph in section 4.1. - - The experimental setting is not fair in StemGNN, and we can not reproduce the paper's performance. 
-""" class GLU(nn.Module): def __init__(self, input_channel, output_channel): @@ -27,6 +12,7 @@ def __init__(self, input_channel, output_channel): def forward(self, x): return torch.mul(self.linear_left(x), torch.sigmoid(self.linear_right(x))) + class StockBlockLayer(nn.Module): def __init__(self, time_step, unit, multi_layer, stack_cnt=0): super(StockBlockLayer, self).__init__() @@ -34,26 +20,36 @@ def __init__(self, time_step, unit, multi_layer, stack_cnt=0): self.unit = unit self.stack_cnt = stack_cnt self.multi = multi_layer - self.weight = nn.Parameter(torch.Tensor(1, 3 + 1, 1, self.time_step * self.multi, self.multi * self.time_step)) # [K+1, 1, in_c, out_c] + self.weight = nn.Parameter(torch.Tensor( + 1, 3 + 1, 1, self.time_step * self.multi, self.multi * self.time_step)) # [K+1, 1, in_c, out_c] nn.init.xavier_normal_(self.weight) - self.forecast = nn.Linear(self.time_step * self.multi, self.time_step * self.multi) - self.forecast_result = nn.Linear(self.time_step * self.multi, self.time_step) + self.forecast = nn.Linear( + self.time_step * self.multi, self.time_step * self.multi) + self.forecast_result = nn.Linear( + self.time_step * self.multi, self.time_step) if self.stack_cnt == 0: - self.backcast = nn.Linear(self.time_step * self.multi, self.time_step) + self.backcast = nn.Linear( + self.time_step * self.multi, self.time_step) self.backcast_short_cut = nn.Linear(self.time_step, self.time_step) self.relu = nn.ReLU() self.GLUs = nn.ModuleList() self.output_channel = 4 * self.multi for i in range(3): if i == 0: - self.GLUs.append(GLU(self.time_step * 4, self.time_step * self.output_channel)) - self.GLUs.append(GLU(self.time_step * 4, self.time_step * self.output_channel)) + self.GLUs.append( + GLU(self.time_step * 4, self.time_step * self.output_channel)) + self.GLUs.append( + GLU(self.time_step * 4, self.time_step * self.output_channel)) elif i == 1: - self.GLUs.append(GLU(self.time_step * self.output_channel, self.time_step * self.output_channel)) - self.GLUs.append(GLU(self.time_step * self.output_channel, self.time_step * self.output_channel)) + self.GLUs.append( + GLU(self.time_step * self.output_channel, self.time_step * self.output_channel)) + self.GLUs.append( + GLU(self.time_step * self.output_channel, self.time_step * self.output_channel)) else: - self.GLUs.append(GLU(self.time_step * self.output_channel, self.time_step * self.output_channel)) - self.GLUs.append(GLU(self.time_step * self.output_channel, self.time_step * self.output_channel)) + self.GLUs.append( + GLU(self.time_step * self.output_channel, self.time_step * self.output_channel)) + self.GLUs.append( + GLU(self.time_step * self.output_channel, self.time_step * self.output_channel)) def spe_seq_cell(self, input): batch_size, k, input_channel, node_cnt, time_step = input.size() @@ -62,15 +58,19 @@ def spe_seq_cell(self, input): ffted = torch.fft.fft(input, dim=-1) ffted_real = ffted.real ffted_imag = ffted.imag - ffted = torch.stack([ffted_real, ffted_imag], dim=-1) + ffted = torch.stack([ffted_real, ffted_imag], dim=-1) # ffted = torch.rfft(input, 1, onesided=False) - real = ffted[..., 0].permute(0, 2, 1, 3).contiguous().reshape(batch_size, node_cnt, -1) - img = ffted[..., 1].permute(0, 2, 1, 3).contiguous().reshape(batch_size, node_cnt, -1) + real = ffted[..., 0].permute( + 0, 2, 1, 3).contiguous().reshape(batch_size, node_cnt, -1) + img = ffted[..., 1].permute(0, 2, 1, 3).contiguous().reshape( + batch_size, node_cnt, -1) for i in range(3): real = self.GLUs[i * 2](real) img = self.GLUs[2 * i + 1](img) - real 
= real.reshape(batch_size, node_cnt, 4, -1).permute(0, 2, 1, 3).contiguous()
-        img = img.reshape(batch_size, node_cnt, 4, -1).permute(0, 2, 1, 3).contiguous()
+        real = real.reshape(batch_size, node_cnt, 4, -
+                            1).permute(0, 2, 1, 3).contiguous()
+        img = img.reshape(batch_size, node_cnt, 4, -
+                          1).permute(0, 2, 1, 3).contiguous()
         time_step_as_inner = torch.complex(real, img)
         iffted = torch.fft.ifft(time_step_as_inner, dim=-1).real
         # time_step_as_inner = torch.cat([real.unsqueeze(-1), img.unsqueeze(-1)], dim=-1)
@@ -88,14 +88,30 @@ def forward(self, x, mul_L):
         forecast = self.forecast_result(forecast_source)
         if self.stack_cnt == 0:
             backcast_short = self.backcast_short_cut(x).squeeze(1)
-            backcast_source = torch.sigmoid(self.backcast(igfted) - backcast_short)
+            backcast_source = torch.sigmoid(
+                self.backcast(igfted) - backcast_short)
         else:
             backcast_source = None
         return forecast, backcast_source


-@ARCH_REGISTRY.register()
 class StemGNN(nn.Module):
+    """
+    Paper: Spectral Temporal Graph Neural Network for Multivariate Time-series Forecasting
+    Link: https://arxiv.org/abs/2103.07719
+    Ref Official Code: https://github.com/microsoft/StemGNN
+    Note:
+        There are some differences between the implementation described in the paper and the official source code.
+        Details can be found [here](https://github.com/microsoft/StemGNN/issues/12).
+        We adopt the implementation of the code.
+        Details of the differences:
+        - No reconstruction loss.
+        - No 1DConv.
+        - Chebyshev polynomials are used to reduce the time complexity.
+        - There is no output layer composed of GLU and fully-connected (FC) sublayers, which is described in the third paragraph of Section 4.1.
+        - The experimental setting of StemGNN is not fair, and we cannot reproduce the paper's performance.
+    """
+
     def __init__(self, units, stack_cnt, time_step, multi_layer, horizon, dropout_rate=0.5, leaky_rate=0.2, **kwargs):
         super(StemGNN, self).__init__()
         self.unit = units
@@ -122,33 +138,27 @@ def __init__(self, units, stack_cnt, time_step, multi_layer, horizon, dropout_ra
         self.dropout = nn.Dropout(p=dropout_rate)

     def get_laplacian(self, graph, normalize):
-        """
-        return the laplacian of the graph.
-        :param graph: the graph structure without self loop, [N, N].
-        :param normalize: whether to used the normalized laplacian.
-        :return: graph laplacian.
-        """
         if normalize:
             D = torch.diag(torch.sum(graph, dim=-1) ** (-1 / 2))
-            L = torch.eye(graph.size(0), device=graph.device, dtype=graph.dtype) - torch.mm(torch.mm(D, graph), D)
+            L = torch.eye(graph.size(0), device=graph.device,
+                          dtype=graph.dtype) - torch.mm(torch.mm(D, graph), D)
         else:
             D = torch.diag(torch.sum(graph, dim=-1))
             L = D - graph
         return L

     def cheb_polynomial(self, laplacian):
-        """
-        Compute the Chebyshev Polynomial, according to the graph laplacian.
-        :param laplacian: the graph laplacian, [N, N].
-        :return: the multi order Chebyshev laplacian, [K, N, N].
- """ N = laplacian.size(0) # [N, N] laplacian = laplacian.unsqueeze(0) - first_laplacian = torch.zeros([1, N, N], device=laplacian.device, dtype=torch.float) + first_laplacian = torch.zeros( + [1, N, N], device=laplacian.device, dtype=torch.float) second_laplacian = laplacian - third_laplacian = (2 * torch.matmul(laplacian, second_laplacian)) - first_laplacian - forth_laplacian = 2 * torch.matmul(laplacian, third_laplacian) - second_laplacian - multi_order_laplacian = torch.cat([first_laplacian, second_laplacian, third_laplacian, forth_laplacian], dim=0) + third_laplacian = ( + 2 * torch.matmul(laplacian, second_laplacian)) - first_laplacian + forth_laplacian = 2 * \ + torch.matmul(laplacian, third_laplacian) - second_laplacian + multi_order_laplacian = torch.cat( + [first_laplacian, second_laplacian, third_laplacian, forth_laplacian], dim=0) return multi_order_laplacian def latent_correlation_layer(self, x): @@ -182,16 +192,17 @@ def self_graph_attention(self, input): def graph_fft(self, input, eigenvectors): return torch.matmul(eigenvectors, input) - def forward(self, history_data, **kwargs): - """feedforward function of StemGNN. + def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, batch_seen: int, epoch: int, train: bool, **kwargs) -> torch.Tensor: + """Feedforward function of StemGNN. Args: - history_data (torch.Tensor): [B, L, N] + history_data (torch.Tensor): [B, L, N, 1] Returns: torch.Tensor: [B, L, N, 1] """ - x = history_data + + x = history_data[..., 0] mul_L, attention = self.latent_correlation_layer(x) X = x.unsqueeze(1).permute(0, 1, 3, 2).contiguous() result = [] diff --git a/basicts/archs/arch_zoo/stgcn_arch/__init__.py b/basicts/archs/arch_zoo/stgcn_arch/__init__.py new file mode 100644 index 00000000..51e274b4 --- /dev/null +++ b/basicts/archs/arch_zoo/stgcn_arch/__init__.py @@ -0,0 +1,3 @@ +from .stgcn_arch import STGCNChebGraphConv as STGCN + +__all__ = ["STGCN"] diff --git a/basicts/archs/STGCN_arch/STGCN_arch.py b/basicts/archs/arch_zoo/stgcn_arch/stgcn_arch.py similarity index 74% rename from basicts/archs/STGCN_arch/STGCN_arch.py rename to basicts/archs/arch_zoo/stgcn_arch/stgcn_arch.py index 55bd8524..07d9b52a 100644 --- a/basicts/archs/STGCN_arch/STGCN_arch.py +++ b/basicts/archs/arch_zoo/stgcn_arch/stgcn_arch.py @@ -1,24 +1,23 @@ import torch import torch.nn as nn -from basicts.archs.STGCN_arch import STGCN_layers as layers -from basicts.archs.registry import ARCH_REGISTRY +from .stgcn_layers import STConvBlock, OutputBlock -""" + +class STGCNChebGraphConv(nn.Module): + """ Paper: Spatio-Temporal Graph Convolutional Networks: A Deep Learning Framework for Traffic Forecasting Official Code: https://github.com/VeritasYin/STGCN_IJCAI-18 (tensorflow) Ref Code: https://github.com/hazdzz/STGCN Note: https://github.com/hazdzz/STGCN/issues/9 -""" - + Link: https://arxiv.org/abs/1709.04875 + """ -@ARCH_REGISTRY.register() -class STGCN(nn.Module): # STGCNChebGraphConv contains 'TGTND TGTND TNFF' structure # ChebGraphConv is the graph convolution from ChebyNet. # Using the Chebyshev polynomials of the first kind as a graph filter. 
- + # T: Gated Temporal Convolution Layer (GLU or GTU) # G: Graph Convolution Layer (ChebGraphConv) # T: Gated Temporal Convolution Layer (GLU or GTU) @@ -37,17 +36,19 @@ class STGCN(nn.Module): # F: Fully-Connected Layer def __init__(self, Kt, Ks, blocks, T, n_vertex, act_func, graph_conv_type, gso, bias, droprate): - super(STGCN, self).__init__() + super(STGCNChebGraphConv, self).__init__() modules = [] for l in range(len(blocks) - 3): - modules.append(layers.STConvBlock(Kt, Ks, n_vertex, blocks[l][-1], blocks[l+1], act_func, graph_conv_type, gso, bias, droprate)) + modules.append(STConvBlock( + Kt, Ks, n_vertex, blocks[l][-1], blocks[l+1], act_func, graph_conv_type, gso, bias, droprate)) self.st_blocks = nn.Sequential(*modules) Ko = T - (len(blocks) - 3) * 2 * (Kt - 1) self.Ko = Ko assert Ko != 0, "Ko = 0." - self.output = layers.OutputBlock(Ko, blocks[-3][-1], blocks[-2], blocks[-1][0], n_vertex, act_func, bias, droprate) + self.output = OutputBlock( + Ko, blocks[-3][-1], blocks[-2], blocks[-1][0], n_vertex, act_func, bias, droprate) - def forward(self, history_data:torch.Tensor) -> torch.Tensor: + def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, batch_seen: int, epoch: int, train: bool, **kwargs) -> torch.Tensor: """feedforward function of STGCN. Args: @@ -57,7 +58,7 @@ def forward(self, history_data:torch.Tensor) -> torch.Tensor: torch.Tensor: prediction with shape [B, L, N, C] """ x = history_data.permute(0, 3, 1, 2).contiguous() - + x = self.st_blocks(x) x = self.output(x) diff --git a/basicts/archs/STGCN_arch/STGCN_layers.py b/basicts/archs/arch_zoo/stgcn_arch/stgcn_layers.py similarity index 80% rename from basicts/archs/STGCN_arch/STGCN_layers.py rename to basicts/archs/arch_zoo/stgcn_arch/stgcn_layers.py index 9230c6f3..69e6d788 100644 --- a/basicts/archs/STGCN_arch/STGCN_layers.py +++ b/basicts/archs/arch_zoo/stgcn_arch/stgcn_layers.py @@ -1,73 +1,84 @@ import math + import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.init as init + class Align(nn.Module): def __init__(self, c_in, c_out): super(Align, self).__init__() self.c_in = c_in self.c_out = c_out - self.align_conv = nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(1, 1)) + self.align_conv = nn.Conv2d( + in_channels=c_in, out_channels=c_out, kernel_size=(1, 1)) def forward(self, x): if self.c_in > self.c_out: x = self.align_conv(x) elif self.c_in < self.c_out: batch_size, _, timestep, n_vertex = x.shape - x = torch.cat([x, torch.zeros([batch_size, self.c_out - self.c_in, timestep, n_vertex]).to(x)], dim=1) + x = torch.cat([x, torch.zeros( + [batch_size, self.c_out - self.c_in, timestep, n_vertex]).to(x)], dim=1) else: x = x - + return x + class CausalConv1d(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, enable_padding=False, dilation=1, groups=1, bias=True): if enable_padding == True: self.__padding = (kernel_size - 1) * dilation else: self.__padding = 0 - super(CausalConv1d, self).__init__(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=self.__padding, dilation=dilation, groups=groups, bias=bias) + super(CausalConv1d, self).__init__(in_channels, out_channels, kernel_size=kernel_size, + stride=stride, padding=self.__padding, dilation=dilation, groups=groups, bias=bias) def forward(self, input): result = super(CausalConv1d, self).forward(input) if self.__padding != 0: - return result[: , : , : -self.__padding] - + return result[:, :, : -self.__padding] + return result + class 
CausalConv2d(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, enable_padding=False, dilation=1, groups=1, bias=True): kernel_size = nn.modules.utils._pair(kernel_size) stride = nn.modules.utils._pair(stride) dilation = nn.modules.utils._pair(dilation) if enable_padding == True: - self.__padding = [int((kernel_size[i] - 1) * dilation[i]) for i in range(len(kernel_size))] + self.__padding = [int((kernel_size[i] - 1) * dilation[i]) + for i in range(len(kernel_size))] else: self.__padding = 0 self.left_padding = nn.modules.utils._pair(self.__padding) - super(CausalConv2d, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=0, dilation=dilation, groups=groups, bias=bias) - + super(CausalConv2d, self).__init__(in_channels, out_channels, kernel_size, + stride=stride, padding=0, dilation=dilation, groups=groups, bias=bias) + def forward(self, input): if self.__padding != 0: - input = F.pad(input, (self.left_padding[1], 0, self.left_padding[0], 0)) + input = F.pad( + input, (self.left_padding[1], 0, self.left_padding[0], 0)) result = super(CausalConv2d, self).forward(input) return result + class TemporalConvLayer(nn.Module): # Temporal Convolution Layer (GLU) # # |--------------------------------| * Residual Connection * # | | - # | |--->--- CasualConv2d ----- + -------| + # | |--->--- CasualConv2d ----- + -------| # -------|----| ⊙ ------> - # |--->--- CasualConv2d --- Sigmoid ---| + # |--->--- CasualConv2d --- Sigmoid ---| # - - #param x: tensor, [bs, c_in, ts, n_vertex] + + # param x: tensor, [bs, c_in, ts, n_vertex] def __init__(self, Kt, c_in, c_out, n_vertex, act_func): super(TemporalConvLayer, self).__init__() @@ -77,9 +88,11 @@ def __init__(self, Kt, c_in, c_out, n_vertex, act_func): self.n_vertex = n_vertex self.align = Align(c_in, c_out) if act_func == 'glu' or act_func == 'gtu': - self.causal_conv = CausalConv2d(in_channels=c_in, out_channels=2 * c_out, kernel_size=(Kt, 1), enable_padding=False, dilation=1) + self.causal_conv = CausalConv2d( + in_channels=c_in, out_channels=2 * c_out, kernel_size=(Kt, 1), enable_padding=False, dilation=1) else: - self.causal_conv = CausalConv2d(in_channels=c_in, out_channels=c_out, kernel_size=(Kt, 1), enable_padding=False, dilation=1) + self.causal_conv = CausalConv2d(in_channels=c_in, out_channels=c_out, kernel_size=( + Kt, 1), enable_padding=False, dilation=1) self.act_func = act_func self.sigmoid = nn.Sigmoid() self.tanh = nn.Tanh() @@ -87,7 +100,7 @@ def __init__(self, Kt, c_in, c_out, n_vertex, act_func): self.leaky_relu = nn.LeakyReLU() self.silu = nn.SiLU() - def forward(self, x): + def forward(self, x): x_in = self.align(x)[:, :, self.Kt - 1:, :] x_causal_conv = self.causal_conv(x) @@ -115,18 +128,20 @@ def forward(self, x): elif self.act_func == 'relu': x = self.relu(x_causal_conv + x_in) - + elif self.act_func == 'leaky_relu': x = self.leaky_relu(x_causal_conv + x_in) elif self.act_func == 'silu': x = self.silu(x_causal_conv + x_in) - + else: - raise NotImplementedError(f'ERROR: The activation function {self.act_func} is not implemented.') - + raise NotImplementedError( + f'ERROR: The activation function {self.act_func} is not implemented.') + return x + class ChebGraphConv(nn.Module): def __init__(self, c_in, c_out, Ks, gso, bias): super(ChebGraphConv, self).__init__() @@ -147,7 +162,7 @@ def reset_parameters(self): fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 init.uniform_(self.bias, -bound, bound) - + def forward(self, 
x): #bs, c_in, ts, n_vertex = x.shape x = torch.permute(x, (0, 2, 3, 1)) @@ -155,7 +170,8 @@ def forward(self, x): self.gso = self.gso.to(x.device) if self.Ks - 1 < 0: - raise ValueError(f'ERROR: the graph convolution kernel size Ks has to be a positive integer, but received {self.Ks}.') + raise ValueError( + f'ERROR: the graph convolution kernel size Ks has to be a positive integer, but received {self.Ks}.') elif self.Ks - 1 == 0: x_0 = x x_list = [x_0] @@ -168,8 +184,9 @@ def forward(self, x): x_1 = torch.einsum('hi,btij->bthj', self.gso, x) x_list = [x_0, x_1] for k in range(2, self.Ks): - x_list.append(torch.einsum('hi,btij->bthj', 2 * self.gso, x_list[k - 1]) - x_list[k - 2]) - + x_list.append(torch.einsum('hi,btij->bthj', 2 * + self.gso, x_list[k - 1]) - x_list[k - 2]) + x = torch.stack(x_list, dim=2) cheb_graph_conv = torch.einsum('btkhi,kij->bthj', x, self.weight) @@ -178,9 +195,10 @@ def forward(self, x): cheb_graph_conv = torch.add(cheb_graph_conv, self.bias) else: cheb_graph_conv = cheb_graph_conv - + return cheb_graph_conv + class GraphConv(nn.Module): def __init__(self, c_in, c_out, gso, bias): super(GraphConv, self).__init__() @@ -212,9 +230,10 @@ def forward(self, x): graph_conv = torch.add(second_mul, self.bias) else: graph_conv = second_mul - + return graph_conv + class GraphConvLayer(nn.Module): def __init__(self, graph_conv_type, c_in, c_out, Ks, gso, bias): super(GraphConvLayer, self).__init__() @@ -240,6 +259,7 @@ def forward(self, x): return x_gc_out + class STConvBlock(nn.Module): # STConv Block contains 'TGTND' structure # T: Gated Temporal Convolution Layer (GLU or GTU) @@ -250,9 +270,12 @@ class STConvBlock(nn.Module): def __init__(self, Kt, Ks, n_vertex, last_block_channel, channels, act_func, graph_conv_type, gso, bias, droprate): super(STConvBlock, self).__init__() - self.tmp_conv1 = TemporalConvLayer(Kt, last_block_channel, channels[0], n_vertex, act_func) - self.graph_conv = GraphConvLayer(graph_conv_type, channels[0], channels[1], Ks, gso, bias) - self.tmp_conv2 = TemporalConvLayer(Kt, channels[1], channels[2], n_vertex, act_func) + self.tmp_conv1 = TemporalConvLayer( + Kt, last_block_channel, channels[0], n_vertex, act_func) + self.graph_conv = GraphConvLayer( + graph_conv_type, channels[0], channels[1], Ks, gso, bias) + self.tmp_conv2 = TemporalConvLayer( + Kt, channels[1], channels[2], n_vertex, act_func) self.tc2_ln = nn.LayerNorm([n_vertex, channels[2]]) self.relu = nn.ReLU() self.dropout = nn.Dropout(p=droprate) @@ -267,6 +290,7 @@ def forward(self, x): return x + class OutputBlock(nn.Module): # Output block contains 'TNFF' structure # T: Gated Temporal Convolution Layer (GLU or GTU) @@ -276,9 +300,12 @@ class OutputBlock(nn.Module): def __init__(self, Ko, last_block_channel, channels, end_channel, n_vertex, act_func, bias, droprate): super(OutputBlock, self).__init__() - self.tmp_conv1 = TemporalConvLayer(Ko, last_block_channel, channels[0], n_vertex, act_func) - self.fc1 = nn.Linear(in_features=channels[0], out_features=channels[1], bias=bias) - self.fc2 = nn.Linear(in_features=channels[1], out_features=end_channel, bias=bias) + self.tmp_conv1 = TemporalConvLayer( + Ko, last_block_channel, channels[0], n_vertex, act_func) + self.fc1 = nn.Linear( + in_features=channels[0], out_features=channels[1], bias=bias) + self.fc2 = nn.Linear( + in_features=channels[1], out_features=end_channel, bias=bias) self.tc1_ln = nn.LayerNorm([n_vertex, channels[0]]) self.relu = nn.ReLU() self.leaky_relu = nn.LeakyReLU() @@ -292,4 +319,4 @@ def forward(self, x): x = 
self.relu(x) x = self.fc2(x).permute(0, 3, 1, 2) - return x \ No newline at end of file + return x diff --git a/basicts/archs/arch_zoo/stid_arch/__init__.py b/basicts/archs/arch_zoo/stid_arch/__init__.py new file mode 100644 index 00000000..64b16477 --- /dev/null +++ b/basicts/archs/arch_zoo/stid_arch/__init__.py @@ -0,0 +1,3 @@ +from .stid_arch import STID + +__all__ = ["STID"] diff --git a/basicts/archs/arch_zoo/stid_arch/mlp.py b/basicts/archs/arch_zoo/stid_arch/mlp.py new file mode 100644 index 00000000..17fccbc1 --- /dev/null +++ b/basicts/archs/arch_zoo/stid_arch/mlp.py @@ -0,0 +1,29 @@ +import torch +from torch import nn + + +class MultiLayerPerceptron(nn.Module): + """Multi-Layer Perceptron with residual links.""" + + def __init__(self, input_dim, hidden_dim) -> None: + super().__init__() + self.fc1 = nn.Conv2d( + in_channels=input_dim, out_channels=hidden_dim, kernel_size=(1, 1), bias=True) + self.fc2 = nn.Conv2d( + in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=(1, 1), bias=True) + self.act = nn.ReLU() + self.drop = nn.Dropout(p=0.15) + + def forward(self, input_data: torch.Tensor) -> torch.Tensor: + """Feed forward of MLP. + + Args: + input_data (torch.Tensor): input data with shape [B, D, N] + + Returns: + torch.Tensor: latent repr + """ + + hidden = self.fc2(self.drop(self.act(self.fc1(input_data)))) # MLP + hidden = hidden + input_data # residual + return hidden diff --git a/basicts/archs/arch_zoo/stid_arch/stid_arch.py b/basicts/archs/arch_zoo/stid_arch/stid_arch.py new file mode 100644 index 00000000..9b52c1d6 --- /dev/null +++ b/basicts/archs/arch_zoo/stid_arch/stid_arch.py @@ -0,0 +1,115 @@ +import torch +from torch import nn + +from .mlp import MultiLayerPerceptron + + +class STID(nn.Module): + """ + The implementation of CIKM 2022 short paper + "Spatial-Temporal Identity: A Simple yet Effective Baseline for Multivariate Time Series Forecasting" + Link: https://arxiv.org/abs/2208.05233 + """ + + def __init__(self, **model_args): + super().__init__() + # attributes + self.num_nodes = model_args["num_nodes"] + self.node_dim = model_args["node_dim"] + self.input_len = model_args["input_len"] + self.input_dim = model_args["input_dim"] + self.embed_dim = model_args["embed_dim"] + self.output_len = model_args["output_len"] + self.num_layer = model_args["num_layer"] + self.temp_dim_tid = model_args["temp_dim_tid"] + self.temp_dim_diw = model_args["temp_dim_diw"] + + self.if_time_in_day = model_args["if_T_i_D"] + self.if_day_in_week = model_args["if_D_i_W"] + self.if_spatial = model_args["if_node"] + + # spatial embeddings + if self.if_spatial: + self.node_emb = nn.Parameter( + torch.empty(self.num_nodes, self.node_dim)) + nn.init.xavier_uniform_(self.node_emb) + # temporal embeddings + if self.if_time_in_day: + self.time_in_day_emb = nn.Parameter( + torch.empty(288, self.temp_dim_tid)) + nn.init.xavier_uniform_(self.time_in_day_emb) + if self.if_day_in_week: + self.day_in_week_emb = nn.Parameter( + torch.empty(7, self.temp_dim_diw)) + nn.init.xavier_uniform_(self.day_in_week_emb) + + # embedding layer + self.time_series_emb_layer = nn.Conv2d( + in_channels=self.input_dim * self.input_len, out_channels=self.embed_dim, kernel_size=(1, 1), bias=True) + + # encoding + self.hidden_dim = self.embed_dim+self.node_dim * \ + int(self.if_spatial)+self.temp_dim_tid*int(self.if_day_in_week) + \ + self.temp_dim_diw*int(self.if_time_in_day) + self.encoder = nn.Sequential( + *[MultiLayerPerceptron(self.hidden_dim, self.hidden_dim) for _ in range(self.num_layer)]) + + # 
regression
+        self.regression_layer = nn.Conv2d(
+            in_channels=self.hidden_dim, out_channels=self.output_len, kernel_size=(1, 1), bias=True)
+
+    def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, batch_seen: int, epoch: int, train: bool, **kwargs) -> torch.Tensor:
+        """Feed forward of STID.
+
+        Args:
+            history_data (torch.Tensor): history data with shape [B, L, N, C]
+
+        Returns:
+            torch.Tensor: prediction with shape [B, L, N, C]
+        """
+
+        # prepare data
+        input_data = history_data[..., range(self.input_dim)]
+
+        if self.if_time_in_day:
+            t_i_d_data = history_data[..., 1]
+            time_in_day_emb = self.time_in_day_emb[(
+                t_i_d_data[:, -1, :] * 288).type(torch.LongTensor)]
+        else:
+            time_in_day_emb = None
+        if self.if_day_in_week:
+            d_i_w_data = history_data[..., 2]
+            day_in_week_emb = self.day_in_week_emb[(
+                d_i_w_data[:, -1, :]).type(torch.LongTensor)]
+        else:
+            day_in_week_emb = None
+
+        # time series embedding
+        batch_size, _, num_nodes, _ = input_data.shape
+        input_data = input_data.transpose(1, 2).contiguous()
+        input_data = input_data.view(
+            batch_size, num_nodes, -1).transpose(1, 2).unsqueeze(-1)
+        time_series_emb = self.time_series_emb_layer(input_data)
+
+        node_emb = []
+        if self.if_spatial:
+            # expand node embeddings
+            node_emb.append(self.node_emb.unsqueeze(0).expand(
+                batch_size, -1, -1).transpose(1, 2).unsqueeze(-1))
+        # temporal embeddings
+        tem_emb = []
+        if time_in_day_emb is not None:
+            tem_emb.append(time_in_day_emb.transpose(1, 2).unsqueeze(-1))
+        if day_in_week_emb is not None:
+            tem_emb.append(day_in_week_emb.transpose(1, 2).unsqueeze(-1))
+
+        # concatenate all embeddings
+        hidden = torch.cat([time_series_emb] + node_emb + tem_emb, dim=1)
+
+        # encoding
+        hidden = self.encoder(hidden)
+
+        # regression
+        prediction = self.regression_layer(hidden)
+
+        return prediction
diff --git a/basicts/archs/arch_zoo/stnorm_arch/__init__.py b/basicts/archs/arch_zoo/stnorm_arch/__init__.py
new file mode 100644
index 00000000..fdc70419
--- /dev/null
+++ b/basicts/archs/arch_zoo/stnorm_arch/__init__.py
@@ -0,0 +1,3 @@
+from .stnorm_arch import STNorm
+
+__all__ = ["STNorm"]
diff --git a/basicts/archs/STNorm_arch/STNorm_arch.py b/basicts/archs/arch_zoo/stnorm_arch/stnorm_arch.py
similarity index 72%
rename from basicts/archs/STNorm_arch/STNorm_arch.py
rename to basicts/archs/arch_zoo/stnorm_arch/stnorm_arch.py
index 62a9797e..81465ee4 100644
--- a/basicts/archs/STNorm_arch/STNorm_arch.py
+++ b/basicts/archs/arch_zoo/stnorm_arch/stnorm_arch.py
@@ -1,15 +1,8 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from torch.autograd import Variable
 from torch.nn import Parameter
-import sys
-from basicts.archs.registry import ARCH_REGISTRY

-"""
-    Paper: ST-Norm: Spatial and Temporal Normalization for Multi-variate Time Series Forecasting
-    Ref Official Code: https://github.com/JLDeng/ST-Norm/blob/master/models/Wavenet.py
-"""

 class SNorm(nn.Module):
     def __init__(self, channels):
@@ -18,19 +11,24 @@ def __init__(self, channels):
         self.gamma = nn.Parameter(torch.ones(channels))

     def forward(self, x):
-        x_norm = (x - x.mean(2, keepdims=True)) / (x.var(2, keepdims=True, unbiased=True) + 0.00001) ** 0.5
+        x_norm = (x - x.mean(2, keepdims=True)) / \
+            (x.var(2, keepdims=True, unbiased=True) + 0.00001) ** 0.5

-        out = x_norm * self.gamma.view(1, -1, 1, 1) + self.beta.view(1, -1, 1, 1)
+        out = x_norm * self.gamma.view(1, -1, 1, 1) + \
+            self.beta.view(1, -1, 1, 1)
         return out

+
 class TNorm(nn.Module):
     def __init__(self, num_nodes, channels, track_running_stats=True, 
momentum=0.1): super(TNorm, self).__init__() self.track_running_stats = track_running_stats self.beta = nn.Parameter(torch.zeros(1, channels, num_nodes, 1)) self.gamma = nn.Parameter(torch.ones(1, channels, num_nodes, 1)) - self.register_buffer('running_mean', torch.zeros(1, channels, num_nodes, 1)) - self.register_buffer('running_var', torch.ones(1, channels, num_nodes, 1)) + self.register_buffer( + 'running_mean', torch.zeros(1, channels, num_nodes, 1)) + self.register_buffer( + 'running_var', torch.ones(1, channels, num_nodes, 1)) self.momentum = momentum def forward(self, x): @@ -40,8 +38,10 @@ def forward(self, x): if self.training: n = x.shape[3] * x.shape[0] with torch.no_grad(): - self.running_mean = self.momentum * mean + (1 - self.momentum) * self.running_mean - self.running_var = self.momentum * var * n / (n - 1) + (1 - self.momentum) * self.running_var + self.running_mean = self.momentum * mean + \ + (1 - self.momentum) * self.running_mean + self.running_var = self.momentum * var * n / \ + (n - 1) + (1 - self.momentum) * self.running_var else: mean = self.running_mean var = self.running_var @@ -53,9 +53,14 @@ def forward(self, x): return out -@ARCH_REGISTRY.register() class STNorm(nn.Module): - def __init__(self, num_nodes, tnorm_bool, snorm_bool, in_dim,out_dim, channels,kernel_size,blocks,layers): + """ + Paper: ST-Norm: Spatial and Temporal Normalization for Multi-variate Time Series Forecasting + Link: https://dl.acm.org/doi/10.1145/3447548.3467330 + Ref Official Code: https://github.com/JLDeng/ST-Norm/blob/master/models/Wavenet.py + """ + + def __init__(self, num_nodes, tnorm_bool, snorm_bool, in_dim, out_dim, channels, kernel_size, blocks, layers): super(STNorm, self).__init__() self.blocks = blocks self.layers = layers @@ -79,7 +84,7 @@ def __init__(self, num_nodes, tnorm_bool, snorm_bool, in_dim,out_dim, channels,k self.start_conv = nn.Conv2d(in_channels=in_dim, out_channels=channels, - kernel_size=(1,1)) + kernel_size=(1, 1)) receptive_field = 1 self.dropout = nn.Dropout(0.2) @@ -96,30 +101,35 @@ def __init__(self, num_nodes, tnorm_bool, snorm_bool, in_dim,out_dim, channels,k self.tn.append(TNorm(num_nodes, channels)) if self.snorm_bool: self.sn.append(SNorm(channels)) - self.filter_convs.append(nn.Conv2d(in_channels=num * channels, out_channels=channels, kernel_size=(1,kernel_size),dilation=new_dilation)) + self.filter_convs.append(nn.Conv2d( + in_channels=num * channels, out_channels=channels, kernel_size=(1, kernel_size), dilation=new_dilation)) - self.gate_convs.append(nn.Conv1d(in_channels=num * channels, out_channels=channels, kernel_size=(1, kernel_size), dilation=new_dilation)) + self.gate_convs.append(nn.Conv1d( + in_channels=num * channels, out_channels=channels, kernel_size=(1, kernel_size), dilation=new_dilation)) # 1x1 convolution for residual connection - self.residual_convs.append(nn.Conv1d(in_channels=channels, out_channels=channels, kernel_size=(1, 1))) + self.residual_convs.append( + nn.Conv1d(in_channels=channels, out_channels=channels, kernel_size=(1, 1))) # 1x1 convolution for skip connection - self.skip_convs.append(nn.Conv1d(in_channels=channels, out_channels=channels, kernel_size=(1, 1))) - new_dilation *=2 + self.skip_convs.append( + nn.Conv1d(in_channels=channels, out_channels=channels, kernel_size=(1, 1))) + new_dilation *= 2 receptive_field += additional_scope additional_scope *= 2 - self.end_conv_1 = nn.Conv2d(in_channels=channels, out_channels=channels, kernel_size=(1,1), bias=True) + self.end_conv_1 = nn.Conv2d( + in_channels=channels, 
out_channels=channels, kernel_size=(1, 1), bias=True) self.end_conv_2 = nn.Conv2d(in_channels=channels, out_channels=out_dim, - kernel_size=(1,1), + kernel_size=(1, 1), bias=True) self.receptive_field = receptive_field - def forward(self, history_data, **kwargs): - """feedforward function of STNorm. + def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, batch_seen: int, epoch: int, train: bool, **kwargs) -> torch.Tensor: + """Feedforward function of STNorm. Args: history_data (torch.Tensor): shape [B, C, N, L] @@ -127,10 +137,12 @@ def forward(self, history_data, **kwargs): Returns: torch.Tensor: [B, L, N, 1] """ + input = history_data.transpose(1, 3).contiguous() in_len = input.size(3) - if in_len None: - super().__init__() - assert mode in ['train', 'valid', 'test'], "error mode" - # read raw data (normalized) - data = load_pkl(raw_file_path) - processed_data = data['processed_data'] - self.data = torch.from_numpy(processed_data).float() # L, N, C - # read index - self.index = load_pkl(index_file_path)[mode] - - def __getitem__(self, index: int) -> tuple: - """get a sample. - - Args: - index (int): the iteration index (not the self.index) - - Returns: - tuple: (future_data, history_data), where the shape of each is L x N x C. - """ - idx = list(self.index[index]) - if isinstance(idx[0], int): - # continuous index - history_data = self.data[idx[0]:idx[1]] - future_data = self.data[idx[1]:idx[2]] - else: - # discontinuous index or custom index - # NOTE: current time $t$ should not included in the index[0] - history_index = idx[0] # list - assert idx[1] not in history_index, "current time t should not included in the idx[0]" - history_index.append(idx[1]) - history_data = self.data[history_index] - future_data = self.data[idx[1], idx[2]] - - return future_data, history_data - - def __len__(self): - """dataset length - - Returns: - int: dataset length - """ - return len(self.index) diff --git a/basicts/data/dataset.py b/basicts/data/dataset.py new file mode 100644 index 00000000..42731a2c --- /dev/null +++ b/basicts/data/dataset.py @@ -0,0 +1,73 @@ +import os + +import torch +from torch.utils.data import Dataset + +from ..utils import load_pkl + + +class TimeSeriesForecastingDataset(Dataset): + """Time series forecasting dataset.""" + + def __init__(self, data_file_path: str, index_file_path: str, mode: str) -> None: + super().__init__() + assert mode in ["train", "valid", "test"], "error mode" + self._check_if_file_exists(data_file_path, index_file_path) + # read raw data (normalized) + data = load_pkl(data_file_path) + processed_data = data["processed_data"] + self.data = torch.from_numpy(processed_data).float() + # read index + self.index = load_pkl(index_file_path)[mode] + + def _check_if_file_exists(self, data_file_path: str, index_file_path: str): + """Check if data file and index file exist. + + Args: + data_file_path (str): data file path + index_file_path (str): index file path + + Raises: + FileNotFoundError: no data file + FileNotFoundError: no index file + """ + + if not os.path.isfile(data_file_path): + raise FileNotFoundError("BasicTS can not find data file {0}".format(data_file_path)) + if not os.path.isfile(index_file_path): + raise FileNotFoundError("BasicTS can not find index file {0}".format(index_file_path)) + + def __getitem__(self, index: int) -> tuple: + """Get a sample. + + Args: + index (int): the iteration index (not the self.index) + + Returns: + tuple: (future_data, history_data), where the shape of each is L x N x C. 
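+
+            Example (illustrative values): an index entry (0, 12, 24) yields
+                history_data = self.data[0:12] and future_data = self.data[12:24].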
+ """ + + idx = list(self.index[index]) + if isinstance(idx[0], int): + # continuous index + history_data = self.data[idx[0]:idx[1]] + future_data = self.data[idx[1]:idx[2]] + else: + # discontinuous index or custom index + # NOTE: current time $t$ should not included in the index[0] + history_index = idx[0] # list + assert idx[1] not in history_index, "current time t should not included in the idx[0]" + history_index.append(idx[1]) + history_data = self.data[history_index] + future_data = self.data[idx[1], idx[2]] + + return future_data, history_data + + def __len__(self): + """Dataset length + + Returns: + int: dataset length + """ + + return len(self.index) diff --git a/basicts/archs/registry.py b/basicts/data/registry.py similarity index 55% rename from basicts/archs/registry.py rename to basicts/data/registry.py index bc06f826..826969df 100644 --- a/basicts/archs/registry.py +++ b/basicts/data/registry.py @@ -1,3 +1,3 @@ from easytorch.utils.registry import Registry -ARCH_REGISTRY = Registry('Arch') +SCALER_REGISTRY = Registry("Scaler") diff --git a/basicts/data/transform.py b/basicts/data/transform.py new file mode 100644 index 00000000..ec97a629 --- /dev/null +++ b/basicts/data/transform.py @@ -0,0 +1,56 @@ +import pickle + +import torch +import numpy as np + +from .registry import SCALER_REGISTRY + + +@SCALER_REGISTRY.register() +def standard_transform(data: np.array, output_dir: str, train_index: list) -> np.array: + """Standard normalization. + + Args: + data (np.array): raw time series data. + output_dir (str): output dir path. + train_index (list): train index. + + Returns: + np.array: normalized raw time series data. + """ + + # data: L, N, C + data_train = data[:train_index[-1][1], ...] + + mean, std = data_train[..., 0].mean(), data_train[..., 0].std() + + print("mean (training data):", mean) + print("std (training data):", std) + scaler = {} + scaler["func"] = re_standard_transform.__name__ + scaler["args"] = {"mean": mean, "std": std} + with open(output_dir + "/scaler.pkl", "wb") as f: + pickle.dump(scaler, f) + + def normalize(x): + return (x - mean) / std + + data_norm = normalize(data) + return data_norm + + +@SCALER_REGISTRY.register() +def re_standard_transform(data: torch.Tensor, **kwargs) -> torch.Tensor: + """Standard re-transformation. + + Args: + data (torch.Tensor): input data. + + Returns: + torch.Tensor: re-scaled data. + """ + + mean, std = kwargs["mean"], kwargs["std"] + data = data * std + data = data + mean + return data diff --git a/basicts/data/transforms.py b/basicts/data/transforms.py deleted file mode 100644 index b7d43095..00000000 --- a/basicts/data/transforms.py +++ /dev/null @@ -1,25 +0,0 @@ -from easytorch.utils.registry import Registry - -SCALER_REGISTRY = Registry('Scaler') - -""" -data normalization and re-normalization -""" - -# ====================================== re-normalizations ====================================== # -@SCALER_REGISTRY.register() -def re_max_min_normalization(x, **kwargs): - _min, _max = kwargs['min'], kwargs['max'] - x = (x + 1.) / 2. - x = 1. 
* x * (_max - _min) + _min - return x - -@SCALER_REGISTRY.register() -def standard_re_transform(x, **kwargs): - mean, std = kwargs['mean'], kwargs['std'] - x = x * std - x = x + mean - return x - -# ====================================== normalizations ====================================== # -# omitted to avoid redundancy, as they should only be used in data preprocessing in `scripts/data_preparation` diff --git a/basicts/launcher.py b/basicts/launcher.py new file mode 100644 index 00000000..2c7b639e --- /dev/null +++ b/basicts/launcher.py @@ -0,0 +1,19 @@ +from typing import Dict, Union + +import easytorch + +def launch_training(cfg: Union[Dict, str], gpus: str = None, node_rank: int = 0): + """Extended easytorch launch_training. + + Args: + cfg (Union[Dict, str]): Easytorch config. + gpus (str): set ``CUDA_VISIBLE_DEVICES`` environment variable. + node_rank (int): Rank of the current node. + """ + + # pre-processing of some possible future features, such as: + # registering model, runners. + # config checking + pass + # launch training based on easytorch + easytorch.launch_training(cfg=cfg, gpus=gpus, node_rank=node_rank) diff --git a/basicts/losses/__init__.py b/basicts/losses/__init__.py new file mode 100644 index 00000000..39d7c0a2 --- /dev/null +++ b/basicts/losses/__init__.py @@ -0,0 +1,4 @@ +from .losses import l1_loss, l2_loss +from ..metrics import masked_mae, masked_mape, masked_rmse, masked_mse + +__all__ = ["l1_loss", "l2_loss", "masked_mae", "masked_mape", "masked_rmse", "masked_mse"] diff --git a/basicts/losses/losses.py b/basicts/losses/losses.py index eab0a778..1691225c 100644 --- a/basicts/losses/losses.py +++ b/basicts/losses/losses.py @@ -1,12 +1,17 @@ -from basicts.metrics.mae import masked_mae as masked_l1_loss -from basicts.utils.misc import check_nan_inf -import torch import torch.nn.functional as F -def L1Loss(input, target, **kwargs): - return F.l1_loss(input, target) +from ..utils import check_nan_inf -def MSELoss(input, target, **kwargs): - check_nan_inf(input) - check_nan_inf(target) - return F.mse_loss(input, target) \ No newline at end of file + +def l1_loss(input_data, target_data): + """unmasked mae.""" + + return F.l1_loss(input_data, target_data) + + +def l2_loss(input_data, target_data): + """unmasked mse""" + + check_nan_inf(input_data) + check_nan_inf(target_data) + return F.mse_loss(input_data, target_data) diff --git a/basicts/metrics/__init__.py b/basicts/metrics/__init__.py new file mode 100644 index 00000000..5e69753e --- /dev/null +++ b/basicts/metrics/__init__.py @@ -0,0 +1,5 @@ +from .mae import masked_mae +from .mape import masked_mape +from .rmse import masked_rmse, masked_mse + +__all__ = ["masked_mae", "masked_mape", "masked_rmse", "masked_mse"] diff --git a/basicts/metrics/mae.py b/basicts/metrics/mae.py index 32601d8a..5901f7d2 100644 --- a/basicts/metrics/mae.py +++ b/basicts/metrics/mae.py @@ -1,9 +1,9 @@ import torch import numpy as np -# ============== MAE ================= # + def masked_mae(preds: torch.Tensor, labels: torch.Tensor, null_val: float = np.nan) -> torch.Tensor: - """masked mean absolute error. + """Masked mean absolute error. 
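+
+    Entries of labels equal to null_val are masked out; the mask is rescaled by its
+    mean so that the result equals the mean absolute error over the valid entries.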
Args: preds (torch.Tensor): predicted values @@ -13,12 +13,13 @@ def masked_mae(preds: torch.Tensor, labels: torch.Tensor, null_val: float = np.n Returns: torch.Tensor: masked mean absolute error """ + if np.isnan(null_val): mask = ~torch.isnan(labels) else: - mask = (labels!=null_val) + mask = (labels != null_val) mask = mask.float() - mask /= torch.mean((mask)) + mask /= torch.mean((mask)) mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask) loss = torch.abs(preds-labels) loss = loss * mask diff --git a/basicts/metrics/mape.py b/basicts/metrics/mape.py index f31a7f8a..2d59ef44 100644 --- a/basicts/metrics/mape.py +++ b/basicts/metrics/mape.py @@ -1,9 +1,8 @@ import torch import numpy as np -# ============== MAPE ================== # def masked_mape(preds: torch.Tensor, labels: torch.Tensor, null_val: float = np.nan) -> torch.Tensor: - """masked mean absolute percentage error. + """Masked mean absolute percentage error. Args: preds (torch.Tensor): predicted values @@ -13,14 +12,15 @@ def masked_mape(preds: torch.Tensor, labels: torch.Tensor, null_val: float = np. Returns: torch.Tensor: masked mean absolute percentage error """ + # fix very small values of labels, which should be 0. Otherwise, nan detector will fail. - labels = torch.where(labels<1e-2, torch.zeros_like(labels), labels) + labels = torch.where(labels < 1e-2, torch.zeros_like(labels), labels) if np.isnan(null_val): mask = ~torch.isnan(labels) else: - mask = (labels!=null_val) + mask = (labels != null_val) mask = mask.float() - mask /= torch.mean((mask)) + mask /= torch.mean((mask)) mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask) loss = torch.abs(preds-labels)/labels loss = loss * mask diff --git a/basicts/metrics/rmse.py b/basicts/metrics/rmse.py index 8def1b65..31370630 100644 --- a/basicts/metrics/rmse.py +++ b/basicts/metrics/rmse.py @@ -1,9 +1,8 @@ import torch import numpy as np -# ============== RMSE ================= # def masked_mse(preds: torch.Tensor, labels: torch.Tensor, null_val: float = np.nan) -> torch.Tensor: - """masked mean squared error. + """Masked mean squared error. Args: preds (torch.Tensor): predicted values @@ -13,10 +12,11 @@ def masked_mse(preds: torch.Tensor, labels: torch.Tensor, null_val: float = np.n Returns: torch.Tensor: masked mean squared error """ + if np.isnan(null_val): mask = ~torch.isnan(labels) else: - mask = (labels!=null_val) + mask = (labels != null_val) mask = mask.float() mask /= torch.mean((mask)) mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask) @@ -36,4 +36,5 @@ def masked_rmse(preds: torch.Tensor, labels: torch.Tensor, null_val: float = np. 
Returns: torch.Tensor: root mean squared error """ + return torch.sqrt(masked_mse(preds=preds, labels=labels, null_val=null_val)) diff --git a/basicts/options/AGCRN/AGCRN_Electricity336.py b/basicts/options/AGCRN/AGCRN_Electricity336.py deleted file mode 100644 index d3098a91..00000000 --- a/basicts/options/AGCRN/AGCRN_Electricity336.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.AGCRN_runner import AGCRNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import L1Loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'AGCRN model configuration' -CFG.RUNNER = AGCRNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "Electricity336" -CFG.DATASET_TYPE = 'Electricity consumption' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'AGCRN' -CFG.MODEL.PARAM = { - "num_nodes" : 336, - "input_dim" : 1, - "rnn_units" : 32, - "output_dim": 1, - "horizon" : 12, - "num_layers": 2, - "default_graph": True, - "embed_dim" : 2, - "cheb_k" : 2 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = L1Loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.003, -} -# CFG.TRAIN.LR_SCHEDULER = EasyDict() -# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -# CFG.TRAIN.LR_SCHEDULER.PARAM= { -# "milestones":[5, 20, 40, 70], -# "gamma":0.3 -# } - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 12 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/AGCRN/AGCRN_METR-LA.py b/basicts/options/AGCRN/AGCRN_METR-LA.py deleted file mode 100644 index de6041b6..00000000 --- a/basicts/options/AGCRN/AGCRN_METR-LA.py +++ /dev/null @@ -1,111 +0,0 @@ 
-import os
-from easydict import EasyDict
-# runner
-from basicts.runners.AGCRN_runner import AGCRNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'AGCRN model configuration'
-CFG.RUNNER = AGCRNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "METR-LA"
-CFG.DATASET_TYPE = 'Traffic speed'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'AGCRN'
-CFG.MODEL.PARAM = {
-    "num_nodes" : 207,
-    "input_dim" : 2,
-    "rnn_units" : 64,
-    "output_dim": 1,
-    "horizon" : 12,
-    "num_layers": 2,
-    "default_graph": True,
-    "embed_dim" : 10,
-    "cheb_k" : 2
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.003,
-}
-# CFG.TRAIN.LR_SCHEDULER = EasyDict()
-# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-# CFG.TRAIN.LR_SCHEDULER.PARAM= {
-#     "milestones":[5, 20, 40, 70],
-#     "gamma":0.3
-# }
-
-# ================= train ================= #
-# CFG.TRAIN.CLIP_GRAD_PARAM = {
-#     'max_norm': 5.0
-# }
-CFG.TRAIN.NUM_EPOCHS = 100
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/AGCRN/AGCRN_PEMS-BAY.py b/basicts/options/AGCRN/AGCRN_PEMS-BAY.py
deleted file mode 100644
index f3ff316f..00000000
--- a/basicts/options/AGCRN/AGCRN_PEMS-BAY.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import os
-from easydict import EasyDict
-# runner
-from basicts.runners.AGCRN_runner import AGCRNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'AGCRN model configuration'
-CFG.RUNNER = AGCRNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS-BAY"
-CFG.DATASET_TYPE = 'Traffic speed'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'AGCRN'
-CFG.MODEL.PARAM = {
-    "num_nodes" : 325,
-    "input_dim" : 2,
-    "rnn_units" : 64,
-    "output_dim": 1,
-    "horizon" : 12,
-    "num_layers": 2,
-    "default_graph": True,
-    "embed_dim" : 10,
-    "cheb_k" : 2
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.003,
-}
-# CFG.TRAIN.LR_SCHEDULER = EasyDict()
-# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-# CFG.TRAIN.LR_SCHEDULER.PARAM= {
-#     "milestones":[5, 20, 40, 70],
-#     "gamma":0.3
-# }
-
-# ================= train ================= #
-# CFG.TRAIN.CLIP_GRAD_PARAM = {
-#     'max_norm': 5.0
-# }
-CFG.TRAIN.NUM_EPOCHS = 100
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/AGCRN/AGCRN_PEMS03.py b/basicts/options/AGCRN/AGCRN_PEMS03.py
deleted file mode 100644
index 8b08a15e..00000000
--- a/basicts/options/AGCRN/AGCRN_PEMS03.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import os
-from easydict import EasyDict
-# runner
-from basicts.runners.AGCRN_runner import AGCRNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import L1Loss
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'AGCRN model configuration'
-CFG.RUNNER = AGCRNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS03"
-CFG.DATASET_TYPE = 'Traffic flow'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'AGCRN'
-CFG.MODEL.PARAM = {
-    "num_nodes" : 358,
-    "input_dim" : 1,
-    "rnn_units" : 64,
-    "output_dim": 1,
-    "horizon" : 12,
-    "num_layers": 2,
-    "default_graph": True,
-    "embed_dim" : 10,
-    "cheb_k" : 2
-}
-CFG.MODEL.FROWARD_FEATURES = [0]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = L1Loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.003,
-}
-# CFG.TRAIN.LR_SCHEDULER = EasyDict()
-# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-# CFG.TRAIN.LR_SCHEDULER.PARAM= {
-#     "milestones":[5, 20, 40, 70],
-#     "gamma":0.3
-# }
-
-# ================= train ================= #
-# CFG.TRAIN.CLIP_GRAD_PARAM = {
-#     'max_norm': 5.0
-# }
-CFG.TRAIN.NUM_EPOCHS = 200
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/AGCRN/AGCRN_PEMS04.py b/basicts/options/AGCRN/AGCRN_PEMS04.py
deleted file mode 100644
index 95c5bd38..00000000
--- a/basicts/options/AGCRN/AGCRN_PEMS04.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import os
-from easydict import EasyDict
-# runner
-from basicts.runners.AGCRN_runner import AGCRNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import L1Loss
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'AGCRN model configuration'
-CFG.RUNNER = AGCRNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS04"
-CFG.DATASET_TYPE = 'Traffic flow'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'AGCRN'
-CFG.MODEL.PARAM = {
-    "num_nodes" : 307,
-    "input_dim" : 1,
-    "rnn_units" : 64,
-    "output_dim": 1,
-    "horizon" : 12,
-    "num_layers": 2,
-    "default_graph": True,
-    "embed_dim" : 10,
-    "cheb_k" : 2
-}
-CFG.MODEL.FROWARD_FEATURES = [0]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = L1Loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.003,
-}
-# CFG.TRAIN.LR_SCHEDULER = EasyDict()
-# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-# CFG.TRAIN.LR_SCHEDULER.PARAM= {
-#     "milestones":[5, 20, 40, 70],
-#     "gamma":0.3
-# }
-
-# ================= train ================= #
-# CFG.TRAIN.CLIP_GRAD_PARAM = {
-#     'max_norm': 5.0
-# }
-CFG.TRAIN.NUM_EPOCHS = 200
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/AGCRN/AGCRN_PEMS07.py b/basicts/options/AGCRN/AGCRN_PEMS07.py
deleted file mode 100644
index fe9a06ca..00000000
--- a/basicts/options/AGCRN/AGCRN_PEMS07.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import os
-from easydict import EasyDict
-# runner
-from basicts.runners.AGCRN_runner import AGCRNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import L1Loss
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'AGCRN model configuration'
-CFG.RUNNER = AGCRNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS07"
-CFG.DATASET_TYPE = 'Traffic flow'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'AGCRN'
-CFG.MODEL.PARAM = {
-    "num_nodes" : 883,
-    "input_dim" : 1,
-    "rnn_units" : 64,
-    "output_dim": 1,
-    "horizon" : 12,
-    "num_layers": 2,
-    "default_graph": True,
-    "embed_dim" : 10,
-    "cheb_k" : 2
-}
-CFG.MODEL.FROWARD_FEATURES = [0]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = L1Loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.003,
-}
-# CFG.TRAIN.LR_SCHEDULER = EasyDict()
-# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-# CFG.TRAIN.LR_SCHEDULER.PARAM= {
-#     "milestones":[5, 20, 40, 70],
-#     "gamma":0.3
-# }
-
-# ================= train ================= #
-# CFG.TRAIN.CLIP_GRAD_PARAM = {
-#     'max_norm': 5.0
-# }
-CFG.TRAIN.NUM_EPOCHS = 200
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/AGCRN/AGCRN_PEMS08.py b/basicts/options/AGCRN/AGCRN_PEMS08.py
deleted file mode 100644
index 4feec604..00000000
--- a/basicts/options/AGCRN/AGCRN_PEMS08.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import os
-from easydict import EasyDict
-# runner
-from basicts.runners.AGCRN_runner import AGCRNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import L1Loss
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'AGCRN model configuration'
-CFG.RUNNER = AGCRNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS08"
-CFG.DATASET_TYPE = 'Traffic flow'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'AGCRN'
-CFG.MODEL.PARAM = {
-    "num_nodes" : 170,
-    "input_dim" : 1,
-    "rnn_units" : 64,
-    "output_dim": 1,
-    "horizon" : 12,
-    "num_layers": 2,
-    "default_graph": True,
-    "embed_dim" : 2,
-    "cheb_k" : 2
-}
-CFG.MODEL.FROWARD_FEATURES = [0]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = L1Loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.003,
-}
-# CFG.TRAIN.LR_SCHEDULER = EasyDict()
-# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-# CFG.TRAIN.LR_SCHEDULER.PARAM= {
-#     "milestones":[5, 20, 40, 70],
-#     "gamma":0.3
-# }
-
-# ================= train ================= #
-# CFG.TRAIN.CLIP_GRAD_PARAM = {
-#     'max_norm': 5.0
-# }
-CFG.TRAIN.NUM_EPOCHS = 200
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/D2STGNN/D2STGNN_METR-LA.py b/basicts/options/D2STGNN/D2STGNN_METR-LA.py
deleted file mode 100644
index fe3ffd43..00000000
--- a/basicts/options/D2STGNN/D2STGNN_METR-LA.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.D2STGNN_runner import D2STGNNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'D2STGNN model configuration'
-CFG.RUNNER = D2STGNNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "METR-LA"
-CFG.DATASET_TYPE = 'Traffic speed'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'D2STGNN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "num_feat" : 1,
-    "num_hidden": 32,
-    "dropout" : 0.1,
-    "seq_length": 12,
-    "k_t" : 3,
-    "k_s" : 2,
-    "gap" : 3,
-    "num_nodes" : 207,
-    "adjs" : [torch.tensor(adj) for adj in adj_mx],
-    "num_layers": 5,
-    "num_modalities": 2,
-    "node_hidden" : 10,
-    "time_emb_dim" : 10,
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1, 2]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.002,
-    "weight_decay":1.0e-5,
-    "eps":1.0e-8
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[1, 30, 38, 46, 54, 62, 70, 80],
-    "gamma":0.5
-}
-
-# ================= train ================= #
-CFG.TRAIN.CLIP_GRAD_PARAM = {
-    'max_norm': 5.0
-}
-CFG.TRAIN.NUM_EPOCHS = 100
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 32
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-## curriculum learning
-CFG.TRAIN.CL = EasyDict()
-CFG.TRAIN.CL.WARM_EPOCHS = 0
-CFG.TRAIN.CL.CL_EPOCHS = 6
-CFG.TRAIN.CL.PREDICTION_LENGTH = 12
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 32
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 32
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/D2STGNN/D2STGNN_PEMS-BAY.py b/basicts/options/D2STGNN/D2STGNN_PEMS-BAY.py
deleted file mode 100644
index 6e5bcba1..00000000
--- a/basicts/options/D2STGNN/D2STGNN_PEMS-BAY.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.D2STGNN_runner import D2STGNNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'D2STGNN model configuration'
-CFG.RUNNER = D2STGNNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS-BAY"
-CFG.DATASET_TYPE = 'Traffic speed'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'D2STGNN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "num_feat" : 1,
-    "num_hidden": 32,
-    "dropout" : 0.1,
-    "seq_length": 12,
-    "k_t" : 3,
-    "k_s" : 2,
-    "gap" : 3,
-    "num_nodes" : 325,
-    "adjs" : [torch.tensor(adj) for adj in adj_mx],
-    "num_layers": 5,
-    "num_modalities": 2,
-    "node_hidden" : 12,
-    "time_emb_dim" : 12,
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1, 2]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.002,
-    "weight_decay":1.0e-5,
-    "eps":1.0e-8
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[1, 30, 38, 46, 54, 62, 70, 80],
-    "gamma":0.5
-}
-
-# ================= train ================= #
-CFG.TRAIN.CLIP_GRAD_PARAM = {
-    'max_norm': 5.0
-}
-CFG.TRAIN.NUM_EPOCHS = 100
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 32
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-## curriculum learning
-CFG.TRAIN.CL = EasyDict()
-CFG.TRAIN.CL.WARM_EPOCHS = 30
-CFG.TRAIN.CL.CL_EPOCHS = 3
-CFG.TRAIN.CL.PREDICTION_LENGTH = 12
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 32
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 32
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/D2STGNN/D2STGNN_PEMS03.py b/basicts/options/D2STGNN/D2STGNN_PEMS03.py
deleted file mode 100644
index 0786a9e5..00000000
--- a/basicts/options/D2STGNN/D2STGNN_PEMS03.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.D2STGNN_runner import D2STGNNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'D2STGNN model configuration'
-CFG.RUNNER = D2STGNNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS03"
-CFG.DATASET_TYPE = 'Traffic flow'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'D2STGNN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "num_feat" : 1,
-    "num_hidden": 32,
-    "dropout" : 0.2,
-    "seq_length": 12,
-    "k_t" : 3,
-    "k_s" : 2,
-    "gap" : 3,
-    "num_nodes" : 358,
-    "adjs" : [torch.tensor(adj) for adj in adj_mx],
-    "num_layers": 5,
-    "num_modalities": 2,
-    "node_hidden" : 10,
-    "time_emb_dim" : 10,
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1, 2]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.002,
-    "weight_decay":1.0e-5,
-    "eps":1.0e-8
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[1, 30, 38, 46, 54, 200],
-    "gamma":0.5
-}
-
-# ================= train ================= #
-CFG.TRAIN.CLIP_GRAD_PARAM = {
-    'max_norm': 5.0
-}
-CFG.TRAIN.NUM_EPOCHS = 300
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 16
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-## curriculum learning
-CFG.TRAIN.CL = EasyDict()
-CFG.TRAIN.CL.WARM_EPOCHS = 30
-CFG.TRAIN.CL.CL_EPOCHS = 3
-CFG.TRAIN.CL.PREDICTION_LENGTH = 12
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 16
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 16
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/D2STGNN/D2STGNN_PEMS04.py b/basicts/options/D2STGNN/D2STGNN_PEMS04.py
deleted file mode 100644
index f7afc12b..00000000
--- a/basicts/options/D2STGNN/D2STGNN_PEMS04.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.D2STGNN_runner import D2STGNNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'D2STGNN model configuration'
-CFG.RUNNER = D2STGNNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS04"
-CFG.DATASET_TYPE = 'Traffic flow'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'D2STGNN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "num_feat" : 1,
-    "num_hidden": 32,
-    "dropout" : 0.1,
-    "seq_length": 12,
-    "k_t" : 3,
-    "k_s" : 2,
-    "gap" : 3,
-    "num_nodes" : 307,
-    "adjs" : [torch.tensor(adj) for adj in adj_mx],
-    "num_layers": 5,
-    "num_modalities": 2,
-    "node_hidden" : 12,
-    "time_emb_dim" : 12,
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1, 2]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.002,
-    "weight_decay":1.0e-5,
-    "eps":1.0e-8
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[1, 30, 38, 46, 54, 200],
-    "gamma":0.5
-}
-
-# ================= train ================= #
-CFG.TRAIN.CLIP_GRAD_PARAM = {
-    'max_norm': 5.0
-}
-CFG.TRAIN.NUM_EPOCHS = 300
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 16
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-## curriculum learning
-CFG.TRAIN.CL = EasyDict()
-CFG.TRAIN.CL.WARM_EPOCHS = 30
-CFG.TRAIN.CL.CL_EPOCHS = 3
-CFG.TRAIN.CL.PREDICTION_LENGTH = 12
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 16
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 16
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/D2STGNN/D2STGNN_PEMS07.py b/basicts/options/D2STGNN/D2STGNN_PEMS07.py
deleted file mode 100644
index 6a5975e0..00000000
--- a/basicts/options/D2STGNN/D2STGNN_PEMS07.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.D2STGNN_runner import D2STGNNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'D2STGNN model configuration'
-CFG.RUNNER = D2STGNNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS07"
-CFG.DATASET_TYPE = 'Traffic flow'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'D2STGNN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "num_feat" : 1,
-    "num_hidden": 32,
-    "dropout" : 0.1,
-    "seq_length": 12,
-    "k_t" : 3,
-    "k_s" : 2,
-    "gap" : 3,
-    "num_nodes" : 883,
-    "adjs" : [torch.tensor(adj) for adj in adj_mx],
-    "num_layers": 5,
-    "num_modalities": 2,
-    "node_hidden" : 12,
-    "time_emb_dim" : 12,
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1, 2]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.002,
-    "weight_decay":1.0e-5,
-    "eps":1.0e-8
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[1, 30, 38, 46, 54, 200],
-    "gamma":0.5
-}
-
-# ================= train ================= #
-CFG.TRAIN.CLIP_GRAD_PARAM = {
-    'max_norm': 5.0
-}
-CFG.TRAIN.NUM_EPOCHS = 300
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 16
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-## curriculum learning
-CFG.TRAIN.CL = EasyDict()
-CFG.TRAIN.CL.WARM_EPOCHS = 30
-CFG.TRAIN.CL.CL_EPOCHS = 3
-CFG.TRAIN.CL.PREDICTION_LENGTH = 12
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 16
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 16
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/D2STGNN/D2STGNN_PEMS08.py b/basicts/options/D2STGNN/D2STGNN_PEMS08.py
deleted file mode 100644
index 97cd64c7..00000000
--- a/basicts/options/D2STGNN/D2STGNN_PEMS08.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.D2STGNN_runner import D2STGNNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'D2STGNN model configuration'
-CFG.RUNNER = D2STGNNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS08"
-CFG.DATASET_TYPE = 'Traffic flow'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'D2STGNN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "num_feat" : 1,
-    "num_hidden": 32,
-    "dropout" : 0.1,
-    "seq_length": 12,
-    "k_t" : 3,
-    "k_s" : 2,
-    "gap" : 3,
-    "num_nodes" : 170,
-    "adjs" : [torch.tensor(adj) for adj in adj_mx],
-    "num_layers": 5,
-    "num_modalities": 2,
-    "node_hidden" : 10,
-    "time_emb_dim" : 10,
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1, 2]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.002,
-    "weight_decay":1.0e-5,
-    "eps":1.0e-8
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[1, 30, 38, 46, 54, 200],
-    "gamma":0.5
-}
-
-# ================= train ================= #
-CFG.TRAIN.CLIP_GRAD_PARAM = {
-    'max_norm': 5.0
-}
-CFG.TRAIN.NUM_EPOCHS = 300
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 16
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-## curriculum learning
-CFG.TRAIN.CL = EasyDict()
-CFG.TRAIN.CL.WARM_EPOCHS = 30
-CFG.TRAIN.CL.CL_EPOCHS = 3
-CFG.TRAIN.CL.PREDICTION_LENGTH = 12
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 16
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 16
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/DCRNN/DCRNN_METR-LA.py b/basicts/options/DCRNN/DCRNN_METR-LA.py
deleted file mode 100644
index 64050f43..00000000
--- a/basicts/options/DCRNN/DCRNN_METR-LA.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.DCRNN_runner import DCRNNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-resume = False # DCRNN does not allow to load parameters since it creates parameters in the first iteration
-if not resume:
-    import random
-    _ = random.randint(-1e6, 1e6)
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'DCRNN model configuration'
-CFG.RUNNER = DCRNNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "METR-LA"
-CFG.DATASET_TYPE = 'Traffic speed'
-CFG._ = _
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'DCRNN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "cl_decay_steps" : 2000,
-    "horizon" : 12,
-    "input_dim" : 2,
-    "max_diffusion_step": 2,
-    "num_nodes" : 207,
-    "num_rnn_layers" : 2,
-    "output_dim" : 1,
-    "rnn_units" : 64,
-    "seq_len" : 12,
-    "adj_mx" : [torch.tensor(i).cuda() for i in adj_mx],
-    "use_curriculum_learning": True
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.01,
-    "eps":1e-3
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[20, 30, 40, 50],
-    "gamma":0.1
-}
-
-# ================= train ================= #
-CFG.TRAIN.CLIP_GRAD_PARAM = {
-    'max_norm': 5.0
-}
-CFG.TRAIN.NUM_EPOCHS = 100
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-CFG.TRAIN.SETUP_GRAPH = True
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/DCRNN/DCRNN_PEMS-BAY.py b/basicts/options/DCRNN/DCRNN_PEMS-BAY.py
deleted file mode 100644
index 54b65e78..00000000
--- a/basicts/options/DCRNN/DCRNN_PEMS-BAY.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.DCRNN_runner import DCRNNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-resume = False # DCRNN does not allow to load parameters since it creates parameters in the first iteration
-if not resume:
-    import random
-    _ = random.randint(-1e6, 1e6)
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'DCRNN model configuration'
-CFG.RUNNER = DCRNNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS-BAY"
-CFG.DATASET_TYPE = 'Traffic speed'
-CFG._ = _
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'DCRNN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "cl_decay_steps" : 2000,
-    "horizon" : 12,
-    "input_dim" : 2,
-    "max_diffusion_step": 2,
-    "num_nodes" : 325,
-    "num_rnn_layers" : 2,
-    "output_dim" : 1,
-    "rnn_units" : 64,
-    "seq_len" : 12,
-    "adj_mx" : [torch.tensor(i).cuda() for i in adj_mx],
-    "use_curriculum_learning": True
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.01,
-    "eps":1e-3
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[20, 30, 40, 50],
-    "gamma":0.1
-}
-
-# ================= train ================= #
-CFG.TRAIN.CLIP_GRAD_PARAM = {
-    'max_norm': 5.0
-}
-CFG.TRAIN.NUM_EPOCHS = 100
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-CFG.TRAIN.SETUP_GRAPH = True
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/DCRNN/DCRNN_PEMS03.py b/basicts/options/DCRNN/DCRNN_PEMS03.py
deleted file mode 100644
index 181b1b07..00000000
--- a/basicts/options/DCRNN/DCRNN_PEMS03.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.DCRNN_runner import DCRNNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-resume = False # DCRNN does not allow to load parameters since it creates parameters in the first iteration
-if not resume:
-    import random
-    _ = random.randint(-1e6, 1e6)
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'DCRNN model configuration'
-CFG.RUNNER = DCRNNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS03"
-CFG.DATASET_TYPE = 'Traffic speed'
-CFG._ = _
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'DCRNN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "cl_decay_steps" : 2000,
-    "horizon" : 12,
-    "input_dim" : 2,
-    "max_diffusion_step": 2,
-    "num_nodes" : 358,
-    "num_rnn_layers" : 2,
-    "output_dim" : 1,
-    "rnn_units" : 64,
-    "seq_len" : 12,
-    "adj_mx" : [torch.tensor(i).cuda() for i in adj_mx],
-    "use_curriculum_learning": True
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.003,
-    "eps":1e-3
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[80],
-    "gamma":0.3
-}
-
-# ================= train ================= #
-# CFG.TRAIN.CLIP_GRAD_PARAM = {
-#     'max_norm': 5.0
-# }
-CFG.TRAIN.NUM_EPOCHS = 200
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-CFG.TRAIN.SETUP_GRAPH = True
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/DCRNN/DCRNN_PEMS04.py b/basicts/options/DCRNN/DCRNN_PEMS04.py
deleted file mode 100644
index ebcb74d7..00000000
--- a/basicts/options/DCRNN/DCRNN_PEMS04.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.DCRNN_runner import DCRNNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-resume = False # DCRNN does not allow to load parameters since it creates parameters in the first iteration
-if not resume:
-    import random
-    _ = random.randint(-1e6, 1e6)
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'DCRNN model configuration'
-CFG.RUNNER = DCRNNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS04"
-CFG.DATASET_TYPE = 'Traffic flow'
-CFG._ = _
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'DCRNN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "cl_decay_steps" : 2000,
-    "horizon" : 12,
-    "input_dim" : 2,
-    "max_diffusion_step": 2,
-    "num_nodes" : 307,
-    "num_rnn_layers" : 2,
-    "output_dim" : 1,
-    "rnn_units" : 64,
-    "seq_len" : 12,
-    "adj_mx" : [torch.tensor(i).cuda() for i in adj_mx],
-    "use_curriculum_learning": True
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.003,
-    "eps":1e-3
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[80],
-    "gamma":0.3
-}
-
-# ================= train ================= #
-# CFG.TRAIN.CLIP_GRAD_PARAM = {
-#     'max_norm': 5.0
-# }
-CFG.TRAIN.NUM_EPOCHS = 200
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-CFG.TRAIN.SETUP_GRAPH = True
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/DCRNN/DCRNN_PEMS07.py b/basicts/options/DCRNN/DCRNN_PEMS07.py
deleted file mode 100644
index c48d7258..00000000
--- a/basicts/options/DCRNN/DCRNN_PEMS07.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.DCRNN_runner import DCRNNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-resume = False # DCRNN does not allow to load parameters since it creates parameters in the first iteration
-if not resume:
-    import random
-    _ = random.randint(-1e6, 1e6)
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'DCRNN model configuration'
-CFG.RUNNER = DCRNNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS07"
-CFG.DATASET_TYPE = 'Traffic speed'
-CFG._ = _
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'DCRNN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "cl_decay_steps" : 2000,
-    "horizon" : 12,
-    "input_dim" : 2,
-    "max_diffusion_step": 2,
-    "num_nodes" : 883,
-    "num_rnn_layers" : 2,
-    "output_dim" : 1,
-    "rnn_units" : 64,
-    "seq_len" : 12,
-    "adj_mx" : [torch.tensor(i).cuda() for i in adj_mx],
-    "use_curriculum_learning": True
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.003,
-    "eps":1e-3
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[80],
-    "gamma":0.3
-}
-
-# ================= train ================= #
-# CFG.TRAIN.CLIP_GRAD_PARAM = {
-#     'max_norm': 5.0
-# }
-CFG.TRAIN.NUM_EPOCHS = 200
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-CFG.TRAIN.SETUP_GRAPH = True
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/DCRNN/DCRNN_PEMS08.py b/basicts/options/DCRNN/DCRNN_PEMS08.py
deleted file mode 100644
index f20b02c5..00000000
--- a/basicts/options/DCRNN/DCRNN_PEMS08.py
+++ /dev/null
@@ -1,124 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.DCRNN_runner import DCRNNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-resume = False # DCRNN does not allow to load parameters since it creates parameters in the first iteration
-if not resume:
-    import random
-    _ = random.randint(-1e6, 1e6)
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'DCRNN model configuration'
-CFG.RUNNER = DCRNNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS08"
-CFG.DATASET_TYPE = 'Traffic flow'
-CFG._ = _
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'DCRNN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "cl_decay_steps" : 2000,
-    "horizon" : 12,
-    "input_dim" : 2,
-    "max_diffusion_step": 2,
-    "num_nodes" : 170,
-    "num_rnn_layers" : 2,
-    "output_dim" : 1,
-    "rnn_units" : 64,
-    "seq_len" : 12,
-    "adj_mx" : [torch.tensor(i).cuda() for i in adj_mx],
-    "use_curriculum_learning": True
-}
-CFG.MODEL.FROWARD_FEATURES = [0, 1]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.003,
-    "eps":1e-3
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[80],
-    "gamma":0.3
-}
-
-# ================= train ================= #
-# CFG.TRAIN.CLIP_GRAD_PARAM = {
-#     'max_norm': 5.0
-# }
-CFG.TRAIN.NUM_EPOCHS = 200
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-CFG.TRAIN.SETUP_GRAPH = True
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/DGCRN/DGCRN_METR-LA.py b/basicts/options/DGCRN/DGCRN_METR-LA.py
deleted file mode 100644
index 26e47f49..00000000
--- a/basicts/options/DGCRN/DGCRN_METR-LA.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.DGCRN_runner import DGCRNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'DGCRN model configuration'
-CFG.RUNNER = DGCRNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "METR-LA"
-CFG.DATASET_TYPE = 'Traffic speed'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'DGCRN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "gcn_depth" : 2,
-    "num_nodes" : 207,
-    "predefined_A": [torch.Tensor(_) for _ in adj_mx],
-    "dropout" : 0.3,
-    "subgraph_size" : 20,
-    "node_dim" : 40,
-    "middle_dim": 2,
-    "seq_length": 12,
-    "in_dim" : 2,
-    "list_weight": [0.05, 0.95, 0.95],
-    "tanhalpha" : 3,
-    "cl_decay_steps" : 4000,
-    "rnn_size" : 64,
-    "hyperGNN_dim" : 16
-}
-
-CFG.MODEL.FROWARD_FEATURES = [0, 1]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.001,
-    "weight_decay":0.0001
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[100, 150],
-    "gamma":0.5
-}
-
-# ================= train ================= #
-CFG.TRAIN.CLIP_GRAD_PARAM = {
-    'max_norm': 5.0
-}
-CFG.TRAIN.NUM_EPOCHS = 200
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 64
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-## curriculum learning
-CFG.TRAIN.CL = EasyDict()
-CFG.TRAIN.CL.WARM_EPOCHS = 0
-CFG.TRAIN.CL.CL_EPOCHS = 6
-CFG.TRAIN.CL.PREDICTION_LENGTH = 12
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/DGCRN/DGCRN_PEMS-BAY.py b/basicts/options/DGCRN/DGCRN_PEMS-BAY.py
deleted file mode 100644
index 9727b5dd..00000000
--- a/basicts/options/DGCRN/DGCRN_PEMS-BAY.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.DGCRN_runner import DGCRNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'DGCRN model configuration'
-CFG.RUNNER = DGCRNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS-BAY"
-CFG.DATASET_TYPE = 'Traffic speed'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = EasyDict()
-CFG.MODEL.NAME = 'DGCRN'
-adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition")
-CFG.MODEL.PARAM = {
-    "gcn_depth" : 2,
-    "num_nodes" : 325,
-    "predefined_A": [torch.Tensor(_) for _ in adj_mx],
-    "dropout" : 0.3,
-    "subgraph_size" : 20,
-    "node_dim" : 40,
-    "middle_dim": 2,
-    "seq_length": 12,
-    "in_dim" : 2,
-    "list_weight": [0.05, 0.95, 0.95],
-    "tanhalpha" : 3,
-    "cl_decay_steps" : 5500,
-    "rnn_size" : 64,
-    "hyperGNN_dim" : 16
-}
-
-CFG.MODEL.FROWARD_FEATURES = [0, 1]
-CFG.MODEL.TARGET_FEATURES = [0]
-
-# ================= optim ================= #
-CFG.TRAIN = EasyDict()
-CFG.TRAIN.LOSS = masked_l1_loss
-CFG.TRAIN.OPTIM = EasyDict()
-CFG.TRAIN.OPTIM.TYPE = "Adam"
-CFG.TRAIN.OPTIM.PARAM= {
-    "lr":0.001,
-    "weight_decay":0.0001
-}
-CFG.TRAIN.LR_SCHEDULER = EasyDict()
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
-CFG.TRAIN.LR_SCHEDULER.PARAM= {
-    "milestones":[100, 150],
-    "gamma":0.5
-}
-
-# ================= train ================= #
-CFG.TRAIN.CLIP_GRAD_PARAM = {
-    'max_norm': 5.0
-}
-CFG.TRAIN.NUM_EPOCHS = 200
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
-    'checkpoints',
-    '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
-)
-# train data
-CFG.TRAIN.DATA = EasyDict()
-CFG.TRAIN.NULL_VAL = 0.0
-## read data
-CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TRAIN.DATA.BATCH_SIZE = 32
-CFG.TRAIN.DATA.PREFETCH = False
-CFG.TRAIN.DATA.SHUFFLE = True
-CFG.TRAIN.DATA.NUM_WORKERS = 2
-CFG.TRAIN.DATA.PIN_MEMORY = False
-## curriculum learning
-CFG.TRAIN.CL = EasyDict()
-CFG.TRAIN.CL.WARM_EPOCHS = 0
-CFG.TRAIN.CL.CL_EPOCHS = 6
-CFG.TRAIN.CL.PREDICTION_LENGTH = 12
-
-# ================= validate ================= #
-CFG.VAL = EasyDict()
-CFG.VAL.INTERVAL = 1
-# validating data
-CFG.VAL.DATA = EasyDict()
-## read data
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.VAL.DATA.BATCH_SIZE = 64
-CFG.VAL.DATA.PREFETCH = False
-CFG.VAL.DATA.SHUFFLE = False
-CFG.VAL.DATA.NUM_WORKERS = 2
-CFG.VAL.DATA.PIN_MEMORY = False
-
-# ================= test ================= #
-CFG.TEST = EasyDict()
-CFG.TEST.INTERVAL = 1
-# validating data
-CFG.TEST.DATA = EasyDict()
-## read data
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME
-## dataloader args, optional
-CFG.TEST.DATA.BATCH_SIZE = 64
-CFG.TEST.DATA.PREFETCH = False
-CFG.TEST.DATA.SHUFFLE = False
-CFG.TEST.DATA.NUM_WORKERS = 2
-CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/basicts/options/DGCRN/DGCRN_PEMS03.py b/basicts/options/DGCRN/DGCRN_PEMS03.py
deleted file mode 100644
index fd91d907..00000000
--- a/basicts/options/DGCRN/DGCRN_PEMS03.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import os
-from easydict import EasyDict
-import torch
-# runner
-from basicts.runners.DGCRN_runner import DGCRNRunner
-from basicts.data.base_dataset import BaseDataset
-from basicts.metrics.mae import masked_mae
-from basicts.metrics.mape import masked_mape
-from basicts.metrics.rmse import masked_rmse
-from basicts.losses.losses import masked_l1_loss
-from basicts.utils.serialization import load_adj
-
-CFG = EasyDict()
-
-# ================= general ================= #
-CFG.DESCRIPTION = 'DGCRN model configuration'
-CFG.RUNNER = DGCRNRunner
-CFG.DATASET_CLS = BaseDataset
-CFG.DATASET_NAME = "PEMS03"
-CFG.DATASET_TYPE = 'Traffic flow'
-CFG.GPU_NUM = 1
-CFG.METRICS = {
-    "MAE": masked_mae,
-    "RMSE": masked_rmse,
-    "MAPE": masked_mape
-}
-
-# ================= environment ================= #
-CFG.ENV = EasyDict()
-CFG.ENV.SEED = 1
-CFG.ENV.CUDNN = EasyDict()
-CFG.ENV.CUDNN.ENABLED = True
-
-# ================= model ================= #
-CFG.MODEL = 
EasyDict() -CFG.MODEL.NAME = 'DGCRN' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition") -CFG.MODEL.PARAM = { - "gcn_depth" : 2, - "num_nodes" : 358, - "predefined_A": [torch.Tensor(_) for _ in adj_mx], - "dropout" : 0.3, - "subgraph_size" : 20, - "node_dim" : 40, - "middle_dim": 2, - "seq_length": 12, - "in_dim" : 2, - "list_weight": [0.05, 0.95, 0.95], - "tanhalpha" : 3, - "cl_decay_steps" : 4000, - "rnn_size" : 64, - "hyperGNN_dim" : 16 -} - -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":0.0001 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[100, 150], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False -## curriculum learning -CFG.TRAIN.CL = EasyDict() -CFG.TRAIN.CL.WARM_EPOCHS = 0 -CFG.TRAIN.CL.CL_EPOCHS = 6 -CFG.TRAIN.CL.PREDICTION_LENGTH = 12 - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/DGCRN/DGCRN_PEMS04.py b/basicts/options/DGCRN/DGCRN_PEMS04.py deleted file mode 100644 index d7838cf2..00000000 --- a/basicts/options/DGCRN/DGCRN_PEMS04.py +++ /dev/null @@ -1,126 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.DGCRN_runner import DGCRNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'DGCRN model configuration' -CFG.RUNNER = DGCRNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS04" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() 
-CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'DGCRN' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition") -CFG.MODEL.PARAM = { - "gcn_depth" : 2, - "num_nodes" : 307, - "predefined_A": [torch.Tensor(_) for _ in adj_mx], - "dropout" : 0.3, - "subgraph_size" : 20, - "node_dim" : 40, - "middle_dim": 2, - "seq_length": 12, - "in_dim" : 2, - "list_weight": [0.05, 0.95, 0.95], - "tanhalpha" : 3, - "cl_decay_steps" : 4000, - "rnn_size" : 64, - "hyperGNN_dim" : 16 -} - -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":0.0001 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[100, 150], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False -## curriculum learning -CFG.TRAIN.CL = EasyDict() -CFG.TRAIN.CL.WARM_EPOCHS = 0 -CFG.TRAIN.CL.CL_EPOCHS = 6 -CFG.TRAIN.CL.PREDICTION_LENGTH = 12 - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/DGCRN/DGCRN_PEMS07.py b/basicts/options/DGCRN/DGCRN_PEMS07.py deleted file mode 100644 index 376f0f04..00000000 --- a/basicts/options/DGCRN/DGCRN_PEMS07.py +++ /dev/null @@ -1,126 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.DGCRN_runner import DGCRNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'DGCRN model configuration' -CFG.RUNNER = DGCRNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS07" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment 
================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'DGCRN' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition") -CFG.MODEL.PARAM = { - "gcn_depth" : 2, - "num_nodes" : 883, - "predefined_A": [torch.Tensor(_) for _ in adj_mx], - "dropout" : 0.3, - "subgraph_size" : 20, - "node_dim" : 40, - "middle_dim": 2, - "seq_length": 12, - "in_dim" : 2, - "list_weight": [0.05, 0.95, 0.95], - "tanhalpha" : 3, - "cl_decay_steps" : 4000, - "rnn_size" : 64, - "hyperGNN_dim" : 16 -} - -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":0.0001 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[100, 150], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 24 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False -## curriculum learning -CFG.TRAIN.CL = EasyDict() -CFG.TRAIN.CL.WARM_EPOCHS = 0 -CFG.TRAIN.CL.CL_EPOCHS = 6 -CFG.TRAIN.CL.PREDICTION_LENGTH = 12 - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/DGCRN/DGCRN_PEMS08.py b/basicts/options/DGCRN/DGCRN_PEMS08.py deleted file mode 100644 index 42918067..00000000 --- a/basicts/options/DGCRN/DGCRN_PEMS08.py +++ /dev/null @@ -1,126 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.DGCRN_runner import DGCRNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'DGCRN model configuration' -CFG.RUNNER = DGCRNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS08" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": 
masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'DGCRN' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition") -CFG.MODEL.PARAM = { - "gcn_depth" : 2, - "num_nodes" : 170, - "predefined_A": [torch.Tensor(_) for _ in adj_mx], - "dropout" : 0.3, - "subgraph_size" : 20, - "node_dim" : 40, - "middle_dim": 2, - "seq_length": 12, - "in_dim" : 2, - "list_weight": [0.05, 0.95, 0.95], - "tanhalpha" : 3, - "cl_decay_steps" : 4000, - "rnn_size" : 64, - "hyperGNN_dim" : 16 -} - -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":0.0001 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[100, 150], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False -## curriculum learning -CFG.TRAIN.CL = EasyDict() -CFG.TRAIN.CL.WARM_EPOCHS = 0 -CFG.TRAIN.CL.CL_EPOCHS = 6 -CFG.TRAIN.CL.PREDICTION_LENGTH = 12 - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GMAN/GMAN_METR-LA.py b/basicts/options/GMAN/GMAN_METR-LA.py deleted file mode 100644 index 0b25f6df..00000000 --- a/basicts/options/GMAN/GMAN_METR-LA.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.GMAN_runner import GMANRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_node2vec_emb - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'GMAN model configuration' -CFG.RUNNER = GMANRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "METR-LA" 
-CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GMAN' -spatial_embed = load_node2vec_emb("datasets/" + CFG.DATASET_NAME + "/node2vec_emb.txt") -CFG.MODEL.PARAM = { - "SE": spatial_embed, - "L" : 5, - "K" : 8, - "d" : 8, - "num_his" : 12, - "bn_decay": 0.1 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 80], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 12 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GMAN/GMAN_PEMS-BAY.py b/basicts/options/GMAN/GMAN_PEMS-BAY.py deleted file mode 100644 index 8622d225..00000000 --- a/basicts/options/GMAN/GMAN_PEMS-BAY.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.GMAN_runner import GMANRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_node2vec_emb - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'GMAN model configuration' -CFG.RUNNER = GMANRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS-BAY" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# 
================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GMAN' -spatial_embed = load_node2vec_emb("datasets/" + CFG.DATASET_NAME + "/node2vec_emb.txt") -CFG.MODEL.PARAM = { - "SE": spatial_embed, - "L" : 1, - "K" : 8, - "d" : 8, - "num_his" : 12, - "bn_decay": 0.1 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 80], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 16 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GMAN/GMAN_PEMS03.py b/basicts/options/GMAN/GMAN_PEMS03.py deleted file mode 100644 index 0657b4d6..00000000 --- a/basicts/options/GMAN/GMAN_PEMS03.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.GMAN_runner import GMANRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_node2vec_emb - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'GMAN model configuration' -CFG.RUNNER = GMANRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS03" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GMAN' -spatial_embed = load_node2vec_emb("datasets/" + CFG.DATASET_NAME + "/node2vec_emb.txt") -CFG.MODEL.PARAM = { - "SE": spatial_embed, - "L" : 1, - "K" : 8, - "d" : 8, - "num_his" : 12, - "bn_decay": 0.1 -} 
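A note on the `SE` entry just above: `load_node2vec_emb` reads the pre-trained spatial embedding that GMAN consumes. A minimal sketch of such a loader, assuming the standard node2vec text format (a `num_nodes dim` header line followed by `node_id v1 ... vD` rows) — the actual implementation lives in `basicts/utils/serialization.py` and may differ in details:

```python
import torch

def load_node2vec_emb_sketch(file_path: str) -> torch.Tensor:
    """Parse a node2vec embedding file into a [num_nodes, dim] float tensor.

    Assumes the common node2vec output format: first line "num_nodes dim",
    each following line "node_id v1 ... vD".
    """
    with open(file_path, "r") as f:
        header = f.readline().split()
        num_nodes, dim = int(header[0]), int(header[1])
        emb = torch.zeros(num_nodes, dim, dtype=torch.float32)
        for line in f:
            parts = line.split()
            emb[int(parts[0])] = torch.tensor([float(v) for v in parts[1:]])
    return emb
```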
-CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 20, 40, 60, 80], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 16 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GMAN/GMAN_PEMS04.py b/basicts/options/GMAN/GMAN_PEMS04.py deleted file mode 100644 index e69f851d..00000000 --- a/basicts/options/GMAN/GMAN_PEMS04.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.GMAN_runner import GMANRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_node2vec_emb - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'GMAN model configuration' -CFG.RUNNER = GMANRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS04" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GMAN' -spatial_embed = load_node2vec_emb("datasets/" + CFG.DATASET_NAME + "/node2vec_emb.txt") -CFG.MODEL.PARAM = { - "SE": spatial_embed, - "L" : 1, - "K" : 8, - "d" : 8, - "num_his" : 12, - "bn_decay": 0.1 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} 
-CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 20, 40, 60, 80], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 16 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GMAN/GMAN_PEMS07.py b/basicts/options/GMAN/GMAN_PEMS07.py deleted file mode 100644 index 1e252b42..00000000 --- a/basicts/options/GMAN/GMAN_PEMS07.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.GMAN_runner import GMANRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_node2vec_emb - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'GMAN model configuration' -CFG.RUNNER = GMANRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS07" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GMAN' -spatial_embed = load_node2vec_emb("datasets/" + CFG.DATASET_NAME + "/node2vec_emb.txt") -CFG.MODEL.PARAM = { - "SE": spatial_embed, - "L" : 1, - "K" : 8, - "d" : 8, - "num_his" : 12, - "bn_decay": 0.1 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 80], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 
-CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 4 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GMAN/GMAN_PEMS08.py b/basicts/options/GMAN/GMAN_PEMS08.py deleted file mode 100644 index 01cf0952..00000000 --- a/basicts/options/GMAN/GMAN_PEMS08.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.GMAN_runner import GMANRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_node2vec_emb - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'GMAN model configuration' -CFG.RUNNER = GMANRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS08" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GMAN' -spatial_embed = load_node2vec_emb("datasets/" + CFG.DATASET_NAME + "/node2vec_emb.txt") -CFG.MODEL.PARAM = { - "SE": spatial_embed, - "L" : 1, - "K" : 8, - "d" : 8, - "num_his" : 12, - "bn_decay": 0.1 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 20, 40, 60, 80], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE 
= 16 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GTS/GTS_METR-LA.py b/basicts/options/GTS/GTS_METR-LA.py deleted file mode 100644 index 65f4af82..00000000 --- a/basicts/options/GTS/GTS_METR-LA.py +++ /dev/null @@ -1,130 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.GTS_runner import GTSRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_pkl - -CFG = EasyDict() - -resume = False # DCRNN does not allow to load parameters since it creates parameters in the first iteration -if not resume: - import random - _ = random.randint(-1e6, 1e6) - -# ================= general ================= # -CFG.DESCRIPTION = 'GTS model configuration' -CFG.RUNNER = GTSRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "METR-LA" -CFG.DATASET_TYPE = 'Traffic speed' -CFG._ = _ -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GTS' -node_feats_full = load_pkl("datasets/{0}/data.pkl".format(CFG.DATASET_NAME))['processed_data'][..., 0] -train_index_list = load_pkl("datasets/{0}/index.pkl".format(CFG.DATASET_NAME))['train'] -node_feats = node_feats_full[:train_index_list[-1][-1], ...] 
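The three `node_feats` lines above prepare the series the GTS graph learner is trained on: feature channel 0 of the preprocessed data, truncated at the last training index so the learned adjacency never sees validation or test steps. An annotated restatement of that logic (the index-tuple semantics are an assumption about the BasicTS data layout):

```python
from basicts.utils.serialization import load_pkl

# processed_data: [num_steps, num_nodes, num_features]; keep channel 0 only.
node_feats_full = load_pkl("datasets/METR-LA/data.pkl")["processed_data"][..., 0]
# 'train' holds per-sample index tuples; the last tuple's final entry is
# assumed to mark where the training split ends.
train_index_list = load_pkl("datasets/METR-LA/index.pkl")["train"]
node_feats = node_feats_full[:train_index_list[-1][-1], ...]  # train-only slice
```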
-CFG.MODEL.PARAM = { - "cl_decay_steps" : 2000, - "filter_type" : "dual_random_walk", - "horizon" : 12, - "input_dim" : 2, - "l1_decay" : 0, - "max_diffusion_step": 3, - "num_nodes" : 207, - "num_rnn_layers" : 1, - "output_dim" : 1, - "rnn_units" : 64, - "seq_len" : 12, - "use_curriculum_learning": True, - "dim_fc" : 383664, - "node_feats" : node_feats, - "temp" : 0.5, - "k" : 10 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.005, - "eps":1e-3 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[20, 40], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -CFG.TRAIN.SETUP_GRAPH = True -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GTS/GTS_PEMS-BAY.py b/basicts/options/GTS/GTS_PEMS-BAY.py deleted file mode 100644 index 51fe5c7c..00000000 --- a/basicts/options/GTS/GTS_PEMS-BAY.py +++ /dev/null @@ -1,130 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.GTS_runner import GTSRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_pkl - -CFG = EasyDict() - -resume = False # DCRNN does not allow to load parameters since it creates parameters in the first iteration -if not resume: - import random - _ = random.randint(-1e6, 1e6) - -# ================= general ================= # -CFG.DESCRIPTION = 'GTS model configuration' -CFG.RUNNER = GTSRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS-BAY" -CFG.DATASET_TYPE = 'Traffic speed' -CFG._ = _ -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= 
# -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GTS' -node_feats_full = load_pkl("datasets/{0}/data.pkl".format(CFG.DATASET_NAME))['processed_data'][..., 0] -train_index_list = load_pkl("datasets/{0}/index.pkl".format(CFG.DATASET_NAME))['train'] -node_feats = node_feats_full[:train_index_list[-1][-1], ...] -CFG.MODEL.PARAM = { - "cl_decay_steps" : 2000, - "filter_type" : "dual_random_walk", - "horizon" : 12, - "input_dim" : 2, - "l1_decay" : 0, - "max_diffusion_step": 2, - "num_nodes" : 325, - "num_rnn_layers" : 1, - "output_dim" : 1, - "rnn_units" : 128, - "seq_len" : 12, - "use_curriculum_learning": True, - "dim_fc" : 583520, - "node_feats" : node_feats, - "temp" : 0.5, - "k" : 30 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "eps":1e-3 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[20, 30], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -CFG.TRAIN.SETUP_GRAPH = True -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GTS/GTS_PEMS03.py b/basicts/options/GTS/GTS_PEMS03.py deleted file mode 100644 index fb3f812e..00000000 --- a/basicts/options/GTS/GTS_PEMS03.py +++ /dev/null @@ -1,130 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.GTS_runner import GTSRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_pkl - -CFG = EasyDict() - -resume = False # DCRNN does not allow to load parameters since it creates parameters in the first iteration -if not resume: - import random - _ = random.randint(-1e6, 1e6) - -# ================= general ================= # -CFG.DESCRIPTION = 'GTS model configuration' -CFG.RUNNER = GTSRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS03" -CFG.DATASET_TYPE = 'Traffic flow' -CFG._ = _ 
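On the `resume` flag and `CFG._ = _` above: because GTS (like DCRNN) materializes some parameters only during the first forward pass, resuming from an existing checkpoint directory would fail. Salting the config with a random integer changes the config fingerprint, which — assuming BasicTS derives the checkpoint directory from a hash of the config, as EasyTorch-style runners commonly do — pushes every launch into a fresh directory. A toy illustration of that effect; the hashing scheme here is hypothetical:

```python
import hashlib
import random

def config_fingerprint(cfg: dict) -> str:
    # Hash a stable rendering of the config; changing any field,
    # including the throwaway '_' salt, yields a new fingerprint.
    return hashlib.md5(repr(sorted(cfg.items())).encode()).hexdigest()[:8]

cfg = {"MODEL": "GTS", "NUM_EPOCHS": 200, "_": random.randint(-10**6, 10**6)}
print(config_fingerprint(cfg))  # differs on every run, so no stale resume
```

One small wrinkle: `random.randint(-1e6, 1e6)` passes floats, relying on an implicit conversion that Python 3.10 deprecated and 3.12 removed; `random.randint(-10**6, 10**6)` is the future-proof spelling.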
-CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GTS' -node_feats_full = load_pkl("datasets/{0}/data.pkl".format(CFG.DATASET_NAME))['processed_data'][..., 0] -train_index_list = load_pkl("datasets/{0}/index.pkl".format(CFG.DATASET_NAME))['train'] -node_feats = node_feats_full[:train_index_list[-1][-1], ...] -CFG.MODEL.PARAM = { - "cl_decay_steps" : 2000, - "filter_type" : "dual_random_walk", - "horizon" : 12, - "input_dim" : 2, - "l1_decay" : 0, - "max_diffusion_step": 3, - "num_nodes" : 358, - "num_rnn_layers" : 1, - "output_dim" : 1, - "rnn_units" : 64, - "seq_len" : 12, - "use_curriculum_learning": True, - "dim_fc" : 251456, - "node_feats" : node_feats, - "temp" : 0.5, - "k" : 30 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "eps":1e-3 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[20, 30], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -CFG.TRAIN.SETUP_GRAPH = True -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GTS/GTS_PEMS04.py b/basicts/options/GTS/GTS_PEMS04.py deleted file mode 100644 index c5546925..00000000 --- a/basicts/options/GTS/GTS_PEMS04.py +++ /dev/null @@ -1,130 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.GTS_runner import GTSRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_pkl - -CFG = EasyDict() - -resume = False # DCRNN does not allow to load parameters since it creates parameters in the first 
iteration -if not resume: - import random - _ = random.randint(-1e6, 1e6) - -# ================= general ================= # -CFG.DESCRIPTION = 'GTS model configuration' -CFG.RUNNER = GTSRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS04" -CFG.DATASET_TYPE = 'Traffic flow' -CFG._ = _ -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GTS' -node_feats_full = load_pkl("datasets/{0}/data.pkl".format(CFG.DATASET_NAME))['processed_data'][..., 0] -train_index_list = load_pkl("datasets/{0}/index.pkl".format(CFG.DATASET_NAME))['train'] -node_feats = node_feats_full[:train_index_list[-1][-1], ...] -CFG.MODEL.PARAM = { - "cl_decay_steps" : 2000, - "filter_type" : "dual_random_walk", - "horizon" : 12, - "input_dim" : 2, - "l1_decay" : 0, - "max_diffusion_step": 3, - "num_nodes" : 307, - "num_rnn_layers" : 1, - "output_dim" : 1, - "rnn_units" : 64, - "seq_len" : 12, - "use_curriculum_learning": True, - "dim_fc" : 162976, - "node_feats" : node_feats, - "temp" : 0.5, - "k" : 30 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "eps":1e-3 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[20, 30], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -CFG.TRAIN.SETUP_GRAPH = True -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GTS/GTS_PEMS07.py b/basicts/options/GTS/GTS_PEMS07.py deleted file mode 100644 index f3131513..00000000 --- a/basicts/options/GTS/GTS_PEMS07.py +++ /dev/null @@ -1,130 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.GTS_runner import GTSRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from 
basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_pkl - -CFG = EasyDict() - -resume = False # DCRNN does not allow to load parameters since it creates parameters in the first iteration -if not resume: - import random - _ = random.randint(-1e6, 1e6) - -# ================= general ================= # -CFG.DESCRIPTION = 'GTS model configuration' -CFG.RUNNER = GTSRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS07" -CFG.DATASET_TYPE = 'Traffic flow' -CFG._ = _ -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GTS' -node_feats_full = load_pkl("datasets/{0}/data.pkl".format(CFG.DATASET_NAME))['processed_data'][..., 0] -train_index_list = load_pkl("datasets/{0}/index.pkl".format(CFG.DATASET_NAME))['train'] -node_feats = node_feats_full[:train_index_list[-1][-1], ...] -CFG.MODEL.PARAM = { - "cl_decay_steps" : 2000, - "filter_type" : "dual_random_walk", - "horizon" : 12, - "input_dim" : 2, - "l1_decay" : 0, - "max_diffusion_step": 2, - "num_nodes" : 883, - "num_rnn_layers" : 1, - "output_dim" : 1, - "rnn_units" : 64, - "seq_len" : 12, - "use_curriculum_learning": True, - "dim_fc" : 270816, - "node_feats" : node_feats, - "temp" : 0.5, - "k" : 30 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "eps":1e-3 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[20, 30], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -CFG.TRAIN.SETUP_GRAPH = True -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GTS/GTS_PEMS08.py b/basicts/options/GTS/GTS_PEMS08.py deleted file mode 100644 
index f53d4a30..00000000 --- a/basicts/options/GTS/GTS_PEMS08.py +++ /dev/null @@ -1,130 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.GTS_runner import GTSRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_pkl - -CFG = EasyDict() - -resume = False # DCRNN does not allow to load parameters since it creates parameters in the first iteration -if not resume: - import random - _ = random.randint(-1e6, 1e6) - -# ================= general ================= # -CFG.DESCRIPTION = 'GTS model configuration' -CFG.RUNNER = GTSRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS08" -CFG.DATASET_TYPE = 'Traffic flow' -CFG._ = _ -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GTS' -node_feats_full = load_pkl("datasets/{0}/data.pkl".format(CFG.DATASET_NAME))['processed_data'][..., 0] -train_index_list = load_pkl("datasets/{0}/index.pkl".format(CFG.DATASET_NAME))['train'] -node_feats = node_feats_full[:train_index_list[-1][-1], ...] -CFG.MODEL.PARAM = { - "cl_decay_steps" : 2000, - "filter_type" : "dual_random_walk", - "horizon" : 12, - "input_dim" : 2, - "l1_decay" : 0, - "max_diffusion_step": 3, - "num_nodes" : 170, - "num_rnn_layers" : 1, - "output_dim" : 1, - "rnn_units" : 64, - "seq_len" : 12, - "use_curriculum_learning": True, - "dim_fc" : 171280, - "node_feats" : node_feats, - "temp" : 0.5, - "k" : 30 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "eps":1e-3 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[20, 30], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -CFG.TRAIN.SETUP_GRAPH = True -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME 
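Note: every GTS config in this directory derives its node features the same way, slicing the preprocessed series to the training span so the graph structure is learned from training data only. A minimal sketch of that step, assuming the pickle layout shown in these configs (data.pkl holding {'processed_data': array}, index.pkl holding a 'train' list whose entries are index tuples):

    # Sketch of the node-feature slicing used by the GTS configs above.
    # Assumes the BasicTS pickle layout visible in this diff.
    from basicts.utils.serialization import load_pkl

    dataset_name = "PEMS08"  # illustrative; any of the deleted configs' datasets
    node_feats_full = load_pkl(f"datasets/{dataset_name}/data.pkl")["processed_data"][..., 0]
    train_index_list = load_pkl(f"datasets/{dataset_name}/index.pkl")["train"]
    # The last training index marks the end of the training span; later steps
    # belong to validation/test and are excluded from graph learning.
    node_feats = node_feats_full[:train_index_list[-1][-1], ...]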
-## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GraphWaveNet/GraphWaveNet_METR-LA.py b/basicts/options/GraphWaveNet/GraphWaveNet_METR-LA.py deleted file mode 100644 index 2eb71be4..00000000 --- a/basicts/options/GraphWaveNet/GraphWaveNet_METR-LA.py +++ /dev/null @@ -1,121 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.GraphWaveNet_runner import GraphWaveNetRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Graph WaveNet model configuration' -CFG.RUNNER = GraphWaveNetRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "METR-LA" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GraphWaveNet' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition") -CFG.MODEL.PARAM = { - "num_nodes" : 207, - "supports" :[torch.tensor(i) for i in adj_mx], - "dropout" : 0.3, - "gcn_bool" : True, - "addaptadj" : True, - "aptinit" : None, - "in_dim" : 2, - "out_dim" : 12, - "residual_channels" : 32, - "dilation_channels" : 32, - "skip_channels" : 256, - "end_channels" : 512, - "kernel_size" : 2, - "blocks" : 4, - "layers" : 2 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 
'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GraphWaveNet/GraphWaveNet_PEMS-BAY.py b/basicts/options/GraphWaveNet/GraphWaveNet_PEMS-BAY.py deleted file mode 100644 index 88d60b94..00000000 --- a/basicts/options/GraphWaveNet/GraphWaveNet_PEMS-BAY.py +++ /dev/null @@ -1,121 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.GraphWaveNet_runner import GraphWaveNetRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Graph WaveNet model configuration' -CFG.RUNNER = GraphWaveNetRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS-BAY" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GraphWaveNet' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition") -CFG.MODEL.PARAM = { - "num_nodes" : 325, - "supports" :[torch.tensor(i) for i in adj_mx], - "dropout" : 0.3, - "gcn_bool" : True, - "addaptadj" : True, - "aptinit" : None, - "in_dim" : 2, - "out_dim" : 12, - "residual_channels" : 32, - "dilation_channels" : 32, - "skip_channels" : 256, - "end_channels" : 512, - "kernel_size" : 2, - "blocks" : 4, - "layers" : 2 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## 
read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GraphWaveNet/GraphWaveNet_PEMS03.py b/basicts/options/GraphWaveNet/GraphWaveNet_PEMS03.py deleted file mode 100644 index 39e74e7a..00000000 --- a/basicts/options/GraphWaveNet/GraphWaveNet_PEMS03.py +++ /dev/null @@ -1,121 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.GraphWaveNet_runner import GraphWaveNetRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Graph WaveNet model configuration' -CFG.RUNNER = GraphWaveNetRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS03" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GraphWaveNet' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition") -CFG.MODEL.PARAM = { - "num_nodes" : 358, - "supports" :[torch.tensor(i) for i in adj_mx], - "dropout" : 0.3, - "gcn_bool" : True, - "addaptadj" : True, - "aptinit" : None, - "in_dim" : 2, - "out_dim" : 12, - "residual_channels" : 32, - "dilation_channels" : 32, - "skip_channels" : 256, - "end_channels" : 512, - "kernel_size" : 2, - "blocks" : 4, - "layers" : 2 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data 
-CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GraphWaveNet/GraphWaveNet_PEMS04.py b/basicts/options/GraphWaveNet/GraphWaveNet_PEMS04.py deleted file mode 100644 index 88c73a63..00000000 --- a/basicts/options/GraphWaveNet/GraphWaveNet_PEMS04.py +++ /dev/null @@ -1,121 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.GraphWaveNet_runner import GraphWaveNetRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Graph WaveNet model configuration' -CFG.RUNNER = GraphWaveNetRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS04" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GraphWaveNet' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition") -CFG.MODEL.PARAM = { - "num_nodes" : 307, - "supports" :[torch.tensor(i) for i in adj_mx], - "dropout" : 0.3, - "gcn_bool" : True, - "addaptadj" : True, - "aptinit" : None, - "in_dim" : 2, - "out_dim" : 12, - "residual_channels" : 32, - "dilation_channels" : 32, - "skip_channels" : 256, - "end_channels" : 512, - "kernel_size" : 2, - "blocks" : 4, - "layers" : 2 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() 
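Note: the Graph WaveNet configs removed here all build their graph supports identically; only num_nodes and the dataset name change. A minimal sketch of that step, reusing the load_adj call shown above ("doubletransition" presumably yields the forward and backward random-walk transition matrices, matching the model's dual-direction diffusion):

    # Sketch of the `supports` construction shared by the Graph WaveNet
    # configs in this diff; assumes load_adj returns (list_of_adjs, raw_adj).
    import torch
    from basicts.utils.serialization import load_adj

    adj_mx, _ = load_adj("datasets/PEMS04/adj_mx.pkl", "doubletransition")
    supports = [torch.tensor(i) for i in adj_mx]  # one dense tensor per transition matrix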
-CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GraphWaveNet/GraphWaveNet_PEMS07.py b/basicts/options/GraphWaveNet/GraphWaveNet_PEMS07.py deleted file mode 100644 index af3d5c03..00000000 --- a/basicts/options/GraphWaveNet/GraphWaveNet_PEMS07.py +++ /dev/null @@ -1,121 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.GraphWaveNet_runner import GraphWaveNetRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Graph WaveNet model configuration' -CFG.RUNNER = GraphWaveNetRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS07" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GraphWaveNet' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition") -CFG.MODEL.PARAM = { - "num_nodes" : 883, - "supports" :[torch.tensor(i) for i in adj_mx], - "dropout" : 0.3, - "gcn_bool" : True, - "addaptadj" : True, - "aptinit" : None, - "in_dim" : 2, - "out_dim" : 12, - "residual_channels" : 32, - "dilation_channels" : 32, - "skip_channels" : 256, - "end_channels" : 512, - "kernel_size" : 2, - "blocks" : 4, - "layers" : 2 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test 
================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/GraphWaveNet/GraphWaveNet_PEMS08.py b/basicts/options/GraphWaveNet/GraphWaveNet_PEMS08.py deleted file mode 100644 index f5a79fae..00000000 --- a/basicts/options/GraphWaveNet/GraphWaveNet_PEMS08.py +++ /dev/null @@ -1,121 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.GraphWaveNet_runner import GraphWaveNetRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Graph WaveNet model configuration' -CFG.RUNNER = GraphWaveNetRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS08" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'GraphWaveNet' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition") -CFG.MODEL.PARAM = { - "num_nodes" : 170, - "supports" :[torch.tensor(i) for i in adj_mx], - "dropout" : 0.3, - "gcn_bool" : True, - "addaptadj" : True, - "aptinit" : None, - "in_dim" : 2, - "out_dim" : 12, - "residual_channels" : 32, - "dilation_channels" : 32, - "skip_channels" : 256, - "end_channels" : 512, - "kernel_size" : 2, - "blocks" : 4, - "layers" : 2 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - 
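Note: the per-dataset files being deleted differ only in a handful of fields (dataset name, num_nodes, scheduler milestones, batch size), which is presumably why they can be dropped wholesale. A hypothetical factory, not part of this change, illustrates how that duplication could be avoided:

    # Hypothetical helper (illustrative only, not in this diff): build a
    # Graph WaveNet CFG from the few fields that actually vary per dataset.
    from easydict import EasyDict

    def make_gwnet_cfg(dataset_name, num_nodes, dataset_type, milestones):
        cfg = EasyDict()
        cfg.DESCRIPTION = "Graph WaveNet model configuration"
        cfg.DATASET_NAME = dataset_name
        cfg.DATASET_TYPE = dataset_type
        cfg.MODEL = EasyDict(NAME="GraphWaveNet")
        cfg.MODEL.PARAM = {"num_nodes": num_nodes, "in_dim": 2, "out_dim": 12}
        cfg.TRAIN = EasyDict()
        cfg.TRAIN.LR_SCHEDULER = EasyDict(
            TYPE="MultiStepLR", PARAM={"milestones": milestones, "gamma": 0.5})
        cfg.TRAIN.DATA = EasyDict(DIR="datasets/" + dataset_name)
        return cfg

    cfg = make_gwnet_cfg("PEMS08", 170, "Traffic flow", [1, 50, 100])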
-# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/HI/HI_Electricity336.py b/basicts/options/HI/HI_Electricity336.py deleted file mode 100644 index 3ccd54bc..00000000 --- a/basicts/options/HI/HI_Electricity336.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.HI_runner import HIRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'HI model configuration' -CFG.RUNNER = HIRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "Electricity336" -CFG.DATASET_TYPE = 'Electricity consumption' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'HINetwork' -CFG.MODEL.PARAM = { - 'input_length': 168, - 'output_length': 12, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.005, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 1 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/HI/HI_METR-LA.py b/basicts/options/HI/HI_METR-LA.py deleted file mode 
100644 index 96cc7874..00000000 --- a/basicts/options/HI/HI_METR-LA.py +++ /dev/null @@ -1,105 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.HI_runner import HIRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'HI model configuration' -CFG.RUNNER = HIRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "METR-LA" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'HINetwork' -CFG.MODEL.PARAM = { - 'input_length': 12, - 'output_length': 12 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.005, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 1 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/HI/HI_PEMS-BAY.py b/basicts/options/HI/HI_PEMS-BAY.py deleted file mode 100644 index a3d2e38e..00000000 --- a/basicts/options/HI/HI_PEMS-BAY.py +++ /dev/null @@ -1,105 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.HI_runner import HIRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 
'HI model configuration' -CFG.RUNNER = HIRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS-BAY" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'HINetwork' -CFG.MODEL.PARAM = { - 'input_length': 12, - 'output_length': 12, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.005, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 1 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/HI/HI_PEMS03.py b/basicts/options/HI/HI_PEMS03.py deleted file mode 100644 index b377f041..00000000 --- a/basicts/options/HI/HI_PEMS03.py +++ /dev/null @@ -1,105 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.HI_runner import HIRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'HI model configuration' -CFG.RUNNER = HIRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS03" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'HINetwork' -CFG.MODEL.PARAM = { - 
'input_length': 12, - 'output_length': 12, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.005, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 1 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/HI/HI_PEMS04.py b/basicts/options/HI/HI_PEMS04.py deleted file mode 100644 index cac1e526..00000000 --- a/basicts/options/HI/HI_PEMS04.py +++ /dev/null @@ -1,105 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.HI_runner import HIRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'HI model configuration' -CFG.RUNNER = HIRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS04" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'HINetwork' -CFG.MODEL.PARAM = { - 'input_length': 12, - 'output_length': 12, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.005, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train 
================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 1 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/HI/HI_PEMS07.py b/basicts/options/HI/HI_PEMS07.py deleted file mode 100644 index 841e3f56..00000000 --- a/basicts/options/HI/HI_PEMS07.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.HI_runner import HIRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'HI model configuration' -CFG.RUNNER = HIRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS07" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'HINetwork' -CFG.MODEL.PARAM = { - 'input_length': 12, - 'output_length': 12, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.005, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 1 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY 
= False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/HI/HI_PEMS08.py b/basicts/options/HI/HI_PEMS08.py deleted file mode 100644 index f8fce1e8..00000000 --- a/basicts/options/HI/HI_PEMS08.py +++ /dev/null @@ -1,106 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.HI_runner import HIRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'HI model configuration' -CFG.RUNNER = HIRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS08" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'HINetwork' -CFG.MODEL.PARAM = { - 'input_length': 12, - 'output_length': 12, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.005, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 1 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = 
EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/LSTM/LSTM_Electricity336.py b/basicts/options/LSTM/LSTM_Electricity336.py deleted file mode 100644 index 45e5d9ab..00000000 --- a/basicts/options/LSTM/LSTM_Electricity336.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.LSTM_runner import LSTMRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import L1Loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'AGCRN model configuration' -CFG.RUNNER = LSTMRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "Electricity336" -CFG.DATASET_TYPE = 'Electricity consumption' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'LSTM' -CFG.MODEL.PARAM = { - "input_dim" : 2, - "rnn_units" : 64, - "output_dim": 1, - "horizon" : 12, - "num_layers": 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = L1Loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.003, -} -# CFG.TRAIN.LR_SCHEDULER = EasyDict() -# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -# CFG.TRAIN.LR_SCHEDULER.PARAM= { -# "milestones":[5, 20, 40, 70], -# "gamma":0.3 -# } - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 16 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/LSTM/LSTM_METR-LA.py b/basicts/options/LSTM/LSTM_METR-LA.py deleted file mode 100644 index 9625f3f9..00000000 --- 
a/basicts/options/LSTM/LSTM_METR-LA.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.LSTM_runner import LSTMRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import L1Loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'AGCRN model configuration' -CFG.RUNNER = LSTMRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "METR-LA" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'LSTM' -CFG.MODEL.PARAM = { - "input_dim" : 2, - "rnn_units" : 64, - "output_dim": 1, - "horizon" : 12, - "num_layers": 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = L1Loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.003, -} -# CFG.TRAIN.LR_SCHEDULER = EasyDict() -# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -# CFG.TRAIN.LR_SCHEDULER.PARAM= { -# "milestones":[5, 20, 40, 70], -# "gamma":0.3 -# } - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 16 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/LSTM/LSTM_PEMS-BAY.py b/basicts/options/LSTM/LSTM_PEMS-BAY.py deleted file mode 100644 index cca4c691..00000000 --- a/basicts/options/LSTM/LSTM_PEMS-BAY.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.LSTM_runner import LSTMRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import L1Loss - -CFG = EasyDict() - -# ================= general ================= # 
-CFG.DESCRIPTION = 'AGCRN model configuration' -CFG.RUNNER = LSTMRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS-BAY" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'LSTM' -CFG.MODEL.PARAM = { - "input_dim" : 2, - "rnn_units" : 64, - "output_dim": 1, - "horizon" : 12, - "num_layers": 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = L1Loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.003, -} -# CFG.TRAIN.LR_SCHEDULER = EasyDict() -# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -# CFG.TRAIN.LR_SCHEDULER.PARAM= { -# "milestones":[5, 20, 40, 70], -# "gamma":0.3 -# } - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 16 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/LSTM/LSTM_PEMS03.py b/basicts/options/LSTM/LSTM_PEMS03.py deleted file mode 100644 index ffd1bbd3..00000000 --- a/basicts/options/LSTM/LSTM_PEMS03.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.LSTM_runner import LSTMRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import L1Loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'AGCRN model configuration' -CFG.RUNNER = LSTMRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS03" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL 
= EasyDict() -CFG.MODEL.NAME = 'LSTM' -CFG.MODEL.PARAM = { - "input_dim" : 2, - "rnn_units" : 64, - "output_dim": 1, - "horizon" : 12, - "num_layers": 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = L1Loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.003, -} -# CFG.TRAIN.LR_SCHEDULER = EasyDict() -# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -# CFG.TRAIN.LR_SCHEDULER.PARAM= { -# "milestones":[5, 20, 40, 70], -# "gamma":0.3 -# } - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 16 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/LSTM/LSTM_PEMS04.py b/basicts/options/LSTM/LSTM_PEMS04.py deleted file mode 100644 index d5a961ed..00000000 --- a/basicts/options/LSTM/LSTM_PEMS04.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.LSTM_runner import LSTMRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import L1Loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'AGCRN model configuration' -CFG.RUNNER = LSTMRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS04" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'LSTM' -CFG.MODEL.PARAM = { - "input_dim" : 2, - "rnn_units" : 64, - "output_dim": 1, - "horizon" : 12, - "num_layers": 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = L1Loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.003, -} -# CFG.TRAIN.LR_SCHEDULER = EasyDict() -# 
CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -# CFG.TRAIN.LR_SCHEDULER.PARAM= { -# "milestones":[5, 20, 40, 70], -# "gamma":0.3 -# } - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 16 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/LSTM/LSTM_PEMS07.py b/basicts/options/LSTM/LSTM_PEMS07.py deleted file mode 100644 index cbb9f532..00000000 --- a/basicts/options/LSTM/LSTM_PEMS07.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.LSTM_runner import LSTMRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import L1Loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'AGCRN model configuration' -CFG.RUNNER = LSTMRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS07" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'LSTM' -CFG.MODEL.PARAM = { - "input_dim" : 2, - "rnn_units" : 64, - "output_dim": 1, - "horizon" : 12, - "num_layers": 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = L1Loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.003, -} -# CFG.TRAIN.LR_SCHEDULER = EasyDict() -# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -# CFG.TRAIN.LR_SCHEDULER.PARAM= { -# "milestones":[5, 20, 40, 70], -# "gamma":0.3 -# } - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR 
= 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 16 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/LSTM/LSTM_PEMS08.py b/basicts/options/LSTM/LSTM_PEMS08.py deleted file mode 100644 index 71739a47..00000000 --- a/basicts/options/LSTM/LSTM_PEMS08.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.LSTM_runner import LSTMRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import L1Loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'AGCRN model configuration' -CFG.RUNNER = LSTMRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS08" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'LSTM' -CFG.MODEL.PARAM = { - "input_dim" : 2, - "rnn_units" : 64, - "output_dim": 1, - "horizon" : 12, - "num_layers": 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = L1Loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.003, -} -# CFG.TRAIN.LR_SCHEDULER = EasyDict() -# CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -# CFG.TRAIN.LR_SCHEDULER.PARAM= { -# "milestones":[5, 20, 40, 70], -# "gamma":0.3 -# } - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional 
-CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/MTGNN/MTGNN_Electricity336.py b/basicts/options/MTGNN/MTGNN_Electricity336.py deleted file mode 100644 index 8c73dd17..00000000 --- a/basicts/options/MTGNN/MTGNN_Electricity336.py +++ /dev/null @@ -1,139 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.MTGNN_runner import MTGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'MTGNN model configuration' -CFG.RUNNER = MTGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "Electricity336" -CFG.DATASET_TYPE = 'Electricity consumption' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'MTGNN' - -buildA_true = True -num_nodes = 336 -if buildA_true: # self-learned adjacency matrix - adj_mx = None -else: # use predefined adjacency matrix - _, adj_mx = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "doubletransition") - adj_mx = torch.tensor(adj_mx)-torch.eye(num_nodes) - -CFG.MODEL.PARAM = { - "gcn_true" : True, - "buildA_true": buildA_true, - "gcn_depth": 2, - "num_nodes": num_nodes, - "predefined_A":adj_mx, - "dropout":0.3, - "subgraph_size":20, - "node_dim":40, - "dilation_exponential":1, - "conv_channels":32, - "residual_channels":32, - "skip_channels":64, - "end_channels":128, - "seq_length":168, - "in_dim":2, - "out_dim":12, - "layers":3, - "propalpha":0.05, - "tanhalpha":3, - "layer_norm_affline":True -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":0.0001, -} - -# ================= train ================= # -CFG.TRAIN.CUSTOM = EasyDict() # MTGNN custom training args -CFG.TRAIN.CUSTOM.STEP_SIZE = 100 -CFG.TRAIN.CUSTOM.NUM_NODES = num_nodes -CFG.TRAIN.CUSTOM.NUM_SPLIT = 1 - -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 16 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True 
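# [Editorial note] The MTGNN config above either learns the graph end to
# end (buildA_true=True, predefined_A=None) or loads a predefined one via
# load_adj(..., "doubletransition") and strips self-loops with
# `torch.tensor(adj_mx) - torch.eye(num_nodes)`.  The sketch below is an
# assumption-labeled illustration of what a "double transition"
# preprocessing conventionally computes (forward/backward random-walk
# transition matrices, DCRNN-style); the actual implementation lives in
# basicts.utils.serialization.load_adj.
import numpy as np
import torch

def double_transition(adj: np.ndarray) -> list:
    """Row-normalized transition matrices P = D^-1 A for A and A^T."""
    def transition(a: np.ndarray) -> np.ndarray:
        deg = a.sum(axis=1, keepdims=True)
        deg[deg == 0.0] = 1.0                 # guard isolated nodes
        return a / deg
    return [transition(adj), transition(adj.T)]

num_nodes = 4                                 # toy size; the config uses 336
adj = np.ones((num_nodes, num_nodes), dtype=np.float32)
forward, backward = double_transition(adj)
predefined_A = torch.tensor(forward) - torch.eye(num_nodes)  # drop self-loops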
-CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False -## curriculum learning -CFG.TRAIN.CL = EasyDict() -CFG.TRAIN.CL.WARM_EPOCHS = 0 -CFG.TRAIN.CL.CL_EPOCHS = 3 -CFG.TRAIN.CL.PREDICTION_LENGTH = 12 - - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STGCN/STGCN_METR-LA.py b/basicts/options/STGCN/STGCN_METR-LA.py deleted file mode 100644 index 64b65445..00000000 --- a/basicts/options/STGCN/STGCN_METR-LA.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.STGCN_runner import STGCNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STGCN model configuration' -CFG.RUNNER = STGCNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "METR-LA" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STGCN' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "normlap") -adj_mx = torch.Tensor(adj_mx[0]) -CFG.MODEL.PARAM = { - "Ks" : 3, - "Kt" : 3, - "blocks" : [[1], [64, 16, 64], [64, 16, 64], [128, 128], [12]], - "T" : 12, - "n_vertex" : 207, - "act_func" : "glu", - "graph_conv_type" : "cheb_graph_conv", - "gso" : adj_mx, - "bias": True, - "droprate" : 0.5 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False 
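# [Editorial note] CFG.TRAIN.CL above (WARM_EPOCHS / CL_EPOCHS /
# PREDICTION_LENGTH) drives MTGNN's curriculum learning.  The sketch
# below is one plausible reading -- an assumption, not a quote of the
# basicts runner: after a warm-up phase, the number of supervised
# prediction steps grows by one every CL_EPOCHS epochs until it covers
# the full PREDICTION_LENGTH, and the loss is computed only on that
# prefix of the horizon.

def cl_prediction_length(epoch: int, warm_epochs: int = 0,
                         cl_epochs: int = 3, max_length: int = 12) -> int:
    """Number of supervised horizon steps at a 0-indexed epoch."""
    if epoch < warm_epochs:
        return max_length          # warm-up supervises the full horizon
    grown = (epoch - warm_epochs) // cl_epochs + 1
    return min(grown, max_length)

# epochs 0-2 supervise 1 step, 3-5 supervise 2 steps, ..., 33+ all 12;
# a runner would then score prediction[:, :length] against the labels.
assert cl_prediction_length(2) == 1 and cl_prediction_length(33) == 12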
-CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STGCN/STGCN_PEMS-BAY.py b/basicts/options/STGCN/STGCN_PEMS-BAY.py deleted file mode 100644 index 9550ddeb..00000000 --- a/basicts/options/STGCN/STGCN_PEMS-BAY.py +++ /dev/null @@ -1,118 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.STGCN_runner import STGCNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STGCN model configuration' -CFG.RUNNER = STGCNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS-BAY" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STGCN' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "normlap") -adj_mx = torch.Tensor(adj_mx[0]) -CFG.MODEL.PARAM = { - "Ks" : 3, - "Kt" : 3, - "blocks" : [[1], [64, 16, 64], [64, 16, 64], [128, 128], [12]], - "T" : 12, - "n_vertex" : 325, - "act_func" : "glu", - "graph_conv_type" : "cheb_graph_conv", - "gso" : adj_mx, - "bias": True, - "droprate" : 0.5 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= 
validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STGCN/STGCN_PEMS03.py b/basicts/options/STGCN/STGCN_PEMS03.py deleted file mode 100644 index 02d46662..00000000 --- a/basicts/options/STGCN/STGCN_PEMS03.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.STGCN_runner import STGCNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STGCN model configuration' -CFG.RUNNER = STGCNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS03" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STGCN' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "normlap") -adj_mx = torch.Tensor(adj_mx[0]) -CFG.MODEL.PARAM = { - "Ks" : 3, - "Kt" : 3, - "blocks" : [[1], [64, 16, 64], [64, 16, 64], [128, 128], [12]], - "T" : 12, - "n_vertex" : 358, - "act_func" : "glu", - "graph_conv_type" : "cheb_graph_conv", - "gso" : adj_mx, - "bias": True, - "droprate" : 0.5 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data 
-CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STGCN/STGCN_PEMS04.py b/basicts/options/STGCN/STGCN_PEMS04.py deleted file mode 100644 index d36a0d84..00000000 --- a/basicts/options/STGCN/STGCN_PEMS04.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.STGCN_runner import STGCNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STGCN model configuration' -CFG.RUNNER = STGCNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS04" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STGCN' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "normlap") -adj_mx = torch.Tensor(adj_mx[0]) -CFG.MODEL.PARAM = { - "Ks" : 3, - "Kt" : 3, - "blocks" : [[1], [64, 16, 64], [64, 16, 64], [128, 128], [12]], - "T" : 12, - "n_vertex" : 307, - "act_func" : "glu", - "graph_conv_type" : "cheb_graph_conv", - "gso" : adj_mx, - "bias": True, - "droprate" : 0.5 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH 
= False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STGCN/STGCN_PEMS07.py b/basicts/options/STGCN/STGCN_PEMS07.py deleted file mode 100644 index 138a3842..00000000 --- a/basicts/options/STGCN/STGCN_PEMS07.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.STGCN_runner import STGCNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STGCN model configuration' -CFG.RUNNER = STGCNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS07" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STGCN' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "normlap") -adj_mx = torch.Tensor(adj_mx[0]) -CFG.MODEL.PARAM = { - "Ks" : 3, - "Kt" : 3, - "blocks" : [[1], [64, 16, 64], [64, 16, 64], [128, 128], [12]], - "T" : 12, - "n_vertex" : 883, - "act_func" : "glu", - "graph_conv_type" : "cheb_graph_conv", - "gso" : adj_mx, - "bias": True, - "droprate" : 0.5 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test 
================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STGCN/STGCN_PEMS08.py b/basicts/options/STGCN/STGCN_PEMS08.py deleted file mode 100644 index 7b4d6f04..00000000 --- a/basicts/options/STGCN/STGCN_PEMS08.py +++ /dev/null @@ -1,117 +0,0 @@ -import os -from easydict import EasyDict -import torch -# runner -from basicts.runners.STGCN_runner import STGCNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STGCN model configuration' -CFG.RUNNER = STGCNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS08" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STGCN' -adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "normlap") -adj_mx = torch.Tensor(adj_mx[0]) -CFG.MODEL.PARAM = { - "Ks" : 3, - "Kt" : 3, - "blocks" : [[1], [64, 16, 64], [64, 16, 64], [128, 128], [12]], - "T" : 12, - "n_vertex" : 170, - "act_func" : "glu", - "graph_conv_type" : "cheb_graph_conv", - "gso" : adj_mx, - "bias": True, - "droprate" : 0.5 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data 
-CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STID/STID_Electricity336.py b/basicts/options/STID/STID_Electricity336.py deleted file mode 100644 index 9c5e1335..00000000 --- a/basicts/options/STID/STID_Electricity336.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STID_runner import STIDRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STID model configuration' -CFG.RUNNER = STIDRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "Electricity336" -CFG.DATASET_TYPE = 'Electricity consumption' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STID' -CFG.MODEL.PARAM = { - "num_nodes" : 336, - 'input_len' : 168, - 'input_dim' : 3, - 'embed_dim' : 32, - 'output_len': 12, - 'num_layer' : 3, - "if_node" : True, - 'node_dim' : 32, - "if_T_i_D" : True, - "if_D_i_W" : True, - 'temp_dim_tid' : 32, - 'temp_dim_diw' : 32, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic speed, time in day -CFG.MODEL.TARGET_FEATURES = [0] # traffic speed - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git 
a/basicts/options/STID/STID_METR-LA.py b/basicts/options/STID/STID_METR-LA.py deleted file mode 100644 index bc4b9ca1..00000000 --- a/basicts/options/STID/STID_METR-LA.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STID_runner import STIDRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Basic MTS model configuration' -CFG.RUNNER = STIDRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "METR-LA" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STID' -CFG.MODEL.PARAM = { - "num_nodes" : 207, - 'input_len' : 12, - 'input_dim' : 3, - 'embed_dim' : 32, - 'output_len': 12, - 'num_layer' : 3, - "if_node" : True, - 'node_dim' : 32, - "if_T_i_D" : True, - "if_D_i_W" : True, - 'temp_dim_tid' : 32, - 'temp_dim_diw' : 32, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic speed, time in day -CFG.MODEL.TARGET_FEATURES = [0] # traffic speed - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 80], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STID/STID_PEMS-BAY.py b/basicts/options/STID/STID_PEMS-BAY.py deleted file mode 100644 index b60d9090..00000000 --- a/basicts/options/STID/STID_PEMS-BAY.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STID_runner import 
STIDRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Basic MTS model configuration' -CFG.RUNNER = STIDRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS-BAY" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STID' -CFG.MODEL.PARAM = { - "num_nodes" : 325, - 'input_len' : 12, - 'input_dim' : 3, - 'embed_dim' : 32, - 'output_len': 12, - 'num_layer' : 3, - "if_node" : True, - 'node_dim' : 32, - "if_T_i_D" : True, - "if_D_i_W" : True, - 'temp_dim_tid' : 32, - 'temp_dim_diw' : 32, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic speed, time in day -CFG.MODEL.TARGET_FEATURES = [0] # traffic speed - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 80], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STID/STID_PEMS03.py b/basicts/options/STID/STID_PEMS03.py deleted file mode 100644 index 2ae57c2e..00000000 --- a/basicts/options/STID/STID_PEMS03.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STID_runner import STIDRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general 
================= # -CFG.DESCRIPTION = 'STID model configuration without DiW embedding' -CFG.RUNNER = STIDRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS03" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STID' -CFG.MODEL.PARAM = { - "num_nodes" : 358, - 'input_len' : 12, - 'input_dim' : 3, - 'embed_dim' : 32, - 'output_len': 12, - 'num_layer' : 3, - "if_node" : True, - 'node_dim' : 32, - "if_T_i_D" : True, - "if_D_i_W" : True, - 'temp_dim_tid' : 32, - 'temp_dim_diw' : 32, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic speed, time in day -CFG.MODEL.TARGET_FEATURES = [0] # traffic speed - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STID/STID_PEMS04.py b/basicts/options/STID/STID_PEMS04.py deleted file mode 100644 index 977ab25b..00000000 --- a/basicts/options/STID/STID_PEMS04.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STID_runner import STIDRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STID model configuration without DiW embedding' -CFG.RUNNER = STIDRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS04" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": 
masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STID' -CFG.MODEL.PARAM = { - "num_nodes" : 307, - 'input_len' : 12, - 'input_dim' : 3, - 'embed_dim' : 32, - 'output_len': 12, - 'num_layer' : 3, - "if_node" : True, - 'node_dim' : 32, - "if_T_i_D" : True, - "if_D_i_W" : True, - 'temp_dim_tid' : 32, - 'temp_dim_diw' : 32, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic speed, time in day -CFG.MODEL.TARGET_FEATURES = [0] # traffic speed - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STID/STID_PEMS07.py b/basicts/options/STID/STID_PEMS07.py deleted file mode 100644 index 1d88ada4..00000000 --- a/basicts/options/STID/STID_PEMS07.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STID_runner import STIDRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STID model configuration' -CFG.RUNNER = STIDRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS07" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STID' -CFG.MODEL.PARAM = { - "num_nodes" : 883, - 
'input_len' : 12, - 'input_dim' : 3, - 'embed_dim' : 32, - 'output_len': 12, - 'num_layer' : 3, - "if_node" : True, - 'node_dim' : 32, - "if_T_i_D" : True, - "if_D_i_W" : True, - 'temp_dim_tid' : 32, - 'temp_dim_diw' : 32, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic speed, time in day -CFG.MODEL.TARGET_FEATURES = [0] # traffic speed - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STID/STID_PEMS08.py b/basicts/options/STID/STID_PEMS08.py deleted file mode 100644 index f7e8cb3f..00000000 --- a/basicts/options/STID/STID_PEMS08.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STID_runner import STIDRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STID model configuration' -CFG.RUNNER = STIDRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS08" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STID' -CFG.MODEL.PARAM = { - "num_nodes" : 170, - 'input_len' : 12, - 'input_dim' : 3, - 'embed_dim' : 32, - 'output_len': 12, - 'num_layer' : 3, - "if_node" : True, - 'node_dim' : 32, - "if_T_i_D" : True, - "if_D_i_W" : True, - 'temp_dim_tid' : 32, - 'temp_dim_diw' : 32, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic speed, time in day 
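# [Editorial note] Every config selects input and label channels by index
# (CFG.MODEL.FROWARD_FEATURES / CFG.MODEL.TARGET_FEATURES; "FROWARD" is a
# typo for "FORWARD" carried consistently through the deleted files, so
# the hunks preserve it verbatim).  Assuming the usual BasicTS tensor
# layout [batch, seq_len, num_nodes, channels], the selection is a plain
# index on the last axis -- a sketch with hypothetical shapes:
import torch

history = torch.randn(16, 12, 170, 3)         # [B, L, N, C]
forward_features = [0, 1, 2]                   # value, time-in-day, day-in-week
target_features = [0]                          # supervise the raw value only

model_input = history[..., forward_features]   # [16, 12, 170, 3]
labels = history[..., target_features]         # [16, 12, 170, 1]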
-CFG.MODEL.TARGET_FEATURES = [0] # traffic speed - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STNorm/STNorm_Electricity336.py b/basicts/options/STNorm/STNorm_Electricity336.py deleted file mode 100644 index 7c6837b2..00000000 --- a/basicts/options/STNorm/STNorm_Electricity336.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STNorm_runner import STNormRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STNorm model configuration' -CFG.RUNNER = STNormRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "Electricity336" -CFG.DATASET_TYPE = 'Electricity consumption' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STNorm' -CFG.MODEL.PARAM = { - "num_nodes" : 336, - "tnorm_bool": True, - "snorm_bool": True, - "in_dim" : 2, - "out_dim" : 12, - "channels" : 32, - "kernel_size": 3, - "blocks" : 8, - "layers" : 4, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" 
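# [Editorial note] Optimizers and LR schedulers are declared throughout
# as a TYPE string plus a PARAM dict.  A minimal sketch of how such a
# pair maps onto plain PyTorch (an illustration of the pattern, not the
# basicts launcher itself):
import torch

model = torch.nn.Linear(8, 8)
optim_cfg = {"TYPE": "Adam", "PARAM": {"lr": 0.002, "weight_decay": 0.0001}}
sched_cfg = {"TYPE": "MultiStepLR", "PARAM": {"milestones": [1, 50], "gamma": 0.5}}

optimizer = getattr(torch.optim, optim_cfg["TYPE"])(
    model.parameters(), **optim_cfg["PARAM"])
scheduler = getattr(torch.optim.lr_scheduler, sched_cfg["TYPE"])(
    optimizer, **sched_cfg["PARAM"])

# MultiStepLR documents its milestones as an increasing list, so the
# "milestones": [1, 50, 100, 50] seen in two of the deleted STID configs
# reads like an editing slip (a stray trailing 50) rather than intent.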
-CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 4 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STNorm/STNorm_METR-LA.py b/basicts/options/STNorm/STNorm_METR-LA.py deleted file mode 100644 index 95e129dd..00000000 --- a/basicts/options/STNorm/STNorm_METR-LA.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STNorm_runner import STNormRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STNorm model configuration' -CFG.RUNNER = STNormRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "METR-LA" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STNorm' -CFG.MODEL.PARAM = { - "num_nodes" : 207, - "tnorm_bool": True, - "snorm_bool": True, - "in_dim" : 2, - "out_dim" : 12, - "channels" : 32, - "kernel_size": 2, - "blocks" : 4, - "layers" : 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() 
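# [Editorial note] The CFG.TRAIN.NULL_VAL = 0.0 setting just below pairs
# with the masked metrics used everywhere here: zeros in the raw data
# mark missing sensor readings and must not contribute to the loss.  A
# minimal masked-MAE sketch under that assumption (the real functions
# live in basicts.metrics and are slightly more general):
import torch

def masked_mae(preds: torch.Tensor, labels: torch.Tensor,
               null_val: float = 0.0) -> torch.Tensor:
    mask = (labels != null_val).float()
    mask = mask / torch.clamp(mask.mean(), min=1e-8)  # reweight valid cells
    return torch.nan_to_num(torch.abs(preds - labels) * mask).mean()

labels = torch.tensor([[60.0, 0.0], [55.0, 58.0]])    # 0.0 = sensor gap
preds = torch.tensor([[62.0, 30.0], [54.0, 57.0]])
print(masked_mae(preds, labels))                      # 1.3333; gap ignored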
-CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STNorm/STNorm_PEMS-BAY.py b/basicts/options/STNorm/STNorm_PEMS-BAY.py deleted file mode 100644 index 582033c6..00000000 --- a/basicts/options/STNorm/STNorm_PEMS-BAY.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STNorm_runner import STNormRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STNorm model configuration' -CFG.RUNNER = STNormRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS-BAY" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STNorm' -CFG.MODEL.PARAM = { - "num_nodes" : 325, - "tnorm_bool": True, - "snorm_bool": True, - "in_dim" : 2, - "out_dim" : 12, - "channels" : 32, - "kernel_size": 2, - "blocks" : 4, - "layers" : 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() 
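
CFG.TRAIN.NULL_VAL = 0.0 marks zero entries as missing values, so the masked loss and metrics exclude them rather than treating them as real observations. The shape of such a masked MAE, as a sketch rather than the exact masked_l1_loss imported in these files:

    import torch

    def masked_mae(prediction: torch.Tensor, target: torch.Tensor, null_val: float = 0.0) -> torch.Tensor:
        # zero out positions equal to null_val so missing data does not drive the loss
        mask = (target != null_val).float()
        # rescale so the loss magnitude stays comparable regardless of how much is masked
        mask = mask / torch.clamp(mask.mean(), min=1e-5)
        return (torch.abs(prediction - target) * mask).mean()
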
-CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STNorm/STNorm_PEMS03.py b/basicts/options/STNorm/STNorm_PEMS03.py deleted file mode 100644 index e2ca0fb6..00000000 --- a/basicts/options/STNorm/STNorm_PEMS03.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STNorm_runner import STNormRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STNorm model configuration' -CFG.RUNNER = STNormRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS03" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STNorm' -CFG.MODEL.PARAM = { - "num_nodes" : 358, - "tnorm_bool": True, - "snorm_bool": True, - "in_dim" : 2, - "out_dim" : 12, - "channels" : 32, - "kernel_size": 2, - "blocks" : 4, - "layers" : 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # 
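
The test loop configured by this section scores predictions with the CFG.METRICS dict defined near the top of each file; those entries appear to share the (prediction, target, null_val) calling convention of the masked functions imported above. Roughly:

    def evaluate(prediction, target, metrics, null_val=0.0):
        # metrics is e.g. {"MAE": masked_mae, "RMSE": masked_rmse, "MAPE": masked_mape}
        return {name: fn(prediction, target, null_val).item() for name, fn in metrics.items()}
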
-CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STNorm/STNorm_PEMS04.py b/basicts/options/STNorm/STNorm_PEMS04.py deleted file mode 100644 index 7ee6daf5..00000000 --- a/basicts/options/STNorm/STNorm_PEMS04.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STNorm_runner import STNormRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STNorm model configuration' -CFG.RUNNER = STNormRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS04" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STNorm' -CFG.MODEL.PARAM = { - "num_nodes" : 307, - "tnorm_bool": True, - "snorm_bool": True, - "in_dim" : 2, - "out_dim" : 12, - "channels" : 32, - "kernel_size": 2, - "blocks" : 4, - "layers" : 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git 
a/basicts/options/STNorm/STNorm_PEMS07.py b/basicts/options/STNorm/STNorm_PEMS07.py deleted file mode 100644 index 2e1fb607..00000000 --- a/basicts/options/STNorm/STNorm_PEMS07.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STNorm_runner import STNormRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STNorm model configuration' -CFG.RUNNER = STNormRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS07" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STNorm' -CFG.MODEL.PARAM = { - "num_nodes" : 883, - "tnorm_bool": True, - "snorm_bool": True, - "in_dim" : 2, - "out_dim" : 12, - "channels" : 32, - "kernel_size": 2, - "blocks" : 4, - "layers" : 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/STNorm/STNorm_PEMS08.py b/basicts/options/STNorm/STNorm_PEMS08.py deleted file mode 100644 index 137bc8a4..00000000 --- a/basicts/options/STNorm/STNorm_PEMS08.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.STNorm_runner import STNormRunner -from basicts.data.base_dataset import BaseDataset 
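
The tnorm_bool / snorm_bool flags in the STNorm PARAM dicts switch on the model's two normalization branches: temporal normalization standardizes each node's input window over time, and spatial normalization standardizes each time step across nodes. Stripped of STNorm's learnable scale and shift parameters, the core operations look roughly like:

    import torch

    def temporal_norm(x: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
        # x: [batch, channels, num_nodes, time]; normalize each node's series over time
        return (x - x.mean(dim=3, keepdim=True)) / (x.std(dim=3, keepdim=True) + eps)

    def spatial_norm(x: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
        # normalize across the node dimension at each time step
        return (x - x.mean(dim=2, keepdim=True)) / (x.std(dim=2, keepdim=True) + eps)
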
-from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'STNorm model configuration' -CFG.RUNNER = STNormRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS08" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'STNorm' -CFG.MODEL.PARAM = { - "num_nodes" : 170, - "tnorm_bool": True, - "snorm_bool": True, - "in_dim" : 2, - "out_dim" : 12, - "channels" : 32, - "kernel_size": 2, - "blocks" : 4, - "layers" : 2, -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002, - "weight_decay":0.0001, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/Stat/Stat_Electricity336.py b/basicts/options/Stat/Stat_Electricity336.py deleted file mode 100644 index 9c5bbb97..00000000 --- a/basicts/options/Stat/Stat_Electricity336.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.Stat_runner import StatRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Stat model configuration' -CFG.RUNNER = StatRunner -CFG.DATASET_CLS = BaseDataset 
-CFG.DATASET_NAME = "Electricity336" -CFG.DATASET_TYPE = 'Electricity consumption' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'VectorAutoRegression' -CFG.MODEL.PARAM = { - 'p': 168, - 'input_length': 168, - 'output_length': 12, - 'num_time_series': 336 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 4 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/Stat/Stat_METR-LA.py b/basicts/options/Stat/Stat_METR-LA.py deleted file mode 100644 index 5b9edc6b..00000000 --- a/basicts/options/Stat/Stat_METR-LA.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.Stat_runner import StatRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Stat model configuration' -CFG.RUNNER = StatRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "METR-LA" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'VectorAutoRegression' 
-CFG.MODEL.PARAM = { - 'p': 12, - 'input_length': 12, - 'output_length': 12, - 'num_time_series': 207 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/Stat/Stat_PEMS-BAY.py b/basicts/options/Stat/Stat_PEMS-BAY.py deleted file mode 100644 index ed21c58a..00000000 --- a/basicts/options/Stat/Stat_PEMS-BAY.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.Stat_runner import StatRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Stat model configuration' -CFG.RUNNER = StatRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS-BAY" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'VectorAutoRegression' -CFG.MODEL.PARAM = { - 'p': 12, - 'input_length': 12, - 'output_length': 12, - 'num_time_series': 325 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() 
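
FROWARD_FEATURES and TARGET_FEATURES select channels of the [batch, time, node, feature] tensors (FROWARD, with that spelling, is the key the runners read, so the configs use it verbatim): the model consumes features [0, 1], typically the raw value plus a time-of-day encoding, while the loss and metrics use feature 0 only. In tensor terms:

    import torch

    data = torch.randn(32, 12, 207, 3)   # [batch, seq_len, num_nodes, num_features]
    history = data[..., [0, 1]]          # FROWARD_FEATURES: channels the model consumes
    target = data[..., [0]]              # TARGET_FEATURES: channel predictions are scored on
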
-CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# testing data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/Stat/Stat_PEMS03.py b/basicts/options/Stat/Stat_PEMS03.py deleted file mode 100644 index 2cad218a..00000000 --- a/basicts/options/Stat/Stat_PEMS03.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.Stat_runner import StatRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Stat model configuration' -CFG.RUNNER = StatRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS03" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'VectorAutoRegression' -CFG.MODEL.PARAM = { - 'p': 12, - 'input_length': 12, - 'output_length': 12, - 'num_time_series': 358 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR =
'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/Stat/Stat_PEMS04.py b/basicts/options/Stat/Stat_PEMS04.py deleted file mode 100644 index eac8362a..00000000 --- a/basicts/options/Stat/Stat_PEMS04.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.Stat_runner import StatRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Stat model configuration' -CFG.RUNNER = StatRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS04" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'VectorAutoRegression' -CFG.MODEL.PARAM = { - 'p': 12, - 'input_length': 12, - 'output_length': 12, - 'num_time_series': 307 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional 
-CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# testing data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/Stat/Stat_PEMS07.py b/basicts/options/Stat/Stat_PEMS07.py deleted file mode 100644 index 5c947790..00000000 --- a/basicts/options/Stat/Stat_PEMS07.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.Stat_runner import StatRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Stat model configuration' -CFG.RUNNER = StatRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS07" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'VectorAutoRegression' -CFG.MODEL.PARAM = { - 'p': 12, - 'input_length': 12, - 'output_length': 12, - 'num_time_series': 883 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 10 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# testing data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False
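
The BATCH_SIZE / SHUFFLE / NUM_WORKERS / PIN_MEMORY keys in each DATA block map one-to-one onto torch.utils.data.DataLoader arguments; PREFETCH is presumably a framework-level switch for an optional background-prefetching wrapper rather than a DataLoader argument. A sketch of the mapping:

    from torch.utils.data import DataLoader, Dataset

    def build_loader(dataset: Dataset, data_cfg) -> DataLoader:
        # data_cfg is one of the CFG.TRAIN.DATA / CFG.VAL.DATA / CFG.TEST.DATA blocks
        return DataLoader(
            dataset,
            batch_size=data_cfg.BATCH_SIZE,
            shuffle=data_cfg.SHUFFLE,
            num_workers=data_cfg.NUM_WORKERS,
            pin_memory=data_cfg.PIN_MEMORY,
        )
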
-CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/Stat/Stat_PEMS08.py b/basicts/options/Stat/Stat_PEMS08.py deleted file mode 100644 index 99d9c079..00000000 --- a/basicts/options/Stat/Stat_PEMS08.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.Stat_runner import StatRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'Stat model configuration' -CFG.RUNNER = StatRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS08" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'VectorAutoRegression' -CFG.MODEL.PARAM = { - 'p': 12, - 'input_length': 12, - 'output_length': 12, - 'num_time_series': 170 -} -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":1.0e-5, -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[50], - "gamma":0.1 -} - -# ================= train ================= # -CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 -} -CFG.TRAIN.NUM_EPOCHS = 100 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/StemGNN/StemGNN_Electricity336.py b/basicts/options/StemGNN/StemGNN_Electricity336.py deleted file mode 100644 index 1ca36c91..00000000 --- a/basicts/options/StemGNN/StemGNN_Electricity336.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.StemGNN_runner import StemGNNRunner -from 
basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -"""Different from the official code, we use Adam as the optimizer and MAE as the loss function since they bring better performance.""" - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'StemGNN model configuration' -CFG.RUNNER = StemGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "Electricity336" -CFG.DATASET_TYPE = 'Electricity consumption' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'StemGNN' -CFG.MODEL.PARAM = { - "units": 336, - "stack_cnt": 2, - "time_step": 168, - "multi_layer": 2, - "horizon": 12, - "dropout_rate": 0.5, - "leaky_rate": 0.2 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 16 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/StemGNN/StemGNN_METR-LA.py b/basicts/options/StemGNN/StemGNN_METR-LA.py deleted file mode 100644 index 6f3c5b3f..00000000 --- a/basicts/options/StemGNN/StemGNN_METR-LA.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.StemGNN_runner import StemGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -"""Different from 
the official code, we use Adam as the optimizer and MAE as the loss function since they bring better performance.""" - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'StemGNN model configuration' -CFG.RUNNER = StemGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "METR-LA" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'StemGNN' -CFG.MODEL.PARAM = { - "units": 207, - "stack_cnt": 2, - "time_step": 12, - "multi_layer": 5, - "horizon": 12, - "dropout_rate": 0.5, - "leaky_rate": 0.2 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.0004 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/StemGNN/StemGNN_PEMS-BAY.py b/basicts/options/StemGNN/StemGNN_PEMS-BAY.py deleted file mode 100644 index 55c4915b..00000000 --- a/basicts/options/StemGNN/StemGNN_PEMS-BAY.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.StemGNN_runner import StemGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -"""Different from the official code, we use Adam as the optimizer and MAE as the loss function since they bring better performance.""" - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'StemGNN model configuration' -CFG.RUNNER = StemGNNRunner 
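
CFG.MODEL.PARAM is shaped to be splatted directly into the architecture's constructor, so each config doubles as documentation of the model signature. Schematically, for the StemGNN files above (the import path is assumed for illustration; the runner resolves the class itself in the real framework):

    from easydict import EasyDict

    from basicts.archs.StemGNN_arch import StemGNN  # assumed location of the model class

    model_param = EasyDict({
        "units": 207, "stack_cnt": 2, "time_step": 12, "multi_layer": 5,
        "horizon": 12, "dropout_rate": 0.5, "leaky_rate": 0.2,
    })
    model = StemGNN(**model_param)  # i.e. StemGNN(**CFG.MODEL.PARAM)
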
-CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS-BAY" -CFG.DATASET_TYPE = 'Traffic speed' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'StemGNN' -CFG.MODEL.PARAM = { - "units": 325, - "stack_cnt": 2, - "time_step": 12, - "multi_layer": 5, - "horizon": 12, - "dropout_rate": 0.5, - "leaky_rate": 0.2 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.0004 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50], - "gamma":0.5 -} - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/StemGNN/StemGNN_PEMS03.py b/basicts/options/StemGNN/StemGNN_PEMS03.py deleted file mode 100644 index 0f02ea52..00000000 --- a/basicts/options/StemGNN/StemGNN_PEMS03.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.StemGNN_runner import StemGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -"""Different from the official code, we use Adam as the optimizer and MAE as the loss function since they bring better performance.""" - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'StemGNN model configuration' -CFG.RUNNER = StemGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS03" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() 
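
CFG.ENV.SEED and CFG.ENV.CUDNN.ENABLED pin the run's randomness and cuDNN behavior; roughly, an EasyTorch-style launcher applies them before training like this:

    import random

    import numpy as np
    import torch

    def set_env(seed: int = 1, cudnn_enabled: bool = True) -> None:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)  # also seeds CUDA devices in current PyTorch releases
        torch.backends.cudnn.enabled = cudnn_enabled
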
-CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'StemGNN' -CFG.MODEL.PARAM = { - "units": 358, - "stack_cnt": 2, - "time_step": 12, - "multi_layer": 5, - "horizon": 12, - "dropout_rate": 0.5, - "leaky_rate": 0.2 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/StemGNN/StemGNN_PEMS04.py b/basicts/options/StemGNN/StemGNN_PEMS04.py deleted file mode 100644 index 68447440..00000000 --- a/basicts/options/StemGNN/StemGNN_PEMS04.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.StemGNN_runner import StemGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -"""Different from the official code, we use Adam as the optimizer and MAE as the loss function since they bring better performance.""" - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'StemGNN model configuration' -CFG.RUNNER = StemGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS04" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'StemGNN' -CFG.MODEL.PARAM = { - "units": 307, - "stack_cnt": 2, - "time_step": 12, - "multi_layer": 5, - 
"horizon": 12, - "dropout_rate": 0.5, - "leaky_rate": 0.2 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "RMSprop" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/StemGNN/StemGNN_PEMS07.py b/basicts/options/StemGNN/StemGNN_PEMS07.py deleted file mode 100644 index 29ddd0dc..00000000 --- a/basicts/options/StemGNN/StemGNN_PEMS07.py +++ /dev/null @@ -1,111 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.StemGNN_runner import StemGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -"""Different from the official code, we use Adam as the optimizer and MAE as the loss function since they bring better performance.""" - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'StemGNN model configuration' -CFG.RUNNER = StemGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS07" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'StemGNN' -CFG.MODEL.PARAM = { - "units": 883, - "stack_cnt": 2, - "time_step": 12, - "multi_layer": 5, - "horizon": 12, - "dropout_rate": 0.5, - "leaky_rate": 0.2 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() 
-CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train ================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/StemGNN/StemGNN_PEMS08.py b/basicts/options/StemGNN/StemGNN_PEMS08.py deleted file mode 100644 index daeaca4f..00000000 --- a/basicts/options/StemGNN/StemGNN_PEMS08.py +++ /dev/null @@ -1,112 +0,0 @@ -import os -from easydict import EasyDict -# runner -from basicts.runners.StemGNN_runner import StemGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss - -"""Different from the official code, we use Adam as the optimizer and MAE as the loss function since they bring better performance.""" - -CFG = EasyDict() - -# ================= general ================= # -CFG.DESCRIPTION = 'StemGNN model configuration' -CFG.RUNNER = StemGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS08" -CFG.DATASET_TYPE = 'Traffic flow' -CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} - -# ================= environment ================= # -CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 -CFG.ENV.CUDNN = EasyDict() -CFG.ENV.CUDNN.ENABLED = True - - -# ================= model ================= # -CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'StemGNN' -CFG.MODEL.PARAM = { - "units": 170, - "stack_cnt": 2, - "time_step": 12, - "multi_layer": 5, - "horizon": 12, - "dropout_rate": 0.5, - "leaky_rate": 0.2 -} -CFG.MODEL.FROWARD_FEATURES = [0] -CFG.MODEL.TARGET_FEATURES = [0] - -# ================= optim ================= # -CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss -CFG.TRAIN.OPTIM = EasyDict() -CFG.TRAIN.OPTIM.TYPE = "Adam" -CFG.TRAIN.OPTIM.PARAM= { - "lr":0.002 -} -CFG.TRAIN.LR_SCHEDULER = EasyDict() -CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" -CFG.TRAIN.LR_SCHEDULER.PARAM= { - "milestones":[1, 50, 100], - "gamma":0.5 -} - -# ================= train 
================= # -# CFG.TRAIN.CLIP_GRAD_PARAM = { -# 'max_norm': 5.0 -# } -CFG.TRAIN.NUM_EPOCHS = 200 -CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) -) -# train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 64 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False - -# ================= validate ================= # -CFG.VAL = EasyDict() -CFG.VAL.INTERVAL = 1 -# validating data -CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 64 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False - -# ================= test ================= # -CFG.TEST = EasyDict() -CFG.TEST.INTERVAL = 1 -# validating data -CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 64 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/run.py b/basicts/run.py deleted file mode 100644 index 3b2af878..00000000 --- a/basicts/run.py +++ /dev/null @@ -1,135 +0,0 @@ -import os -import sys -sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__)))) -from argparse import ArgumentParser -from easytorch import launch_training - -def parse_args(): - parser = ArgumentParser(description='Run time series forecasting model in BasicTS framework based on EasyTorch!') - # parser.add_argument('-c', '--cfg', required=True, help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/HI/HI_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/HI/HI_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/HI/HI_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/HI/HI_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/HI/HI_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/HI/HI_PEMS08.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/Stat/Stat_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/Stat/Stat_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/Stat/Stat_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/Stat/Stat_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/Stat/Stat_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/Stat/Stat_PEMS08.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/Stat/Stat_Electricity336.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/DCRNN/DCRNN_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/DCRNN/DCRNN_PEMS-BAY.py', help='training config') - # 
parser.add_argument('-c', '--cfg', default='basicts/options/DCRNN/DCRNN_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/DCRNN/DCRNN_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/DCRNN/DCRNN_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/DCRNN/DCRNN_PEMS08.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/StemGNN/StemGNN_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/StemGNN/StemGNN_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/StemGNN/StemGNN_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/StemGNN/StemGNN_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/StemGNN/StemGNN_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/StemGNN/StemGNN_PEMS08.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/StemGNN/StemGNN_Electricity336.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/GraphWaveNet/GraphWaveNet_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GraphWaveNet/GraphWaveNet_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GraphWaveNet/GraphWaveNet_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GraphWaveNet/GraphWaveNet_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GraphWaveNet/GraphWaveNet_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GraphWaveNet/GraphWaveNet_PEMS08.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/STGCN/STGCN_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STGCN/STGCN_PEMS-BAY.py', help='training config') - parser.add_argument('-c', '--cfg', default='basicts/options/STGCN/STGCN_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STGCN/STGCN_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STGCN/STGCN_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STGCN/STGCN_PEMS08.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/D2STGNN/D2STGNN_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/D2STGNN/D2STGNN_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/D2STGNN/D2STGNN_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/D2STGNN/D2STGNN_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/D2STGNN/D2STGNN_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/D2STGNN/D2STGNN_PEMS08.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/STID/STID_METR-LA.py', help='training config') - # parser.add_argument('-c', 
'--cfg', default='basicts/options/STID/STID_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STID/STID_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STID/STID_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STID/STID_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STID/STID_PEMSS8.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STID/STID_Electricity336.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/D2STGNN/D2STGNN_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/D2STGNN/D2STGNN_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/D2STGNN/D2STGNN_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/D2STGNN/D2STGNN_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/D2STGNN/D2STGNN_PEMS08.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/MTGNN/MTGNN_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/MTGNN/MTGNN_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/MTGNN/MTGNN_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/MTGNN/MTGNN_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/MTGNN/MTGNN_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/MTGNN/MTGNN_PEMS08.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/MTGNN/MTGNN_Electricity336.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/AGCRN/AGCRN_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/AGCRN/AGCRN_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/AGCRN/AGCRN_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/AGCRN/AGCRN_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/AGCRN/AGCRN_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/AGCRN/AGCRN_PEMS08.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/AGCRN/AGCRN_Electricity336.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/AGCRN/AGCRN_PEMS07.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/LSTM/LSTM_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/LSTM/LSTM_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/LSTM/LSTM_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/LSTM/LSTM_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/LSTM/LSTM_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', 
default='basicts/options/LSTM/LSTM_Electricity336.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/STNorm/STNorm_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STNorm/STNorm_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STNorm/STNorm_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STNorm/STNorm_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STNorm/STNorm_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STNorm/STNorm_PEMS08.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/STNorm/STNorm_Electricity336.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/DGCRN/DGCRN_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/DGCRN/DGCRN_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/DGCRN/DGCRN_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/DGCRN/DGCRN_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/DGCRN/DGCRN_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/DGCRN/DGCRN_PEMS08.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/GMAN/GMAN_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GMAN/GMAN_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GMAN/GMAN_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GMAN/GMAN_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GMAN/GMAN_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GMAN/GMAN_PEMS08.py', help='training config') - - # parser.add_argument('-c', '--cfg', default='basicts/options/GTS/GTS_METR-LA.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GTS/GTS_PEMS-BAY.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GTS/GTS_PEMS03.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GTS/GTS_PEMS04.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GTS/GTS_PEMS07.py', help='training config') - # parser.add_argument('-c', '--cfg', default='basicts/options/GTS/GTS_PEMS08.py', help='training config') - - parser.add_argument('--gpus', default='0', help='visible gpus') - return parser.parse_args() - -if __name__ == "__main__": - args = parse_args() - - launch_training(args.cfg, args.gpus) diff --git a/basicts/runners/AGCRN_runner.py b/basicts/runners/AGCRN_runner.py deleted file mode 100644 index c0847358..00000000 --- a/basicts/runners/AGCRN_runner.py +++ /dev/null @@ -1,60 +0,0 @@ -import torch -from basicts.runners.short_mts_runner import MTSRunner - -class AGCRNRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def select_input_features(self, data: torch.Tensor) -> torch.Tensor: - """select input features. 
- - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None: - data = data[:, :, :, self.forward_features] - return data - - def select_target_features(self, data: torch.Tensor) -> torch.Tensor: - """select target feature - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. - - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data). [B, L, N, C] for each of them - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - train (bool, optional): if in the training process. Defaults to True. - - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.select_input_features(history_data) - - # feed forward - prediction_data = self.model(history_data=history_data) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.select_target_features(prediction_data) - real_value = self.select_target_features(future_data) - return prediction, real_value diff --git a/basicts/runners/D2STGNN_runner.py b/basicts/runners/D2STGNN_runner.py deleted file mode 100644 index 459ecd23..00000000 --- a/basicts/runners/D2STGNN_runner.py +++ /dev/null @@ -1,60 +0,0 @@ -import torch -from basicts.runners.short_mts_runner import MTSRunner - -class D2STGNNRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def select_input_features(self, data: torch.Tensor) -> torch.Tensor: - """select input features. - - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None: - data = data[:, :, :, self.forward_features] - return data - - def select_target_features(self, data: torch.Tensor) -> torch.Tensor: - """select target feature - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. - - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data). [B, L, N, C] for each of them - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - train (bool, optional): if in the training process. Defaults to True. 
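The runners deleted in this change all repeat the same select_input_features / select_target_features idiom: channels live in the last axis of a [B, L, N, C] tensor, so choosing model inputs is a single index on that axis. A minimal demonstration with illustrative shapes:

```python
# Feature selection on a [B, L, N, C] batch: channels sit in the last axis,
# so picking model inputs is one fancy-index. Shapes here are illustrative.
import torch

history = torch.randn(64, 12, 307, 3)             # B=batch, L=window, N=nodes, C=channels
forward_features = [0]                            # e.g. keep only the traffic-flow channel
model_input = history[:, :, :, forward_features]
print(model_input.shape)                          # torch.Size([64, 12, 307, 1])
```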
- - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.select_input_features(history_data) - - # feed forward - prediction_data = self.model(history_data=history_data) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.select_target_features(prediction_data) - real_value = self.select_target_features(future_data) - return prediction, real_value diff --git a/basicts/runners/DCRNN_runner.py b/basicts/runners/DCRNN_runner.py deleted file mode 100644 index f1019c71..00000000 --- a/basicts/runners/DCRNN_runner.py +++ /dev/null @@ -1,79 +0,0 @@ -import torch -from basicts.runners.short_mts_runner import MTSRunner - -class DCRNNRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def setup_graph(self, data): - try: - self.train_iters(data, 0, 0) - except AttributeError: - pass - - def data_reshaper(self, data: torch.Tensor, channel=None) -> torch.Tensor: - """select input features and reshape data to fit the target model. - - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - channel (list): self-defined selected channels - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None and channel is None: - data = data[:, :, :, self.forward_features] - if channel is not None: - data = data[:, :, :, channel] - # reshape data [B, L, N, C] -> [L, B, N*C] (DCRNN required) - B, L, N, C = data.shape - data = data.reshape(B, L, N*C) # [B, L, N*C] - data = data.transpose(0, 1) # [L, B, N*C] - return data - - def data_i_reshape(self, data: torch.Tensor) -> torch.Tensor: - """select target features and reshape data back to the BasicTS framework - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. - - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # reshape data - pass - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data). [B, L, N, C] for each of them - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - train (bool, optional): if in the training process. Defaults to True. - - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.data_reshaper(history_data) - if train: - future_data_ = self.data_reshaper(future_data, channel=[0]) # teacher forcing only use the first dimension. 
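The DCRNN runner's data_reshaper above bridges two layouts: BasicTS batches are [B, L, N, C], while DCRNN consumes sequence-first [L, B, N*C]. A small demonstration on dummy data (shapes illustrative; 207 is METR-LA's sensor count):

```python
# The reshaping contract of data_reshaper, shown on dummy data:
# [B, L, N, C] -> [B, L, N*C] -> [L, B, N*C].
import torch

batch = torch.randn(64, 12, 207, 2)               # B, L, N, C
b, l, n, c = batch.shape
seq_first = batch.reshape(b, l, n * c).transpose(0, 1)
print(seq_first.shape)                            # torch.Size([12, 64, 414])
```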
- else: - future_data_ = None - - # feed forward - prediction_data = self.model(history_data=history_data, future_data=future_data_, batch_seen=iter_num, epoch=epoch) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.data_i_reshape(prediction_data) - real_value = self.data_i_reshape(future_data) - return prediction, real_value diff --git a/basicts/runners/GMAN_runner.py b/basicts/runners/GMAN_runner.py deleted file mode 100644 index f029a015..00000000 --- a/basicts/runners/GMAN_runner.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from basicts.runners.short_mts_runner import MTSRunner - -class GMANRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def select_input_features(self, data: torch.Tensor) -> torch.Tensor: - """select input features. - - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None: - data = data[:, :, :, self.forward_features] - return data - - def select_target_features(self, data: torch.Tensor) -> torch.Tensor: - """select target feature - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. - - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data). [B, L, N, C] for each of them - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - train (bool, optional): if in the training process. Defaults to True. 
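The setup_graph hook above exists because DCRNN-style architectures materialize some parameters only on their first forward pass, TensorFlow-style, so the runner must push one dummy batch through before the optimizer collects model.parameters(). PyTorch's lazy modules show the same effect in miniature (a sketch of the concept, not the actual BasicTS mechanism):

```python
# Why a dummy pass must precede optimizer construction for lazily-built models.
import torch

model = torch.nn.LazyLinear(1)                       # in_features inferred on first forward
model(torch.randn(4, 8))                             # "setup_graph": materializes the weights
optim = torch.optim.SGD(model.parameters(), lr=0.01) # now the parameters actually exist
```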
- - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.select_input_features(history_data) - future_data = self.select_input_features(future_data) - - # feed forward - prediction_data = self.model(history_data=history_data, future_data=future_data) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.select_target_features(prediction_data) - real_value = self.select_target_features(future_data) - return prediction, real_value diff --git a/basicts/runners/GTS_runner.py b/basicts/runners/GTS_runner.py deleted file mode 100644 index 8daff157..00000000 --- a/basicts/runners/GTS_runner.py +++ /dev/null @@ -1,190 +0,0 @@ -from typing import Tuple, Union -import torch -from basicts.runners.short_mts_runner import MTSRunner -from basicts.data.transforms import SCALER_REGISTRY -from easytorch.utils.dist import master_only - -""" - TODO: - Modularize the procedures inside train_iters, val_iters, and test_iters. - Otherwise, as GTS shows, once a model deviates even slightly (e.g., an extra return value and a different loss), the entire train_iters, val_iters, and test_iters must be rewritten. -""" - -class GTSRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def setup_graph(self, data): - try: - self.train_iters(data, 0, 0) - except: - pass - - def data_reshaper(self, data: torch.Tensor, channel=None) -> torch.Tensor: - """select input features and reshape data to fit the target model. - - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - channel (list): self-defined selected channels - - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None and channel is None: - data = data[:, :, :, self.forward_features] - if channel is not None: - data = data[:, :, :, channel] - # reshape data [B, L, N, C] -> [L, B, N*C] (DCRNN required) - B, L, N, C = data.shape - data = data.reshape(B, L, N*C) # [B, L, N*C] - data = data.transpose(0, 1) # [L, B, N*C] - return data - - def data_i_reshape(self, data: torch.Tensor) -> torch.Tensor: - """select target features and reshape data back to the BasicTS framework - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. - - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # reshape data - pass - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data). [B, L, N, C] for each of them - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - train (bool, optional): if in the training process. Defaults to True.
- - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.data_reshaper(history_data) - if train: - future_data_ = self.data_reshaper(future_data, channel=[0]) # teacher forcing only use the first dimension. - else: - future_data_ = None - - # feed forward - prediction_data, pred_adj, prior_adj = self.model(history_data=history_data, future_data=future_data_, batch_seen=iter_num, epoch=epoch) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.data_i_reshape(prediction_data) - real_value = self.data_i_reshape(future_data) - return prediction, real_value, pred_adj, prior_adj - - def train_iters(self, epoch: int, iter_index: int, data: Union[torch.Tensor, Tuple]) -> torch.Tensor: - """Training details. - - Args: - data (Union[torch.Tensor, Tuple]): Data provided by DataLoader - epoch (int): current epoch. - iter_index (int): current iter. - - Returns: - loss (torch.Tensor) - """ - iter_num = (epoch-1) * self.iter_per_epoch + iter_index - prediction, real_value, pred_adj, prior_adj = self.forward(data=data, epoch=epoch, iter_num=iter_num, train=True) - # re-scale data - prediction = SCALER_REGISTRY.get(self.scaler['func'])(prediction, **self.scaler['args']) - real_value = SCALER_REGISTRY.get(self.scaler['func'])(real_value, **self.scaler['args']) - # loss - if self.cl_param: - cl_length = self.curriculum_learning(epoch=epoch) - loss = self.loss(prediction[:, :cl_length, :, :], real_value[:, :cl_length, :, :], null_val=self.null_val) - else: - loss = self.loss(prediction, real_value, null_val=self.null_val) - # graph structure loss - prior_label = prior_adj.view(prior_adj.shape[0] * prior_adj.shape[1]).to(pred_adj.device) - pred_label = pred_adj.view(pred_adj.shape[0] * pred_adj.shape[1]) - graph_loss_function = torch.nn.BCELoss() - loss_g = graph_loss_function(pred_label, prior_label) - loss = loss + loss_g - # metrics - for metric_name, metric_func in self.metrics.items(): - metric_item = metric_func(prediction, real_value, null_val=self.null_val) - self.update_epoch_meter('train_'+metric_name, metric_item.item()) - return loss - - def val_iters(self, iter_index: int, data: Union[torch.Tensor, Tuple]): - """Validation details. - - Args: - data (Union[torch.Tensor, Tuple]): Data provided by DataLoader - train_epoch (int): current epoch if in training process. Else None. - iter_index (int): current iter. 
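train_iters above augments the forecasting loss with a graph-structure term: the learned adjacency is flattened and pulled toward the binary prior graph with binary cross-entropy. A hedged sketch with illustrative shapes:

```python
# Sketch of the graph-structure regularizer: BCE between the learned edge
# probabilities and the binary prior adjacency. 207 nodes is illustrative.
import torch

pred_adj = torch.rand(207, 207)                    # learned edge probabilities in (0, 1)
prior_adj = (torch.rand(207, 207) > 0.9).float()   # binary prior graph
loss_g = torch.nn.BCELoss()(pred_adj.reshape(-1), prior_adj.reshape(-1))
forecast_loss = torch.tensor(3.21)                 # stand-in for the masked MAE term
total_loss = forecast_loss + loss_g
```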
- """ - prediction, real_value, pred_adj, prior_adj = self.forward(data=data, epoch=None, iter_num=None, train=False) - # re-scale data - prediction = SCALER_REGISTRY.get(self.scaler['func'])(prediction, **self.scaler['args']) - real_value = SCALER_REGISTRY.get(self.scaler['func'])(real_value, **self.scaler['args']) - # loss - loss = self.loss(prediction, real_value, null_val=self.null_val) - # graph structure loss - prior_label = prior_adj.view(prior_adj.shape[0] * prior_adj.shape[1]).to(pred_adj.device) - pred_label = pred_adj.view(pred_adj.shape[0] * pred_adj.shape[1]) - graph_loss_function = torch.nn.BCELoss() - loss_g = graph_loss_function(pred_label, prior_label) - loss = loss + loss_g - - # metrics - for metric_name, metric_func in self.metrics.items(): - metric_item = metric_func(prediction, real_value, null_val=self.null_val) - self.update_epoch_meter('val_'+metric_name, metric_item.item()) - return loss - - @torch.no_grad() - @master_only - def test(self, train_epoch: int = None): - """test model. - - Args: - train_epoch (int, optional): current epoch if in training process. - """ - # test loop - prediction = [] - real_value = [] - for iter_index, data in enumerate(self.test_data_loader): - preds, testy, pred_adj, prior_adj = self.forward(data=data, epoch=train_epoch, iter_num=None, train=False) - prediction.append(preds) - real_value.append(testy) - prediction = torch.cat(prediction,dim=0) - real_value = torch.cat(real_value, dim=0) - # re-scale data - prediction = SCALER_REGISTRY.get(self.scaler['func'])(prediction, **self.scaler['args']) - real_value = SCALER_REGISTRY.get(self.scaler['func'])(real_value, **self.scaler['args']) - # summarize the results. - ## test performance of different horizon - for i in range(12): - # For horizon i, only calculate the metrics **at that time** slice here. - pred = prediction[:,i,:,:] - real = real_value[:,i,:,:] - # metrics - metric_results = {} - for metric_name, metric_func in self.metrics.items(): - metric_item = metric_func(pred, real, null_val=self.null_val) - metric_results[metric_name] = metric_item.item() - log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test RMSE: {:.4f}, Test MAPE: {:.4f}' - log = log.format(i+1, metric_results['MAE'], metric_results['RMSE'], metric_results['MAPE']) - self.logger.info(log) - ## test performance overall - for metric_name, metric_func in self.metrics.items(): - metric_item = metric_func(prediction, real_value, null_val=self.null_val) - self.update_epoch_meter('test_'+metric_name, metric_item.item()) - metric_results[metric_name] = metric_item.item() diff --git a/basicts/runners/GraphWaveNet_runner.py b/basicts/runners/GraphWaveNet_runner.py deleted file mode 100644 index a71155ff..00000000 --- a/basicts/runners/GraphWaveNet_runner.py +++ /dev/null @@ -1,59 +0,0 @@ -import torch -from basicts.runners.short_mts_runner import MTSRunner - -class GraphWaveNetRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def select_input_features(self, data: torch.Tensor) -> torch.Tensor: - """select input features. - - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None: - data = data[:, :, :, self.forward_features] - return data - - def select_target_features(self, data: torch.Tensor) -> torch.Tensor: - """select target feature - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. 
- - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data) - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.select_input_features(history_data) - - # feed forward - prediction_data = self.model(history_data=history_data) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.select_target_features(prediction_data) - real_value = self.select_target_features(future_data) - return prediction, real_value diff --git a/basicts/runners/HI_runner.py b/basicts/runners/HI_runner.py deleted file mode 100644 index f5dd04dc..00000000 --- a/basicts/runners/HI_runner.py +++ /dev/null @@ -1,68 +0,0 @@ -import torch -from basicts.runners.short_mts_runner import MTSRunner - -class HIRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def select_input_features(self, data: torch.Tensor) -> torch.Tensor: - """select input features. - - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None: - data = data[:, :, :, self.forward_features] - return data - - def select_target_features(self, data: torch.Tensor) -> torch.Tensor: - """select target feature - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. - - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data). [B, L, N, C] for each of them - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - train (bool, optional): if in the training process. Defaults to True.
- - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.select_input_features(history_data) - - # feed forward - prediction_data = self.model(history_data=history_data) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.select_target_features(prediction_data) - real_value = self.select_target_features(future_data) - return prediction, real_value - - def backward(self, loss: torch.Tensor): - """Backward and update params. - - Args: - loss (torch.Tensor): loss - """ - pass diff --git a/basicts/runners/LSTM_runner.py b/basicts/runners/LSTM_runner.py deleted file mode 100644 index c1afff63..00000000 --- a/basicts/runners/LSTM_runner.py +++ /dev/null @@ -1,60 +0,0 @@ -import torch -from basicts.runners.short_mts_runner import MTSRunner - -class LSTMRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def select_input_features(self, data: torch.Tensor) -> torch.Tensor: - """select input features. - - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None: - data = data[:, :, :, self.forward_features] - return data - - def select_target_features(self, data: torch.Tensor) -> torch.Tensor: - """select target feature - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. - - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data). [B, L, N, C] for each of them - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - train (bool, optional): if in the training process. Defaults to True. 
- - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.select_input_features(history_data) - - # feed forward - prediction_data = self.model(history_data=history_data) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.select_target_features(prediction_data) - real_value = self.select_target_features(future_data) - return prediction, real_value diff --git a/basicts/runners/STGCN_runner.py b/basicts/runners/STGCN_runner.py deleted file mode 100644 index ccd3b534..00000000 --- a/basicts/runners/STGCN_runner.py +++ /dev/null @@ -1,60 +0,0 @@ -import torch -from basicts.runners.short_mts_runner import MTSRunner - -class STGCNRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def select_input_features(self, data: torch.Tensor) -> torch.Tensor: - """select input features. - - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None: - data = data[:, :, :, self.forward_features] - return data - - def select_target_features(self, data: torch.Tensor) -> torch.Tensor: - """select target feature - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. - - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data). [B, L, N, C] for each of them - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - train (bool, optional): if in the training process. Defaults to True. - - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.select_input_features(history_data) - - # feed forward - prediction_data = self.model(history_data=history_data) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.select_target_features(prediction_data) - real_value = self.select_target_features(future_data) - return prediction, real_value diff --git a/basicts/runners/STID_runner.py b/basicts/runners/STID_runner.py deleted file mode 100644 index b5c85faa..00000000 --- a/basicts/runners/STID_runner.py +++ /dev/null @@ -1,60 +0,0 @@ -import torch -from basicts.runners.short_mts_runner import MTSRunner - -class STIDRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def select_input_features(self, data: torch.Tensor) -> torch.Tensor: - """select input features. 
- - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None: - data = data[:, :, :, self.forward_features] - return data - - def select_target_features(self, data: torch.Tensor) -> torch.Tensor: - """select target feature - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. - - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data). [B, L, N, C] for each of them - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - train (bool, optional): if in the training process. Defaults to True. - - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.select_input_features(history_data) - - # feed forward - prediction_data = self.model(history_data=history_data) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.select_target_features(prediction_data) - real_value = self.select_target_features(future_data) - return prediction, real_value diff --git a/basicts/runners/STNorm_runner.py b/basicts/runners/STNorm_runner.py deleted file mode 100644 index d0526424..00000000 --- a/basicts/runners/STNorm_runner.py +++ /dev/null @@ -1,60 +0,0 @@ -import torch -from basicts.runners.short_mts_runner import MTSRunner - -class STNormRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def select_input_features(self, data: torch.Tensor) -> torch.Tensor: - """select input features. - - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None: - data = data[:, :, :, self.forward_features] - return data - - def select_target_features(self, data: torch.Tensor) -> torch.Tensor: - """select target feature - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. - - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data). [B, L, N, C] for each of them - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - train (bool, optional): if in the training process. Defaults to True. 
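Apart from extra model arguments, every forward method in these deleted runners follows one contract: unpack (future, history), run the model on the history window, and assert the [B, L, N] prefix of the output shape. A skeleton of that contract (hedged sketch; `model` is any module accepting a history_data keyword):

```python
# The shared forward contract of these runners, reduced to its skeleton.
import torch

def forward_skeleton(model: torch.nn.Module, batch: tuple):
    future_data, history_data = batch            # each [B, L, N, C], already on device
    b, l, n, _ = future_data.shape
    prediction = model(history_data=history_data)
    assert list(prediction.shape)[:3] == [b, l, n], \
        "edit forward() to reshape the output to [B, L, N, C]"
    return prediction, future_data
```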
- - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.select_input_features(history_data) - - # feed forward - prediction_data = self.model(history_data=history_data) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.select_target_features(prediction_data) - real_value = self.select_target_features(future_data) - return prediction, real_value diff --git a/basicts/runners/Stat_runner.py b/basicts/runners/Stat_runner.py deleted file mode 100644 index 01e72862..00000000 --- a/basicts/runners/Stat_runner.py +++ /dev/null @@ -1,60 +0,0 @@ -import torch -from basicts.runners.short_mts_runner import MTSRunner - -class StatRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def select_input_features(self, data: torch.Tensor) -> torch.Tensor: - """select input features. - - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None: - data = data[:, :, :, self.forward_features] - return data - - def select_target_features(self, data: torch.Tensor) -> torch.Tensor: - """select target feature - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. - - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data). [B, L, N, C] for each of them - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - train (bool, optional): if in the training process. Defaults to True. - - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.select_input_features(history_data) - - # feed forward - prediction_data = self.model(history_data=history_data, batch_seen=iter_num, epoch=epoch) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.select_target_features(prediction_data) - real_value = self.select_target_features(future_data) - return prediction, real_value diff --git a/basicts/runners/StemGNN_runner.py b/basicts/runners/StemGNN_runner.py deleted file mode 100644 index f6da133e..00000000 --- a/basicts/runners/StemGNN_runner.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from tqdm import tqdm -from basicts.runners.short_mts_runner import MTSRunner - -class StemGNNRunner(MTSRunner): - def __init__(self, cfg: dict): - super().__init__(cfg) - - def select_input_features(self, data: torch.Tensor) -> torch.Tensor: - """select input features. 
- - Args: - data (torch.Tensor): input history data, shape [B, L, N, C] - Returns: - torch.Tensor: reshaped data - """ - # select feature using self.forward_features - if self.forward_features is not None: - data = data[:, :, :, self.forward_features][..., 0] # stemgnn only uses the first dimension - return data - - def select_target_features(self, data: torch.Tensor) -> torch.Tensor: - """select target feature - - Args: - data (torch.Tensor): prediction of the model with arbitrary shape. - - Returns: - torch.Tensor: reshaped data with shape [B, L, N, C] - """ - # select feature using self.target_features - data = data[:, :, :, self.target_features] - return data - - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. - - Args: - data (tuple): data (future data, history data). [B, L, N, C] for each of them - epoch (int, optional): epoch number. Defaults to None. - iter_num (int, optional): iteration number. Defaults to None. - train (bool, optional): if in the training process. Defaults to True. - - Returns: - tuple: (prediction, real_value) - """ - # preprocess - future_data, history_data = data - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape - - history_data = self.select_input_features(history_data) - - # feed forward - prediction_data = self.model(history_data=history_data, batch_seen=iter_num, epoch=epoch) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" - # post process - prediction = self.select_target_features(prediction_data) - real_value = self.select_target_features(future_data) - return prediction, real_value diff --git a/basicts/runners/__init__.py b/basicts/runners/__init__.py new file mode 100644 index 00000000..a5b5eaa5 --- /dev/null +++ b/basicts/runners/__init__.py @@ -0,0 +1,21 @@ +from .base_tsf_runner import BaseTimeSeriesForecastingRunner +from .runner_zoo.simple_tsf_runner import SimpleTimeSeriesForecastingRunner +from .runner_zoo.stid_runner import STIDRunner +from .runner_zoo.gwnet_runner import GraphWaveNetRunner +from .runner_zoo.dcrnn_runner import DCRNNRunner +from .runner_zoo.d2stgnn_runner import D2STGNNRunner +from .runner_zoo.stgcn_runner import STGCNRunner +from .runner_zoo.mtgnn_runner import MTGNNRunner +from .runner_zoo.stnorm_runner import STNormRunner +from .runner_zoo.agcrn_runner import AGCRNRunner +from .runner_zoo.stemgnn_runner import StemGNNRunner +from .runner_zoo.gts_runner import GTSRunner +from .runner_zoo.dgcrn_runner import DGCRNRunner +from .runner_zoo.linear_runner import LinearRunner + +__all__ = ["BaseTimeSeriesForecastingRunner", + "SimpleTimeSeriesForecastingRunner", "STIDRunner", + "GraphWaveNetRunner", "DCRNNRunner", "D2STGNNRunner", + "STGCNRunner", "MTGNNRunner", "STNormRunner", + "AGCRNRunner", "StemGNNRunner", "GTSRunner", + "DGCRNRunner", "LinearRunner"] diff --git a/basicts/runners/base_runner.py b/basicts/runners/base_runner.py index 38b77a45..070f0118 100644 --- a/basicts/runners/base_runner.py +++ b/basicts/runners/base_runner.py @@ -2,24 +2,22 @@ from typing import Dict import setproctitle - import torch from torch import nn from torch.utils.data import DataLoader - from easytorch import Runner -from easytorch.utils.dist import master_only from 
easytorch.utils import master_only from easytorch.core.data_loader import build_data_loader -from basicts.data.transforms import * -from basicts.archs import ARCH_REGISTRY class BaseRunner(Runner): - def __init__(self, cfg: dict): - """An expanded easytorch runner for benchmarking time series models. + """ + An expanded easytorch runner for benchmarking time series models. - Support test loader and test process. - Support setup_graph for the models acting like tensorflow. + """ + def __init__(self, cfg: dict): + """Init Args: cfg (dict): all in one configurations @@ -27,23 +25,26 @@ def __init__(self, cfg: dict): super().__init__(cfg) - self.val_interval = cfg['VAL'].get('INTERVAL', 1) # validate every `val_interval` epoch - self.test_interval = cfg['TEST'].get('INTERVAL', 1) # test every `test_interval` epoch + # validate every `val_interval` epoch + self.val_interval = cfg["VAL"].get("INTERVAL", 1) + # test every `test_interval` epoch + self.test_interval = cfg["TEST"].get("INTERVAL", 1) # declare data loader self.train_data_loader = None self.val_data_loader = None # set proctitle - proctitle_name = "{0}({1})".format(cfg['MODEL'].get("NAME", " "), cfg.get("DATASET_NAME", " ")) + proctitle_name = "{0}({1})".format(cfg["MODEL"].get( + "NAME", " "), cfg.get("DATASET_NAME", " ")) setproctitle.setproctitle("{0}@BasicTS".format(proctitle_name)) @staticmethod def define_model(cfg: Dict) -> nn.Module: - return ARCH_REGISTRY.build(cfg['MODEL']['NAME'], cfg['MODEL'].get('PARAM', {})) + return cfg["MODEL"]["ARCH"](**cfg.MODEL.PARAM) def build_train_data_loader(self, cfg: dict) -> DataLoader: - """Support 'setup_graph' for the models acting like tensorflow. + """Support "setup_graph" for the models acting like tensorflow. Args: cfg (dict): all in one configurations @@ -53,19 +54,19 @@ def build_train_data_loader(self, cfg: dict) -> DataLoader: """ train_data_loader = super().build_train_data_loader(cfg) - if cfg['TRAIN'].get('SETUP_GRAPH', False): + if cfg["TRAIN"].get("SETUP_GRAPH", False): for data in train_data_loader: self.setup_graph(data) break return train_data_loader - + def setup_graph(self, data: torch.Tensor): """Setup all parameters and the computation graph. Args: data (torch.Tensor): data necessary for a forward pass """ - + pass def init_training(self, cfg: dict): @@ -77,7 +78,7 @@ def init_training(self, cfg: dict): super().init_training(cfg) # init test - if hasattr(cfg, 'TEST'): + if hasattr(cfg, "TEST"): self.init_test(cfg) @master_only @@ -88,9 +89,9 @@ def init_test(self, cfg: dict): cfg (dict): config """ - self.test_interval = cfg['TEST'].get('INTERVAL', 1) + self.test_interval = cfg["TEST"].get("INTERVAL", 1) self.test_data_loader = self.build_test_data_loader(cfg) - self.register_epoch_meter('test_time', 'test', '{:.2f} (s)', plt=False) + self.register_epoch_meter("test_time", "test", "{:.2f} (s)", plt=False) def build_test_data_loader(self, cfg: dict) -> DataLoader: """Build val dataset and dataloader. 
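Note the define_model change: instead of resolving a name through ARCH_REGISTRY, the config now stores the model class itself under MODEL.ARCH, and the runner instantiates it with MODEL.PARAM. A self-contained sketch of the new convention (DemoArch is a hypothetical stand-in for a real architecture):

```python
# The new convention: the config holds the class, not a registry string.
from easydict import EasyDict
import torch

class DemoArch(torch.nn.Module):              # hypothetical stand-in architecture
    def __init__(self, num_nodes: int):
        super().__init__()
        self.proj = torch.nn.Linear(num_nodes, num_nodes)

CFG = EasyDict()
CFG.MODEL = EasyDict()
CFG.MODEL.ARCH = DemoArch                     # the class itself
CFG.MODEL.PARAM = {"num_nodes": 207}
model = CFG.MODEL.ARCH(**CFG.MODEL.PARAM)     # what define_model now does
```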
@@ -105,7 +106,7 @@ def build_test_data_loader(self, cfg: dict) -> DataLoader: """ dataset = self.build_test_dataset(cfg) - return build_data_loader(dataset, cfg['TEST']['DATA']) + return build_data_loader(dataset, cfg["TEST"]["DATA"]) @staticmethod def build_test_dataset(cfg: dict): @@ -129,9 +130,9 @@ def on_epoch_end(self, epoch: int): """ # print train meters - self.print_epoch_meters('train') + self.print_epoch_meters("train") # tensorboard plt meters - self.plt_epoch_meters('train', epoch) + self.plt_epoch_meters("train", epoch) # validate if self.val_data_loader is not None and epoch % self.val_interval == 0: self.validate(train_epoch=epoch) @@ -163,15 +164,15 @@ def test_process(self, cfg: dict = None, train_epoch: int = None): self.model.eval() # test - self.test(train_epoch=train_epoch) + self.test() test_end_time = time.time() - self.update_epoch_meter('test_time', test_start_time - test_end_time) + self.update_epoch_meter("test_time", test_start_time - test_end_time) # print test meters - self.print_epoch_meters('test') + self.print_epoch_meters("test") if train_epoch is not None: # tensorboard plt meters - self.plt_epoch_meters('test', train_epoch // self.test_interval) + self.plt_epoch_meters("test", train_epoch // self.test_interval) self.on_test_end() diff --git a/basicts/runners/short_mts_runner.py b/basicts/runners/base_tsf_runner.py similarity index 51% rename from basicts/runners/short_mts_runner.py rename to basicts/runners/base_tsf_runner.py index cd9a1f05..5b271571 100644 --- a/basicts/runners/short_mts_runner.py +++ b/basicts/runners/base_tsf_runner.py @@ -1,45 +1,48 @@ import math from typing import Tuple, Union, Optional + import torch -from torch import nn -import numpy as np -from basicts.runners.base_runner import BaseRunner -from basicts.data.transforms import SCALER_REGISTRY -from basicts.utils.serialization import load_pkl from easytorch.utils.dist import master_only -class MTSRunner(BaseRunner): - """Runner for short term multivariate time series forecasting datasets. Typically, models predict the future 12 time steps based on historical time series. +from .base_runner import BaseRunner +from ..data import SCALER_REGISTRY +from ..utils import load_pkl +from ..metrics import masked_mae, masked_mape, masked_rmse + + +class BaseTimeSeriesForecastingRunner(BaseRunner): + """ + Runner for short term multivariate time series forecasting datasets. + Typically, models predict the future 12 time steps based on historical time series. Features: - Evaluate at horizon 3, 6, 12, and overall. - - Metrics: MAE, RMSE, MAPE. + - Metrics: MAE, RMSE, MAPE. The best model is the one with the smallest mae at validation. + - Loss: MAE (masked_mae). Allow customization. - Support curriculum learning. - Users only need to implement the `forward` function. - - Args: - BaseRunner (easytorch.easytorch.runner): base runner """ def __init__(self, cfg: dict): super().__init__(cfg) - self.dataset_name = cfg['DATASET_NAME'] - self.null_val = cfg['TRAIN'].get('NULL_VAL', 0) # different datasets have different null_values, e.g., 0.0 or np.nan. - self.dataset_type = cfg['DATASET_TYPE'] - self.forward_features = cfg['MODEL'].get('FROWARD_FEATURES', None) - self.target_features = cfg['MODEL'].get('TARGET_FEATURES', None) + self.dataset_name = cfg["DATASET_NAME"] + # different datasets have different null_values, e.g., 0.0 or np.nan. 
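NULL_VAL feeds the masked metrics: positions whose ground truth equals the null value are dropped, and the mask is re-normalized so the mean stays unbiased. A simplified sketch in the spirit of basicts.metrics.masked_mae (the real implementation also handles np.nan null values and other edge cases):

```python
# Simplified masked MAE: exclude entries whose target equals null_val and
# re-weight the remaining ones so the average stays unbiased.
import torch

def masked_mae_sketch(pred: torch.Tensor, target: torch.Tensor, null_val: float = 0.0):
    mask = (target != null_val).float()
    mask = mask / torch.clamp(mask.mean(), min=1e-8)   # re-weight valid entries
    return (torch.abs(pred - target) * mask).mean()
```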
+        self.null_val = cfg["TRAIN"].get("NULL_VAL", 0)
+        self.dataset_type = cfg["DATASET_TYPE"]
        # read scaler for re-normalization (re-scaling model outputs back to the original magnitude)
        self.scaler = load_pkl("datasets/" + self.dataset_name + "/scaler.pkl")
        # define loss
-        self.loss = cfg['TRAIN']['LOSS']
+        self.loss = cfg["TRAIN"]["LOSS"]
        # define metric
-        self.metrics = cfg['METRICS']
+        self.metrics = {"MAE": masked_mae, "RMSE": masked_rmse, "MAPE": masked_mape}
        # curriculum learning for output. Note that this is different from the CL in Seq2Seq archs.
-        self.cl_param = cfg.TRAIN.get('CL', None)
+        self.cl_param = cfg.TRAIN.get("CL", None)
        if self.cl_param is not None:
-            self.warm_up_epochs = cfg.TRAIN.CL.get('WARM_EPOCHS', 0)
-            self.cl_epochs = cfg.TRAIN.CL.get('CL_EPOCHS')
-            self.prediction_length = cfg.TRAIN.CL.get('PREDICTION_LENGTH')
+            self.warm_up_epochs = cfg.TRAIN.CL.get("WARM_EPOCHS", 0)
+            self.cl_epochs = cfg.TRAIN.CL.get("CL_EPOCHS")
+            self.prediction_length = cfg.TRAIN.CL.get("PREDICTION_LENGTH")
+        # evaluation horizon
+        self.evaluation_horizons = cfg["TEST"].get("EVALUATION_HORIZONS", range(12))

    def init_training(self, cfg: dict):
        """Initialize training.
@@ -49,10 +52,10 @@
        Args:
            cfg (dict): config
        """
-
+
        super().init_training(cfg)
-        for key, value in self.metrics.items():
-            self.register_epoch_meter("train_"+key, 'train', '{:.4f}')
+        for key, _ in self.metrics.items():
+            self.register_epoch_meter("train_"+key, "train", "{:.4f}")

    def init_validation(self, cfg: dict):
        """Initialize validation.
@@ -64,8 +67,8 @@
        """
        super().init_validation(cfg)
-        for key, value in self.metrics.items():
-            self.register_epoch_meter("val_"+key, 'val', '{:.4f}')
+        for key, _ in self.metrics.items():
+            self.register_epoch_meter("val_"+key, "val", "{:.4f}")
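Annotation: the `WARM_EPOCHS` / `CL_EPOCHS` / `PREDICTION_LENGTH` knobs read above drive `curriculum_learning()` further down, whose schedule is only partially visible in this hunk. A minimal sketch of the schedule it appears to implement; the step-growth formula here is an assumption, not a quote from the diff:

```python
# Hedged sketch of the curriculum-learning schedule configured above.
# Assumption: after a warm-up on the full horizon, the supervised prediction
# length grows by one step every `cl_epochs` epochs, capped at the full horizon.
def task_level(epoch: int, warm_up_epochs: int = 0, cl_epochs: int = 6,
               prediction_length: int = 12) -> int:
    if epoch <= warm_up_epochs:
        return prediction_length  # warm-up: supervise the full horizon
    level = (epoch - warm_up_epochs) // cl_epochs + 1
    return min(level, prediction_length)
```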
    def init_test(self, cfg: dict):
        """Initialize test.
@@ -77,8 +80,8 @@
        """
        super().init_test(cfg)
-        for key, value in self.metrics.items():
-            self.register_epoch_meter("test_"+key, 'test', '{:.4f}')
+        for key, _ in self.metrics.items():
+            self.register_epoch_meter("test_"+key, "test", "{:.4f}")

    def build_train_dataset(self, cfg: dict):
        """Build train dataset.
        Args:
            cfg (dict): config
        Returns:
            train dataset (Dataset)
        """
-        raw_file_path = cfg["TRAIN"]["DATA"]["DIR"] + "/data.pkl"
-        index_file_path = cfg["TRAIN"]["DATA"]["DIR"] + "/index.pkl"
-        batch_size = cfg['TRAIN']['DATA']['BATCH_SIZE']
-        dataset = cfg['DATASET_CLS'](raw_file_path, index_file_path, mode='train')
+        raw_file_path = "{0}/data.pkl".format(cfg["TRAIN"]["DATA"]["DIR"])
+        index_file_path = "{0}/index_in{1}_out{2}.pkl".format(
+            cfg["TRAIN"]["DATA"]["DIR"], cfg["DATASET_INPUT_LEN"], cfg["DATASET_OUTPUT_LEN"])
+        batch_size = cfg["TRAIN"]["DATA"]["BATCH_SIZE"]
+        dataset = cfg["DATASET_CLS"](
+            raw_file_path, index_file_path, mode="train")
        print("train len: {0}".format(len(dataset)))
-
+
        self.iter_per_epoch = math.ceil(len(dataset) / batch_size)
-
+
        return dataset

    @staticmethod
@@ -108,12 +113,14 @@ def build_val_dataset(cfg: dict):
        """Build val dataset.
        Args:
            cfg (dict): config
        Returns:
-            train dataset (Dataset)
+            validation dataset (Dataset)
        """
-        raw_file_path = cfg["VAL"]["DATA"]["DIR"] + "/data.pkl"
-        index_file_path = cfg["VAL"]["DATA"]["DIR"] + "/index.pkl"
-        dataset = cfg['DATASET_CLS'](raw_file_path, index_file_path, mode='valid')
+        raw_file_path = "{0}/data.pkl".format(cfg["VAL"]["DATA"]["DIR"])
+        index_file_path = "{0}/index_in{1}_out{2}.pkl".format(
+            cfg["VAL"]["DATA"]["DIR"], cfg["DATASET_INPUT_LEN"], cfg["DATASET_OUTPUT_LEN"])
+        dataset = cfg["DATASET_CLS"](
+            raw_file_path, index_file_path, mode="valid")
        print("val len: {0}".format(len(dataset)))
        return dataset
@@ -128,14 +135,16 @@
            test dataset (Dataset)
        """
-        raw_file_path = cfg["TEST"]["DATA"]["DIR"] + "/data.pkl"
-        index_file_path = cfg["TEST"]["DATA"]["DIR"] + "/index.pkl"
-        dataset = cfg['DATASET_CLS'](raw_file_path, index_file_path, mode='test')
+        raw_file_path = "{0}/data.pkl".format(cfg["TEST"]["DATA"]["DIR"])
+        index_file_path = "{0}/index_in{1}_out{2}.pkl".format(
+            cfg["TEST"]["DATA"]["DIR"], cfg["DATASET_INPUT_LEN"], cfg["DATASET_OUTPUT_LEN"])
+        dataset = cfg["DATASET_CLS"](
+            raw_file_path, index_file_path, mode="test")
        print("test len: {0}".format(len(dataset)))
        return dataset

    def curriculum_learning(self, epoch: int = None) -> int:
-        """calculate task level in curriculum learning.
+        """Calculate task level in curriculum learning.
        Args:
            epoch (int, optional): current epoch if in training process, else None. Defaults to None.
@@ -156,8 +165,8 @@
            cl_length = min(_, self.prediction_length)
        return cl_length

-    def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple:
-        """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled.
+    def forward(self, data: tuple, epoch: int = None, iter_num: int = None, train: bool = True, **kwargs) -> tuple:
+        """Feed forward process for train, val, and test. Note that the outputs are NOT re-scaled.
        Args:
            data (tuple): data (future data, history data). [B, L, N, C] for each of them
            epoch (int, optional): epoch number. Defaults to None.
            iter_num (int, optional): iteration number. Defaults to None.
            train (bool, optional): if in the training process. Defaults to True.
@@ -168,6 +177,7 @@
        Returns:
            tuple: (prediction, real_value). [B, L, N, C] for each of them.
""" + raise NotImplementedError() def train_iters(self, epoch: int, iter_index: int, data: Union[torch.Tensor, Tuple]) -> torch.Tensor: @@ -183,20 +193,23 @@ def train_iters(self, epoch: int, iter_index: int, data: Union[torch.Tensor, Tup """ iter_num = (epoch-1) * self.iter_per_epoch + iter_index - prediction, real_value = self.forward(data=data, epoch=epoch, iter_num=iter_num, train=True) + forward_return = list(self.forward(data=data, epoch=epoch, iter_num=iter_num, train=True)) # re-scale data - prediction = SCALER_REGISTRY.get(self.scaler['func'])(prediction, **self.scaler['args']) - real_value = SCALER_REGISTRY.get(self.scaler['func'])(real_value, **self.scaler['args']) + prediction_rescaled = SCALER_REGISTRY.get(self.scaler["func"])(forward_return[0], **self.scaler["args"]) + real_value_rescaled = SCALER_REGISTRY.get(self.scaler["func"])(forward_return[1], **self.scaler["args"]) # loss if self.cl_param: cl_length = self.curriculum_learning(epoch=epoch) - loss = self.loss(prediction[:, :cl_length, :, :], real_value[:, :cl_length, :, :], null_val=self.null_val) + forward_return[0] = prediction_rescaled[:, :cl_length, :, :] + forward_return[1] = real_value_rescaled[:, :cl_length, :, :] else: - loss = self.loss(prediction, real_value, null_val=self.null_val) + forward_return[0] = prediction_rescaled + forward_return[1] = real_value_rescaled + loss = self.loss(*forward_return, null_val=self.null_val) # metrics for metric_name, metric_func in self.metrics.items(): - metric_item = metric_func(prediction, real_value, null_val=self.null_val) - self.update_epoch_meter('train_'+metric_name, metric_item.item()) + metric_item = metric_func(*forward_return[:2], null_val=self.null_val) + self.update_epoch_meter("train_"+metric_name, metric_item.item()) return loss def val_iters(self, iter_index: int, data: Union[torch.Tensor, Tuple]): @@ -208,20 +221,18 @@ def val_iters(self, iter_index: int, data: Union[torch.Tensor, Tuple]): iter_index (int): current iter. """ - prediction, real_value = self.forward(data=data, epoch=None, iter_num=None, train=False) + forward_return = self.forward(data=data, epoch=None, iter_num=None, train=False) # re-scale data - prediction = SCALER_REGISTRY.get(self.scaler['func'])(prediction, **self.scaler['args']) - real_value = SCALER_REGISTRY.get(self.scaler['func'])(real_value, **self.scaler['args']) - # loss - mae = self.loss(prediction, real_value, null_val=self.null_val) + prediction_rescaled = SCALER_REGISTRY.get(self.scaler["func"])(forward_return[0], **self.scaler["args"]) + real_value_rescaled = SCALER_REGISTRY.get(self.scaler["func"])(forward_return[1], **self.scaler["args"]) # metrics for metric_name, metric_func in self.metrics.items(): - metric_item = metric_func(prediction, real_value, null_val=self.null_val) - self.update_epoch_meter('val_'+metric_name, metric_item.item()) + metric_item = metric_func(prediction_rescaled, real_value_rescaled, null_val=self.null_val) + self.update_epoch_meter("val_"+metric_name, metric_item.item()) @torch.no_grad() @master_only - def test(self, train_epoch: int = None): + def test(self): """Evaluate the model. 
Args: @@ -230,34 +241,38 @@ def test(self, train_epoch: int = None): # test loop prediction = [] - real_value = [] - for iter_index, data in enumerate(self.test_data_loader): - preds, testy = self.forward(data, epoch=train_epoch, iter_num=None, train=False) - prediction.append(preds) - real_value.append(testy) - prediction = torch.cat(prediction,dim=0) + real_value = [] + for _, data in enumerate(self.test_data_loader): + forward_return = self.forward(data, epoch=None, iter_num=None, train=False) + prediction.append(forward_return[0]) # preds = forward_return[0] + real_value.append(forward_return[1]) # testy = forward_return[1] + prediction = torch.cat(prediction, dim=0) real_value = torch.cat(real_value, dim=0) # re-scale data - prediction = SCALER_REGISTRY.get(self.scaler['func'])(prediction, **self.scaler['args']) - real_value = SCALER_REGISTRY.get(self.scaler['func'])(real_value, **self.scaler['args']) + prediction = SCALER_REGISTRY.get(self.scaler["func"])( + prediction, **self.scaler["args"]) + real_value = SCALER_REGISTRY.get(self.scaler["func"])( + real_value, **self.scaler["args"]) # summarize the results. - ## test performance of different horizon - for i in range(12): + # test performance of different horizon + for i in self.evaluation_horizons: # For horizon i, only calculate the metrics **at that time** slice here. - pred = prediction[:,i,:,:] - real = real_value[:,i,:,:] + pred = prediction[:, i, :, :] + real = real_value[:, i, :, :] # metrics metric_results = {} for metric_name, metric_func in self.metrics.items(): metric_item = metric_func(pred, real, null_val=self.null_val) metric_results[metric_name] = metric_item.item() - log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test RMSE: {:.4f}, Test MAPE: {:.4f}' - log = log.format(i+1, metric_results['MAE'], metric_results['RMSE'], metric_results['MAPE']) + log = "Evaluate best model on test data for horizon " + \ + "{:d}, Test MAE: {:.4f}, Test RMSE: {:.4f}, Test MAPE: {:.4f}" + log = log.format( + i+1, metric_results["MAE"], metric_results["RMSE"], metric_results["MAPE"]) self.logger.info(log) - ## test performance overall + # test performance overall for metric_name, metric_func in self.metrics.items(): metric_item = metric_func(prediction, real_value, null_val=self.null_val) - self.update_epoch_meter('test_'+metric_name, metric_item.item()) + self.update_epoch_meter("test_"+metric_name, metric_item.item()) metric_results[metric_name] = metric_item.item() @master_only @@ -269,4 +284,4 @@ def on_validating_end(self, train_epoch: Optional[int]): """ if train_epoch is not None: - self.save_best_model(train_epoch, 'val_MAE', greater_best=False) + self.save_best_model(train_epoch, "val_MAE", greater_best=False) diff --git a/basicts/runners/runner_zoo/agcrn_runner.py b/basicts/runners/runner_zoo/agcrn_runner.py new file mode 100644 index 00000000..cc3dbe74 --- /dev/null +++ b/basicts/runners/runner_zoo/agcrn_runner.py @@ -0,0 +1 @@ +from .simple_tsf_runner import SimpleTimeSeriesForecastingRunner as AGCRNRunner diff --git a/basicts/runners/runner_zoo/d2stgnn_runner.py b/basicts/runners/runner_zoo/d2stgnn_runner.py new file mode 100644 index 00000000..88b9f4a3 --- /dev/null +++ b/basicts/runners/runner_zoo/d2stgnn_runner.py @@ -0,0 +1 @@ +from .simple_tsf_runner import SimpleTimeSeriesForecastingRunner as D2STGNNRunner diff --git a/basicts/runners/runner_zoo/dcrnn_runner.py b/basicts/runners/runner_zoo/dcrnn_runner.py new file mode 100644 index 00000000..60c37bc6 --- /dev/null +++ 
b/basicts/runners/runner_zoo/dcrnn_runner.py
@@ -0,0 +1,84 @@
+import torch
+
+from ..base_tsf_runner import BaseTimeSeriesForecastingRunner
+
+
+class DCRNNRunner(BaseTimeSeriesForecastingRunner):
+    """Runner for DCRNN: add setup_graph and teacher forcing."""
+
+    def __init__(self, cfg: dict):
+        super().__init__(cfg)
+        self.forward_features = cfg["MODEL"].get("FROWARD_FEATURES", None)
+        self.target_features = cfg["MODEL"].get("TARGET_FEATURES", None)
+
+    def setup_graph(self, data):
+        """The official DCRNN implementation acts like TensorFlow: it creates its parameters during the first forward pass."""
+        try:
+            self.train_iters(1, 0, data)
+        except AttributeError:
+            pass
+
+    def select_input_features(self, data: torch.Tensor) -> torch.Tensor:
+        """Select input features and reshape data to fit the target model.
+
+        Args:
+            data (torch.Tensor): input history data, shape [B, L, N, C].
+
+        Returns:
+            torch.Tensor: reshaped data
+        """
+
+        # select feature using self.forward_features
+        if self.forward_features is not None:
+            data = data[:, :, :, self.forward_features]
+        return data
+
+    def select_target_features(self, data: torch.Tensor) -> torch.Tensor:
+        """Select target features and reshape data back to the BasicTS framework.
+
+        Args:
+            data (torch.Tensor): prediction of the model with arbitrary shape.
+
+        Returns:
+            torch.Tensor: reshaped data with shape [B, L, N, C]
+        """
+
+        # select feature using self.target_features
+        data = data[:, :, :, self.target_features]
+        return data
+
+    def forward(self, data: tuple, epoch: int = None, iter_num: int = None, train: bool = True) -> tuple:
+        """Feed forward process for train, val, and test. Note that the outputs are NOT re-scaled.
+
+        Args:
+            data (tuple): data (future data, history data). [B, L, N, C] for each of them
+            epoch (int, optional): epoch number. Defaults to None.
+            iter_num (int, optional): iteration number. Defaults to None.
+            train (bool, optional): if in the training process. Defaults to True.
+
+        Returns:
+            tuple: (prediction, real_value)
+        """
+
+        # preprocess
+        future_data, history_data = data
+        history_data = self.to_running_device(history_data)    # B, L, N, C
+        future_data = self.to_running_device(future_data)      # B, L, N, C
+        batch_size, length, num_nodes, _ = future_data.shape
+
+        history_data = self.select_input_features(history_data)
+        if train:
+            # teacher forcing only uses the first feature dimension.
+            _future_data = future_data[..., [0]]
+        else:
+            _future_data = None
+
+        # feed forward
+        prediction_data = self.model(history_data=history_data, future_data=_future_data,
+                                     batch_seen=iter_num if self.model.training else None, epoch=epoch)
+        assert list(prediction_data.shape)[:3] == [batch_size, length, num_nodes], \
+            "error shape of the output, edit the forward function to reshape it to [B, L, N, C]"
+        # post process
+        prediction = self.select_target_features(prediction_data)
+        real_value = self.select_target_features(future_data)
+        return prediction, real_value
diff --git a/basicts/runners/runner_zoo/dgcrn_runner.py b/basicts/runners/runner_zoo/dgcrn_runner.py
new file mode 100644
index 00000000..97568230
--- /dev/null
+++ b/basicts/runners/runner_zoo/dgcrn_runner.py
@@ -0,0 +1 @@
+from .simple_tsf_runner import SimpleTimeSeriesForecastingRunner as DGCRNRunner
diff --git a/basicts/runners/DGCRN_runner.py b/basicts/runners/runner_zoo/gts_runner.py
similarity index 60%
rename from basicts/runners/DGCRN_runner.py
rename to basicts/runners/runner_zoo/gts_runner.py
index dccb465d..838954c1 100644
--- a/basicts/runners/DGCRN_runner.py
+++ b/basicts/runners/runner_zoo/gts_runner.py
@@ -1,25 +1,37 @@
 import torch
-from basicts.runners.short_mts_runner import MTSRunner

-class DGCRNRunner(MTSRunner):
+from ..base_tsf_runner import BaseTimeSeriesForecastingRunner
+
+
+class GTSRunner(BaseTimeSeriesForecastingRunner):
    def __init__(self, cfg: dict):
        super().__init__(cfg)
+        self.forward_features = cfg["MODEL"].get("FROWARD_FEATURES", None)
+        self.target_features = cfg["MODEL"].get("TARGET_FEATURES", None)
+
+    def setup_graph(self, data):
+        try:
+            self.train_iters(1, 0, data)
+        except AttributeError:
+            pass

    def select_input_features(self, data: torch.Tensor) -> torch.Tensor:
-        """select input features and reshape data to fit the target model.
+        """Select input features and reshape data to fit the target model.
        Args:
-            data (torch.Tensor): input history data, shape [B, L, N, C]
+            data (torch.Tensor): input history data, shape [B, L, N, C].
+
        Returns:
            torch.Tensor: reshaped data
        """
+
        # select feature using self.forward_features
        if self.forward_features is not None:
            data = data[:, :, :, self.forward_features]
        return data
-
+
    def select_target_features(self, data: torch.Tensor) -> torch.Tensor:
-        """select target features and reshape data back to the BasicTS framework
+        """Select target features and reshape data back to the BasicTS framework.
        Args:
            data (torch.Tensor): prediction of the model with arbitrary shape.
@@ -27,6 +39,7 @@ def select_target_features(self, data: torch.Tensor) -> torch.Tensor:
        Returns:
            torch.Tensor: reshaped data with shape [B, L, N, C]
        """
+
        # select feature using self.target_features
        data = data[:, :, :, self.target_features]
        return data
@@ -43,19 +56,25 @@ def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:boo
        Returns:
            tuple: (prediction, real_value)
        """
+
        # preprocess
        future_data, history_data = data
        history_data = self.to_running_device(history_data)    # B, L, N, C
        future_data = self.to_running_device(future_data)      # B, L, N, C
-        B, L, N, C = future_data.shape
+        batch_size, length, num_nodes, _ = future_data.shape

-        history_data = self.select_input_features(history_data)
-        future_data_ = self.select_input_features(future_data)
+        history_data = self.select_input_features(history_data)
+        if train:
+            # teacher forcing only uses the first feature dimension.
+ _future_data = future_data[..., [0]] + else: + _future_data = None # feed forward - prediction_data = self.model(history_data=history_data, future_data=future_data_, batch_seen=iter_num, task_level=self.curriculum_learning(epoch)) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" + prediction_data, pred_adj, prior_adj = self.model(history_data=history_data, future_data=_future_data, batch_seen=iter_num, epoch=epoch) # B, L, N, C + assert list(prediction_data.shape)[:3] == [batch_size, length, num_nodes], \ + "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" # post process prediction = self.select_target_features(prediction_data) real_value = self.select_target_features(future_data) - return prediction, real_value + return prediction, real_value, pred_adj, prior_adj diff --git a/basicts/runners/runner_zoo/gwnet_runner.py b/basicts/runners/runner_zoo/gwnet_runner.py new file mode 100644 index 00000000..e5c51c38 --- /dev/null +++ b/basicts/runners/runner_zoo/gwnet_runner.py @@ -0,0 +1 @@ +from .simple_tsf_runner import SimpleTimeSeriesForecastingRunner as GraphWaveNetRunner diff --git a/basicts/runners/runner_zoo/linear_runner.py b/basicts/runners/runner_zoo/linear_runner.py new file mode 100644 index 00000000..6b60a658 --- /dev/null +++ b/basicts/runners/runner_zoo/linear_runner.py @@ -0,0 +1,3 @@ +from .simple_tsf_runner import SimpleTimeSeriesForecastingRunner as LinearRunner + +__all__ = ["LinearRunner"] diff --git a/basicts/runners/MTGNN_runner.py b/basicts/runners/runner_zoo/mtgnn_runner.py similarity index 70% rename from basicts/runners/MTGNN_runner.py rename to basicts/runners/runner_zoo/mtgnn_runner.py index 057e69b5..459ead8b 100644 --- a/basicts/runners/MTGNN_runner.py +++ b/basicts/runners/runner_zoo/mtgnn_runner.py @@ -1,32 +1,39 @@ +from typing import Tuple, Union + import torch import numpy as np -from typing import Tuple, Union -from basicts.runners.short_mts_runner import MTSRunner +from ..base_tsf_runner import BaseTimeSeriesForecastingRunner -class MTGNNRunner(MTSRunner): + +class MTGNNRunner(BaseTimeSeriesForecastingRunner): def __init__(self, cfg: dict): super().__init__(cfg) + self.forward_features = cfg["MODEL"].get("FROWARD_FEATURES", None) + self.target_features = cfg["MODEL"].get("TARGET_FEATURES", None) + # graph training self.step_size = cfg.TRAIN.CUSTOM.STEP_SIZE self.num_nodes = cfg.TRAIN.CUSTOM.NUM_NODES self.num_split = cfg.TRAIN.CUSTOM.NUM_SPLIT - self.perm = None + self.perm = None def select_input_features(self, data: torch.Tensor) -> torch.Tensor: - """select input features. + """Select input features. Args: data (torch.Tensor): input history data, shape [B, L, N, C] + Returns: torch.Tensor: reshaped data """ + # select feature using self.forward_features if self.forward_features is not None: data = data[:, :, :, self.forward_features] return data - + def select_target_features(self, data: torch.Tensor) -> torch.Tensor: - """select target feature + """Select target feature Args: data (torch.Tensor): prediction of the model with arbitrary shape. 
@@ -34,12 +41,13 @@ def select_target_features(self, data: torch.Tensor) -> torch.Tensor: Returns: torch.Tensor: reshaped data with shape [B, L, N, C] """ + # select feature using self.target_features data = data[:, :, :, self.target_features] return data - def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:bool = True, **kwargs) -> tuple: - """feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. + def forward(self, data: tuple, epoch: int = None, iter_num: int = None, train: bool = True, **kwargs) -> tuple: + """Feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. Args: data (tuple): data (future data, history data). [B, L, N, C] for each of them @@ -50,20 +58,23 @@ def forward(self, data: tuple, epoch:int = None, iter_num: int = None, train:boo Returns: tuple: (prediction, real_value). [B, L, N, C] for each of them. """ + if train: future_data, history_data, idx = data else: future_data, history_data = data idx = None - history_data = self.to_running_device(history_data) # B, L, N, C - future_data = self.to_running_device(future_data) # B, L, N, C - B, L, N, C = future_data.shape + history_data = self.to_running_device(history_data) # B, L, N, C + future_data = self.to_running_device(future_data) # B, L, N, C + B, L, N, C = future_data.shape - history_data = self.select_input_features(history_data) - - prediction_data = self.model(history_data=history_data, idx=idx, batch_seen=iter_num, epoch=epoch) # B, L, N, C - assert list(prediction_data.shape)[:3] == [B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" + history_data = self.select_input_features(history_data) + + prediction_data = self.model( + history_data=history_data, idx=idx, batch_seen=iter_num, epoch=epoch) # B, L, N, C + assert list(prediction_data.shape)[:3] == [ + B, L, N], "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" # post process prediction = self.select_target_features(prediction_data) real_value = self.select_target_features(future_data) @@ -82,7 +93,8 @@ def train_iters(self, epoch: int, iter_index: int, data: Union[torch.Tensor, Tup Returns: loss (torch.Tensor) """ - if iter_index%self.step_size==0: + + if iter_index % self.step_size == 0: self.perm = np.random.permutation(range(self.num_nodes)) num_sub = int(self.num_nodes/self.num_split) for j in range(self.num_split): @@ -91,7 +103,7 @@ def train_iters(self, epoch: int, iter_index: int, data: Union[torch.Tensor, Tup raise else: idx = self.perm[j * num_sub:] - idx = torch.tensor(idx) + idx = torch.tensor(idx) future_data, history_data = data data = future_data[:, :, idx, :], history_data[:, :, idx, :], idx loss = super().train_iters(epoch, iter_index, data) diff --git a/basicts/runners/runner_zoo/simple_tsf_runner.py b/basicts/runners/runner_zoo/simple_tsf_runner.py new file mode 100644 index 00000000..26261a9e --- /dev/null +++ b/basicts/runners/runner_zoo/simple_tsf_runner.py @@ -0,0 +1,77 @@ +import torch + +from ..base_tsf_runner import BaseTimeSeriesForecastingRunner + + +class SimpleTimeSeriesForecastingRunner(BaseTimeSeriesForecastingRunner): + """Simple Runner: select forward features and target features.""" + + def __init__(self, cfg: dict): + super().__init__(cfg) + self.forward_features = cfg["MODEL"].get("FROWARD_FEATURES", None) + self.target_features = cfg["MODEL"].get("TARGET_FEATURES", None) + + def select_input_features(self, data: torch.Tensor) -> torch.Tensor: + 
"""Select input features. + + Args: + data (torch.Tensor): input history data, shape [B, L, N, C] + + Returns: + torch.Tensor: reshaped data + """ + + # select feature using self.forward_features + if self.forward_features is not None: + data = data[:, :, :, self.forward_features] + return data + + def select_target_features(self, data: torch.Tensor) -> torch.Tensor: + """Select target feature. + + Args: + data (torch.Tensor): prediction of the model with arbitrary shape. + + Returns: + torch.Tensor: reshaped data with shape [B, L, N, C] + """ + + # select feature using self.target_features + data = data[:, :, :, self.target_features] + return data + + def forward(self, data: tuple, epoch: int = None, iter_num: int = None, train: bool = True, **kwargs) -> tuple: + """Feed forward process for train, val, and test. Note that the outputs are NOT re-scaled. + + Args: + data (tuple): data (future data, history ata). + epoch (int, optional): epoch number. Defaults to None. + iter_num (int, optional): iteration number. Defaults to None. + train (bool, optional): if in the training process. Defaults to True. + + Returns: + tuple: (prediction, real_value) + """ + + # preprocess + future_data, history_data = data + history_data = self.to_running_device(history_data) # B, L, N, C + future_data = self.to_running_device(future_data) # B, L, N, C + batch_size, length, num_nodes, _ = future_data.shape + + history_data = self.select_input_features(history_data) + _future_data = self.select_input_features(future_data) + + # curriculum learning + if self.cl_param is None: + prediction_data = self.model(history_data=history_data, future_data=_future_data, batch_seen=iter_num, epoch=epoch, train=train) + else: + task_level = self.curriculum_learning(epoch) + prediction_data = self.model(history_data=history_data, future_data=_future_data, batch_seen=iter_num, epoch=epoch, train=train, task_level=task_level) + # feed forward + assert list(prediction_data.shape)[:3] == [batch_size, length, num_nodes], \ + "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" + # post process + prediction = self.select_target_features(prediction_data) + real_value = self.select_target_features(future_data) + return prediction, real_value diff --git a/basicts/runners/runner_zoo/stemgnn_runner.py b/basicts/runners/runner_zoo/stemgnn_runner.py new file mode 100644 index 00000000..2260cab4 --- /dev/null +++ b/basicts/runners/runner_zoo/stemgnn_runner.py @@ -0,0 +1 @@ +from .simple_tsf_runner import SimpleTimeSeriesForecastingRunner as StemGNNRunner diff --git a/basicts/runners/runner_zoo/stgcn_runner.py b/basicts/runners/runner_zoo/stgcn_runner.py new file mode 100644 index 00000000..2eb8b807 --- /dev/null +++ b/basicts/runners/runner_zoo/stgcn_runner.py @@ -0,0 +1 @@ +from .simple_tsf_runner import SimpleTimeSeriesForecastingRunner as STGCNRunner diff --git a/basicts/runners/runner_zoo/stid_runner.py b/basicts/runners/runner_zoo/stid_runner.py new file mode 100644 index 00000000..7f5b98c5 --- /dev/null +++ b/basicts/runners/runner_zoo/stid_runner.py @@ -0,0 +1 @@ +from .simple_tsf_runner import SimpleTimeSeriesForecastingRunner as STIDRunner diff --git a/basicts/runners/runner_zoo/stnorm_runner.py b/basicts/runners/runner_zoo/stnorm_runner.py new file mode 100644 index 00000000..aeeaa236 --- /dev/null +++ b/basicts/runners/runner_zoo/stnorm_runner.py @@ -0,0 +1 @@ +from .simple_tsf_runner import SimpleTimeSeriesForecastingRunner as STNormRunner diff --git a/basicts/utils/__init__.py 
diff --git a/basicts/utils/__init__.py b/basicts/utils/__init__.py
new file mode 100644
index 00000000..cad8f2e0
--- /dev/null
+++ b/basicts/utils/__init__.py
@@ -0,0 +1,4 @@
+from .serialization import load_adj, load_pkl, dump_pkl, load_node2vec_emb
+from .misc import clock, check_nan_inf, remove_nan_inf
+
+__all__ = ["load_adj", "load_pkl", "dump_pkl", "load_node2vec_emb", "clock", "check_nan_inf", "remove_nan_inf"]
diff --git a/basicts/utils/adjacent_matrix_norm.py b/basicts/utils/adjacent_matrix_norm.py
index 058a0928..417d4ac6 100644
--- a/basicts/utils/adjacent_matrix_norm.py
+++ b/basicts/utils/adjacent_matrix_norm.py
@@ -1,13 +1,12 @@
-#!/usr/bin/env python
-# -*- encoding: utf-8 -*-
-import scipy.sparse as sp
 import numpy as np
+import scipy.sparse as sp
 from scipy.sparse import linalg
+

 def calculate_symmetric_normalized_laplacian(adj: np.ndarray) -> np.matrix:
-    """Calculate yymmetric normalized laplacian.
+    """Calculate symmetric normalized laplacian.
     Assuming unnormalized laplacian matrix is `L = D - A`,
-    then symmetric normalized laplacian matrix is:
+    then symmetric normalized laplacian matrix is:
     `L^{Sym} = D^-1/2 L D^-1/2 = D^-1/2 (D-A) D^-1/2 = I - D^-1/2 A D^-1/2`
     For node `i` and `j` where `i!=j`, L^{sym}_{ij} <=0.
@@ -17,14 +16,18 @@ def calculate_symmetric_normalized_laplacian(adj: np.ndarray) -> np.matrix:
     Returns:
         np.matrix: Symmetric normalized laplacian L^{Sym}
     """
-    adj = sp.coo_matrix(adj)
-    D = np.array(adj.sum(1))
-    D_inv_sqrt = np.power(D, -0.5).flatten()  # diagonals of D^{-1/2}
-    D_inv_sqrt[np.isinf(D_inv_sqrt)] = 0.
-    matrix_D_inv_sqrt = sp.diags(D_inv_sqrt)  # D^{-1/2}
-    symmetric_normalized_laplacian = sp.eye(adj.shape[0]) - matrix_D_inv_sqrt.dot(adj).dot(matrix_D_inv_sqrt).tocoo()
+
+    adj = sp.coo_matrix(adj)
+    degree = np.array(adj.sum(1))
+    # diagonals of D^{-1/2}
+    degree_inv_sqrt = np.power(degree, -0.5).flatten()
+    degree_inv_sqrt[np.isinf(degree_inv_sqrt)] = 0.
+    matrix_degree_inv_sqrt = sp.diags(degree_inv_sqrt)  # D^{-1/2}
+    symmetric_normalized_laplacian = sp.eye(
+        adj.shape[0]) - matrix_degree_inv_sqrt.dot(adj).dot(matrix_degree_inv_sqrt).tocoo()
     return symmetric_normalized_laplacian
+

 def calculate_scaled_laplacian(adj: np.ndarray, lambda_max: int = 2, undirected: bool = True) -> np.matrix:
     """Re-scale the eigenvalues to [-1, 1] by scaling the normalized laplacian matrix, for use with Chebyshev polynomials.
     According to `2017 ICLR GCN`, the lambda max is set to 2, and the graph is set to undirected.
@@ -39,20 +42,23 @@ def calculate_scaled_laplacian(adj: np.ndarray, lambda_max: int = 2, undirected:
     Returns:
         np.matrix: The rescaled laplacian matrix.
     """
+
     if undirected:
         adj = np.maximum.reduce([adj, adj.T])
-    L = calculate_symmetric_normalized_laplacian(adj)
+    laplacian_matrix = calculate_symmetric_normalized_laplacian(adj)
     if lambda_max is None:  # manually calculate the max lambda
-        lambda_max, _ = linalg.eigsh(L, 1, which='LM')
+        lambda_max, _ = linalg.eigsh(laplacian_matrix, 1, which='LM')
         lambda_max = lambda_max[0]
-    L = sp.csr_matrix(L)
-    M, _ = L.shape
-    I = sp.identity(M, format='csr', dtype=L.dtype)
-    L_res = (2 / lambda_max * L) - I
-    return L_res
-
-def symmetric_message_passing_adj(adj: np.ndarray) -> np.matrix:
-    """ Calculate the renormalized message passing adj in `GCN`.
+    laplacian_matrix = sp.csr_matrix(laplacian_matrix)
+    num_nodes, _ = laplacian_matrix.shape
+    identity_matrix = sp.identity(
+        num_nodes, format='csr', dtype=laplacian_matrix.dtype)
+    laplacian_res = (2 / lambda_max * laplacian_matrix) - identity_matrix
+    return laplacian_res
+
+
+def calculate_symmetric_message_passing_adj(adj: np.ndarray) -> np.matrix:
+    """Calculate the renormalized message passing adj in `GCN`.
     A = A + I
     return D^{-1/2} A D^{-1/2}
@@ -64,17 +70,19 @@
     """
     # add self loop
-    adj = adj + np.diag(np.ones(adj.shape[0], dtype=np.float32))
+    adj = adj + np.diag(np.ones(adj.shape[0], dtype=np.float32))
     # print("calculating the renormalized message passing adj, please ensure that the self-loop has been added to adj.")
-    adj = sp.coo_matrix(adj)
-    rowsum = np.array(adj.sum(1))
-    d_inv_sqrt = np.power(rowsum, -0.5).flatten()
+    adj = sp.coo_matrix(adj)
+    row_sum = np.array(adj.sum(1))
+    d_inv_sqrt = np.power(row_sum, -0.5).flatten()
     d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
-    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
-    mp_adj = d_mat_inv_sqrt.dot(adj).transpose().dot(d_mat_inv_sqrt).astype(np.float32)
+    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
+    mp_adj = d_mat_inv_sqrt.dot(adj).transpose().dot(
+        d_mat_inv_sqrt).astype(np.float32)
     return mp_adj

-def transition_matrix(adj: np.ndarray) -> np.matrix:
+
+def calculate_transition_matrix(adj: np.ndarray) -> np.matrix:
     """Calculate the transition matrix `P` proposed in DCRNN and Graph WaveNet.
     P = D^{-1}A = A/rowsum(A)
@@ -84,11 +92,11 @@
     Returns:
         np.matrix: Transition matrix P
     """
+
     adj = sp.coo_matrix(adj)
-    rowsum = np.array(adj.sum(1)).flatten()
-    d_inv = np.power(rowsum, -1).flatten()
+    row_sum = np.array(adj.sum(1)).flatten()
+    d_inv = np.power(row_sum, -1).flatten()
     d_inv[np.isinf(d_inv)] = 0.
-    d_mat= sp.diags(d_inv)
-    # P = d_mat.dot(adj)
-    P = d_mat.dot(adj).astype(np.float32).todense()
-    return P
+    d_mat = sp.diags(d_inv)
+    prob_matrix = d_mat.dot(adj).astype(np.float32).todense()
+    return prob_matrix
diff --git a/basicts/utils/distance.py b/basicts/utils/distance.py
deleted file mode 100644
index e69de29b..00000000
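Annotation: the three helpers above compute `L^{Sym} = I - D^{-1/2} A D^{-1/2}`, the renormalized adjacency `D^{-1/2} (A + I) D^{-1/2}`, and the transition matrix `P = D^{-1} A`. A self-contained dense NumPy check on a toy 3-node path graph, shown only to make the formulas concrete (not part of the diff):

```python
import numpy as np

# toy undirected path graph: 0 - 1 - 2
A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
d = A.sum(axis=1)
D_inv_sqrt = np.diag(d ** -0.5)

L_sym = np.eye(3) - D_inv_sqrt @ A @ D_inv_sqrt   # symmetric normalized Laplacian
P = np.diag(1.0 / d) @ A                          # transition matrix: rows sum to 1

assert np.allclose(P.sum(axis=1), 1.0)
assert np.all(np.linalg.eigvalsh(L_sym) >= -1e-9)  # L_sym is positive semi-definite
```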
diff --git a/basicts/utils/misc.py b/basicts/utils/misc.py
index 5ee91d41..35387c53 100644
--- a/basicts/utils/misc.py
+++ b/basicts/utils/misc.py
@@ -1,18 +1,22 @@
 import time
+
 import torch
+

 def clock(func):
+    """clock decorator"""
     def clocked(*args, **kw):
         """decorator for clock"""
         t0 = time.perf_counter()
         result = func(*args, **kw)
         elapsed = time.perf_counter() - t0
         name = func.__name__
-        print('%s: %0.8fs...' % (name, elapsed))
+        print("%s: %0.8fs..." % (name, elapsed))
         return result
     return clocked

-def check_nan_inf(tensor: torch.Tensor, raise_ex: bool = True):
+
+def check_nan_inf(tensor: torch.Tensor, raise_ex: bool = True) -> tuple:
     """check nan and inf in tensor

     Args:
@@ -24,26 +28,29 @@
     Returns:
         dict: {'nan': bool, 'inf': bool}
-        torch.Tensor: Tensor
+        bool: whether nan or inf exists
     """
+
     # nan
     nan = torch.any(torch.isnan(tensor))
     # inf
     inf = torch.any(torch.isinf(tensor))
     # raise
     if raise_ex and (nan or inf):
-        raise Exception({"nan":nan, "inf":inf})
-    return {"nan":nan, "inf":inf}, nan or inf
+        raise Exception({"nan": nan, "inf": inf})
+    return {"nan": nan, "inf": inf}, nan or inf
+

 def remove_nan_inf(tensor: torch.Tensor):
     """remove nan and inf in tensor

     Args:
-        tensor (_type_): _description_
+        tensor (torch.Tensor): input tensor

     Returns:
-        _type_: _description_
+        torch.Tensor: output tensor
     """
+
     tensor = torch.where(torch.isnan(tensor), torch.zeros_like(tensor), tensor)
     tensor = torch.where(torch.isinf(tensor), torch.zeros_like(tensor), tensor)
     return tensor
diff --git a/basicts/utils/options.py b/basicts/utils/options.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/basicts/utils/serialization.py b/basicts/utils/serialization.py
index 817c5975..4b579c93 100644
--- a/basicts/utils/serialization.py
+++ b/basicts/utils/serialization.py
@@ -1,29 +1,45 @@
-import torch
 import pickle
-from basicts.utils.adjacent_matrix_norm import *

-def load_pkl(pickle_file: str):
-    """load pickle data.
+import torch
+import numpy as np
+
+from .adjacent_matrix_norm import calculate_scaled_laplacian, calculate_symmetric_normalized_laplacian, calculate_symmetric_message_passing_adj, calculate_transition_matrix
+
+
+def load_pkl(pickle_file: str) -> object:
+    """Load pickle data.

     Args:
         pickle_file (str): file path
+
+    Returns:
+        object: loaded object
     """
+
     try:
-        with open(pickle_file, 'rb') as f:
-            pickle_data = pickle.load(f)
-    except UnicodeDecodeError as e:
-        with open(pickle_file, 'rb') as f:
-            pickle_data = pickle.load(f, encoding='latin1')
+        with open(pickle_file, "rb") as f:
+            pickle_data = pickle.load(f)
+    except UnicodeDecodeError:
+        with open(pickle_file, "rb") as f:
+            pickle_data = pickle.load(f, encoding="latin1")
     except Exception as e:
-        print('Unable to load data ', pickle_file, ':', e)
+        print("Unable to load data ", pickle_file, ":", e)
         raise
     return pickle_data

-def dump_pkl(obj, file_path):
-    """dumplicate pickle data."""
-    with open(file_path, 'wb') as f:
+
+def dump_pkl(obj: object, file_path: str):
+    """Dump pickle data.
+
+    Args:
+        obj (object): object
+        file_path (str): file path
+    """
+
+    with open(file_path, "wb") as f:
         pickle.dump(obj, f)
+

 def load_adj(file_path: str, adj_type: str):
     """load adjacency matrix.
@@ -35,31 +51,35 @@ def load_adj(file_path: str, adj_type: str):
        list of numpy.matrix: list of preprocessed adjacency matrices
        np.ndarray: raw adjacency matrix
    """
+
    try:
        # METR and PEMS_BAY
-        sensor_ids, sensor_id_to_ind, adj_mx = load_pkl(file_path)
-    except:
+        _, _, adj_mx = load_pkl(file_path)
+    except ValueError:
        # PEMS04
        adj_mx = load_pkl(file_path)
    if adj_type == "scalap":
        adj = [calculate_scaled_laplacian(adj_mx).astype(np.float32).todense()]
    elif adj_type == "normlap":
-        adj = [ calculate_symmetric_normalized_laplacian(adj_mx).astype(np.float32).todense()]
+        adj = [calculate_symmetric_normalized_laplacian(
+            adj_mx).astype(np.float32).todense()]
    elif adj_type == "symnadj":
-        adj = [symmetric_message_passing_adj(adj_mx).astype(np.float32).todense()]
+        adj = [calculate_symmetric_message_passing_adj(
+            adj_mx).astype(np.float32).todense()]
    elif adj_type == "transition":
-        adj = [transition_matrix(adj_mx).T]
+        adj = [calculate_transition_matrix(adj_mx).T]
    elif adj_type == "doubletransition":
-        adj = [transition_matrix(adj_mx).T, transition_matrix(adj_mx.T).T]
+        adj = [calculate_transition_matrix(adj_mx).T, calculate_transition_matrix(adj_mx.T).T]
    elif adj_type == "identity":
        adj = [np.diag(np.ones(adj_mx.shape[0])).astype(np.float32).todense()]
-    elif adj_type == 'original':
+    elif adj_type == "original":
        adj = adj_mx
    else:
        error = 0
        assert error, "adj type not defined"
    return adj, adj_mx
+

def load_node2vec_emb(file_path: str) -> torch.Tensor:
    """load node2vec embedding
@@ -69,14 +89,15 @@
    Returns:
        torch.Tensor: node2vec embedding
    """
+
    # spatial embedding
-    with open(file_path, mode='r') as f:
+    with open(file_path, mode="r") as f:
        lines = f.readlines()
-    temp = lines[0].split(' ')
+    temp = lines[0].split(" ")
    num_vertex, dims = int(temp[0]), int(temp[1])
-    SE = torch.zeros((num_vertex, dims), dtype=torch.float32)
+    spatial_embeddings = torch.zeros((num_vertex, dims), dtype=torch.float32)
    for line in lines[1:]:
-        temp = line.split(' ')
+        temp = line.split(" ")
        index = int(temp[0])
-        SE[index] = torch.tensor([float(ch) for ch in temp[1:]])
-    return SE
+        spatial_embeddings[index] = torch.Tensor([float(ch) for ch in temp[1:]])
+    return spatial_embeddings
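Annotation: a short usage sketch for the renamed helpers; the path and dataset are illustrative, and the `"doubletransition"` call matches the one used by the D2STGNN config later in this diff:

```python
from basicts.utils import load_adj

# "doubletransition" yields two preprocessed matrices (forward and backward
# transition matrices); the raw adjacency matrix is returned alongside.
adj_list, adj_raw = load_adj("datasets/METR-LA/adj_mx.pkl", "doubletransition")
print(len(adj_list), adj_raw.shape)  # 2 (num_nodes, num_nodes)
```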
diff --git a/docs/DataFormat_CN.md b/docs/DataFormat_CN.md
deleted file mode 100644
index 1d460295..00000000
--- a/docs/DataFormat_CN.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Data Format
-
-- [Data Format](#data-format)
-  - [How the data is stored](#how-the-data-is-stored)
-  - [Why store the data this way](#why-store-the-data-this-way)
-  - [Reference](#reference)
-
-## How the data is stored
-
-To read and preprocess data flexibly, we designed an efficient and general data loading/preprocessing pipeline.
-
-Let the raw time series be $\mathbf{X}$, with $\mathbf{X}_t$ the data at time $t$.
-In the common case, a training sample for time series forecasting is: at time $t$, use the $p$ historical time slices $\mathbf{X}_{t-p+1}, ..., \mathbf{X}_{t}$ to predict the $f$ future time slices $\mathbf{X}_{t+1}, ..., \mathbf{X}_{t+f}$.
-There are also special cases in which the samples are not contiguous, e.g., many works built on multi-resolution ideas [1][2].
-
-To provide a **unified and general** data pipeline, we store the index of every sample instead of materializing the training samples.
-Concretely, the preprocessing code produces four files for each dataset:
-
-- `index.pkl`: dict of list
-  - keys: train, valid, test
-  - values: the index of each training sample; three cases, easy for users to customize
-    - contiguous (default): [current_time_step_index-p+1, current_time_step_index, current_time_step_index+f]
-    - non-contiguous: [[x, x, ..., x], current_time_step_index, current_time_step_index+f]
-    - other
-
-- `data.pkl`: dict of list
-  - keys: processed_data, other
-  - values: the normalized, feature-augmented "raw" time series, or other auxiliary time series
-    - processed_data: np.array, L x N x C. L: total length of the time series, N: number of multivariate time series, C: number of features
-    - other
-
-- `scaler.pkl`: dict of list
-  - keys:
-    - args: normalization/de-normalization parameters, e.g., mean/std, min/max
-    - func: the de-normalization function
-  - values
-
-- `adj_mx.pkl`: the pre-defined adjacent matrix
-
-Backed by numpy's powerful functionality, this scheme stays fast while remaining highly extensible, and covers the data-loading needs of almost all models.
-
-## Why store the data this way
-
-A unified data loading/preprocessing pipeline makes it convenient to compare the efficiency of different baselines fairly, and makes the code easier to understand.
-
-Note that although this loading method is efficient, it is not always the fastest. For example, preprocessing all samples to disk may be faster, but that sacrifices generality.
-
-## Reference
-
-[1] Attention Based Spatial-Temporal Graph Convolutional Networks for Traffic Flow Forecasting\
-[2] Learning Dynamics and Heterogeneity of Spatial-Temporal Graph Data for Traffic Forecasting
diff --git a/docs/DataPreprocess_CN.md b/docs/DataPreprocess_CN.md
deleted file mode 100644
index 150e4dcf..00000000
--- a/docs/DataPreprocess_CN.md
+++ /dev/null
@@ -1,169 +0,0 @@
-# Data Preprocessing (using PEMS04 as an example)
-
-[TOC]
-
-This document uses PEMS04 as an example to walk through BasicTS's data preprocessing: the raw data format, the preprocessing procedure, and the format of the preprocessed data.
-
-The preprocessing code for the PEMS04 dataset lives in `scripts/data_preparation/PEMS04/generate_training_data.py`.
-
-You can add your own dataset by adapting the PEMS04 preprocessing.
-
-## 1 Raw data
-
-The PEMS04 dataset comes from a traffic system and contains data from 307 traffic sensors.
-
-The raw data lives at `datasets/raw_data/PEMS04/PEMS04.npz`; it is a numpy array of shape `[16992, 307, 3]`.
-
-Here, 16992 means the series has 16992 time slices, 307 means there are 307 time series from 307 sensors, and 3 means each sensor samples three features per step.
-
-## 2 Preprocessing procedure
-
-Training samples are usually obtained by sliding a window of length P+F over the raw time series.
-The first P steps serve as historical data and the last F steps as future data.
-
-### 2.1 Preprocessing parameters
-
-- `output_dir`: where the preprocessed files are stored.
-- `data_file_path`: location of the raw data.
-- `graph_file_path`: location of the graph structure (a graph is optional; if your dataset has no graph or you do not know how to construct one, ignore this parameter).
-- `history_seq_len`: length of the historical data, i.e., P.
-- `future_seq_len`: length of the future data, i.e., F.
-- `steps_per_day`: number of time slices per day, determined by the sampling frequency. For example, sampling every 5 minutes gives 288.
-- `dow`: whether to add the day-in-week feature.
-- `C`: the feature dimensions to use. For example, in PEMS04 we only need the first of the three features collected by the sensors, so `C=[0]`.
-- `train_ratio`: fraction of all samples used for training.
-- `valid_ratio`: fraction of all samples used for validation.
-
-### 2.2 Main preprocessing steps
-
-1. Read the raw data
-
-```python
-import numpy as np
-data = np.load(args.data_file_path)['data']  # shape: [16992, 307, 3]
-```
-
-2. Compute the total number of samples from the series length, `history_seq_len`, and `future_seq_len`, then derive the number of train, validation, and test samples
-
-```python
-num_samples = L - (history_seq_len + future_seq_len) + 1  # total number of samples
-train_num_short = round(num_samples * train_ratio)  # number of training samples
-valid_num_short = round(num_samples * valid_ratio)  # number of validation samples
-test_num_short = num_samples - train_num_short - valid_num_short  # number of test samples
-```
-
-3. Generate the list of sample indexes
-
-For a given time `t`, its index is `[t-history_seq_len, t, t+future_seq_len]`.
-
-```python
-index_list = []
-for t in range(history_seq_len, num_samples + history_seq_len):
-    index = (t-history_seq_len, t, t+future_seq_len)
-    index_list.append(index)
-```
-
-4. Normalize the data
-
-Different data have different magnitudes; PEMS04 values range between 0 and several hundred.
-
-Normalizing the dataset is therefore necessary.
-
-Z-score normalization is the most common choice; other methods such as min-max normalization also exist.
-
-The PEMS04 dataset uses the Z-score normalization function `standard_transform`.
-
-```python
-scaler = standard_transform  # normalization helper
-data_norm = scaler(data, output_dir, train_index)
-# data_norm: the normalized data
-# output_dir: used to save parameters produced during normalization for later use, e.g., the mean and std.
-# train_index: normalization parameters are computed on the training samples only.
-```
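Annotation: the document references `standard_transform` without showing its body. A hedged reconstruction of what such a Z-score helper might look like; the train-slice bookkeeping and the `re_standard_transform` name are assumptions, not code from this repository:

```python
import pickle
import numpy as np


def standard_transform(data: np.ndarray, output_dir: str, train_index: list) -> np.ndarray:
    """Z-score normalization; statistics are computed on the training slice only (sketch)."""
    train_end = train_index[-1][1]  # assumed layout: last training sample's history end
    mean, std = data[:train_end].mean(), data[:train_end].std()
    # persist the inverse-transform spec that is later consumed via scaler.pkl
    scaler = {"func": "re_standard_transform", "args": {"mean": mean, "std": std}}
    with open(output_dir + "/scaler.pkl", "wb") as f:
        pickle.dump(scaler, f)
    return (data - mean) / std
```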
-5. Add extra features
-
-We usually add some extra features to the dataset. For example, for PEMS04 we add two temporal features: tid and dow.
-
-tid is a feature in the range [0, 1] that encodes the current time of day (time in day). For example, with 288 time slices per day, the tid of the 10th slice of a day is `10/288`.
-
-dow encodes the day of the week (day of week), so its value is naturally one of {0, 1, 2, 3, 4, 5, 6}.
-
-Note that the PEMS04 dataset may not contain absolute timestamps, so we can only compute relative times: assume the first time slice has `tid=0, dow=0`, then compute the features of all later slices in order.
-
-```python
-# add external feature
-feature_list = [data_norm]
-if add_time_in_day:
-    # numerical time_in_day
-    time_ind = [i%args.steps_per_day / args.steps_per_day for i in range(data_norm.shape[0])]
-    time_ind = np.array(time_ind)
-    time_in_day = np.tile(time_ind, [1, N, 1]).transpose((2, 1, 0))
-    feature_list.append(time_in_day)
-if add_day_in_week:
-    # numerical day_in_week
-    day_in_week = [(i // args.steps_per_day)%7 for i in range(data_norm.shape[0])]
-    day_in_week = np.array(day_in_week)
-    day_in_week = np.tile(day_in_week, [1, N, 1]).transpose((2, 1, 0))
-    feature_list.append(day_in_week)
-processed_data = np.concatenate(feature_list, axis=-1)  # data after appending the external features
-```
-
-6. Save the preprocessed data
-
-```python
-# save the index
-index = {}
-index['train'] = train_index
-index['valid'] = valid_index
-index['test'] = test_index
-pickle.dump(index, open(output_dir + "/index.pkl", "wb"))
-
-# save the preprocessed data
-data = {}
-data['processed_data'] = processed_data
-pickle.dump(data, open(output_dir + "/data.pkl", "wb"))
-
-# save the graph structure
-# skip this if you do not have one
-if os.path.exists(args.graph_file_path):
-    shutil.copyfile(args.graph_file_path, output_dir + '/adj_mx.pkl')  # copy models
-else:
-    generate_adj_PEMS04()
-    shutil.copyfile(args.graph_file_path, output_dir + '/adj_mx.pkl')  # copy models
-```
-
-## 3 Preprocessed data
-
-The storage format of the data is specified in [data_preparation_CN.md](docs/DataFormat_CN.md).
-
-The preprocessed data is saved under `datasets/PEMS04/`.
-
-All of the files below can be read with the `load_pkl` function from `utils/serialization.py`.
-
-### 3.1 data.pkl
-
-A dict. `data['processed_data']` holds the preprocessed data (an array).
-
-### 3.2 index.pkl
-
-A dict. The generated train, validation, and test index lists.
-
-```python
-index['train']  # index list of the train dataset
-index['valid']  # index list of the valid dataset
-index['test']   # index list of the test dataset
-```
-
-### 3.3 scaler.pkl
-
-A dict holding the normalization function and the parameters it needs. For example:
-
-```python
-scaler['func']  # the normalization function
-scaler['args']  # the parameters used by the normalization function, a dict, e.g., {"mean": mean, "std": std}.
-```
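Annotation: the runners earlier in this diff re-scale outputs via `SCALER_REGISTRY.get(self.scaler["func"])(x, **self.scaler["args"])`. A minimal sketch of the inverse Z-score function such a registry entry could hold, assuming the `{"mean": ..., "std": ...}` layout shown above (the tensor and statistics are illustrative values):

```python
import torch


def re_standard_transform(data: torch.Tensor, mean: float, std: float, **kwargs) -> torch.Tensor:
    """Inverse of Z-score normalization: map model outputs back to the original scale."""
    return data * std + mean


# mirrors SCALER_REGISTRY.get(scaler["func"])(prediction, **scaler["args"])
prediction = torch.randn(64, 12, 307, 1)  # [B, L, N, C]
prediction_rescaled = re_standard_transform(prediction, mean=207.2, std=156.8)
```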
-
-### 3.4 adj_mx.pkl
-
-The pre-defined adjacency matrix. If your dataset does not come with a graph structure, or you do not know how to construct one for it, you can ignore this file.
diff --git a/examples/AGCRN/AGCRN_METR-LA.py b/examples/AGCRN/AGCRN_METR-LA.py
new file mode 100644
index 00000000..f0b7b321
--- /dev/null
+++ b/examples/AGCRN/AGCRN_METR-LA.py
@@ -0,0 +1,102 @@
+import os
+import sys
+
+# TODO: remove it when basicts can be installed by pip
+sys.path.append(os.path.abspath(__file__ + "/../../.."))
+from easydict import EasyDict
+from basicts.archs import AGCRN
+from basicts.runners import AGCRNRunner
+from basicts.data import TimeSeriesForecastingDataset
+from basicts.losses import masked_mae
+
+
+CFG = EasyDict()
+
+# ================= general ================= #
+CFG.DESCRIPTION = "AGCRN model configuration"
+CFG.RUNNER = AGCRNRunner
+CFG.DATASET_CLS = TimeSeriesForecastingDataset
+CFG.DATASET_NAME = "METR-LA"
+CFG.DATASET_TYPE = "Traffic speed"
+CFG.DATASET_INPUT_LEN = 12
+CFG.DATASET_OUTPUT_LEN = 12
+CFG.GPU_NUM = 1
+
+# ================= environment ================= #
+CFG.ENV = EasyDict()
+CFG.ENV.SEED = 1
+CFG.ENV.CUDNN = EasyDict()
+CFG.ENV.CUDNN.ENABLED = True
+
+# ================= model ================= #
+CFG.MODEL = EasyDict()
+CFG.MODEL.NAME = "AGCRN"
+CFG.MODEL.ARCH = AGCRN
+CFG.MODEL.PARAM = {
+    "num_nodes" : 207,
+    "input_dim" : 2,
+    "rnn_units" : 64,
+    "output_dim": 1,
+    "horizon"   : 12,
+    "num_layers": 2,
+    "default_graph": True,
+    "embed_dim" : 10,
+    "cheb_k"    : 2
+}
+CFG.MODEL.FROWARD_FEATURES = [0, 1]
+CFG.MODEL.TARGET_FEATURES = [0]
+
+# ================= optim ================= #
+CFG.TRAIN = EasyDict()
+CFG.TRAIN.LOSS = masked_mae
+CFG.TRAIN.OPTIM = EasyDict()
+CFG.TRAIN.OPTIM.TYPE = "Adam"
+CFG.TRAIN.OPTIM.PARAM= {
+    "lr":0.003,
+}
+
+# ================= train ================= #
+CFG.TRAIN.NUM_EPOCHS = 100
+CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
+    "checkpoints",
+    "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
+)
+# train data
+CFG.TRAIN.DATA = EasyDict()
+CFG.TRAIN.NULL_VAL = 0.0
+# read data
+CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.TRAIN.DATA.BATCH_SIZE = 64
+CFG.TRAIN.DATA.PREFETCH = False
+CFG.TRAIN.DATA.SHUFFLE = True
+CFG.TRAIN.DATA.NUM_WORKERS = 2
+CFG.TRAIN.DATA.PIN_MEMORY = False
+
+# ================= validate ================= #
+CFG.VAL = EasyDict()
+CFG.VAL.INTERVAL = 1
+# validating data
+CFG.VAL.DATA = EasyDict()
+# read data
+CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.VAL.DATA.BATCH_SIZE = 64
+CFG.VAL.DATA.PREFETCH = False
+CFG.VAL.DATA.SHUFFLE = False
+CFG.VAL.DATA.NUM_WORKERS = 2
+CFG.VAL.DATA.PIN_MEMORY = False
+
+# ================= test ================= #
+CFG.TEST = EasyDict()
+CFG.TEST.INTERVAL = 1
+# test data
+CFG.TEST.DATA = EasyDict()
+# read data
+CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.TEST.DATA.BATCH_SIZE = 64
+CFG.TEST.DATA.PREFETCH = False
+CFG.TEST.DATA.SHUFFLE = False
+CFG.TEST.DATA.NUM_WORKERS = 2
+CFG.TEST.DATA.PIN_MEMORY = False
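Annotation: each example file is a plain Python module exposing `CFG`. A sketch of how such a config is typically launched; `launch_training` is easytorch's entry point, but treating this as the official BasicTS workflow is an assumption, since the launch scripts are not part of this diff:

```python
# hypothetical launcher; assumes easytorch's launch_training entry point
from easytorch import launch_training

if __name__ == "__main__":
    # point at one of the config modules added in this diff
    launch_training(cfg="examples/AGCRN/AGCRN_METR-LA.py", gpus="0")
```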
diff --git a/examples/AGCRN/AGCRN_PEMS-BAY.py b/examples/AGCRN/AGCRN_PEMS-BAY.py
new file mode 100644
index 00000000..944c78ac
--- /dev/null
+++ b/examples/AGCRN/AGCRN_PEMS-BAY.py
@@ -0,0 +1,102 @@
+import os
+import sys
+
+# TODO: remove it when basicts can be installed by pip
+sys.path.append(os.path.abspath(__file__ + "/../../.."))
+from easydict import EasyDict
+from basicts.archs import AGCRN
+from basicts.runners import AGCRNRunner
+from basicts.data import TimeSeriesForecastingDataset
+from basicts.losses import masked_mae
+
+
+CFG = EasyDict()
+
+# ================= general ================= #
+CFG.DESCRIPTION = "AGCRN model configuration"
+CFG.RUNNER = AGCRNRunner
+CFG.DATASET_CLS = TimeSeriesForecastingDataset
+CFG.DATASET_NAME = "PEMS-BAY"
+CFG.DATASET_TYPE = "Traffic speed"
+CFG.DATASET_INPUT_LEN = 12
+CFG.DATASET_OUTPUT_LEN = 12
+CFG.GPU_NUM = 1
+
+# ================= environment ================= #
+CFG.ENV = EasyDict()
+CFG.ENV.SEED = 1
+CFG.ENV.CUDNN = EasyDict()
+CFG.ENV.CUDNN.ENABLED = True
+
+# ================= model ================= #
+CFG.MODEL = EasyDict()
+CFG.MODEL.NAME = "AGCRN"
+CFG.MODEL.ARCH = AGCRN
+CFG.MODEL.PARAM = {
+    "num_nodes" : 325,
+    "input_dim" : 2,
+    "rnn_units" : 64,
+    "output_dim": 1,
+    "horizon"   : 12,
+    "num_layers": 2,
+    "default_graph": True,
+    "embed_dim" : 10,
+    "cheb_k"    : 2
+}
+CFG.MODEL.FROWARD_FEATURES = [0, 1]
+CFG.MODEL.TARGET_FEATURES = [0]
+
+# ================= optim ================= #
+CFG.TRAIN = EasyDict()
+CFG.TRAIN.LOSS = masked_mae
+CFG.TRAIN.OPTIM = EasyDict()
+CFG.TRAIN.OPTIM.TYPE = "Adam"
+CFG.TRAIN.OPTIM.PARAM= {
+    "lr":0.003,
+}
+
+# ================= train ================= #
+CFG.TRAIN.NUM_EPOCHS = 100
+CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
+    "checkpoints",
+    "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
+)
+# train data
+CFG.TRAIN.DATA = EasyDict()
+CFG.TRAIN.NULL_VAL = 0.0
+# read data
+CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.TRAIN.DATA.BATCH_SIZE = 64
+CFG.TRAIN.DATA.PREFETCH = False
+CFG.TRAIN.DATA.SHUFFLE = True
+CFG.TRAIN.DATA.NUM_WORKERS = 2
+CFG.TRAIN.DATA.PIN_MEMORY = False
+
+# ================= validate ================= #
+CFG.VAL = EasyDict()
+CFG.VAL.INTERVAL = 1
+# validating data
+CFG.VAL.DATA = EasyDict()
+# read data
+CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.VAL.DATA.BATCH_SIZE = 64
+CFG.VAL.DATA.PREFETCH = False
+CFG.VAL.DATA.SHUFFLE = False
+CFG.VAL.DATA.NUM_WORKERS = 2
+CFG.VAL.DATA.PIN_MEMORY = False
+
+# ================= test ================= #
+CFG.TEST = EasyDict()
+CFG.TEST.INTERVAL = 1
+# test data
+CFG.TEST.DATA = EasyDict()
+# read data
+CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.TEST.DATA.BATCH_SIZE = 64
+CFG.TEST.DATA.PREFETCH = False
+CFG.TEST.DATA.SHUFFLE = False
+CFG.TEST.DATA.NUM_WORKERS = 2
+CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/examples/AGCRN/AGCRN_PEMS03.py b/examples/AGCRN/AGCRN_PEMS03.py
new file mode 100644
index 00000000..31612b35
--- /dev/null
+++ b/examples/AGCRN/AGCRN_PEMS03.py
@@ -0,0 +1,102 @@
+import os
+import sys
+
+# TODO: remove it when basicts can be installed by pip
+sys.path.append(os.path.abspath(__file__ + "/../../.."))
+from easydict import EasyDict
+from basicts.archs import AGCRN
+from basicts.runners import AGCRNRunner
+from basicts.data import TimeSeriesForecastingDataset
+from basicts.losses import masked_mae
+
+
+CFG = EasyDict()
+
+# ================= general ================= #
+CFG.DESCRIPTION = "AGCRN model configuration"
+CFG.RUNNER = AGCRNRunner
+CFG.DATASET_CLS = TimeSeriesForecastingDataset
+CFG.DATASET_NAME = "PEMS03"
+CFG.DATASET_TYPE = "Traffic flow"
+CFG.DATASET_INPUT_LEN = 12
+CFG.DATASET_OUTPUT_LEN = 12
+CFG.GPU_NUM = 1
+
+# ================= environment ================= #
+CFG.ENV = EasyDict()
+CFG.ENV.SEED = 1
+CFG.ENV.CUDNN = EasyDict()
+CFG.ENV.CUDNN.ENABLED = True
+
+# ================= model ================= #
+CFG.MODEL = EasyDict()
+CFG.MODEL.NAME = "AGCRN"
+CFG.MODEL.ARCH = AGCRN
+CFG.MODEL.PARAM = {
+    "num_nodes" : 358,
+
"input_dim" : 1, + "rnn_units" : 64, + "output_dim": 1, + "horizon" : 12, + "num_layers": 2, + "default_graph": True, + "embed_dim" : 10, + "cheb_k" : 2 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/AGCRN/AGCRN_PEMS04.py b/examples/AGCRN/AGCRN_PEMS04.py new file mode 100644 index 00000000..bf06c487 --- /dev/null +++ b/examples/AGCRN/AGCRN_PEMS04.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import AGCRN +from basicts.runners import AGCRNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "AGCRN model configuration" +CFG.RUNNER = AGCRNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "AGCRN" +CFG.MODEL.ARCH = AGCRN +CFG.MODEL.PARAM = { + "num_nodes" : 307, + "input_dim" : 1, + "rnn_units" : 64, + "output_dim": 1, + "horizon" : 12, + "num_layers": 2, + "default_graph": True, + "embed_dim" : 10, + "cheb_k" : 2 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data 
+CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/AGCRN/AGCRN_PEMS07.py b/examples/AGCRN/AGCRN_PEMS07.py new file mode 100644 index 00000000..c50547cf --- /dev/null +++ b/examples/AGCRN/AGCRN_PEMS07.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import AGCRN +from basicts.runners import AGCRNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "AGCRN model configuration" +CFG.RUNNER = AGCRNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "AGCRN" +CFG.MODEL.ARCH = AGCRN +CFG.MODEL.PARAM = { + "num_nodes" : 883, + "input_dim" : 1, + "rnn_units" : 64, + "output_dim": 1, + "horizon" : 12, + "num_layers": 2, + "default_graph": True, + "embed_dim" : 10, + "cheb_k" : 2 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False 
+CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/AGCRN/AGCRN_PEMS08.py b/examples/AGCRN/AGCRN_PEMS08.py new file mode 100644 index 00000000..c7307a36 --- /dev/null +++ b/examples/AGCRN/AGCRN_PEMS08.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import AGCRN +from basicts.runners import AGCRNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "AGCRN model configuration" +CFG.RUNNER = AGCRNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "AGCRN" +CFG.MODEL.ARCH = AGCRN +CFG.MODEL.PARAM = { + "num_nodes" : 170, + "input_dim" : 1, + "rnn_units" : 64, + "output_dim": 1, + "horizon" : 12, + "num_layers": 2, + "default_graph": True, + "embed_dim" : 2, + "cheb_k" : 2 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.003, +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/D2STGNN/D2STGNN_METR-LA.py b/examples/D2STGNN/D2STGNN_METR-LA.py new file mode 100644 index 00000000..65bf3f4f --- /dev/null +++ b/examples/D2STGNN/D2STGNN_METR-LA.py @@ 
-0,0 +1,125 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import D2STGNN +from basicts.runners import D2STGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "D2STGNN model configuration" +CFG.RUNNER = D2STGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "D2STGNN" +CFG.MODEL.ARCH = D2STGNN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "num_feat": 1, + "num_hidden": 32, + "dropout": 0.1, + "seq_length": 12, + "k_t": 3, + "k_s": 2, + "gap": 3, + "num_nodes": 207, + "adjs": [torch.tensor(adj) for adj in adj_mx], + "num_layers": 5, + "num_modalities": 2, + "node_hidden": 10, + "time_emb_dim": 10, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 1.0e-5, + "eps": 1.0e-8 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 30, 38, 46, 54, 62, 70, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False +# curriculum learning +CFG.TRAIN.CL = EasyDict() +CFG.TRAIN.CL.WARM_EPOCHS = 0 +CFG.TRAIN.CL.CL_EPOCHS = 6 +CFG.TRAIN.CL.PREDICTION_LENGTH = 12 + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 32 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 32 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/D2STGNN/D2STGNN_PEMS-BAY.py b/examples/D2STGNN/D2STGNN_PEMS-BAY.py new file mode 100644 index 00000000..b79ee317 --- /dev/null 
+++ b/examples/D2STGNN/D2STGNN_PEMS-BAY.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import D2STGNN +from basicts.runners import D2STGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "D2STGNN model configuration" +CFG.RUNNER = D2STGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "D2STGNN" +CFG.MODEL.ARCH = D2STGNN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "num_feat": 1, + "num_hidden": 32, + "dropout": 0.1, + "seq_length": 12, + "k_t": 3, + "k_s": 2, + "gap": 3, + "num_nodes": 325, + "adjs": [torch.tensor(adj) for adj in adj_mx], + "num_layers": 5, + "num_modalities": 2, + "node_hidden": 12, + "time_emb_dim": 12, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 1.0e-5, + "eps": 1.0e-8 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 30, 38, 46, 54, 62, 70, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False +# curriculum learning +CFG.TRAIN.CL = EasyDict() +CFG.TRAIN.CL.WARM_EPOCHS = 30 +CFG.TRAIN.CL.CL_EPOCHS = 3 +CFG.TRAIN.CL.PREDICTION_LENGTH = 12 + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 32 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 32 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/D2STGNN/D2STGNN_PEMS03.py b/examples/D2STGNN/D2STGNN_PEMS03.py new file mode 
100644 index 00000000..b7bd9e66 --- /dev/null +++ b/examples/D2STGNN/D2STGNN_PEMS03.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import D2STGNN +from basicts.runners import D2STGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "D2STGNN model configuration" +CFG.RUNNER = D2STGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "D2STGNN" +CFG.MODEL.ARCH = D2STGNN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "num_feat": 1, + "num_hidden": 32, + "dropout": 0.1, + "seq_length": 12, + "k_t": 3, + "k_s": 2, + "gap": 3, + "num_nodes": 358, + "adjs": [torch.tensor(adj) for adj in adj_mx], + "num_layers": 5, + "num_modalities": 2, + "node_hidden": 12, + "time_emb_dim": 12, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 1.0e-5, + "eps": 1.0e-8 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 30, 38, 46, 54, 150], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 16 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False +# curriculum learning +CFG.TRAIN.CL = EasyDict() +CFG.TRAIN.CL.WARM_EPOCHS = 30 +CFG.TRAIN.CL.CL_EPOCHS = 3 +CFG.TRAIN.CL.PREDICTION_LENGTH = 12 + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 16 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 16 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/D2STGNN/D2STGNN_PEMS04.py 
b/examples/D2STGNN/D2STGNN_PEMS04.py new file mode 100644 index 00000000..e6f085d4 --- /dev/null +++ b/examples/D2STGNN/D2STGNN_PEMS04.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import D2STGNN +from basicts.runners import D2STGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "D2STGNN model configuration" +CFG.RUNNER = D2STGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "D2STGNN" +CFG.MODEL.ARCH = D2STGNN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "num_feat": 1, + "num_hidden": 32, + "dropout": 0.1, + "seq_length": 12, + "k_t": 3, + "k_s": 2, + "gap": 3, + "num_nodes": 307, + "adjs": [torch.tensor(adj) for adj in adj_mx], + "num_layers": 5, + "num_modalities": 2, + "node_hidden": 12, + "time_emb_dim": 12, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 1.0e-5, + "eps": 1.0e-8 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 30, 38, 46, 54, 150], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 16 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False +# curriculum learning +CFG.TRAIN.CL = EasyDict() +CFG.TRAIN.CL.WARM_EPOCHS = 30 +CFG.TRAIN.CL.CL_EPOCHS = 3 +CFG.TRAIN.CL.PREDICTION_LENGTH = 12 + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 16 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 16 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git 
a/examples/D2STGNN/D2STGNN_PEMS07.py b/examples/D2STGNN/D2STGNN_PEMS07.py new file mode 100644 index 00000000..e10bf270 --- /dev/null +++ b/examples/D2STGNN/D2STGNN_PEMS07.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import D2STGNN +from basicts.runners import D2STGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "D2STGNN model configuration" +CFG.RUNNER = D2STGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "D2STGNN" +CFG.MODEL.ARCH = D2STGNN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "num_feat": 1, + "num_hidden": 32, + "dropout": 0.1, + "seq_length": 12, + "k_t": 3, + "k_s": 2, + "gap": 3, + "num_nodes": 883, + "adjs": [torch.tensor(adj) for adj in adj_mx], + "num_layers": 5, + "num_modalities": 2, + "node_hidden": 10, + "time_emb_dim": 10, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 1.0e-5, + "eps": 1.0e-8 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 30, 38, 46, 54, 150], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 16 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False +# curriculum learning +CFG.TRAIN.CL = EasyDict() +CFG.TRAIN.CL.WARM_EPOCHS = 30 +CFG.TRAIN.CL.CL_EPOCHS = 3 +CFG.TRAIN.CL.PREDICTION_LENGTH = 12 + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 16 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 16 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 
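+# PREFETCH (above) and PIN_MEMORY (below) are dataloader throughput knobs left
+# off by default. pin_memory=True is the standard torch.utils.data.DataLoader
+# option that speeds up host-to-GPU copies; whether PREFETCH maps onto an
+# asynchronous prefetcher is runner-specific (an assumption, not verified here).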
+CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/D2STGNN/D2STGNN_PEMS08.py b/examples/D2STGNN/D2STGNN_PEMS08.py new file mode 100644 index 00000000..1ed7e0fd --- /dev/null +++ b/examples/D2STGNN/D2STGNN_PEMS08.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import D2STGNN +from basicts.runners import D2STGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "D2STGNN model configuration" +CFG.RUNNER = D2STGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "D2STGNN" +CFG.MODEL.ARCH = D2STGNN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "num_feat": 1, + "num_hidden": 32, + "dropout": 0.1, + "seq_length": 12, + "k_t": 3, + "k_s": 2, + "gap": 3, + "num_nodes": 170, + "adjs": [torch.tensor(adj) for adj in adj_mx], + "num_layers": 5, + "num_modalities": 2, + "node_hidden": 10, + "time_emb_dim": 10, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 1.0e-5, + "eps": 1.0e-8 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 30, 38, 46, 54, 150], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 16 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False +# curriculum learning +CFG.TRAIN.CL = EasyDict() +CFG.TRAIN.CL.WARM_EPOCHS = 30 +CFG.TRAIN.CL.CL_EPOCHS = 3 +CFG.TRAIN.CL.PREDICTION_LENGTH = 12 + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 16 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 16 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = 
False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DCRNN/DCRNN_METR-LA.py b/examples/DCRNN/DCRNN_METR-LA.py new file mode 100644 index 00000000..d6eb0ffe --- /dev/null +++ b/examples/DCRNN/DCRNN_METR-LA.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import DCRNN +from basicts.runners import DCRNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# DCRNN does not allow to load parameters since it creates parameters in the first iteration +resume = False +if not resume: + import random + _ = random.randint(-1e6, 1e6) + +# ================= general ================= # +CFG.DESCRIPTION = "DCRNN model configuration" +CFG.RUNNER = DCRNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG._ = _ +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DCRNN" +CFG.MODEL.ARCH = DCRNN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "cl_decay_steps": 2000, + "horizon": 12, + "input_dim": 2, + "max_diffusion_step": 2, + "num_nodes": 207, + "num_rnn_layers": 2, + "output_dim": 1, + "rnn_units": 64, + "seq_len": 12, + "adj_mx": [torch.tensor(i).cuda() for i in adj_mx], + "use_curriculum_learning": True +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.01, + "eps": 1e-3 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [20, 30, 40, 50], + "gamma": 0.1 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +CFG.TRAIN.SETUP_GRAPH = True +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 
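+# Note on the resume salt at the top of this file: random.randint() expects
+# integer bounds; the float literals -1e6/1e6 work on Python 3.9 but are
+# deprecated since 3.10 and raise TypeError from 3.12. A safer spelling of the
+# same idiom, assuming the intent is only to make the config hash unique per
+# run because DCRNN cannot resume from a checkpoint, is:
+#   _ = random.randint(-10**6, 10**6)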
+CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DCRNN/DCRNN_PEMS-BAY.py b/examples/DCRNN/DCRNN_PEMS-BAY.py new file mode 100644 index 00000000..e5e1da11 --- /dev/null +++ b/examples/DCRNN/DCRNN_PEMS-BAY.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import DCRNN +from basicts.runners import DCRNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# DCRNN does not allow to load parameters since it creates parameters in the first iteration +resume = False +if not resume: + import random + _ = random.randint(-1e6, 1e6) + +# ================= general ================= # +CFG.DESCRIPTION = "DCRNN model configuration" +CFG.RUNNER = DCRNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG._ = _ +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DCRNN" +CFG.MODEL.ARCH = DCRNN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "cl_decay_steps": 2000, + "horizon": 12, + "input_dim": 2, + "max_diffusion_step": 2, + "num_nodes": 325, + "num_rnn_layers": 2, + "output_dim": 1, + "rnn_units": 64, + "seq_len": 12, + "adj_mx": [torch.tensor(i).cuda() for i in adj_mx], + "use_curriculum_learning": True +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.01, + "eps": 1e-3 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [20, 30, 40, 50], + "gamma": 0.1 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +CFG.TRAIN.SETUP_GRAPH = True +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME 
+# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DCRNN/DCRNN_PEMS03.py b/examples/DCRNN/DCRNN_PEMS03.py new file mode 100644 index 00000000..11fb9e0d --- /dev/null +++ b/examples/DCRNN/DCRNN_PEMS03.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import DCRNN +from basicts.runners import DCRNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# DCRNN does not allow to load parameters since it creates parameters in the first iteration +resume = False +if not resume: + import random + _ = random.randint(-1e6, 1e6) + +# ================= general ================= # +CFG.DESCRIPTION = "DCRNN model configuration" +CFG.RUNNER = DCRNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG._ = _ +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DCRNN" +CFG.MODEL.ARCH = DCRNN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "cl_decay_steps": 2000, + "horizon": 12, + "input_dim": 2, + "max_diffusion_step": 2, + "num_nodes": 358, + "num_rnn_layers": 2, + "output_dim": 1, + "rnn_units": 64, + "seq_len": 12, + "adj_mx": [torch.tensor(i).cuda() for i in adj_mx], + "use_curriculum_learning": True +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.003, + "eps": 1e-3 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [80], + "gamma": 0.3 +} + +# ================= train ================= # +# CFG.TRAIN.CLIP_GRAD_PARAM = { +# "max_norm": 5.0 +# } +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +CFG.TRAIN.SETUP_GRAPH = True +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data 
+CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DCRNN/DCRNN_PEMS04.py b/examples/DCRNN/DCRNN_PEMS04.py new file mode 100644 index 00000000..9e03f4e6 --- /dev/null +++ b/examples/DCRNN/DCRNN_PEMS04.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import DCRNN +from basicts.runners import DCRNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# DCRNN does not allow to load parameters since it creates parameters in the first iteration +resume = False +if not resume: + import random + _ = random.randint(-1e6, 1e6) + +# ================= general ================= # +CFG.DESCRIPTION = "DCRNN model configuration" +CFG.RUNNER = DCRNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG._ = _ +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DCRNN" +CFG.MODEL.ARCH = DCRNN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "cl_decay_steps": 2000, + "horizon": 12, + "input_dim": 2, + "max_diffusion_step": 2, + "num_nodes": 307, + "num_rnn_layers": 2, + "output_dim": 1, + "rnn_units": 64, + "seq_len": 12, + "adj_mx": [torch.tensor(i).cuda() for i in adj_mx], + "use_curriculum_learning": True +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.003, + "eps": 1e-3 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [80], + "gamma": 0.3 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +CFG.TRAIN.SETUP_GRAPH = True +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data 
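+# TRAIN.NULL_VAL = 0.0 above marks zero readings as missing for the masked
+# loss and metrics. A minimal sketch of the masking, assuming plain torch
+# tensors `pred` and `target` (not the upstream masked_mae implementation):
+#   mask = (target != 0.0).float()
+#   loss = (torch.abs(pred - target) * mask).sum() / mask.sum()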
+CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DCRNN/DCRNN_PEMS07.py b/examples/DCRNN/DCRNN_PEMS07.py new file mode 100644 index 00000000..8514f226 --- /dev/null +++ b/examples/DCRNN/DCRNN_PEMS07.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import DCRNN +from basicts.runners import DCRNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# DCRNN does not allow to load parameters since it creates parameters in the first iteration +resume = False +if not resume: + import random + _ = random.randint(-1e6, 1e6) + +# ================= general ================= # +CFG.DESCRIPTION = "DCRNN model configuration" +CFG.RUNNER = DCRNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG._ = _ +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DCRNN" +CFG.MODEL.ARCH = DCRNN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "cl_decay_steps": 2000, + "horizon": 12, + "input_dim": 2, + "max_diffusion_step": 2, + "num_nodes": 883, + "num_rnn_layers": 2, + "output_dim": 1, + "rnn_units": 64, + "seq_len": 12, + "adj_mx": [torch.tensor(i).cuda() for i in adj_mx], + "use_curriculum_learning": True +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.003, + "eps": 1e-3 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [80], + "gamma": 0.3 +} + +# ================= train ================= # +# CFG.TRAIN.CLIP_GRAD_PARAM = { +# "max_norm": 5.0 +# } +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +CFG.TRAIN.SETUP_GRAPH = True +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = 
EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DCRNN/DCRNN_PEMS08.py b/examples/DCRNN/DCRNN_PEMS08.py new file mode 100644 index 00000000..82693ac4 --- /dev/null +++ b/examples/DCRNN/DCRNN_PEMS08.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import DCRNN +from basicts.runners import DCRNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# DCRNN does not allow to load parameters since it creates parameters in the first iteration +resume = False +if not resume: + import random + _ = random.randint(-1e6, 1e6) + +# ================= general ================= # +CFG.DESCRIPTION = "DCRNN model configuration" +CFG.RUNNER = DCRNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG._ = _ +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DCRNN" +CFG.MODEL.ARCH = DCRNN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "cl_decay_steps": 2000, + "horizon": 12, + "input_dim": 2, + "max_diffusion_step": 2, + "num_nodes": 170, + "num_rnn_layers": 2, + "output_dim": 1, + "rnn_units": 64, + "seq_len": 12, + "adj_mx": [torch.tensor(i).cuda() for i in adj_mx], + "use_curriculum_learning": True +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.003, + "eps": 1e-3 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [80], + "gamma": 0.3 +} + +# ================= train ================= # +# CFG.TRAIN.CLIP_GRAD_PARAM = { +# "max_norm": 5.0 +# } +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +CFG.TRAIN.SETUP_GRAPH = True +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# 
================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DGCRN/DGCRN_METR-LA.py b/examples/DGCRN/DGCRN_METR-LA.py new file mode 100644 index 00000000..227019b9 --- /dev/null +++ b/examples/DGCRN/DGCRN_METR-LA.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import DGCRN +from basicts.runners import DGCRNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DGCRN model configuration" +CFG.RUNNER = DGCRNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DGCRN" +CFG.MODEL.ARCH = DGCRN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "gcn_depth": 2, + "num_nodes": 207, + "predefined_A": [torch.Tensor(_) for _ in adj_mx], + "dropout": 0.3, + "subgraph_size": 20, + "node_dim": 40, + "middle_dim": 2, + "seq_length": 12, + "in_dim": 2, + "list_weight": [0.05, 0.95, 0.95], + "tanhalpha": 3, + "cl_decay_steps": 4000, + "rnn_size": 64, + "hyperGNN_dim": 16 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr":0.001, + "weight_decay":0.0001 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones":[100, 150], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False +## curriculum learning +CFG.TRAIN.CL = EasyDict() +CFG.TRAIN.CL.WARM_EPOCHS = 0 +CFG.TRAIN.CL.CL_EPOCHS = 6 +CFG.TRAIN.CL.PREDICTION_LENGTH = 12 + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 
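+# The curriculum-learning block above (WARM_EPOCHS=0, CL_EPOCHS=6,
+# PREDICTION_LENGTH=12) typically means: supervise only the first prediction
+# step at the start, then extend the supervised horizon by one step every 6
+# epochs until all 12 steps are trained (hedged; the exact schedule is
+# defined by the runner).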
+CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DGCRN/DGCRN_PEMS-BAY.py b/examples/DGCRN/DGCRN_PEMS-BAY.py new file mode 100644 index 00000000..f89c2a0e --- /dev/null +++ b/examples/DGCRN/DGCRN_PEMS-BAY.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import DGCRN +from basicts.runners import DGCRNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DGCRN model configuration" +CFG.RUNNER = DGCRNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DGCRN" +CFG.MODEL.ARCH = DGCRN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "gcn_depth": 2, + "num_nodes": 325, + "predefined_A": [torch.Tensor(_) for _ in adj_mx], + "dropout": 0.3, + "subgraph_size": 20, + "node_dim": 40, + "middle_dim": 2, + "seq_length": 12, + "in_dim": 2, + "list_weight": [0.05, 0.95, 0.95], + "tanhalpha": 3, + "cl_decay_steps": 5500, + "rnn_size": 64, + "hyperGNN_dim": 16 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr":0.001, + "weight_decay":0.0001 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones":[100, 150], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False +## curriculum learning +CFG.TRAIN.CL = EasyDict() +CFG.TRAIN.CL.WARM_EPOCHS = 0 +CFG.TRAIN.CL.CL_EPOCHS = 6 +CFG.TRAIN.CL.PREDICTION_LENGTH = 12 + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False 
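+# Training uses BATCH_SIZE = 32 on this 325-node graph while the val/test
+# loaders use 64: evaluation holds no gradients or optimizer state, so a
+# larger batch fits in the same GPU memory (a tuning choice, not a
+# requirement).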
+CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DGCRN/DGCRN_PEMS03.py b/examples/DGCRN/DGCRN_PEMS03.py new file mode 100644 index 00000000..b2feebe9 --- /dev/null +++ b/examples/DGCRN/DGCRN_PEMS03.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import DGCRN +from basicts.runners import DGCRNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DGCRN model configuration" +CFG.RUNNER = DGCRNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DGCRN" +CFG.MODEL.ARCH = DGCRN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "gcn_depth": 2, + "num_nodes": 358, + "predefined_A": [torch.Tensor(_) for _ in adj_mx], + "dropout": 0.3, + "subgraph_size": 20, + "node_dim": 40, + "middle_dim": 2, + "seq_length": 12, + "in_dim": 2, + "list_weight": [0.05, 0.95, 0.95], + "tanhalpha": 3, + "cl_decay_steps": 4000, + "rnn_size": 64, + "hyperGNN_dim": 16 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr":0.001, + "weight_decay":0.0001 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones":[100, 150], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False +## curriculum learning +CFG.TRAIN.CL = EasyDict() +CFG.TRAIN.CL.WARM_EPOCHS = 0 +CFG.TRAIN.CL.CL_EPOCHS = 6 +CFG.TRAIN.CL.PREDICTION_LENGTH = 12 + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 
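+# cl_decay_steps (4000 above) sets the scheduled-sampling schedule: the
+# probability of feeding ground truth into the decoder decays as training
+# progresses. A sketch of the usual DCRNN-style formula, assuming `step`
+# counts batches seen (not necessarily the exact upstream code):
+#   p = cl_decay_steps / (cl_decay_steps + math.exp(step / cl_decay_steps))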
+CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DGCRN/DGCRN_PEMS04.py b/examples/DGCRN/DGCRN_PEMS04.py new file mode 100644 index 00000000..076c9ba4 --- /dev/null +++ b/examples/DGCRN/DGCRN_PEMS04.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import DGCRN +from basicts.runners import DGCRNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DGCRN model configuration" +CFG.RUNNER = DGCRNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DGCRN" +CFG.MODEL.ARCH = DGCRN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "gcn_depth": 2, + "num_nodes": 307, + "predefined_A": [torch.Tensor(_) for _ in adj_mx], + "dropout": 0.3, + "subgraph_size": 20, + "node_dim": 40, + "middle_dim": 2, + "seq_length": 12, + "in_dim": 2, + "list_weight": [0.05, 0.95, 0.95], + "tanhalpha": 3, + "cl_decay_steps": 4000, + "rnn_size": 64, + "hyperGNN_dim": 16 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr":0.001, + "weight_decay":0.0001 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones":[100, 150], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False +## curriculum learning +CFG.TRAIN.CL = EasyDict() +CFG.TRAIN.CL.WARM_EPOCHS = 0 +CFG.TRAIN.CL.CL_EPOCHS = 6 +CFG.TRAIN.CL.PREDICTION_LENGTH = 12 + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, 
optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DGCRN/DGCRN_PEMS07.py b/examples/DGCRN/DGCRN_PEMS07.py new file mode 100644 index 00000000..f718e828 --- /dev/null +++ b/examples/DGCRN/DGCRN_PEMS07.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import DGCRN +from basicts.runners import DGCRNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DGCRN model configuration" +CFG.RUNNER = DGCRNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DGCRN" +CFG.MODEL.ARCH = DGCRN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "gcn_depth": 2, + "num_nodes": 883, + "predefined_A": [torch.Tensor(_) for _ in adj_mx], + "dropout": 0.3, + "subgraph_size": 20, + "node_dim": 40, + "middle_dim": 2, + "seq_length": 12, + "in_dim": 2, + "list_weight": [0.05, 0.95, 0.95], + "tanhalpha": 3, + "cl_decay_steps": 4000, + "rnn_size": 64, + "hyperGNN_dim": 16 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr":0.001, + "weight_decay":0.0001 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones":[100, 150], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 24 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False +## curriculum learning +CFG.TRAIN.CL = EasyDict() +CFG.TRAIN.CL.WARM_EPOCHS = 0 +CFG.TRAIN.CL.CL_EPOCHS = 6 +CFG.TRAIN.CL.PREDICTION_LENGTH = 12 + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + 
CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DGCRN/DGCRN_PEMS08.py b/examples/DGCRN/DGCRN_PEMS08.py new file mode 100644 index 00000000..4228976b --- /dev/null +++ b/examples/DGCRN/DGCRN_PEMS08.py @@ -0,0 +1,126 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import DGCRN +from basicts.runners import DGCRNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "DGCRN model configuration" +CFG.RUNNER = DGCRNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DGCRN" +CFG.MODEL.ARCH = DGCRN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "gcn_depth": 2, + "num_nodes": 170, + "predefined_A": [torch.Tensor(_) for _ in adj_mx], + "dropout": 0.3, + "subgraph_size": 20, + "node_dim": 40, + "middle_dim": 2, + "seq_length": 12, + "in_dim": 2, + "list_weight": [0.05, 0.95, 0.95], + "tanhalpha": 3, + "cl_decay_steps": 4000, + "rnn_size": 64, + "hyperGNN_dim": 16 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr":0.001, + "weight_decay":0.0001 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones":[100, 150], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False +## curriculum learning +CFG.TRAIN.CL = EasyDict() +CFG.TRAIN.CL.WARM_EPOCHS = 0 +CFG.TRAIN.CL.CL_EPOCHS = 6 +CFG.TRAIN.CL.PREDICTION_LENGTH = 12 + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data 
+CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DLinear/DLinear_METR-LA.py b/examples/DLinear/DLinear_METR-LA.py new file mode 100644 index 00000000..cf5729d3 --- /dev/null +++ b/examples/DLinear/DLinear_METR-LA.py @@ -0,0 +1,104 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import DLinear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DLinear" +CFG.MODEL.ARCH = DLinear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12, + "individual": False, + "enc_in": 207 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional 
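+# (keys omitted here are assumed to fall back to BasicTS's dataloader defaults)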
+CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DLinear/DLinear_PEMS-BAY.py b/examples/DLinear/DLinear_PEMS-BAY.py new file mode 100644 index 00000000..ec250a86 --- /dev/null +++ b/examples/DLinear/DLinear_PEMS-BAY.py @@ -0,0 +1,104 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import DLinear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DLinear" +CFG.MODEL.ARCH = DLinear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12, + "individual": False, + "enc_in": 325 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DLinear/DLinear_PEMS03.py b/examples/DLinear/DLinear_PEMS03.py new file mode 100644 index 00000000..d276ff6a --- /dev/null +++ b/examples/DLinear/DLinear_PEMS03.py @@ -0,0 +1,104 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip 
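+# (__file__ + "/../../.." climbs three levels from this config file to the
+# repository root, so the basicts package resolves without being installed.)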
+sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import DLinear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DLinear" +CFG.MODEL.ARCH = DLinear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12, + "individual": False, + "enc_in": 358 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DLinear/DLinear_PEMS04.py b/examples/DLinear/DLinear_PEMS04.py new file mode 100644 index 00000000..b1722a22 --- /dev/null +++ b/examples/DLinear/DLinear_PEMS04.py @@ -0,0 +1,104 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import DLinear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" 
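+# (DATASET_NAME is assumed to name a directory under datasets/ produced by the
+# data preparation scripts; the DATA.DIR entries below are derived from it.)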
+CFG.DATASET_TYPE = "Traffic flow"
+CFG.DATASET_INPUT_LEN = 12
+CFG.DATASET_OUTPUT_LEN = 12
+CFG.GPU_NUM = 1
+
+# ================= environment ================= #
+CFG.ENV = EasyDict()
+CFG.ENV.SEED = 1
+CFG.ENV.CUDNN = EasyDict()
+CFG.ENV.CUDNN.ENABLED = True
+
+# ================= model ================= #
+CFG.MODEL = EasyDict()
+CFG.MODEL.NAME = "DLinear"
+CFG.MODEL.ARCH = DLinear
+CFG.MODEL.PARAM = {
+    "seq_len": 12,
+    "pred_len": 12,
+    "individual": False,
+    "enc_in": 307
+}
+CFG.MODEL.FROWARD_FEATURES = [0]  # traffic flow
+CFG.MODEL.TARGET_FEATURES = [0]  # traffic flow
+
+# ================= optim ================= #
+CFG.TRAIN = EasyDict()
+CFG.TRAIN.LOSS = masked_mae
+CFG.TRAIN.OPTIM = EasyDict()
+CFG.TRAIN.OPTIM.TYPE = "Adam"
+CFG.TRAIN.OPTIM.PARAM = {
+    "lr": 0.002,
+    "weight_decay": 0.0001,
+}
+CFG.TRAIN.LR_SCHEDULER = EasyDict()
+CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
+CFG.TRAIN.LR_SCHEDULER.PARAM = {
+    "milestones": [1, 50, 80],
+    "gamma": 0.5
+}
+
+# ================= train ================= #
+CFG.TRAIN.NUM_EPOCHS = 200
+CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
+    "checkpoints",
+    "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
+)
+# train data
+CFG.TRAIN.DATA = EasyDict()
+CFG.TRAIN.NULL_VAL = 0.0
+# read data
+CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.TRAIN.DATA.BATCH_SIZE = 32
+CFG.TRAIN.DATA.PREFETCH = False
+CFG.TRAIN.DATA.SHUFFLE = True
+CFG.TRAIN.DATA.NUM_WORKERS = 2
+CFG.TRAIN.DATA.PIN_MEMORY = False
+
+# ================= validate ================= #
+CFG.VAL = EasyDict()
+CFG.VAL.INTERVAL = 1
+# validating data
+CFG.VAL.DATA = EasyDict()
+# read data
+CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.VAL.DATA.BATCH_SIZE = 64
+CFG.VAL.DATA.PREFETCH = False
+CFG.VAL.DATA.SHUFFLE = False
+CFG.VAL.DATA.NUM_WORKERS = 2
+CFG.VAL.DATA.PIN_MEMORY = False
+
+# ================= test ================= #
+CFG.TEST = EasyDict()
+CFG.TEST.INTERVAL = 1
+# test data
+CFG.TEST.DATA = EasyDict()
+# read data
+CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.TEST.DATA.BATCH_SIZE = 64
+CFG.TEST.DATA.PREFETCH = False
+CFG.TEST.DATA.SHUFFLE = False
+CFG.TEST.DATA.NUM_WORKERS = 2
+CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/examples/DLinear/DLinear_PEMS07.py b/examples/DLinear/DLinear_PEMS07.py
new file mode 100644
index 00000000..6255e3cb
--- /dev/null
+++ b/examples/DLinear/DLinear_PEMS07.py
@@ -0,0 +1,104 @@
+import os
+import sys
+
+# TODO: remove it when basicts can be installed by pip
+sys.path.append(os.path.abspath(__file__ + "/../../.."))
+from easydict import EasyDict
+from basicts.losses import masked_mae
+from basicts.data import TimeSeriesForecastingDataset
+from basicts.runners import LinearRunner
+from basicts.archs import DLinear
+
+
+CFG = EasyDict()
+
+# ================= general ================= #
+CFG.DESCRIPTION = "Linear model configuration"
+CFG.RUNNER = LinearRunner
+CFG.DATASET_CLS = TimeSeriesForecastingDataset
+CFG.DATASET_NAME = "PEMS07"
+CFG.DATASET_TYPE = "Traffic flow"
+CFG.DATASET_INPUT_LEN = 12
+CFG.DATASET_OUTPUT_LEN = 12
+CFG.GPU_NUM = 1
+
+# ================= environment ================= #
+CFG.ENV = EasyDict()
+CFG.ENV.SEED = 1
+CFG.ENV.CUDNN = EasyDict()
+CFG.ENV.CUDNN.ENABLED = True
+
+# ================= model ================= #
+CFG.MODEL = EasyDict()
+CFG.MODEL.NAME = "DLinear"
+CFG.MODEL.ARCH = DLinear
+CFG.MODEL.PARAM = {
+    "seq_len": 12,
+    "pred_len": 12,
+    "individual": False,
+    "enc_in": 883
+}
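+# ("enc_in" is the number of input series, i.e. the sensor count of the dataset;
+# with "individual": False, DLinear shares one linear layer across all series.)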
+CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/DLinear/DLinear_PEMS08.py b/examples/DLinear/DLinear_PEMS08.py new file mode 100644 index 00000000..c1e1cc36 --- /dev/null +++ b/examples/DLinear/DLinear_PEMS08.py @@ -0,0 +1,104 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import DLinear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "DLinear" +CFG.MODEL.ARCH = DLinear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12, + "individual": False, + "enc_in": 170 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], 
+ "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/GTS/GTS_METR-LA.py b/examples/GTS/GTS_METR-LA.py new file mode 100644 index 00000000..cf535f49 --- /dev/null +++ b/examples/GTS/GTS_METR-LA.py @@ -0,0 +1,132 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import GTS +from basicts.runners import GTSRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.utils.serialization import load_pkl + +from .loss import gts_loss + + +CFG = EasyDict() + +# GTS does not allow to load parameters since it creates parameters in the first iteration +resume = False +if not resume: + import random + _ = random.randint(-1e6, 1e6) + +# ================= general ================= # +CFG.DESCRIPTION = "GTS model configuration" +CFG.RUNNER = GTSRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG._ = _ +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "GTS" +CFG.MODEL.ARCH = GTS +node_feats_full = load_pkl("datasets/{0}/data.pkl".format(CFG.DATASET_NAME))["processed_data"][..., 0] +train_index_list = load_pkl("datasets/{0}/index_in{1}_out{2}.pkl".format(CFG.DATASET_NAME, CFG.DATASET_INPUT_LEN, CFG.DATASET_OUTPUT_LEN))["train"] +node_feats = node_feats_full[:train_index_list[-1][-1], ...] 
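+# (node_feats is truncated to the training period: train_index_list[-1][-1] is the
+# end index of the last training window, so graph structure learning never sees
+# validation or test data. The "dim_fc" value below must match the flattened conv
+# feature size for exactly this length, which is why the constant is dataset-specific.)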
+CFG.MODEL.PARAM = { + "cl_decay_steps": 2000, + "filter_type": "dual_random_walk", + "horizon": 12, + "input_dim": 2, + "l1_decay": 0, + "max_diffusion_step": 3, + "num_nodes": 207, + "num_rnn_layers": 1, + "output_dim": 1, + "rnn_units": 64, + "seq_len": 12, + "use_curriculum_learning": True, + "dim_fc": 383664, + "node_feats": node_feats, + "temp": 0.5, + "k": 10 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gts_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.005, + "eps": 1e-3 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [20, 40], + "gamma": 0.1 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +CFG.TRAIN.SETUP_GRAPH = True +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/GTS/GTS_PEMS-BAY.py b/examples/GTS/GTS_PEMS-BAY.py new file mode 100644 index 00000000..2272607a --- /dev/null +++ b/examples/GTS/GTS_PEMS-BAY.py @@ -0,0 +1,132 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import GTS +from basicts.runners import GTSRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.utils.serialization import load_pkl + +from .loss import gts_loss + + +CFG = EasyDict() + +# GTS does not allow to load parameters since it creates parameters in the first iteration +resume = False +if not resume: + import random + _ = random.randint(-1e6, 1e6) + +# ================= general ================= # +CFG.DESCRIPTION = "GTS model configuration" +CFG.RUNNER = GTSRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG._ = _ +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "GTS" +CFG.MODEL.ARCH = GTS 
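+# (data.pkl is assumed to store the preprocessed array under "processed_data" with
+# shape (time, node, channel); channel 0 is the signal GTS uses as node features
+# for latent-graph inference.)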
+node_feats_full = load_pkl("datasets/{0}/data.pkl".format(CFG.DATASET_NAME))["processed_data"][..., 0] +train_index_list = load_pkl("datasets/{0}/index_in{1}_out{2}.pkl".format(CFG.DATASET_NAME, CFG.DATASET_INPUT_LEN, CFG.DATASET_OUTPUT_LEN))["train"] +node_feats = node_feats_full[:train_index_list[-1][-1], ...] +CFG.MODEL.PARAM = { + "cl_decay_steps": 2000, + "filter_type": "dual_random_walk", + "horizon": 12, + "input_dim": 2, + "l1_decay": 0, + "max_diffusion_step": 2, + "num_nodes": 325, + "num_rnn_layers": 1, + "output_dim": 1, + "rnn_units": 128, + "seq_len": 12, + "use_curriculum_learning": True, + "dim_fc": 583520, + "node_feats": node_feats, + "temp": 0.5, + "k": 30 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gts_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.001, + "eps": 1e-3 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [20, 30], + "gamma": 0.1 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +CFG.TRAIN.SETUP_GRAPH = True +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/GTS/GTS_PEMS03.py b/examples/GTS/GTS_PEMS03.py new file mode 100644 index 00000000..f57fd50f --- /dev/null +++ b/examples/GTS/GTS_PEMS03.py @@ -0,0 +1,132 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import GTS +from basicts.runners import GTSRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.utils.serialization import load_pkl + +from .loss import gts_loss + + +CFG = EasyDict() + +# GTS does not allow to load parameters since it creates parameters in the first iteration +resume = False +if not resume: + import random + _ = random.randint(-1e6, 1e6) + +# ================= general ================= # +CFG.DESCRIPTION = "GTS model configuration" +CFG.RUNNER = GTSRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 
12 +CFG._ = _ +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "GTS" +CFG.MODEL.ARCH = GTS +node_feats_full = load_pkl("datasets/{0}/data.pkl".format(CFG.DATASET_NAME))["processed_data"][..., 0] +train_index_list = load_pkl("datasets/{0}/index_in{1}_out{2}.pkl".format(CFG.DATASET_NAME, CFG.DATASET_INPUT_LEN, CFG.DATASET_OUTPUT_LEN))["train"] +node_feats = node_feats_full[:train_index_list[-1][-1], ...] +CFG.MODEL.PARAM = { + "cl_decay_steps": 2000, + "filter_type": "dual_random_walk", + "horizon": 12, + "input_dim": 2, + "l1_decay": 0, + "max_diffusion_step": 3, + "num_nodes": 358, + "num_rnn_layers": 1, + "output_dim": 1, + "rnn_units": 64, + "seq_len": 12, + "use_curriculum_learning": True, + "dim_fc": 251456, + "node_feats": node_feats, + "temp": 0.5, + "k": 30 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gts_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.001, + "eps": 1e-3 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [20, 30], + "gamma": 0.1 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +CFG.TRAIN.SETUP_GRAPH = True +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/GTS/GTS_PEMS04.py b/examples/GTS/GTS_PEMS04.py new file mode 100644 index 00000000..61db4a91 --- /dev/null +++ b/examples/GTS/GTS_PEMS04.py @@ -0,0 +1,132 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import GTS +from basicts.runners import GTSRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.utils.serialization import load_pkl + +from .loss import gts_loss + + +CFG = EasyDict() + +# GTS does not allow to load parameters since it creates parameters in the first iteration +resume = False +if not resume: + import random + _ = 
random.randint(-1e6, 1e6) + +# ================= general ================= # +CFG.DESCRIPTION = "GTS model configuration" +CFG.RUNNER = GTSRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG._ = _ +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "GTS" +CFG.MODEL.ARCH = GTS +node_feats_full = load_pkl("datasets/{0}/data.pkl".format(CFG.DATASET_NAME))["processed_data"][..., 0] +train_index_list = load_pkl("datasets/{0}/index_in{1}_out{2}.pkl".format(CFG.DATASET_NAME, CFG.DATASET_INPUT_LEN, CFG.DATASET_OUTPUT_LEN))["train"] +node_feats = node_feats_full[:train_index_list[-1][-1], ...] +CFG.MODEL.PARAM = { + "cl_decay_steps": 2000, + "filter_type": "dual_random_walk", + "horizon": 12, + "input_dim": 2, + "l1_decay": 0, + "max_diffusion_step": 3, + "num_nodes": 307, + "num_rnn_layers": 1, + "output_dim": 1, + "rnn_units": 64, + "seq_len": 12, + "use_curriculum_learning": True, + "dim_fc": 162976, + "node_feats": node_feats, + "temp": 0.5, + "k": 30 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gts_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.001, + "eps": 1e-3 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [20, 30], + "gamma": 0.1 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +CFG.TRAIN.SETUP_GRAPH = True +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/GTS/GTS_PEMS07.py b/examples/GTS/GTS_PEMS07.py new file mode 100644 index 00000000..1fb6c172 --- /dev/null +++ b/examples/GTS/GTS_PEMS07.py @@ -0,0 +1,132 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import GTS +from basicts.runners import GTSRunner 
+from basicts.data import TimeSeriesForecastingDataset +from basicts.utils.serialization import load_pkl + +from .loss import gts_loss + + +CFG = EasyDict() + +# GTS does not allow to load parameters since it creates parameters in the first iteration +resume = False +if not resume: + import random + _ = random.randint(-1e6, 1e6) + +# ================= general ================= # +CFG.DESCRIPTION = "GTS model configuration" +CFG.RUNNER = GTSRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG._ = _ +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "GTS" +CFG.MODEL.ARCH = GTS +node_feats_full = load_pkl("datasets/{0}/data.pkl".format(CFG.DATASET_NAME))["processed_data"][..., 0] +train_index_list = load_pkl("datasets/{0}/index_in{1}_out{2}.pkl".format(CFG.DATASET_NAME, CFG.DATASET_INPUT_LEN, CFG.DATASET_OUTPUT_LEN))["train"] +node_feats = node_feats_full[:train_index_list[-1][-1], ...] +CFG.MODEL.PARAM = { + "cl_decay_steps": 2000, + "filter_type": "dual_random_walk", + "horizon": 12, + "input_dim": 2, + "l1_decay": 0, + "max_diffusion_step": 2, + "num_nodes": 883, + "num_rnn_layers": 1, + "output_dim": 1, + "rnn_units": 64, + "seq_len": 12, + "use_curriculum_learning": True, + "dim_fc": 270816, + "node_feats": node_feats, + "temp": 0.5, + "k": 30 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gts_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.001, + "eps": 1e-3 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [20, 30], + "gamma": 0.1 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +CFG.TRAIN.SETUP_GRAPH = True +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/GTS/GTS_PEMS08.py b/examples/GTS/GTS_PEMS08.py new file mode 100644 index 00000000..f31513f5 
--- /dev/null +++ b/examples/GTS/GTS_PEMS08.py @@ -0,0 +1,134 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import GTS +from basicts.runners import GTSRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.utils.serialization import load_pkl + +from .loss import gts_loss + + +CFG = EasyDict() + +# GTS does not allow to load parameters since it creates parameters in the first iteration +resume = False +if not resume: + import random + _ = random.randint(-1e6, 1e6) + +# ================= general ================= # +CFG.DESCRIPTION = "GTS model configuration" +CFG.RUNNER = GTSRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG._ = _ +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "GTS" +CFG.MODEL.ARCH = GTS +node_feats_full = load_pkl( + "datasets/{0}/data.pkl".format(CFG.DATASET_NAME))["processed_data"][..., 0] +train_index_list = load_pkl("datasets/{0}/index_in{1}_out{2}.pkl".format( + CFG.DATASET_NAME, CFG.DATASET_INPUT_LEN, CFG.DATASET_OUTPUT_LEN))["train"] +node_feats = node_feats_full[:train_index_list[-1][-1], ...] +CFG.MODEL.PARAM = { + "cl_decay_steps": 2000, + "filter_type": "dual_random_walk", + "horizon": 12, + "input_dim": 2, + "l1_decay": 0, + "max_diffusion_step": 3, + "num_nodes": 170, + "num_rnn_layers": 1, + "output_dim": 1, + "rnn_units": 64, + "seq_len": 12, + "use_curriculum_learning": True, + "dim_fc": 171280, + "node_feats": node_feats, + "temp": 0.5, + "k": 30 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = gts_loss +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.001, + "eps": 1e-3 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [20, 30], + "gamma": 0.1 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +CFG.TRAIN.SETUP_GRAPH = True +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + 
CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/GTS/loss.py b/examples/GTS/loss.py new file mode 100644 index 00000000..2ad0a866 --- /dev/null +++ b/examples/GTS/loss.py @@ -0,0 +1,16 @@ +import torch +import numpy as np +from basicts.losses import masked_mae + + +def gts_loss(prediction, real_value, pred_adj, prior_adj, null_val = np.nan): + # graph loss + prior_label = prior_adj.view(prior_adj.shape[0] * prior_adj.shape[1]).to(pred_adj.device) + pred_label = pred_adj.view(pred_adj.shape[0] * pred_adj.shape[1]) + graph_loss_function = torch.nn.BCELoss() + loss_g = graph_loss_function(pred_label, prior_label) + # regression loss + loss_r = masked_mae(prediction, real_value, null_val=null_val) + # total loss + loss = loss_r + loss_g + return loss diff --git a/examples/GWNet/GWNet_METR-LA.py b/examples/GWNet/GWNet_METR-LA.py new file mode 100644 index 00000000..1142a956 --- /dev/null +++ b/examples/GWNet/GWNet_METR-LA.py @@ -0,0 +1,122 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import GraphWaveNet +from basicts.runners import GraphWaveNetRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Graph WaveNet model configuration" +CFG.RUNNER = GraphWaveNetRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "GraphWaveNet" +CFG.MODEL.ARCH = GraphWaveNet +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "num_nodes": 207, + "supports": [torch.tensor(i) for i in adj_mx], + "dropout": 0.3, + "gcn_bool": True, + "addaptadj": True, + "aptinit": None, + "in_dim": 2, + "out_dim": 12, + "residual_channels": 32, + "dilation_channels": 32, + "skip_channels": 256, + "end_channels": 512, + "kernel_size": 2, + "blocks": 4, + "layers": 2 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False 
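+# (Shuffling is enabled for the training split only; the VAL/TEST loaders further
+# down keep temporal order, which keeps evaluation deterministic.)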
+CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/GWNet/GWNet_PEMS-BAY.py b/examples/GWNet/GWNet_PEMS-BAY.py new file mode 100644 index 00000000..b5b3ceb8 --- /dev/null +++ b/examples/GWNet/GWNet_PEMS-BAY.py @@ -0,0 +1,122 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import GraphWaveNet +from basicts.runners import GraphWaveNetRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Graph WaveNet model configuration" +CFG.RUNNER = GraphWaveNetRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "GraphWaveNet" +CFG.MODEL.ARCH = GraphWaveNet +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "num_nodes": 325, + "supports": [torch.tensor(i) for i in adj_mx], + "dropout": 0.3, + "gcn_bool": True, + "addaptadj": True, + "aptinit": None, + "in_dim": 2, + "out_dim": 12, + "residual_channels": 32, + "dilation_channels": 32, + "skip_channels": 256, + "end_channels": 512, + "kernel_size": 2, + "blocks": 4, + "layers": 2 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True 
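+# (NUM_WORKERS sets the number of loader subprocesses; 2 is a conservative default,
+# worth raising only when data loading, not the GPU, is the bottleneck.)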
+CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/GWNet/GWNet_PEMS03.py b/examples/GWNet/GWNet_PEMS03.py new file mode 100644 index 00000000..b7200664 --- /dev/null +++ b/examples/GWNet/GWNet_PEMS03.py @@ -0,0 +1,122 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import GraphWaveNet +from basicts.runners import GraphWaveNetRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Graph WaveNet model configuration" +CFG.RUNNER = GraphWaveNetRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "GraphWaveNet" +CFG.MODEL.ARCH = GraphWaveNet +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "num_nodes": 358, + "supports": [torch.tensor(i) for i in adj_mx], + "dropout": 0.3, + "gcn_bool": True, + "addaptadj": True, + "aptinit": None, + "in_dim": 2, + "out_dim": 12, + "residual_channels": 32, + "dilation_channels": 32, + "skip_channels": 256, + "end_channels": 512, + "kernel_size": 2, + "blocks": 4, + "layers": 2 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 100], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = 
False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/GWNet/GWNet_PEMS04.py b/examples/GWNet/GWNet_PEMS04.py new file mode 100644 index 00000000..33fab968 --- /dev/null +++ b/examples/GWNet/GWNet_PEMS04.py @@ -0,0 +1,122 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import GraphWaveNet +from basicts.runners import GraphWaveNetRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Graph WaveNet model configuration" +CFG.RUNNER = GraphWaveNetRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "GraphWaveNet" +CFG.MODEL.ARCH = GraphWaveNet +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "num_nodes": 307, + "supports": [torch.tensor(i) for i in adj_mx], + "dropout": 0.3, + "gcn_bool": True, + "addaptadj": True, + "aptinit": None, + "in_dim": 2, + "out_dim": 12, + "residual_channels": 32, + "dilation_channels": 32, + "skip_channels": 256, + "end_channels": 512, + "kernel_size": 2, + "blocks": 4, + "layers": 2 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 100], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # 
+CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/GWNet/GWNet_PEMS07.py b/examples/GWNet/GWNet_PEMS07.py new file mode 100644 index 00000000..a5c33e43 --- /dev/null +++ b/examples/GWNet/GWNet_PEMS07.py @@ -0,0 +1,122 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import GraphWaveNet +from basicts.runners import GraphWaveNetRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Graph WaveNet model configuration" +CFG.RUNNER = GraphWaveNetRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "GraphWaveNet" +CFG.MODEL.ARCH = GraphWaveNet +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + + "/adj_mx.pkl", "doubletransition") +CFG.MODEL.PARAM = { + "num_nodes": 883, + "supports": [torch.tensor(i) for i in adj_mx], + "dropout": 0.3, + "gcn_bool": True, + "addaptadj": True, + "aptinit": None, + "in_dim": 2, + "out_dim": 12, + "residual_channels": 32, + "dilation_channels": 32, + "skip_channels": 256, + "end_channels": 512, + "kernel_size": 2, + "blocks": 4, + "layers": 2 +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 100], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating 
data
+CFG.VAL.DATA = EasyDict()
+# read data
+CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.VAL.DATA.BATCH_SIZE = 64
+CFG.VAL.DATA.PREFETCH = False
+CFG.VAL.DATA.SHUFFLE = False
+CFG.VAL.DATA.NUM_WORKERS = 2
+CFG.VAL.DATA.PIN_MEMORY = False
+
+# ================= test ================= #
+CFG.TEST = EasyDict()
+CFG.TEST.INTERVAL = 1
+# test data
+CFG.TEST.DATA = EasyDict()
+# read data
+CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.TEST.DATA.BATCH_SIZE = 64
+CFG.TEST.DATA.PREFETCH = False
+CFG.TEST.DATA.SHUFFLE = False
+CFG.TEST.DATA.NUM_WORKERS = 2
+CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/examples/GWNet/GWNet_PEMS08.py b/examples/GWNet/GWNet_PEMS08.py
new file mode 100644
index 00000000..d1c4bee5
--- /dev/null
+++ b/examples/GWNet/GWNet_PEMS08.py
@@ -0,0 +1,122 @@
+import os
+import sys
+
+# TODO: remove it when basicts can be installed by pip
+sys.path.append(os.path.abspath(__file__ + "/../../.."))
+import torch
+from easydict import EasyDict
+from basicts.archs import GraphWaveNet
+from basicts.runners import GraphWaveNetRunner
+from basicts.data import TimeSeriesForecastingDataset
+from basicts.losses import masked_mae
+from basicts.utils import load_adj
+
+
+CFG = EasyDict()
+
+# ================= general ================= #
+CFG.DESCRIPTION = "Graph WaveNet model configuration"
+CFG.RUNNER = GraphWaveNetRunner
+CFG.DATASET_CLS = TimeSeriesForecastingDataset
+CFG.DATASET_NAME = "PEMS08"
+CFG.DATASET_TYPE = "Traffic flow"
+CFG.DATASET_INPUT_LEN = 12
+CFG.DATASET_OUTPUT_LEN = 12
+CFG.GPU_NUM = 1
+
+# ================= environment ================= #
+CFG.ENV = EasyDict()
+CFG.ENV.SEED = 1
+CFG.ENV.CUDNN = EasyDict()
+CFG.ENV.CUDNN.ENABLED = True
+
+# ================= model ================= #
+CFG.MODEL = EasyDict()
+CFG.MODEL.NAME = "GraphWaveNet"
+CFG.MODEL.ARCH = GraphWaveNet
+adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME +
+                     "/adj_mx.pkl", "doubletransition")
+CFG.MODEL.PARAM = {
+    "num_nodes": 170,
+    "supports": [torch.tensor(i) for i in adj_mx],
+    "dropout": 0.3,
+    "gcn_bool": True,
+    "addaptadj": True,
+    "aptinit": None,
+    "in_dim": 2,
+    "out_dim": 12,
+    "residual_channels": 32,
+    "dilation_channels": 32,
+    "skip_channels": 256,
+    "end_channels": 512,
+    "kernel_size": 2,
+    "blocks": 4,
+    "layers": 2
+}
+CFG.MODEL.FROWARD_FEATURES = [0, 1]
+CFG.MODEL.TARGET_FEATURES = [0]
+
+# ================= optim ================= #
+CFG.TRAIN = EasyDict()
+CFG.TRAIN.LOSS = masked_mae
+CFG.TRAIN.OPTIM = EasyDict()
+CFG.TRAIN.OPTIM.TYPE = "Adam"
+CFG.TRAIN.OPTIM.PARAM = {
+    "lr": 0.002,
+    "weight_decay": 0.0001,
+}
+CFG.TRAIN.LR_SCHEDULER = EasyDict()
+CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR"
+CFG.TRAIN.LR_SCHEDULER.PARAM = {
+    "milestones": [1, 50, 100],
+    "gamma": 0.5
+}
+
+# ================= train ================= #
+CFG.TRAIN.CLIP_GRAD_PARAM = {
+    "max_norm": 5.0
+}
+CFG.TRAIN.NUM_EPOCHS = 200
+CFG.TRAIN.CKPT_SAVE_DIR = os.path.join(
+    "checkpoints",
+    "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)])
+)
+# train data
+CFG.TRAIN.DATA = EasyDict()
+CFG.TRAIN.NULL_VAL = 0.0
+# read data
+CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.TRAIN.DATA.BATCH_SIZE = 64
+CFG.TRAIN.DATA.PREFETCH = False
+CFG.TRAIN.DATA.SHUFFLE = True
+CFG.TRAIN.DATA.NUM_WORKERS = 2
+CFG.TRAIN.DATA.PIN_MEMORY = False
+
+# ================= validate ================= #
+CFG.VAL = EasyDict()
+CFG.VAL.INTERVAL = 1
+# validating data
+CFG.VAL.DATA = EasyDict()
+# read data
+CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/Linear/Linear_METR-LA.py b/examples/Linear/Linear_METR-LA.py new file mode 100644 index 00000000..e5f8f28a --- /dev/null +++ b/examples/Linear/Linear_METR-LA.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import Linear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "Linear" +CFG.MODEL.ARCH = Linear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False 
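All of the GWNet configs above derive their graph `supports` with `load_adj(..., "doubletransition")`. As a rough, illustrative sketch of what a double-transition preprocessing computes (assuming the standard Graph WaveNet formulation of forward/backward random-walk matrices; `double_transition` below is a hypothetical helper, not the actual `basicts.utils.load_adj`, which also handles pickle loading and other normalizations):

import numpy as np

def double_transition(adj: np.ndarray) -> list:
    # Forward transition P_f = D_out^-1 A: each row of A divided by its out-degree.
    d_out = adj.sum(axis=1, keepdims=True)
    p_fwd = adj / np.where(d_out == 0.0, 1.0, d_out)
    # Backward transition P_b = D_in^-1 A^T: the same normalization on the transpose.
    adj_t = adj.T
    d_in = adj_t.sum(axis=1, keepdims=True)
    p_bwd = adj_t / np.where(d_in == 0.0, 1.0, d_in)
    # The configs wrap each returned matrix with torch.tensor(...) as one support.
    return [p_fwd, p_bwd]

With `"addaptadj": True` and `"aptinit": None`, Graph WaveNet additionally learns an adaptive adjacency on top of these two fixed supports.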
+CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/Linear/Linear_PEMS-BAY.py b/examples/Linear/Linear_PEMS-BAY.py new file mode 100644 index 00000000..e57a6907 --- /dev/null +++ b/examples/Linear/Linear_PEMS-BAY.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import Linear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "Linear" +CFG.MODEL.ARCH = Linear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/Linear/Linear_PEMS03.py b/examples/Linear/Linear_PEMS03.py new file mode 100644 index 00000000..3a35632b --- /dev/null +++ b/examples/Linear/Linear_PEMS03.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import 
TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import Linear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "Linear" +CFG.MODEL.ARCH = Linear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/Linear/Linear_PEMS04.py b/examples/Linear/Linear_PEMS04.py new file mode 100644 index 00000000..f80c9c5f --- /dev/null +++ b/examples/Linear/Linear_PEMS04.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import Linear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 
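The Linear configs expose only `seq_len` and `pred_len`, which is enough to pin down the model family: one linear map from the input window to the forecast window, applied identically at every node. A minimal sketch of such an architecture (an assumption for illustration; the real class is `basicts.archs.Linear`):

import torch
from torch import nn

class LinearSketch(nn.Module):
    # Hypothetical stand-in for basicts.archs.Linear.
    def __init__(self, seq_len: int, pred_len: int):
        super().__init__()
        self.proj = nn.Linear(seq_len, pred_len)  # weights shared across all nodes

    def forward(self, history_data: torch.Tensor) -> torch.Tensor:
        # history_data: [B, L, N, C]; only channel 0 is used (FROWARD_FEATURES = [0]).
        x = history_data[..., 0].transpose(1, 2)            # [B, N, L]
        return self.proj(x).transpose(1, 2).unsqueeze(-1)   # [B, L, N, 1]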
+CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "Linear" +CFG.MODEL.ARCH = Linear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/Linear/Linear_PEMS07.py b/examples/Linear/Linear_PEMS07.py new file mode 100644 index 00000000..dc8fae8b --- /dev/null +++ b/examples/Linear/Linear_PEMS07.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import Linear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "Linear" +CFG.MODEL.ARCH = Linear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM 
= { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/Linear/Linear_PEMS08.py b/examples/Linear/Linear_PEMS08.py new file mode 100644 index 00000000..363421da --- /dev/null +++ b/examples/Linear/Linear_PEMS08.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import Linear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "Linear" +CFG.MODEL.ARCH = Linear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME 
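Every config in this PR pairs Adam with `MultiStepLR`; for the Linear files this means the initial learning rate of 0.002 is halved after epochs 1, 50, and 80. This is plain `torch.optim` behavior, which the following standalone check confirms:

import torch

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.Adam([param], lr=0.002, weight_decay=0.0001)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[1, 50, 80], gamma=0.5)

for epoch in range(1, 101):
    # ... one training epoch would run here ...
    scheduler.step()
    if epoch in (1, 50, 80):
        print(epoch, optimizer.param_groups[0]["lr"])
# prints: 1 0.001, then 50 0.0005, then 80 0.00025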
+# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/MLP/MLP_METR-LA.py b/examples/MLP/MLP_METR-LA.py new file mode 100644 index 00000000..603db610 --- /dev/null +++ b/examples/MLP/MLP_METR-LA.py @@ -0,0 +1,110 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.data import TimeSeriesForecastingDataset + +from .MLP_arch import MultiLayerPerceptron +from .MLP_runner import SimpleTimeSeriesForecastingRunner +from basicts.losses import masked_mae + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Multi-layer perceptron model configuration" +CFG.RUNNER = SimpleTimeSeriesForecastingRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "MultiLayerPerceptron" +CFG.MODEL.ARCH = MultiLayerPerceptron +CFG.MODEL.PARAM = { + "history_seq_len": CFG.DATASET_INPUT_LEN, + "prediction_seq_len": CFG.DATASET_OUTPUT_LEN, + "hidden_dim": 32 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 1.0e-5, + "eps": 1.0e-8 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 30, 38, 46, 54, 62, 70, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# 
read data
+CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.VAL.DATA.BATCH_SIZE = 32
+CFG.VAL.DATA.PREFETCH = False
+CFG.VAL.DATA.SHUFFLE = False
+CFG.VAL.DATA.NUM_WORKERS = 2
+CFG.VAL.DATA.PIN_MEMORY = False
+
+# ================= test ================= #
+CFG.TEST = EasyDict()
+CFG.TEST.INTERVAL = 1
+# evaluation
+CFG.TEST.EVALUATION_HORIZONS = range(12)
+# test data
+CFG.TEST.DATA = EasyDict()
+# read data
+CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME
+# dataloader args, optional
+CFG.TEST.DATA.BATCH_SIZE = 32
+CFG.TEST.DATA.PREFETCH = False
+CFG.TEST.DATA.SHUFFLE = False
+CFG.TEST.DATA.NUM_WORKERS = 2
+CFG.TEST.DATA.PIN_MEMORY = False
diff --git a/examples/MLP/MLP_arch.py b/examples/MLP/MLP_arch.py
new file mode 100644
index 00000000..0e242cd0
--- /dev/null
+++ b/examples/MLP/MLP_arch.py
@@ -0,0 +1,25 @@
+import torch
+from torch import nn
+
+class MultiLayerPerceptron(nn.Module):
+    """Two fully connected layers."""
+
+    def __init__(self, history_seq_len: int, prediction_seq_len: int, hidden_dim: int):
+        super().__init__()
+        self.fc1 = nn.Linear(history_seq_len, hidden_dim)
+        self.fc2 = nn.Linear(hidden_dim, prediction_seq_len)
+        self.act = nn.ReLU()
+
+    def forward(self, history_data: torch.Tensor, future_data: torch.Tensor, batch_seen: int, epoch: int, train: bool, **kwargs) -> torch.Tensor:
+        """Feedforward function of the MLP.
+
+        Args:
+            history_data (torch.Tensor): inputs with shape [B, L, N, C].
+
+        Returns:
+            torch.Tensor: outputs with shape [B, L, N, C]
+        """
+
+        history_data = history_data[..., 0].transpose(1, 2)  # B, N, L
+        prediction = self.fc2(self.act(self.fc1(history_data))).transpose(1, 2)  # B, L, N
+        return prediction.unsqueeze(-1)  # B, L, N, C
diff --git a/examples/MLP/MLP_runner.py b/examples/MLP/MLP_runner.py
new file mode 100644
index 00000000..ad337c81
--- /dev/null
+++ b/examples/MLP/MLP_runner.py
@@ -0,0 +1,80 @@
+import torch
+
+from basicts.runners import BaseTimeSeriesForecastingRunner
+
+
+class SimpleTimeSeriesForecastingRunner(BaseTimeSeriesForecastingRunner):
+    """Simple Runner: select forward features and target features.
+    Copied from basicts.runners.simple_tsf_runner.py."""
+
+    def __init__(self, cfg: dict):
+        super().__init__(cfg)
+        self.forward_features = cfg["MODEL"].get("FROWARD_FEATURES", None)
+        self.target_features = cfg["MODEL"].get("TARGET_FEATURES", None)
+
+    def select_input_features(self, data: torch.Tensor) -> torch.Tensor:
+        """Select input features.
+
+        Args:
+            data (torch.Tensor): input history data, shape [B, L, N, C]
+
+        Returns:
+            torch.Tensor: reshaped data
+        """
+
+        # select feature using self.forward_features
+        if self.forward_features is not None:
+            data = data[:, :, :, self.forward_features]
+        return data
+
+    def select_target_features(self, data: torch.Tensor) -> torch.Tensor:
+        """Select target feature.
+
+        Args:
+            data (torch.Tensor): prediction of the model with arbitrary shape.
+
+        Returns:
+            torch.Tensor: reshaped data with shape [B, L, N, C]
+        """
+
+        # select feature using self.target_features
+        data = data[:, :, :, self.target_features]
+        return data
+
+    def forward(self, data: tuple, epoch: int = None, iter_num: int = None, train: bool = True, **kwargs) -> tuple:
+        """Feed forward process for train, val, and test. Note that the outputs are NOT re-scaled.
+
+        Args:
+            data (tuple): data (future data, history data).
+            epoch (int, optional): epoch number. Defaults to None.
+            iter_num (int, optional): iteration number. Defaults to None.
+            train (bool, optional): if in the training process.
Defaults to True. + + Returns: + tuple: (prediction, real_value) + """ + + # preprocess + future_data, history_data = data + history_data = self.to_running_device(history_data) # B, L, N, C + future_data = self.to_running_device(future_data) # B, L, N, C + batch_size, length, num_nodes, _ = future_data.shape + + history_data = self.select_input_features(history_data) + _future_data = self.select_input_features(future_data) + + # curriculum learning + if self.cl_param is None: + prediction_data = self.model( + history_data=history_data, future_data=_future_data, batch_seen=iter_num, epoch=epoch, train=train) + else: + task_level = self.curriculum_learning(epoch) + prediction_data = self.model(history_data=history_data, future_data=_future_data, + batch_seen=iter_num, epoch=epoch, train=train, task_level=task_level) + # feed forward + assert list(prediction_data.shape)[:3] == [batch_size, length, num_nodes], \ + "error shape of the output, edit the forward function to reshape it to [B, L, N, C]" + # post process + prediction = self.select_target_features(prediction_data) + real_value = self.select_target_features(future_data) + return prediction, real_value diff --git a/basicts/options/MTGNN/MTGNN_METR-LA.py b/examples/MTGNN/MTGNN_METR-LA.py similarity index 52% rename from basicts/options/MTGNN/MTGNN_METR-LA.py rename to examples/MTGNN/MTGNN_METR-LA.py index a740610a..0026a6f4 100644 --- a/basicts/options/MTGNN/MTGNN_METR-LA.py +++ b/examples/MTGNN/MTGNN_METR-LA.py @@ -1,39 +1,39 @@ import os -from easydict import EasyDict +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) import torch -# runner -from basicts.runners.MTGNN_runner import MTGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj +from easydict import EasyDict +from basicts.archs import MTGNN +from basicts.runners import MTGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + CFG = EasyDict() # ================= general ================= # -CFG.DESCRIPTION = 'MTGNN model configuration' -CFG.RUNNER = MTGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "METR-LA" -CFG.DATASET_TYPE = 'Traffic speed' +CFG.DESCRIPTION = "MTGNN model configuration" +CFG.RUNNER = MTGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} # ================= environment ================= # CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 +CFG.ENV.SEED = 1 CFG.ENV.CUDNN = EasyDict() CFG.ENV.CUDNN.ENABLED = True # ================= model ================= # CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'MTGNN' +CFG.MODEL.NAME = "MTGNN" +CFG.MODEL.ARCH = MTGNN buildA_true = True num_nodes = 207 if buildA_true: # self-learned adjacency matrix @@ -43,7 +43,7 @@ adj_mx = torch.tensor(adj_mx)-torch.eye(num_nodes) CFG.MODEL.PARAM = { - "gcn_true" : True, + "gcn_true" : True, "buildA_true": buildA_true, "gcn_depth": 2, "num_nodes": num_nodes, @@ -64,17 +64,17 @@ "tanhalpha":3, "layer_norm_affline":True } -CFG.MODEL.FROWARD_FEATURES = [0, 1] 
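Since `MLP_arch.py` and `MLP_runner.py` above fix the tensor contract to [B, L, N, C], the pair can be smoke-tested without any dataset. A small shape check (assuming the repository root is on `sys.path` so that `examples.MLP.MLP_arch` is importable; C = 3 raw channels is an assumption for illustration):

import torch
from examples.MLP.MLP_arch import MultiLayerPerceptron

model = MultiLayerPerceptron(history_seq_len=12, prediction_seq_len=12, hidden_dim=32)
history = torch.randn(4, 12, 207, 3)   # [B, L, N, C], 207 nodes as in METR-LA
selected = history[:, :, :, [0]]       # what select_input_features does for FROWARD_FEATURES = [0]
out = model(history_data=selected, future_data=None, batch_seen=0, epoch=0, train=False)
assert out.shape == (4, 12, 207, 1)    # satisfies the runner's [B, L, N, ...] assertion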
-CFG.MODEL.TARGET_FEATURES = [0] +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] # ================= optim ================= # CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss +CFG.TRAIN.LOSS = masked_mae CFG.TRAIN.OPTIM = EasyDict() CFG.TRAIN.OPTIM.TYPE = "Adam" CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":0.0001, + "lr": 0.001, + "weight_decay": 0.0001, } # ================= train ================= # @@ -84,24 +84,24 @@ CFG.TRAIN.CUSTOM.NUM_SPLIT = 1 CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 + "max_norm": 5.0 } CFG.TRAIN.NUM_EPOCHS = 100 CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) ) # train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False ## curriculum learning CFG.TRAIN.CL = EasyDict() CFG.TRAIN.CL.WARM_EPOCHS = 0 @@ -113,25 +113,25 @@ CFG.VAL.INTERVAL = 1 # validating data CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 32 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False # ================= test ================= # CFG.TEST = EasyDict() CFG.TEST.INTERVAL = 1 -# validating data +# test data CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 32 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/MTGNN/MTGNN_PEMS-BAY.py b/examples/MTGNN/MTGNN_PEMS-BAY.py similarity index 52% rename from basicts/options/MTGNN/MTGNN_PEMS-BAY.py rename to examples/MTGNN/MTGNN_PEMS-BAY.py index cfa34edb..43b11670 100644 --- a/basicts/options/MTGNN/MTGNN_PEMS-BAY.py +++ b/examples/MTGNN/MTGNN_PEMS-BAY.py @@ -1,39 +1,39 @@ import os -from easydict import EasyDict +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) import torch -# runner -from basicts.runners.MTGNN_runner import MTGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import 
masked_l1_loss -from basicts.utils.serialization import load_adj +from easydict import EasyDict +from basicts.archs import MTGNN +from basicts.runners import MTGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + CFG = EasyDict() # ================= general ================= # -CFG.DESCRIPTION = 'MTGNN model configuration' -CFG.RUNNER = MTGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS-BAY" -CFG.DATASET_TYPE = 'Traffic speed' +CFG.DESCRIPTION = "MTGNN model configuration" +CFG.RUNNER = MTGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} # ================= environment ================= # CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 +CFG.ENV.SEED = 1 CFG.ENV.CUDNN = EasyDict() CFG.ENV.CUDNN.ENABLED = True # ================= model ================= # CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'MTGNN' +CFG.MODEL.NAME = "MTGNN" +CFG.MODEL.ARCH = MTGNN buildA_true = True num_nodes = 325 if buildA_true: # self-learned adjacency matrix @@ -43,7 +43,7 @@ adj_mx = torch.tensor(adj_mx)-torch.eye(num_nodes) CFG.MODEL.PARAM = { - "gcn_true" : True, + "gcn_true" : True, "buildA_true": buildA_true, "gcn_depth": 2, "num_nodes": num_nodes, @@ -64,17 +64,17 @@ "tanhalpha":3, "layer_norm_affline":True } -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] # ================= optim ================= # CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss +CFG.TRAIN.LOSS = masked_mae CFG.TRAIN.OPTIM = EasyDict() CFG.TRAIN.OPTIM.TYPE = "Adam" CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":0.0001, + "lr": 0.001, + "weight_decay": 0.0001, } # ================= train ================= # @@ -84,55 +84,54 @@ CFG.TRAIN.CUSTOM.NUM_SPLIT = 1 CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 + "max_norm": 5.0 } CFG.TRAIN.NUM_EPOCHS = 100 CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) ) # train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False ## curriculum learning CFG.TRAIN.CL = EasyDict() CFG.TRAIN.CL.WARM_EPOCHS = 0 CFG.TRAIN.CL.CL_EPOCHS = 3 CFG.TRAIN.CL.PREDICTION_LENGTH = 12 - # ================= validate ================= # CFG.VAL = EasyDict() CFG.VAL.INTERVAL = 1 # validating data CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False +# read data 
+CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 32 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False # ================= test ================= # CFG.TEST = EasyDict() CFG.TEST.INTERVAL = 1 -# validating data +# test data CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 32 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/MTGNN/MTGNN_PEMS03.py b/examples/MTGNN/MTGNN_PEMS03.py similarity index 52% rename from basicts/options/MTGNN/MTGNN_PEMS03.py rename to examples/MTGNN/MTGNN_PEMS03.py index dbd84455..3adcc25e 100644 --- a/basicts/options/MTGNN/MTGNN_PEMS03.py +++ b/examples/MTGNN/MTGNN_PEMS03.py @@ -1,39 +1,39 @@ import os -from easydict import EasyDict +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) import torch -# runner -from basicts.runners.MTGNN_runner import MTGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj +from easydict import EasyDict +from basicts.archs import MTGNN +from basicts.runners import MTGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + CFG = EasyDict() # ================= general ================= # -CFG.DESCRIPTION = 'MTGNN model configuration' -CFG.RUNNER = MTGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS03" -CFG.DATASET_TYPE = 'Traffic flow' +CFG.DESCRIPTION = "MTGNN model configuration" +CFG.RUNNER = MTGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} # ================= environment ================= # CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 +CFG.ENV.SEED = 1 CFG.ENV.CUDNN = EasyDict() CFG.ENV.CUDNN.ENABLED = True # ================= model ================= # CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'MTGNN' +CFG.MODEL.NAME = "MTGNN" +CFG.MODEL.ARCH = MTGNN buildA_true = True num_nodes = 358 if buildA_true: # self-learned adjacency matrix @@ -43,7 +43,7 @@ adj_mx = torch.tensor(adj_mx)-torch.eye(num_nodes) CFG.MODEL.PARAM = { - "gcn_true" : True, + "gcn_true" : True, "buildA_true": buildA_true, "gcn_depth": 2, "num_nodes": num_nodes, @@ -64,17 +64,17 @@ "tanhalpha":3, "layer_norm_affline":True } -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] # ================= optim ================= # CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss +CFG.TRAIN.LOSS = masked_mae CFG.TRAIN.OPTIM = EasyDict() CFG.TRAIN.OPTIM.TYPE = "Adam" 
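The `CFG.TRAIN.CL` blocks kept in these MTGNN configs (WARM_EPOCHS = 0, CL_EPOCHS = 3, PREDICTION_LENGTH = 12) drive the `curriculum_learning(epoch)` call seen in the runner's forward pass. A plausible schedule, sketched here as an assumption rather than BasicTS's exact code, grows the supervised horizon by one step every CL_EPOCHS epochs:

def task_level(epoch: int, warm_epochs: int = 0, cl_epochs: int = 3, prediction_length: int = 12) -> int:
    # During warm-up the full horizon is supervised; afterwards the horizon
    # starts at 1 step and grows by 1 every cl_epochs epochs, capped at the
    # full prediction length. Illustrative only.
    if epoch < warm_epochs:
        return prediction_length
    return min(prediction_length, (epoch - warm_epochs) // cl_epochs + 1)

# With these settings: epochs 0-2 supervise 1 step, 3-5 supervise 2 steps,
# and from epoch 33 on all 12 steps are supervised.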
CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":0.0001, + "lr": 0.001, + "weight_decay": 0.0001, } # ================= train ================= # @@ -84,55 +84,54 @@ CFG.TRAIN.CUSTOM.NUM_SPLIT = 1 CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 + "max_norm": 5.0 } CFG.TRAIN.NUM_EPOCHS = 100 CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) ) # train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False ## curriculum learning CFG.TRAIN.CL = EasyDict() CFG.TRAIN.CL.WARM_EPOCHS = 0 CFG.TRAIN.CL.CL_EPOCHS = 3 CFG.TRAIN.CL.PREDICTION_LENGTH = 12 - # ================= validate ================= # CFG.VAL = EasyDict() CFG.VAL.INTERVAL = 1 # validating data CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 32 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False # ================= test ================= # CFG.TEST = EasyDict() CFG.TEST.INTERVAL = 1 -# validating data +# test data CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 32 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/MTGNN/MTGNN_PEMS04.py b/examples/MTGNN/MTGNN_PEMS04.py similarity index 52% rename from basicts/options/MTGNN/MTGNN_PEMS04.py rename to examples/MTGNN/MTGNN_PEMS04.py index fccf5b40..d4f1397f 100644 --- a/basicts/options/MTGNN/MTGNN_PEMS04.py +++ b/examples/MTGNN/MTGNN_PEMS04.py @@ -1,39 +1,39 @@ import os -from easydict import EasyDict +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) import torch -# runner -from basicts.runners.MTGNN_runner import MTGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj +from easydict import EasyDict +from basicts.archs import MTGNN +from basicts.runners import MTGNNRunner +from basicts.data import 
TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + CFG = EasyDict() # ================= general ================= # -CFG.DESCRIPTION = 'MTGNN model configuration' -CFG.RUNNER = MTGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS04" -CFG.DATASET_TYPE = 'Traffic flow' +CFG.DESCRIPTION = "MTGNN model configuration" +CFG.RUNNER = MTGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} # ================= environment ================= # CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 +CFG.ENV.SEED = 1 CFG.ENV.CUDNN = EasyDict() CFG.ENV.CUDNN.ENABLED = True # ================= model ================= # CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'MTGNN' +CFG.MODEL.NAME = "MTGNN" +CFG.MODEL.ARCH = MTGNN buildA_true = True num_nodes = 307 if buildA_true: # self-learned adjacency matrix @@ -43,7 +43,7 @@ adj_mx = torch.tensor(adj_mx)-torch.eye(num_nodes) CFG.MODEL.PARAM = { - "gcn_true" : True, + "gcn_true" : True, "buildA_true": buildA_true, "gcn_depth": 2, "num_nodes": num_nodes, @@ -64,17 +64,17 @@ "tanhalpha":3, "layer_norm_affline":True } -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] # ================= optim ================= # CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss +CFG.TRAIN.LOSS = masked_mae CFG.TRAIN.OPTIM = EasyDict() CFG.TRAIN.OPTIM.TYPE = "Adam" CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":0.0001, + "lr": 0.001, + "weight_decay": 0.0001, } # ================= train ================= # @@ -84,55 +84,54 @@ CFG.TRAIN.CUSTOM.NUM_SPLIT = 1 CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 + "max_norm": 5.0 } CFG.TRAIN.NUM_EPOCHS = 100 CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) ) # train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False ## curriculum learning CFG.TRAIN.CL = EasyDict() CFG.TRAIN.CL.WARM_EPOCHS = 0 CFG.TRAIN.CL.CL_EPOCHS = 3 CFG.TRAIN.CL.PREDICTION_LENGTH = 12 - # ================= validate ================= # CFG.VAL = EasyDict() CFG.VAL.INTERVAL = 1 # validating data CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 32 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 
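These renames also swap `masked_l1_loss` for `masked_mae`; the convention, together with `CFG.TRAIN.NULL_VAL = 0.0`, is that zero entries mark missing sensor readings and are excluded from the loss. A minimal sketch of that masking, assuming the usual traffic-forecasting formulation rather than quoting `basicts.losses.masked_mae` verbatim:

import torch

def masked_mae_sketch(prediction: torch.Tensor, target: torch.Tensor, null_val: float = 0.0) -> torch.Tensor:
    # Entries equal to null_val are treated as missing.
    mask = (target != null_val).float()
    # Renormalize so averaging over all positions equals averaging over valid ones.
    mask = mask / torch.clamp(mask.mean(), min=1e-5)
    return torch.mean(torch.abs(prediction - target) * mask)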
+CFG.VAL.DATA.PIN_MEMORY = False # ================= test ================= # CFG.TEST = EasyDict() CFG.TEST.INTERVAL = 1 -# validating data +# test data CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 32 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/MTGNN/MTGNN_PEMS07.py b/examples/MTGNN/MTGNN_PEMS07.py similarity index 52% rename from basicts/options/MTGNN/MTGNN_PEMS07.py rename to examples/MTGNN/MTGNN_PEMS07.py index 2962e27d..b554907d 100644 --- a/basicts/options/MTGNN/MTGNN_PEMS07.py +++ b/examples/MTGNN/MTGNN_PEMS07.py @@ -1,39 +1,39 @@ import os -from easydict import EasyDict +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) import torch -# runner -from basicts.runners.MTGNN_runner import MTGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj +from easydict import EasyDict +from basicts.archs import MTGNN +from basicts.runners import MTGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + CFG = EasyDict() # ================= general ================= # -CFG.DESCRIPTION = 'MTGNN model configuration' -CFG.RUNNER = MTGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS07" -CFG.DATASET_TYPE = 'Traffic flow' +CFG.DESCRIPTION = "MTGNN model configuration" +CFG.RUNNER = MTGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} # ================= environment ================= # CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 +CFG.ENV.SEED = 1 CFG.ENV.CUDNN = EasyDict() CFG.ENV.CUDNN.ENABLED = True # ================= model ================= # CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'MTGNN' +CFG.MODEL.NAME = "MTGNN" +CFG.MODEL.ARCH = MTGNN buildA_true = True num_nodes = 883 if buildA_true: # self-learned adjacency matrix @@ -43,7 +43,7 @@ adj_mx = torch.tensor(adj_mx)-torch.eye(num_nodes) CFG.MODEL.PARAM = { - "gcn_true" : True, + "gcn_true" : True, "buildA_true": buildA_true, "gcn_depth": 2, "num_nodes": num_nodes, @@ -64,17 +64,17 @@ "tanhalpha":3, "layer_norm_affline":True } -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] # ================= optim ================= # CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss +CFG.TRAIN.LOSS = masked_mae CFG.TRAIN.OPTIM = EasyDict() CFG.TRAIN.OPTIM.TYPE = "Adam" CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":0.0001, + "lr": 0.001, + "weight_decay": 0.0001, } # ================= train ================= # @@ -84,55 +84,54 @@ CFG.TRAIN.CUSTOM.NUM_SPLIT = 1 
CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 + "max_norm": 5.0 } CFG.TRAIN.NUM_EPOCHS = 100 CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) ) # train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False ## curriculum learning CFG.TRAIN.CL = EasyDict() CFG.TRAIN.CL.WARM_EPOCHS = 0 CFG.TRAIN.CL.CL_EPOCHS = 3 CFG.TRAIN.CL.PREDICTION_LENGTH = 12 - # ================= validate ================= # CFG.VAL = EasyDict() CFG.VAL.INTERVAL = 1 # validating data CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 32 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False # ================= test ================= # CFG.TEST = EasyDict() CFG.TEST.INTERVAL = 1 -# validating data +# test data CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 32 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/basicts/options/MTGNN/MTGNN_PEMS08.py b/examples/MTGNN/MTGNN_PEMS08.py similarity index 52% rename from basicts/options/MTGNN/MTGNN_PEMS08.py rename to examples/MTGNN/MTGNN_PEMS08.py index da672872..96003f82 100644 --- a/basicts/options/MTGNN/MTGNN_PEMS08.py +++ b/examples/MTGNN/MTGNN_PEMS08.py @@ -1,39 +1,39 @@ import os -from easydict import EasyDict +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) import torch -# runner -from basicts.runners.MTGNN_runner import MTGNNRunner -from basicts.data.base_dataset import BaseDataset -from basicts.metrics.mae import masked_mae -from basicts.metrics.mape import masked_mape -from basicts.metrics.rmse import masked_rmse -from basicts.losses.losses import masked_l1_loss -from basicts.utils.serialization import load_adj +from easydict import EasyDict +from basicts.archs import MTGNN +from basicts.runners import MTGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + CFG = EasyDict() # ================= general ================= # -CFG.DESCRIPTION = 'MTGNN model 
configuration' -CFG.RUNNER = MTGNNRunner -CFG.DATASET_CLS = BaseDataset -CFG.DATASET_NAME = "PEMS08" -CFG.DATASET_TYPE = 'Traffic flow' +CFG.DESCRIPTION = "MTGNN model configuration" +CFG.RUNNER = MTGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 CFG.GPU_NUM = 1 -CFG.METRICS = { - "MAE": masked_mae, - "RMSE": masked_rmse, - "MAPE": masked_mape -} # ================= environment ================= # CFG.ENV = EasyDict() -CFG.ENV.SEED = 1 +CFG.ENV.SEED = 1 CFG.ENV.CUDNN = EasyDict() CFG.ENV.CUDNN.ENABLED = True # ================= model ================= # CFG.MODEL = EasyDict() -CFG.MODEL.NAME = 'MTGNN' +CFG.MODEL.NAME = "MTGNN" +CFG.MODEL.ARCH = MTGNN buildA_true = True num_nodes = 170 if buildA_true: # self-learned adjacency matrix @@ -43,7 +43,7 @@ adj_mx = torch.tensor(adj_mx)-torch.eye(num_nodes) CFG.MODEL.PARAM = { - "gcn_true" : True, + "gcn_true" : True, "buildA_true": buildA_true, "gcn_depth": 2, "num_nodes": num_nodes, @@ -64,17 +64,17 @@ "tanhalpha":3, "layer_norm_affline":True } -CFG.MODEL.FROWARD_FEATURES = [0, 1] -CFG.MODEL.TARGET_FEATURES = [0] +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] # ================= optim ================= # CFG.TRAIN = EasyDict() -CFG.TRAIN.LOSS = masked_l1_loss +CFG.TRAIN.LOSS = masked_mae CFG.TRAIN.OPTIM = EasyDict() CFG.TRAIN.OPTIM.TYPE = "Adam" CFG.TRAIN.OPTIM.PARAM= { - "lr":0.001, - "weight_decay":0.0001, + "lr": 0.001, + "weight_decay": 0.0001, } # ================= train ================= # @@ -84,55 +84,54 @@ CFG.TRAIN.CUSTOM.NUM_SPLIT = 1 CFG.TRAIN.CLIP_GRAD_PARAM = { - 'max_norm': 5.0 + "max_norm": 5.0 } CFG.TRAIN.NUM_EPOCHS = 100 CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( - 'checkpoints', - '_'.join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) ) # train data -CFG.TRAIN.DATA = EasyDict() -CFG.TRAIN.NULL_VAL = 0.0 -## read data -CFG.TRAIN.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TRAIN.DATA.BATCH_SIZE = 32 -CFG.TRAIN.DATA.PREFETCH = False -CFG.TRAIN.DATA.SHUFFLE = True -CFG.TRAIN.DATA.NUM_WORKERS = 2 -CFG.TRAIN.DATA.PIN_MEMORY = False +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False ## curriculum learning CFG.TRAIN.CL = EasyDict() CFG.TRAIN.CL.WARM_EPOCHS = 0 CFG.TRAIN.CL.CL_EPOCHS = 3 CFG.TRAIN.CL.PREDICTION_LENGTH = 12 - # ================= validate ================= # CFG.VAL = EasyDict() CFG.VAL.INTERVAL = 1 # validating data CFG.VAL.DATA = EasyDict() -## read data -CFG.VAL.DATA.DIR = 'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.VAL.DATA.BATCH_SIZE = 32 -CFG.VAL.DATA.PREFETCH = False -CFG.VAL.DATA.SHUFFLE = False -CFG.VAL.DATA.NUM_WORKERS = 2 -CFG.VAL.DATA.PIN_MEMORY = False +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 32 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False # ================= test ================= # CFG.TEST = EasyDict() CFG.TEST.INTERVAL = 1 -# validating data +# test data CFG.TEST.DATA = EasyDict() -## read data -CFG.TEST.DATA.DIR = 
'datasets/' + CFG.DATASET_NAME -## dataloader args, optional -CFG.TEST.DATA.BATCH_SIZE = 32 -CFG.TEST.DATA.PREFETCH = False -CFG.TEST.DATA.SHUFFLE = False -CFG.TEST.DATA.NUM_WORKERS = 2 -CFG.TEST.DATA.PIN_MEMORY = False +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 32 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/NLinear/NLinear_METR-LA.py b/examples/NLinear/NLinear_METR-LA.py new file mode 100644 index 00000000..c9e56558 --- /dev/null +++ b/examples/NLinear/NLinear_METR-LA.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import NLinear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "NLinear" +CFG.MODEL.ARCH = NLinear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/NLinear/NLinear_PEMS-BAY.py 
b/examples/NLinear/NLinear_PEMS-BAY.py new file mode 100644 index 00000000..c3efccf5 --- /dev/null +++ b/examples/NLinear/NLinear_PEMS-BAY.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import NLinear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "NLinear" +CFG.MODEL.ARCH = NLinear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/NLinear/NLinear_PEMS03.py b/examples/NLinear/NLinear_PEMS03.py new file mode 100644 index 00000000..3abecb11 --- /dev/null +++ b/examples/NLinear/NLinear_PEMS03.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import NLinear + + +CFG = EasyDict() + +# 
================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "NLinear" +CFG.MODEL.ARCH = NLinear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic flow +CFG.MODEL.TARGET_FEATURES = [0] # traffic flow + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/NLinear/NLinear_PEMS04.py b/examples/NLinear/NLinear_PEMS04.py new file mode 100644 index 00000000..d7d5ccd7 --- /dev/null +++ b/examples/NLinear/NLinear_PEMS04.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import NLinear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = 
EasyDict() +CFG.MODEL.NAME = "NLinear" +CFG.MODEL.ARCH = NLinear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic flow +CFG.MODEL.TARGET_FEATURES = [0] # traffic flow + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/NLinear/NLinear_PEMS07.py b/examples/NLinear/NLinear_PEMS07.py new file mode 100644 index 00000000..dd11c10f --- /dev/null +++ b/examples/NLinear/NLinear_PEMS07.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import NLinear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "NLinear" +CFG.MODEL.ARCH = NLinear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic flow +CFG.MODEL.TARGET_FEATURES = [0] # traffic flow + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() 
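The OPTIM and LR_SCHEDULER blocks in these configs map one-to-one onto torch constructors. A minimal sketch, assuming `model` is the instantiated NLinear arch and using the PARAM dicts set on the surrounding lines:

import torch

optimizer = torch.optim.Adam(model.parameters(), **CFG.TRAIN.OPTIM.PARAM)  # lr=0.002, weight_decay=0.0001
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, **CFG.TRAIN.LR_SCHEDULER.PARAM)
for epoch in range(CFG.TRAIN.NUM_EPOCHS):
    # ... run one training epoch ...
    scheduler.step()  # halves the learning rate at epochs 1, 50 and 80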
+CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/NLinear/NLinear_PEMS08.py b/examples/NLinear/NLinear_PEMS08.py new file mode 100644 index 00000000..fcdf42fe --- /dev/null +++ b/examples/NLinear/NLinear_PEMS08.py @@ -0,0 +1,102 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import LinearRunner +from basicts.archs import NLinear + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "Linear model configuration" +CFG.RUNNER = LinearRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "NLinear" +CFG.MODEL.ARCH = NLinear +CFG.MODEL.PARAM = { + "seq_len": 12, + "pred_len": 12 +} +CFG.MODEL.FROWARD_FEATURES = [0] # traffic flow +CFG.MODEL.TARGET_FEATURES = [0] # traffic flow + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 
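A worked example of the CKPT_SAVE_DIR expression used throughout these configs, using only the standard library (POSIX path separator assumed):

import os

name, epochs = "NLinear", 200  # CFG.MODEL.NAME, CFG.TRAIN.NUM_EPOCHS
print(os.path.join("checkpoints", "_".join([name, str(epochs)])))  # checkpoints/NLinear_200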
+CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STGCN/STGCN_METR-LA.py b/examples/STGCN/STGCN_METR-LA.py new file mode 100644 index 00000000..049aa794 --- /dev/null +++ b/examples/STGCN/STGCN_METR-LA.py @@ -0,0 +1,117 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import STGCN +from basicts.runners import STGCNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STGCN model configuration" +CFG.RUNNER = STGCNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STGCN" +CFG.MODEL.ARCH = STGCN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "normlap") +adj_mx = torch.Tensor(adj_mx[0]) +CFG.MODEL.PARAM = { + "Ks" : 3, + "Kt" : 3, + "blocks" : [[1], [64, 16, 64], [64, 16, 64], [128, 128], [12]], + "T" : 12, + "n_vertex" : 207, + "act_func" : "glu", + "graph_conv_type" : "cheb_graph_conv", + "gso" : adj_mx, + "bias": True, + "droprate" : 0.5 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate 
================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STGCN/STGCN_PEMS-BAY.py b/examples/STGCN/STGCN_PEMS-BAY.py new file mode 100644 index 00000000..77fc04e4 --- /dev/null +++ b/examples/STGCN/STGCN_PEMS-BAY.py @@ -0,0 +1,117 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import STGCN +from basicts.runners import STGCNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STGCN model configuration" +CFG.RUNNER = STGCNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STGCN" +CFG.MODEL.ARCH = STGCN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "normlap") +adj_mx = torch.Tensor(adj_mx[0]) +CFG.MODEL.PARAM = { + "Ks" : 3, + "Kt" : 3, + "blocks" : [[1], [64, 16, 64], [64, 16, 64], [128, 128], [12]], + "T" : 12, + "n_vertex" : 325, + "act_func" : "glu", + "graph_conv_type" : "cheb_graph_conv", + "gso" : adj_mx, + "bias": True, + "droprate" : 0.5 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + 
CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STGCN/STGCN_PEMS03.py b/examples/STGCN/STGCN_PEMS03.py new file mode 100644 index 00000000..563305bb --- /dev/null +++ b/examples/STGCN/STGCN_PEMS03.py @@ -0,0 +1,117 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import STGCN +from basicts.runners import STGCNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STGCN model configuration" +CFG.RUNNER = STGCNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STGCN" +CFG.MODEL.ARCH = STGCN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "normlap") +adj_mx = torch.Tensor(adj_mx[0]) +CFG.MODEL.PARAM = { + "Ks" : 3, + "Kt" : 3, + "blocks" : [[1], [64, 16, 64], [64, 16, 64], [128, 128], [12]], + "T" : 12, + "n_vertex" : 358, + "act_func" : "glu", + "graph_conv_type" : "cheb_graph_conv", + "gso" : adj_mx, + "bias": True, + "droprate" : 0.5 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 100], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 
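Each STGCN config above builds its graph shift operator via load_adj(..., "normlap") and takes the first returned matrix. A hedged sketch of what a symmetric normalized Laplacian computes; basicts.utils.load_adj's exact recipe may differ:

import torch

def sym_norm_laplacian(adj: torch.Tensor) -> torch.Tensor:
    # I - D^{-1/2} A D^{-1/2}; rows with zero degree are left at zero weight.
    deg = adj.sum(dim=1)
    d_inv_sqrt = torch.where(deg > 0, deg.pow(-0.5), torch.zeros_like(deg))
    return torch.eye(adj.size(0)) - d_inv_sqrt[:, None] * adj * d_inv_sqrt[None, :]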
+CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STGCN/STGCN_PEMS04.py b/examples/STGCN/STGCN_PEMS04.py new file mode 100644 index 00000000..b98ce67c --- /dev/null +++ b/examples/STGCN/STGCN_PEMS04.py @@ -0,0 +1,117 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import STGCN +from basicts.runners import STGCNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STGCN model configuration" +CFG.RUNNER = STGCNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STGCN" +CFG.MODEL.ARCH = STGCN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "normlap") +adj_mx = torch.Tensor(adj_mx[0]) +CFG.MODEL.PARAM = { + "Ks" : 3, + "Kt" : 3, + "blocks" : [[1], [64, 16, 64], [64, 16, 64], [128, 128], [12]], + "T" : 12, + "n_vertex" : 307, + "act_func" : "glu", + "graph_conv_type" : "cheb_graph_conv", + "gso" : adj_mx, + "bias": True, + "droprate" : 0.5 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 100], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# 
read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STGCN/STGCN_PEMS07.py b/examples/STGCN/STGCN_PEMS07.py new file mode 100644 index 00000000..50546138 --- /dev/null +++ b/examples/STGCN/STGCN_PEMS07.py @@ -0,0 +1,117 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import STGCN +from basicts.runners import STGCNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STGCN model configuration" +CFG.RUNNER = STGCNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STGCN" +CFG.MODEL.ARCH = STGCN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "normlap") +adj_mx = torch.Tensor(adj_mx[0]) +CFG.MODEL.PARAM = { + "Ks" : 3, + "Kt" : 3, + "blocks" : [[1], [64, 16, 64], [64, 16, 64], [128, 128], [12]], + "T" : 12, + "n_vertex" : 883, + "act_func" : "glu", + "graph_conv_type" : "cheb_graph_conv", + "gso" : adj_mx, + "bias": True, + "droprate" : 0.5 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 100], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False 
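The "cheb_graph_conv" option with "Ks": 3 in these STGCN configs refers to Chebyshev-polynomial graph convolution. A minimal sketch of the polynomial basis recursion, not the library's implementation; the layer then mixes the Ks terms with learned weights (omitted here):

import torch

def cheb_basis(gso: torch.Tensor, x: torch.Tensor, ks: int) -> list:
    # T_0(L)x = x, T_1(L)x = Lx, T_k(L)x = 2L T_{k-1}(L)x - T_{k-2}(L)x
    terms = [x, gso @ x]
    for _ in range(2, ks):
        terms.append(2 * (gso @ terms[-1]) - terms[-2])
    return terms[:ks]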
+CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STGCN/STGCN_PEMS08.py b/examples/STGCN/STGCN_PEMS08.py new file mode 100644 index 00000000..2b714ca9 --- /dev/null +++ b/examples/STGCN/STGCN_PEMS08.py @@ -0,0 +1,117 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +import torch +from easydict import EasyDict +from basicts.archs import STGCN +from basicts.runners import STGCNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae +from basicts.utils import load_adj + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STGCN model configuration" +CFG.RUNNER = STGCNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STGCN" +CFG.MODEL.ARCH = STGCN +adj_mx, _ = load_adj("datasets/" + CFG.DATASET_NAME + "/adj_mx.pkl", "normlap") +adj_mx = torch.Tensor(adj_mx[0]) +CFG.MODEL.PARAM = { + "Ks" : 3, + "Kt" : 3, + "blocks" : [[1], [64, 16, 64], [64, 16, 64], [128, 128], [12]], + "T" : 12, + "n_vertex" : 170, + "act_func" : "glu", + "graph_conv_type" : "cheb_graph_conv", + "gso" : adj_mx, + "bias": True, + "droprate" : 0.5 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 100], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STID/STID_METR-LA.py b/examples/STID/STID_METR-LA.py new 
file mode 100644 index 00000000..71a45a23 --- /dev/null +++ b/examples/STID/STID_METR-LA.py @@ -0,0 +1,112 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import STIDRunner +from basicts.archs import STID + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STID model configuration" +CFG.RUNNER = STIDRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STID" +CFG.MODEL.ARCH = STID +CFG.MODEL.PARAM = { + "num_nodes": 207, + "input_len": 12, + "input_dim": 3, + "embed_dim": 32, + "output_len": 12, + "num_layer": 3, + "if_node": True, + "node_dim": 32, + "if_T_i_D": True, + "if_D_i_W": True, + "temp_dim_tid": 32, + "temp_dim_diw": 32, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STID/STID_PEMS-BAY.py b/examples/STID/STID_PEMS-BAY.py new file mode 100644 index 00000000..7ed1cee7 --- /dev/null +++ b/examples/STID/STID_PEMS-BAY.py @@ -0,0 +1,112 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import 
TimeSeriesForecastingDataset +from basicts.runners import STIDRunner +from basicts.archs import STID + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STID model configuration" +CFG.RUNNER = STIDRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STID" +CFG.MODEL.ARCH = STID +CFG.MODEL.PARAM = { + "num_nodes": 325, + "input_len": 12, + "input_dim": 3, + "embed_dim": 32, + "output_len": 12, + "num_layer": 3, + "if_node": True, + "node_dim": 32, + "if_T_i_D": True, + "if_D_i_W": True, + "temp_dim_tid": 32, + "temp_dim_diw": 32, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic speed, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic speed + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STID/STID_PEMS03.py b/examples/STID/STID_PEMS03.py new file mode 100644 index 00000000..b06197c8 --- /dev/null +++ b/examples/STID/STID_PEMS03.py @@ -0,0 +1,112 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import STIDRunner +from basicts.archs import STID + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STID model configuration" +CFG.RUNNER = STIDRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic 
flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STID" +CFG.MODEL.ARCH = STID +CFG.MODEL.PARAM = { + "num_nodes": 358, + "input_len": 12, + "input_dim": 3, + "embed_dim": 32, + "output_len": 12, + "num_layer": 3, + "if_node": True, + "node_dim": 32, + "if_T_i_D": True, + "if_D_i_W": True, + "temp_dim_tid": 32, + "temp_dim_diw": 32, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic flow, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic flow + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STID/STID_PEMS04.py b/examples/STID/STID_PEMS04.py new file mode 100644 index 00000000..77d68084 --- /dev/null +++ b/examples/STID/STID_PEMS04.py @@ -0,0 +1,112 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import STIDRunner +from basicts.archs import STID + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STID model configuration" +CFG.RUNNER = STIDRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STID" +CFG.MODEL.ARCH = STID 
+CFG.MODEL.PARAM = { + "num_nodes": 307, + "input_len": 12, + "input_dim": 3, + "embed_dim": 32, + "output_len": 12, + "num_layer": 3, + "if_node": True, + "node_dim": 32, + "if_T_i_D": True, + "if_D_i_W": True, + "temp_dim_tid": 32, + "temp_dim_diw": 32, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic flow, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic flow + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STID/STID_PEMS07.py b/examples/STID/STID_PEMS07.py new file mode 100644 index 00000000..53cc26b4 --- /dev/null +++ b/examples/STID/STID_PEMS07.py @@ -0,0 +1,113 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) + +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import STIDRunner +from basicts.archs import STID + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STID model configuration" +CFG.RUNNER = STIDRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STID" +CFG.MODEL.ARCH = STID +CFG.MODEL.PARAM = { + "num_nodes": 883, + "input_len": 12, + "input_dim": 3, + "embed_dim": 32, + "output_len": 12, + "num_layer": 3, + "if_node": True, + "node_dim": 32, + "if_T_i_D": True, + "if_D_i_W": True, + "temp_dim_tid": 32, + "temp_dim_diw": 32, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic flow, time in day +CFG.MODEL.TARGET_FEATURES = 
[0] # traffic flow + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STID/STID_PEMS08.py b/examples/STID/STID_PEMS08.py new file mode 100644 index 00000000..7af2c7e3 --- /dev/null +++ b/examples/STID/STID_PEMS08.py @@ -0,0 +1,112 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.losses import masked_mae +from basicts.data import TimeSeriesForecastingDataset +from basicts.runners import STIDRunner +from basicts.archs import STID + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STID model configuration" +CFG.RUNNER = STIDRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STID" +CFG.MODEL.ARCH = STID +CFG.MODEL.PARAM = { + "num_nodes": 170, + "input_len": 12, + "input_dim": 3, + "embed_dim": 32, + "output_len": 12, + "num_layer": 3, + "if_node": True, + "node_dim": 32, + "if_T_i_D": True, + "if_D_i_W": True, + "temp_dim_tid": 32, + "temp_dim_diw": 32, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1, 2] # traffic flow, time in day +CFG.MODEL.TARGET_FEATURES = [0] # traffic flow + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" 
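Throughout these example configs, the optimizer and LR scheduler are declared as a TYPE string plus a PARAM dict rather than as constructed objects. A minimal sketch of how such a pair can be resolved, assuming the runner simply looks the class up by name in torch.optim (an illustration of the pattern, not the framework's exact code):

    import torch
    from easydict import EasyDict

    optim_cfg = EasyDict({"TYPE": "Adam", "PARAM": {"lr": 0.002, "weight_decay": 0.0001}})
    model = torch.nn.Linear(12, 12)  # stand-in for the configured model

    # Resolve "Adam" to torch.optim.Adam, then splat PARAM as keyword arguments.
    optimizer_cls = getattr(torch.optim, optim_cfg.TYPE)
    optimizer = optimizer_cls(model.parameters(), **optim_cfg.PARAM)

Keeping the configs purely declarative like this is what lets one entry point drive every model.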
+CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 80], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 32 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STNorm/STNorm_METR-LA.py b/examples/STNorm/STNorm_METR-LA.py new file mode 100644 index 00000000..b53d122d --- /dev/null +++ b/examples/STNorm/STNorm_METR-LA.py @@ -0,0 +1,112 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import STNorm +from basicts.runners import STNormRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STNorm model configuration" +CFG.RUNNER = STNormRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STNorm" +CFG.MODEL.ARCH = STNorm +CFG.MODEL.PARAM = { + "num_nodes" : 207, + "tnorm_bool": True, + "snorm_bool": True, + "in_dim" : 2, + "out_dim" : 12, + "channels" : 32, + "kernel_size": 2, + "blocks" : 4, + "layers" : 2, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + 
CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STNorm/STNorm_PEMS-BAY.py b/examples/STNorm/STNorm_PEMS-BAY.py new file mode 100644 index 00000000..2357f84c --- /dev/null +++ b/examples/STNorm/STNorm_PEMS-BAY.py @@ -0,0 +1,112 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import STNorm +from basicts.runners import STNormRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STNorm model configuration" +CFG.RUNNER = STNormRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STNorm" +CFG.MODEL.ARCH = STNorm +CFG.MODEL.PARAM = { + "num_nodes" : 325, + "tnorm_bool": True, + "snorm_bool": True, + "in_dim" : 2, + "out_dim" : 12, + "channels" : 32, + "kernel_size": 2, + "blocks" : 4, + "layers" : 2, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = 
"datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STNorm/STNorm_PEMS03.py b/examples/STNorm/STNorm_PEMS03.py new file mode 100644 index 00000000..6321d01c --- /dev/null +++ b/examples/STNorm/STNorm_PEMS03.py @@ -0,0 +1,112 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import STNorm +from basicts.runners import STNormRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STNorm model configuration" +CFG.RUNNER = STNormRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STNorm" +CFG.MODEL.ARCH = STNorm +CFG.MODEL.PARAM = { + "num_nodes" : 358, + "tnorm_bool": True, + "snorm_bool": True, + "in_dim" : 2, + "out_dim" : 12, + "channels" : 32, + "kernel_size": 2, + "blocks" : 4, + "layers" : 2, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + 
CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STNorm/STNorm_PEMS04.py b/examples/STNorm/STNorm_PEMS04.py new file mode 100644 index 00000000..f166fce0 --- /dev/null +++ b/examples/STNorm/STNorm_PEMS04.py @@ -0,0 +1,112 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import STNorm +from basicts.runners import STNormRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STNorm model configuration" +CFG.RUNNER = STNormRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STNorm" +CFG.MODEL.ARCH = STNorm +CFG.MODEL.PARAM = { + "num_nodes" : 307, + "tnorm_bool": True, + "snorm_bool": True, + "in_dim" : 2, + "out_dim" : 12, + "channels" : 32, + "kernel_size": 2, + "blocks" : 4, + "layers" : 2, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STNorm/STNorm_PEMS07.py b/examples/STNorm/STNorm_PEMS07.py new file mode 100644 index 00000000..b66bc040 --- /dev/null +++ b/examples/STNorm/STNorm_PEMS07.py @@ -0,0 
+1,112 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import STNorm +from basicts.runners import STNormRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "STNorm model configuration" +CFG.RUNNER = STNormRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STNorm" +CFG.MODEL.ARCH = STNorm +CFG.MODEL.PARAM = { + "num_nodes" : 883, + "tnorm_bool": True, + "snorm_bool": True, + "in_dim" : 2, + "out_dim" : 12, + "channels" : 32, + "kernel_size": 2, + "blocks" : 4, + "layers" : 2, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/STNorm/STNorm_PEMS08.py b/examples/STNorm/STNorm_PEMS08.py new file mode 100644 index 00000000..d3d17d1f --- /dev/null +++ b/examples/STNorm/STNorm_PEMS08.py @@ -0,0 +1,112 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import STNorm +from basicts.runners import STNormRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + + +CFG = EasyDict() + +# ================= 
general ================= # +CFG.DESCRIPTION = "STNorm model configuration" +CFG.RUNNER = STNormRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "STNorm" +CFG.MODEL.ARCH = STNorm +CFG.MODEL.PARAM = { + "num_nodes" : 170, + "tnorm_bool": True, + "snorm_bool": True, + "in_dim" : 2, + "out_dim" : 12, + "channels" : 32, + "kernel_size": 2, + "blocks" : 4, + "layers" : 2, +} +CFG.MODEL.FROWARD_FEATURES = [0, 1] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM = { + "lr": 0.002, + "weight_decay": 0.0001, +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.CLIP_GRAD_PARAM = { + "max_norm": 5.0 +} +CFG.TRAIN.NUM_EPOCHS = 100 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/StemGNN/StemGNN_METR-LA.py b/examples/StemGNN/StemGNN_METR-LA.py new file mode 100644 index 00000000..8a789a42 --- /dev/null +++ b/examples/StemGNN/StemGNN_METR-LA.py @@ -0,0 +1,107 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import StemGNN +from basicts.runners import StemGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + +"""Different from the official code, we use Adam as the optimizer and MAE as the loss function since they bring better performance.""" + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "StemGNN model configuration" +CFG.RUNNER = StemGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "METR-LA" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 
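Every config in this change pairs CFG.TRAIN.LOSS = masked_mae with CFG.TRAIN.NULL_VAL = 0.0, i.e. zero readings are treated as missing sensor values rather than as true zeros. A minimal sketch of the masked-MAE semantics, assuming the usual renormalized-mask formulation (an illustration, not the exact body of basicts.losses.masked_mae):

    import torch

    def masked_mae_sketch(prediction: torch.Tensor, target: torch.Tensor,
                          null_val: float = 0.0) -> torch.Tensor:
        # Entries equal to null_val are excluded from the loss entirely.
        mask = (target != null_val).float()
        # Renormalize so the mean over valid entries keeps unit weight;
        # the clamp guards against an all-missing batch.
        mask = mask / torch.clamp(mask.mean(), min=1e-5)
        return (torch.abs(prediction - target) * mask).mean()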
+CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "StemGNN" +CFG.MODEL.ARCH = StemGNN +CFG.MODEL.PARAM = { + "units": 207, + "stack_cnt": 2, + "time_step": 12, + "multi_layer": 5, + "horizon": 12, + "dropout_rate": 0.5, + "leaky_rate": 0.2 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.0004 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/StemGNN/StemGNN_PEMS-BAY.py b/examples/StemGNN/StemGNN_PEMS-BAY.py new file mode 100644 index 00000000..a49f2252 --- /dev/null +++ b/examples/StemGNN/StemGNN_PEMS-BAY.py @@ -0,0 +1,107 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import StemGNN +from basicts.runners import StemGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + +"""Different from the official code, we use Adam as the optimizer and MAE as the loss function since they bring better performance.""" + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "StemGNN model configuration" +CFG.RUNNER = StemGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS-BAY" +CFG.DATASET_TYPE = "Traffic speed" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "StemGNN" +CFG.MODEL.ARCH = StemGNN +CFG.MODEL.PARAM = { + "units": 
325, + "stack_cnt": 2, + "time_step": 12, + "multi_layer": 5, + "horizon": 12, + "dropout_rate": 0.5, + "leaky_rate": 0.2 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.0004 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/StemGNN/StemGNN_PEMS03.py b/examples/StemGNN/StemGNN_PEMS03.py new file mode 100644 index 00000000..b52d7f09 --- /dev/null +++ b/examples/StemGNN/StemGNN_PEMS03.py @@ -0,0 +1,107 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import StemGNN +from basicts.runners import StemGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + +"""Different from the official code, we use Adam as the optimizer and MAE as the loss function since they bring better performance.""" + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "StemGNN model configuration" +CFG.RUNNER = StemGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS03" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "StemGNN" +CFG.MODEL.ARCH = StemGNN +CFG.MODEL.PARAM = { + "units": 358, + "stack_cnt": 2, + "time_step": 12, + "multi_layer": 5, + "horizon": 12, + "dropout_rate": 0.5, + "leaky_rate": 0.2 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" 
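With the 0.002 base learning rate, the MultiStepLR settings that follow halve the rate at epochs 1, 50 and 100, assuming the runner steps the scheduler once per epoch:

    import torch

    model = torch.nn.Linear(4, 4)  # placeholder module
    optimizer = torch.optim.Adam(model.parameters(), lr=0.002)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[1, 50, 100], gamma=0.5)
    # Epoch 0: 0.002; epochs 1-49: 0.001; epochs 50-99: 0.0005; epoch 100+: 0.00025.

The milestone at epoch 1 is worth noting: it effectively trains at 0.002 for a single epoch and then settles at 0.001.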
+CFG.TRAIN.OPTIM.PARAM= { + "lr":0.002 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 100], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/StemGNN/StemGNN_PEMS04.py b/examples/StemGNN/StemGNN_PEMS04.py new file mode 100644 index 00000000..07f81e94 --- /dev/null +++ b/examples/StemGNN/StemGNN_PEMS04.py @@ -0,0 +1,107 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import StemGNN +from basicts.runners import StemGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + +"""Different from the official code, we use MAE as the loss function since they bring better performance.""" + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "StemGNN model configuration" +CFG.RUNNER = StemGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS04" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "StemGNN" +CFG.MODEL.ARCH = StemGNN +CFG.MODEL.PARAM = { + "units": 307, + "stack_cnt": 2, + "time_step": 12, + "multi_layer": 5, + "horizon": 12, + "dropout_rate": 0.5, + "leaky_rate": 0.2 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "RMSprop" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.002 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 100], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, 
str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/StemGNN/StemGNN_PEMS07.py b/examples/StemGNN/StemGNN_PEMS07.py new file mode 100644 index 00000000..2e64559c --- /dev/null +++ b/examples/StemGNN/StemGNN_PEMS07.py @@ -0,0 +1,107 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import StemGNN +from basicts.runners import StemGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + +"""Different from the official code, we use Adam as the optimizer and MAE as the loss function since they bring better performance.""" + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "StemGNN model configuration" +CFG.RUNNER = StemGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS07" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "StemGNN" +CFG.MODEL.ARCH = StemGNN +CFG.MODEL.PARAM = { + "units": 883, + "stack_cnt": 2, + "time_step": 12, + "multi_layer": 5, + "horizon": 12, + "dropout_rate": 0.5, + "leaky_rate": 0.2 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.002 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 100], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# 
================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 +CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/StemGNN/StemGNN_PEMS08.py b/examples/StemGNN/StemGNN_PEMS08.py new file mode 100644 index 00000000..0d2ebd02 --- /dev/null +++ b/examples/StemGNN/StemGNN_PEMS08.py @@ -0,0 +1,107 @@ +import os +import sys + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../..")) +from easydict import EasyDict +from basicts.archs import StemGNN +from basicts.runners import StemGNNRunner +from basicts.data import TimeSeriesForecastingDataset +from basicts.losses import masked_mae + +"""Different from the official code, we use Adam as the optimizer and MAE as the loss function since they bring better performance.""" + +CFG = EasyDict() + +# ================= general ================= # +CFG.DESCRIPTION = "StemGNN model configuration" +CFG.RUNNER = StemGNNRunner +CFG.DATASET_CLS = TimeSeriesForecastingDataset +CFG.DATASET_NAME = "PEMS08" +CFG.DATASET_TYPE = "Traffic flow" +CFG.DATASET_INPUT_LEN = 12 +CFG.DATASET_OUTPUT_LEN = 12 +CFG.GPU_NUM = 1 + +# ================= environment ================= # +CFG.ENV = EasyDict() +CFG.ENV.SEED = 1 +CFG.ENV.CUDNN = EasyDict() +CFG.ENV.CUDNN.ENABLED = True + +# ================= model ================= # +CFG.MODEL = EasyDict() +CFG.MODEL.NAME = "StemGNN" +CFG.MODEL.ARCH = StemGNN +CFG.MODEL.PARAM = { + "units": 170, + "stack_cnt": 2, + "time_step": 12, + "multi_layer": 5, + "horizon": 12, + "dropout_rate": 0.5, + "leaky_rate": 0.2 +} +CFG.MODEL.FROWARD_FEATURES = [0] +CFG.MODEL.TARGET_FEATURES = [0] + +# ================= optim ================= # +CFG.TRAIN = EasyDict() +CFG.TRAIN.LOSS = masked_mae +CFG.TRAIN.OPTIM = EasyDict() +CFG.TRAIN.OPTIM.TYPE = "Adam" +CFG.TRAIN.OPTIM.PARAM= { + "lr":0.002 +} +CFG.TRAIN.LR_SCHEDULER = EasyDict() +CFG.TRAIN.LR_SCHEDULER.TYPE = "MultiStepLR" +CFG.TRAIN.LR_SCHEDULER.PARAM = { + "milestones": [1, 50, 100], + "gamma": 0.5 +} + +# ================= train ================= # +CFG.TRAIN.NUM_EPOCHS = 200 +CFG.TRAIN.CKPT_SAVE_DIR = os.path.join( + "checkpoints", + "_".join([CFG.MODEL.NAME, str(CFG.TRAIN.NUM_EPOCHS)]) +) +# train data +CFG.TRAIN.DATA = EasyDict() +CFG.TRAIN.NULL_VAL = 0.0 +# read data +CFG.TRAIN.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TRAIN.DATA.BATCH_SIZE = 64 +CFG.TRAIN.DATA.PREFETCH = False +CFG.TRAIN.DATA.SHUFFLE = True +CFG.TRAIN.DATA.NUM_WORKERS = 2 +CFG.TRAIN.DATA.PIN_MEMORY = False + +# ================= validate ================= # +CFG.VAL = EasyDict() +CFG.VAL.INTERVAL = 1 +# validating data +CFG.VAL.DATA = EasyDict() +# read data +CFG.VAL.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.VAL.DATA.BATCH_SIZE = 64 +CFG.VAL.DATA.PREFETCH = False +CFG.VAL.DATA.SHUFFLE = False +CFG.VAL.DATA.NUM_WORKERS = 2 
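The repeated *.DATA.* keys in the train/validate/test blocks read like direct torch DataLoader keyword arguments. A plausible construction under that assumption, with a TensorDataset standing in for TimeSeriesForecastingDataset:

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    # Stand-in data: 128 PEMS08-like windows of shape (12, 170, 3).
    dataset = TensorDataset(torch.randn(128, 12, 170, 3))
    val_loader = DataLoader(dataset, batch_size=64, shuffle=False,
                            num_workers=2, pin_memory=False)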
+CFG.VAL.DATA.PIN_MEMORY = False + +# ================= test ================= # +CFG.TEST = EasyDict() +CFG.TEST.INTERVAL = 1 +# test data +CFG.TEST.DATA = EasyDict() +# read data +CFG.TEST.DATA.DIR = "datasets/" + CFG.DATASET_NAME +# dataloader args, optional +CFG.TEST.DATA.BATCH_SIZE = 64 +CFG.TEST.DATA.PREFETCH = False +CFG.TEST.DATA.SHUFFLE = False +CFG.TEST.DATA.NUM_WORKERS = 2 +CFG.TEST.DATA.PIN_MEMORY = False diff --git a/examples/run.py b/examples/run.py new file mode 100644 index 00000000..188395a8 --- /dev/null +++ b/examples/run.py @@ -0,0 +1,23 @@ +import os +import sys +from argparse import ArgumentParser + +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../..")) +from basicts import launch_training + +def parse_args(): + parser = ArgumentParser(description="Run time series forecasting model in BasicTS framework!") + # parser.add_argument("-c", "--cfg", default="examples/DGCRN/DGCRN_METR-LA.py", help="training config") + # parser.add_argument("-c", "--cfg", default="examples/STID/STID_METR-LA.py", help="training config") + # parser.add_argument("-c", "--cfg", default="examples/DCRNN/DCRNN_METR-LA.py", help="training config") + # parser.add_argument("-c", "--cfg", default="examples/GTS/GTS_PEMS03.py", help="training config") + # parser.add_argument("-c", "--cfg", default="examples/STID/STID_PEMS-BAY.py", help="training config") + parser.add_argument("-c", "--cfg", default="examples/Linear/NLinear_PEMS08.py", help="training config") + parser.add_argument("--gpus", default="0", help="visible gpus") + return parser.parse_args() + +if __name__ == "__main__": + args = parse_args() + + launch_training(args.cfg, args.gpus) diff --git a/results/result.png b/results/result.png deleted file mode 100644 index 00636e11..00000000 Binary files a/results/result.png and /dev/null differ diff --git a/results/results.png b/results/results.png new file mode 100644 index 00000000..71049485 Binary files /dev/null and b/results/results.png differ diff --git a/scripts/data_preparation/Electricity336/generate_training_data.py b/scripts/data_preparation/Electricity336/generate_training_data.py deleted file mode 100644 index 77e4deba..00000000 --- a/scripts/data_preparation/Electricity336/generate_training_data.py +++ /dev/null @@ -1,158 +0,0 @@ -import argparse -import pickle -import shutil -import numpy as np -import os - -""" -Electricity336 dataset (traffic speed dataset) default settings: - - source: - STNorm: https://github.com/JLDeng/ST-Norm - - normalization: - standard norm - - dataset division: - 6:2:2 - - windows size: - 12 - - features: - traffic speed - time in day - day in week - - target: - predicting the traffic speed -""" - -def standard_transform(data: np.array, output_dir: str, train_index: list) -> np.array: - """standard normalization. - - Args: - data (np.array): raw time series data. - output_dir (str): output dir path. - train_index (list): train index. - - Returns: - np.array: normalized raw time series data. - """ - # data: L, N, C - data_train = data[:train_index[-1][1], ...] 
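The new examples/run.py above reduces every experiment to a config path plus a visible-GPU string. The equivalent programmatic call looks like this (the config path is just one of the files added in this change; basicts must be importable, which run.py arranges via its sys.path hack):

    from basicts import launch_training

    launch_training("examples/STID/STID_PEMS04.py", "0")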
- - mean, std = data_train[..., 0].mean(), data_train[..., 0].std() - - print("mean (training data):", mean) - print("std (training data):", std) - scaler = {} - scaler['func'] = standard_re_transform.__name__ - scaler['args'] = {"mean":mean, "std":std} - pickle.dump(scaler, open(output_dir + "/scaler.pkl", 'wb')) - - def normalize(x): - return (x - mean) / std - - data_norm = normalize(data) - return data_norm - -def standard_re_transform(x, **kwargs): - mean, std = kwargs['mean'], kwargs['std'] - x = x * std - x = x + mean - return x - -def generate_data(args): - """preprocess and generate train/valid/test datasets. - - Args: - args (Namespace): args for processing data. - """ - C = args.C - future_seq_len = args.future_seq_len - history_seq_len = args.history_seq_len - add_time_in_day = True - add_day_in_week = args.dow - output_dir = args.output_dir - - # read data - data = np.loadtxt(args.data_file_path, delimiter=',') - data = np.expand_dims(data, axis=-1) - data = data[..., C] - print("Data shape: {0}".format(data.shape)) - - L, N, F = data.shape - num_samples = L - (history_seq_len + future_seq_len) + 1 - train_num_short = round(num_samples * train_ratio) - valid_num_short = round(num_samples * valid_ratio) - test_num_short = num_samples - train_num_short - valid_num_short - print("train_num_short:{0}".format(train_num_short)) - print("valid_num_short:{0}".format(valid_num_short)) - print("test_num_short:{0}".format(test_num_short)) - - index_list = [] - for t in range(history_seq_len, num_samples + history_seq_len): - index = (t-history_seq_len, t, t+future_seq_len) - index_list.append(index) - train_index = index_list[:train_num_short] - valid_index = index_list[train_num_short: train_num_short + valid_num_short] - test_index = index_list[train_num_short + valid_num_short: train_num_short + valid_num_short + test_num_short] - - scaler = standard_transform - data_norm = scaler(data, output_dir, train_index) - - # add external feature - feature_list = [data_norm] - if add_time_in_day: - # numerical time_in_day - time_ind = [i%args.steps_per_day / args.steps_per_day for i in range(data_norm.shape[0])] - time_ind = np.array(time_ind) - time_in_day = np.tile(time_ind, [1, N, 1]).transpose((2, 1, 0)) - feature_list.append(time_in_day) - if add_day_in_week: - # numerical day_in_week - day_in_week = [(i // args.steps_per_day)%7 for i in range(data_norm.shape[0])] - day_in_week = np.array(day_in_week) - day_in_week = np.tile(day_in_week, [1, N, 1]).transpose((2, 1, 0)) - feature_list.append(day_in_week) - - processed_data = np.concatenate(feature_list, axis=-1) - - # dump data - index = {} - index['train'] = train_index - index['valid'] = valid_index - index['test'] = test_index - pickle.dump(index, open(output_dir + "/index.pkl", "wb")) - - data = {} - data['processed_data'] = processed_data - pickle.dump(data, open(output_dir + "/data.pkl", "wb")) - -if __name__ == "__main__": - history_seq_len = 168 # sliding window size for generating history sequence and target sequence - future_seq_len = 12 - - train_ratio = 0.6 - valid_ratio = 0.2 - C = [0] # selected channels - steps_per_day = 12 # 60min - - name = "Electricity336" - dow = True # if add day_of_week feature - output_dir = 'datasets/' + name - data_file_path = 'datasets/raw_data/{0}/{1}.csv'.format(name, name) - - parser = argparse.ArgumentParser() - parser.add_argument("--output_dir", type=str, default=output_dir, help="Output directory.") - parser.add_argument("--data_file_path", type=str, default=data_file_path, help="Raw traffic 
readings.") - parser.add_argument("--history_seq_len", type=int, default=history_seq_len, help="Sequence Length.") - parser.add_argument("--future_seq_len", type=int, default=future_seq_len, help="Sequence Length.") - parser.add_argument("--steps_per_day", type=int, default=steps_per_day, help="Sequence Length.") - parser.add_argument("--dow", type=bool, default=dow, help='Add feature day_of_week.') - parser.add_argument("--C", type=list, default=C, help='Selected channels.') - parser.add_argument("--train_ratio", type=float, default=train_ratio, help='Train ratio') - parser.add_argument("--valid_ratio", type=float, default=valid_ratio, help='Validate ratio.') - - args = parser.parse_args() - if os.path.exists(args.output_dir): - reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? (y/n)')).lower().strip() - if reply[0] != 'y': exit - else: - os.makedirs(args.output_dir) - generate_data(args) diff --git a/scripts/data_preparation/METR-LA/generate_training_data.py b/scripts/data_preparation/METR-LA/generate_training_data.py index 8e0f953f..d44d69f9 100644 --- a/scripts/data_preparation/METR-LA/generate_training_data.py +++ b/scripts/data_preparation/METR-LA/generate_training_data.py @@ -1,89 +1,56 @@ -import argparse -import pickle +import os +import sys import shutil +import pickle +import argparse + import numpy as np -import os import pandas as pd -""" -METR-LA dataset (traffic speed dataset) default settings: - - normalization: - standard norm - - dataset division: - 7:1:2 - - windows size: - 12 - - features: - traffic speed - time in day - day in week - - target: - predicting the traffic speed -""" - -def standard_transform(data: np.array, output_dir: str, train_index: list) -> np.array: - """standard normalization. +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../../..")) +from basicts.data.transform import standard_transform - Args: - data (np.array): raw time series data. - output_dir (str): output dir path. - train_index (list): train index. - Returns: - np.array: normalized raw time series data. - """ - # data: L, N, C - data_train = data[:train_index[-1][1], ...] - - mean, std = data_train[..., 0].mean(), data_train[..., 0].std() - - print("mean (training data):", mean) - print("std (training data):", std) - scaler = {} - scaler['func'] = standard_re_transform.__name__ - scaler['args'] = {"mean":mean, "std":std} - pickle.dump(scaler, open(output_dir + "/scaler.pkl", 'wb')) - - def normalize(x): - return (x - mean) / std - - data_norm = normalize(data) - return data_norm - -def standard_re_transform(x, **kwargs): - mean, std = kwargs['mean'], kwargs['std'] - x = x * std - x = x + mean - return x - -def generate_data(args): - """preprocess and generate train/valid/test datasets. +def generate_data(args: argparse.Namespace): + """Preprocess and generate train/valid/test datasets. + Default settings of METR-LA dataset: + - Normalization method: standard norm. + - Dataset division: 7:1:2. + - Window size: history 12, future 12. + - Channels (features): three channels [traffic speed, time of day, day of week] + - Target: predict the traffic speed of the future 12 time steps. Args: - args (Namespace): args for processing data. 
+ args (argparse): configurations of preprocessing """ - C = args.C - future_seq_len = args.future_seq_len + + target_channel = args.target_channel + future_seq_len = args.future_seq_len history_seq_len = args.history_seq_len - add_time_in_day = True - add_day_in_week = args.dow - output_dir = args.output_dir + add_time_of_day = args.tod + add_day_of_week = args.dow + output_dir = args.output_dir + train_ratio = args.train_ratio + valid_ratio = args.valid_ratio + data_file_path = args.data_file_path + graph_file_path = args.graph_file_path # read data - df = pd.read_hdf(args.data_file_path) + df = pd.read_hdf(data_file_path) data = np.expand_dims(df.values, axis=-1) - data = data[..., C] - print("Data shape: {0}".format(data.shape)) + data = data[..., target_channel] + print("raw time series shape: {0}".format(data.shape)) - L, N, F = data.shape - num_samples = L - (history_seq_len + future_seq_len) + 1 + l, n, f = data.shape + num_samples = l - (history_seq_len + future_seq_len) + 1 train_num_short = round(num_samples * train_ratio) valid_num_short = round(num_samples * valid_ratio) - test_num_short = num_samples - train_num_short - valid_num_short - print("train_num_short:{0}".format(train_num_short)) - print("valid_num_short:{0}".format(valid_num_short)) - print("test_num_short:{0}".format(test_num_short)) + test_num_short = num_samples - train_num_short - valid_num_short + print("number of training samples:{0}".format(train_num_short)) + print("number of validation samples:{0}".format(valid_num_short)) + print("number of test samples:{0}".format(test_num_short)) index_list = [] for t in range(history_seq_len, num_samples + history_seq_len): @@ -92,69 +59,95 @@ def generate_data(args): train_index = index_list[:train_num_short] valid_index = index_list[train_num_short: train_num_short + valid_num_short] - test_index = index_list[train_num_short + valid_num_short: train_num_short + valid_num_short + test_num_short] - - scaler = standard_transform - data_norm = scaler(data, output_dir, train_index) + test_index = index_list[train_num_short + + valid_num_short: train_num_short + valid_num_short + test_num_short] + + scaler = standard_transform + data_norm = scaler(data, output_dir, train_index) # add external feature feature_list = [data_norm] - if add_time_in_day: - # numerical time_in_day - time_ind = (df.index.values - df.index.values.astype("datetime64[D]")) / np.timedelta64(1, "D") - time_in_day = np.tile(time_ind, [1, N, 1]).transpose((2, 1, 0)) - feature_list.append(time_in_day) - - if add_day_in_week: - # numerical day_in_week + if add_time_of_day: + # numerical time_of_day + tod = ( + df.index.values - df.index.values.astype("datetime64[D]")) / np.timedelta64(1, "D") + tod_tiled = np.tile(tod, [1, n, 1]).transpose((2, 1, 0)) + feature_list.append(tod_tiled) + + if add_day_of_week: + # numerical day_of_week dow = df.index.dayofweek - dow_tiled = np.tile(dow, [1, N, 1]).transpose((2, 1, 0)) + dow_tiled = np.tile(dow, [1, n, 1]).transpose((2, 1, 0)) feature_list.append(dow_tiled) processed_data = np.concatenate(feature_list, axis=-1) # dump data index = {} - index['train'] = train_index - index['valid'] = valid_index - index['test'] = test_index - pickle.dump(index, open(output_dir + "/index.pkl", "wb")) + index["train"] = train_index + index["valid"] = valid_index + index["test"] = test_index + with open(output_dir + "/index_in{0}_out{1}.pkl".format(history_seq_len, future_seq_len), "wb") as f: + pickle.dump(index, f) data = {} - data['processed_data'] = processed_data - 
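Each entry of index_list built by these scripts is a (start, middle, end) triple; presumably the dataset class slices history as [start:middle] and the prediction target as [middle:end]. A small worked example of that convention (the slicing itself is an inference from the tuple layout, not code quoted from basicts):

    import numpy as np

    history_seq_len, future_seq_len = 12, 12
    processed_data = np.random.rand(100, 207, 3)  # (L, N, C), METR-LA-like

    start, middle, end = 0, history_seq_len, history_seq_len + future_seq_len
    history = processed_data[start:middle]   # (12, 207, 3): model input
    target = processed_data[middle:end]      # (12, 207, 3): forecasting target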
pickle.dump(data, open(output_dir + "/data.pkl", "wb")) + data["processed_data"] = processed_data + with open(output_dir + "/data.pkl", "wb") as f: + pickle.dump(data, f) # copy adj - shutil.copyfile(args.graph_file_path, output_dir + '/adj_mx.pkl') # copy models + shutil.copyfile(graph_file_path, output_dir + "/adj_mx.pkl") + if __name__ == "__main__": - history_seq_len = 12 # sliding window size for generating history sequence and target sequence - future_seq_len = 12 - - train_ratio = 0.7 - valid_ratio = 0.1 - C = [0] # selected channels - - name = "METR-LA" - dow = True # if add day_of_week feature - output_dir = 'datasets/' + name - data_file_path = 'datasets/raw_data/{0}/{1}.h5'.format(name, name) - graph_file_path = 'datasets/raw_data/{0}/adj_{1}.pkl'.format(name, name) - - parser = argparse.ArgumentParser() - parser.add_argument("--output_dir", type=str, default=output_dir, help="Output directory.") - parser.add_argument("--data_file_path", type=str, default=data_file_path, help="Raw traffic readings.") - parser.add_argument("--graph_file_path", type=str, default=graph_file_path, help="Raw traffic readings.") - parser.add_argument("--history_seq_len", type=int, default=history_seq_len, help="Sequence Length.") - parser.add_argument("--future_seq_len", type=int, default=future_seq_len, help="Sequence Length.") - parser.add_argument("--dow", type=bool, default=dow, help='Add feature day_of_week.') - parser.add_argument("--C", type=list, default=C, help='Selected channels.') - parser.add_argument("--train_ratio", type=float, default=train_ratio, help='Train ratio') - parser.add_argument("--valid_ratio", type=float, default=valid_ratio, help='Validate ratio.') - - args = parser.parse_args() - if os.path.exists(args.output_dir): - reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? 
(y/n)')).lower().strip() - if reply[0] != 'y': exit + # sliding window size for generating history sequence and target sequence + HISTORY_SEQ_LEN = 12 + FUTURE_SEQ_LEN = 12 + + TRAIN_RATIO = 0.7 + VALID_RATIO = 0.1 + TARGET_CHANNEL = [0] # target channel(s) + + DATASET_NAME = "METR-LA" + TOD = True # if add time_of_day feature + DOW = True # if add day_of_week feature + OUTPUT_DIR = "datasets/" + DATASET_NAME + DATA_FILE_PATH = "datasets/raw_data/{0}/{0}.h5".format(DATASET_NAME) + GRAPH_FILE_PATH = "datasets/raw_data/{0}/adj_{0}.pkl".format(DATASET_NAME) + + parser = argparse.ArgumentParser() + parser.add_argument("--output_dir", type=str, + default=OUTPUT_DIR, help="Output directory.") + parser.add_argument("--data_file_path", type=str, + default=DATA_FILE_PATH, help="Raw traffic readings.") + parser.add_argument("--graph_file_path", type=str, + default=GRAPH_FILE_PATH, help="Raw traffic readings.") + parser.add_argument("--history_seq_len", type=int, + default=HISTORY_SEQ_LEN, help="Sequence Length.") + parser.add_argument("--future_seq_len", type=int, + default=FUTURE_SEQ_LEN, help="Sequence Length.") + parser.add_argument("--tod", type=bool, default=TOD, + help="Add feature time_of_day.") + parser.add_argument("--dow", type=bool, default=DOW, + help="Add feature day_of_week.") + parser.add_argument("--target_channel", type=list, + default=TARGET_CHANNEL, help="Selected channels.") + parser.add_argument("--train_ratio", type=float, + default=TRAIN_RATIO, help="Train ratio") + parser.add_argument("--valid_ratio", type=float, + default=VALID_RATIO, help="Validate ratio.") + args_metr = parser.parse_args() + + # print args + print("-"*(20+45+5)) + for key, value in sorted(vars(args_metr).items()): + print("|{0:>20} = {1:<45}|".format(key, str(value))) + print("-"*(20+45+5)) + + if os.path.exists(args_metr.output_dir): + reply = str(input( + f"{args_metr.output_dir} exists. Do you want to overwrite it? (y/n)")).lower().strip() + if reply[0] != "y": + sys.exit(0) else: - os.makedirs(args.output_dir) - generate_data(args) + os.makedirs(args_metr.output_dir) + generate_data(args_metr) diff --git a/scripts/data_preparation/PEMS-BAY/generate_training_data.py b/scripts/data_preparation/PEMS-BAY/generate_training_data.py index e41234c9..789db414 100644 --- a/scripts/data_preparation/PEMS-BAY/generate_training_data.py +++ b/scripts/data_preparation/PEMS-BAY/generate_training_data.py @@ -1,89 +1,56 @@ -import argparse -import pickle +import os +import sys import shutil +import pickle +import argparse + import numpy as np -import os import pandas as pd -""" -PEMS-BAY dataset (traffic speed dataset) default settings: - - normalization: - standard norm - - dataset division: - 7:1:2 - - windows size: - 12 - - features: - traffic speed - time in day - day in week - - target: - predicting the traffic speed -""" - -def standard_transform(data: np.array, output_dir: str, train_index: list) -> np.array: - """standard normalization. +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../../..")) +from basicts.data.transform import standard_transform - Args: - data (np.array): raw time series data. - output_dir (str): output dir path. - train_index (list): train index. - Returns: - np.array: normalized raw time series data. - """ - # data: L, N, C - data_train = data[:train_index[-1][1], ...] 
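# A minimal sketch of the normalization contract shared by these scripts: the
# per-script standard_transform/standard_re_transform pair removed here moves
# into basicts.data.transform. The helper names below (fit_standard_scaler,
# apply_scaler, invert_scaler) are illustrative stand-ins, not the basicts API;
# only the fit-on-the-training-slice mean/std behaviour is taken from the
# removed code.
import numpy as np

def fit_standard_scaler(data: np.ndarray, train_len: int) -> dict:
    # statistics come from the training slice only (data[:train_index[-1][1]]),
    # so no validation/test information leaks into the normalization
    mean = data[:train_len, ..., 0].mean()
    std = data[:train_len, ..., 0].std()
    return {"mean": mean, "std": std}

def apply_scaler(x: np.ndarray, scaler: dict) -> np.ndarray:
    return (x - scaler["mean"]) / scaler["std"]

def invert_scaler(x: np.ndarray, scaler: dict) -> np.ndarray:
    # mirrors the removed standard_re_transform: x * std + mean
    return x * scaler["std"] + scaler["mean"]

# round-trip sanity check on toy (L, N, C) data
toy = np.random.rand(100, 3, 1)
scaler = fit_standard_scaler(toy, train_len=70)
assert np.allclose(invert_scaler(apply_scaler(toy, scaler), scaler), toy)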
- - mean, std = data_train[..., 0].mean(), data_train[..., 0].std() - - print("mean (training data):", mean) - print("std (training data):", std) - scaler = {} - scaler['func'] = standard_re_transform.__name__ - scaler['args'] = {"mean":mean, "std":std} - pickle.dump(scaler, open(output_dir + "/scaler.pkl", 'wb')) - - def normalize(x): - return (x - mean) / std - - data_norm = normalize(data) - return data_norm - -def standard_re_transform(x, **kwargs): - mean, std = kwargs['mean'], kwargs['std'] - x = x * std - x = x + mean - return x - -def generate_data(args): - """preprocess and generate train/valid/test datasets. +def generate_data(args: argparse.Namespace): + """Preprocess and generate train/valid/test datasets. + Default settings of PEMS-BAY dataset: + - Normalization method: standard norm. + - Dataset division: 7:1:2. + - Window size: history 12, future 12. + - Channels (features): three channels [traffic speed, time of day, day of week] + - Target: predict the traffic speed of the future 12 time steps. Args: - args (Namespace): args for processing data. + args (argparse): configurations of preprocessing """ - C = args.C - future_seq_len = args.future_seq_len + + target_channel = args.target_channel + future_seq_len = args.future_seq_len history_seq_len = args.history_seq_len - add_time_in_day = True - add_day_in_week = args.dow - output_dir = args.output_dir + add_time_of_day = args.tod + add_day_of_week = args.dow + output_dir = args.output_dir + train_ratio = args.train_ratio + valid_ratio = args.valid_ratio + data_file_path = args.data_file_path + graph_file_path = args.graph_file_path # read data - df = pd.read_hdf(args.data_file_path) + df = pd.read_hdf(data_file_path) data = np.expand_dims(df.values, axis=-1) - data = data[..., C] - print("Data shape: {0}".format(data.shape)) + data = data[..., target_channel] + print("raw time series shape: {0}".format(data.shape)) - L, N, F = data.shape - num_samples = L - (history_seq_len + future_seq_len) + 1 + l, n, f = data.shape + num_samples = l - (history_seq_len + future_seq_len) + 1 train_num_short = round(num_samples * train_ratio) valid_num_short = round(num_samples * valid_ratio) - test_num_short = num_samples - train_num_short - valid_num_short - print("train_num_short:{0}".format(train_num_short)) - print("valid_num_short:{0}".format(valid_num_short)) - print("test_num_short:{0}".format(test_num_short)) + test_num_short = num_samples - train_num_short - valid_num_short + print("number of training samples:{0}".format(train_num_short)) + print("number of validation samples:{0}".format(valid_num_short)) + print("number of test samples:{0}".format(test_num_short)) index_list = [] for t in range(history_seq_len, num_samples + history_seq_len): @@ -92,69 +59,95 @@ def generate_data(args): train_index = index_list[:train_num_short] valid_index = index_list[train_num_short: train_num_short + valid_num_short] - test_index = index_list[train_num_short + valid_num_short: train_num_short + valid_num_short + test_num_short] - - scaler = standard_transform - data_norm = scaler(data, output_dir, train_index) + test_index = index_list[train_num_short + + valid_num_short: train_num_short + valid_num_short + test_num_short] + + scaler = standard_transform + data_norm = scaler(data, output_dir, train_index) # add external feature feature_list = [data_norm] - if add_time_in_day: - # numerical time_in_day - time_ind = (df.index.values - df.index.values.astype("datetime64[D]")) / np.timedelta64(1, "D") - time_in_day = np.tile(time_ind, [1, N, 
1]).transpose((2, 1, 0)) - feature_list.append(time_in_day) - - if add_day_in_week: - # numerical day_in_week + if add_time_of_day: + # numerical time_of_day + tod = ( + df.index.values - df.index.values.astype("datetime64[D]")) / np.timedelta64(1, "D") + tod_tiled = np.tile(tod, [1, n, 1]).transpose((2, 1, 0)) + feature_list.append(tod_tiled) + + if add_day_of_week: + # numerical day_of_week dow = df.index.dayofweek - dow_tiled = np.tile(dow, [1, N, 1]).transpose((2, 1, 0)) + dow_tiled = np.tile(dow, [1, n, 1]).transpose((2, 1, 0)) feature_list.append(dow_tiled) processed_data = np.concatenate(feature_list, axis=-1) # dump data index = {} - index['train'] = train_index - index['valid'] = valid_index - index['test'] = test_index - pickle.dump(index, open(output_dir + "/index.pkl", "wb")) + index["train"] = train_index + index["valid"] = valid_index + index["test"] = test_index + with open(output_dir + "/index_in{0}_out{1}.pkl".format(history_seq_len, future_seq_len), "wb") as f: + pickle.dump(index, f) data = {} - data['processed_data'] = processed_data - pickle.dump(data, open(output_dir + "/data.pkl", "wb")) + data["processed_data"] = processed_data + with open(output_dir + "/data.pkl", "wb") as f: + pickle.dump(data, f) # copy adj - shutil.copyfile(args.graph_file_path, output_dir + '/adj_mx.pkl') # copy models + shutil.copyfile(graph_file_path, output_dir + "/adj_mx.pkl") + if __name__ == "__main__": - history_seq_len = 12 # sliding window size for generating history sequence and target sequence - future_seq_len = 12 - - train_ratio = 0.7 - valid_ratio = 0.1 - C = [0] # selected channels - - name = "PEMS-BAY" - dow = True # if add day_of_week feature - output_dir = 'datasets/' + name - data_file_path = 'datasets/raw_data/{0}/{1}.h5'.format(name, name) - graph_file_path = 'datasets/raw_data/{0}/adj_{1}.pkl'.format(name, name) - - parser = argparse.ArgumentParser() - parser.add_argument("--output_dir", type=str, default=output_dir, help="Output directory.") - parser.add_argument("--data_file_path", type=str, default=data_file_path, help="Raw traffic readings.") - parser.add_argument("--graph_file_path", type=str, default=graph_file_path, help="Raw traffic readings.") - parser.add_argument("--history_seq_len", type=int, default=history_seq_len, help="Sequence Length.") - parser.add_argument("--future_seq_len", type=int, default=future_seq_len, help="Sequence Length.") - parser.add_argument("--dow", type=bool, default=dow, help='Add feature day_of_week.') - parser.add_argument("--C", type=list, default=C, help='Selected channels.') - parser.add_argument("--train_ratio", type=float, default=train_ratio, help='Train ratio') - parser.add_argument("--valid_ratio", type=float, default=valid_ratio, help='Validate ratio.') - - args = parser.parse_args() - if os.path.exists(args.output_dir): - reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? 
(y/n)')).lower().strip() - if reply[0] != 'y': exit + # sliding window size for generating history sequence and target sequence + HISTORY_SEQ_LEN = 12 + FUTURE_SEQ_LEN = 12 + + TRAIN_RATIO = 0.7 + VALID_RATIO = 0.1 + TARGET_CHANNEL = [0] # target channel(s) + + DATASET_NAME = "PEMS-BAY" + TOD = True # if add time_of_day feature + DOW = True # if add day_of_week feature + OUTPUT_DIR = "datasets/" + DATASET_NAME + DATA_FILE_PATH = "datasets/raw_data/{0}/{0}.h5".format(DATASET_NAME) + GRAPH_FILE_PATH = "datasets/raw_data/{0}/adj_{0}.pkl".format(DATASET_NAME) + + parser = argparse.ArgumentParser() + parser.add_argument("--output_dir", type=str, + default=OUTPUT_DIR, help="Output directory.") + parser.add_argument("--data_file_path", type=str, + default=DATA_FILE_PATH, help="Raw traffic readings.") + parser.add_argument("--graph_file_path", type=str, + default=GRAPH_FILE_PATH, help="Raw traffic readings.") + parser.add_argument("--history_seq_len", type=int, + default=HISTORY_SEQ_LEN, help="Sequence Length.") + parser.add_argument("--future_seq_len", type=int, + default=FUTURE_SEQ_LEN, help="Sequence Length.") + parser.add_argument("--tod", type=bool, default=TOD, + help="Add feature time_of_day.") + parser.add_argument("--dow", type=bool, default=DOW, + help="Add feature day_of_week.") + parser.add_argument("--target_channel", type=list, + default=TARGET_CHANNEL, help="Selected channels.") + parser.add_argument("--train_ratio", type=float, + default=TRAIN_RATIO, help="Train ratio") + parser.add_argument("--valid_ratio", type=float, + default=VALID_RATIO, help="Validate ratio.") + args_metr = parser.parse_args() + + # print args + print("-"*(20+45+5)) + for key, value in sorted(vars(args_metr).items()): + print("|{0:>20} = {1:<45}|".format(key, str(value))) + print("-"*(20+45+5)) + + if os.path.exists(args_metr.output_dir): + reply = str(input( + f"{args_metr.output_dir} exists. Do you want to overwrite it? 
(y/n)")).lower().strip() + if reply[0] != "y": + sys.exit(0) else: - os.makedirs(args.output_dir) - generate_data(args) + os.makedirs(args_metr.output_dir) + generate_data(args_metr) diff --git a/scripts/data_preparation/PEMS03/generate_adj_mx.py b/scripts/data_preparation/PEMS03/generate_adj_mx.py index 88c07932..33c6ab2e 100644 --- a/scripts/data_preparation/PEMS03/generate_adj_mx.py +++ b/scripts/data_preparation/PEMS03/generate_adj_mx.py @@ -1,170 +1,84 @@ import os -import numpy as np import csv import pickle -def get_adjacency_matrix(distance_df_filename, num_of_vertices, id_filename=None): - ''' - Parameters - ---------- - distance_df_filename: str, path of the csv file contains edges information - - num_of_vertices: int, the number of vertices - - Returns - ---------- - A: np.ndarray, adjacency matrix - - ''' - if 'npy' in distance_df_filename: - adj_mx = np.load(distance_df_filename) - return adj_mx, None - else: - A = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序 - if id_filename: - with open(id_filename, 'r') as f: - id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))} # 把节点id(idx)映射成从0开始的索引 - with open(distance_df_filename, 'r') as f: - f.readline() # 略过表头那一行 - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[id_dict[i], id_dict[j]] = 1 - distaneA[id_dict[i], id_dict[j]] = distance - return A, distaneA - else: # distance file中的id直接从0开始 - with open(distance_df_filename, 'r') as f: - f.readline() - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[i, j] = 1 - distaneA[i, j] = distance - return A, distaneA - -def get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, id_filename=None): - ''' - Parameters - ---------- - distance_df_filename: str, path of the csv file contains edges information - num_of_vertices: int, the number of vertices - - Returns - ---------- - A: np.ndarray, adjacency matrix - ''' - if 'npy' in distance_df_filename: - adj_mx = np.load(distance_df_filename) - return adj_mx, None - else: - A = np.zeros((int(num_of_vertices), int(num_of_vertices)), dtype=np.float32) - distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序 - if id_filename: - with open(id_filename, 'r') as f: - id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))} # 把节点id(idx)映射成从0开始的索引 - with open(distance_df_filename, 'r') as f: - f.readline() # 略过表头那一行 - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[id_dict[i], id_dict[j]] = 1 - A[id_dict[j], id_dict[i]] = 1 - distaneA[id_dict[i], id_dict[j]] = distance - distaneA[id_dict[j], id_dict[i]] = distance - return A, distaneA - else: # distance file中的id直接从0开始 - with open(distance_df_filename, 'r') as f: - f.readline() - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[i, j] = 1 - A[j, i] = 1 - distaneA[i, j] = distance - distaneA[j, i] = distance - return A, distaneA +import numpy as np -def get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, 
id_filename=None): - ''' - Parameters - ---------- - distance_df_filename: str, path of the csv file contains edges information +def get_adjacency_matrix(distance_df_filename: str, num_of_vertices: int, id_filename: str = None) -> tuple: + """Generate adjacency matrix. - num_of_vertices: int, the number of vertices + Args: + distance_df_filename (str): path of the csv file contains edges information + num_of_vertices (int): number of vertices + id_filename (str, optional): id filename. Defaults to None. - Returns - ---------- - A: np.ndarray, adjacency matrix + Returns: + tuple: two adjacency matrix. + np.array: connectivity-based adjacency matrix A (A[i, j]=0 or A[i, j]=1) + np.array: distance-based adjacency matrix A + """ - ''' - if 'npy' in distance_df_filename: + if "npy" in distance_df_filename: adj_mx = np.load(distance_df_filename) return adj_mx, None else: - A = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序 + adjacency_matrix_connectivity = np.zeros((int(num_of_vertices), int( + num_of_vertices)), dtype=np.float32) + adjacency_matrix_distance = np.zeros((int(num_of_vertices), int(num_of_vertices)), + dtype=np.float32) if id_filename: - with open(id_filename, 'r') as f: - id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))} # 把节点id(idx)映射成从0开始的索引 - with open(distance_df_filename, 'r') as f: - f.readline() # 略过表头那一行 + # the id in the distance file does not start from 0, so it needs to be remapped + with open(id_filename, "r") as f: + id_dict = {int(i): idx for idx, i in enumerate( + f.read().strip().split("\n"))} # map node idx to 0-based index (start from 0) + with open(distance_df_filename, "r") as f: + f.readline() # omit the first line reader = csv.reader(f) for row in reader: if len(row) != 3: continue i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[id_dict[i], id_dict[j]] = 1 - A[id_dict[j], id_dict[i]] = 1 - distaneA[id_dict[i], id_dict[j]] = distance - distaneA[id_dict[j], id_dict[i]] = distance - return A, distaneA - else: # distance file中的id直接从0开始 - with open(distance_df_filename, 'r') as f: + adjacency_matrix_connectivity[id_dict[i], id_dict[j]] = 1 + adjacency_matrix_connectivity[id_dict[j], id_dict[i]] = 1 + adjacency_matrix_distance[id_dict[i], + id_dict[j]] = distance + adjacency_matrix_distance[id_dict[j], + id_dict[i]] = distance + return adjacency_matrix_connectivity, adjacency_matrix_distance + else: + # ids in distance file start from 0 + with open(distance_df_filename, "r") as f: f.readline() reader = csv.reader(f) for row in reader: if len(row) != 3: continue i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[i, j] = 1 - A[j, i] = 1 - distaneA[i, j] = distance - distaneA[j, i] = distance - return A, distaneA + adjacency_matrix_connectivity[i, j] = 1 + adjacency_matrix_connectivity[j, i] = 1 + adjacency_matrix_distance[i, j] = distance + adjacency_matrix_distance[j, i] = distance + return adjacency_matrix_connectivity, adjacency_matrix_distance + -def generate_adj_PEMS03(): - direction = True +def generate_adj_pems03(): distance_df_filename, num_of_vertices = "datasets/raw_data/PEMS03/PEMS03.csv", 358 - if os.path.exists(distance_df_filename.split(".")[0] + ".txt"): - id_filename = distance_df_filename.split(".")[0] + ".txt" + if os.path.exists(distance_df_filename.split(".", maxsplit=1)[0] + ".txt"): + id_filename = 
distance_df_filename.split(".", maxsplit=1)[0] + ".txt" else: id_filename = None - if direction: - adj_mx, distance_mx = get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, id_filename=id_filename) - else: - adj_mx, distance_mx = get_adjacency_matrix(distance_df_filename, num_of_vertices, id_filename=id_filename) - # TODO: the self loop is missing + adj_mx, distance_mx = get_adjacency_matrix( + distance_df_filename, num_of_vertices, id_filename=id_filename) + # the self loop is missing add_self_loop = False if add_self_loop: + print("adding self loop to adjacency matrices.") adj_mx = adj_mx + np.identity(adj_mx.shape[0]) distance_mx = distance_mx + np.identity(distance_mx.shape[0]) - pickle.dump(adj_mx, open("datasets/raw_data/PEMS03/adj_PEMS03.pkl", 'wb')) - pickle.dump(distance_mx, open("datasets/raw_data/PEMS03/adj_PEMS03_distance.pkl", 'wb')) + else: + print("kindly note that there is no self loop in adjacency matrices.") + with open("datasets/raw_data/PEMS03/adj_PEMS03.pkl", "wb") as f: + pickle.dump(adj_mx, f) + with open("datasets/raw_data/PEMS03/adj_PEMS03_distance.pkl", "wb") as f: + pickle.dump(distance_mx, f) diff --git a/scripts/data_preparation/PEMS03/generate_training_data.py b/scripts/data_preparation/PEMS03/generate_training_data.py index 4bc38b64..f02e5954 100644 --- a/scripts/data_preparation/PEMS03/generate_training_data.py +++ b/scripts/data_preparation/PEMS03/generate_training_data.py @@ -1,169 +1,163 @@ -import argparse -import pickle +import os +import sys import shutil +import pickle +import argparse + import numpy as np -import os -from generate_adj_mx import generate_adj_PEMS03 - -""" -PEMS03 dataset (traffic flow dataset) default settings: - - sampling frequency: - 5min - - normalization: - standard norm - - dataset division: - 6:2:2 - - windows size: - 12 - - features: - traffic flow - --traffic occupy--(not used) - --traffic speed--(not used) - time in day - day in week - - target: - predicting the traffic speed -""" - -def standard_transform(data: np.array, output_dir: str, train_index: list) -> np.array: - """standard normalization. +from generate_adj_mx import generate_adj_pems03 +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../../..")) +from basicts.data.transform import standard_transform - Args: - data (np.array): raw time series data. - output_dir (str): output dir path. - train_index (list): train index. - Returns: - np.array: normalized raw time series data. - """ - # data: L, N, C - data_train = data[:train_index[-1][1], ...] - - mean, std = data_train[..., 0].mean(), data_train[..., 0].std() - - print("mean (training data):", mean) - print("std (training data):", std) - scaler = {} - scaler['func'] = standard_re_transform.__name__ - scaler['args'] = {"mean":mean, "std":std} - pickle.dump(scaler, open(output_dir + "/scaler.pkl", 'wb')) - - def normalize(x): - return (x - mean) / std - - data_norm = normalize(data) - return data_norm - -def standard_re_transform(x, **kwargs): - mean, std = kwargs['mean'], kwargs['std'] - x = x * std - x = x + mean - return x - -def generate_data(args): - """preprocess and generate train/valid/test datasets. +def generate_data(args: argparse.Namespace): + """Preprocess and generate train/valid/test datasets. + Default settings of PEMS03 dataset: + - Normalization method: standard norm. + - Dataset division: 6:2:2. + - Window size: history 12, future 12. 
+        - Channels (features): three channels [traffic flow, time of day, day of week]
+        - Target: predict the traffic flow of the future 12 time steps.

     Args:
-        args (Namespace): args for processing data.
+        args (argparse): configurations of preprocessing
     """
-    C = args.C
-    future_seq_len = args.future_seq_len
+
+    target_channel = args.target_channel
+    future_seq_len = args.future_seq_len
     history_seq_len = args.history_seq_len
-    add_time_in_day = True
-    add_day_in_week = args.dow
+    add_time_of_day = args.tod
+    add_day_of_week = args.dow
     output_dir = args.output_dir
+    train_ratio = args.train_ratio
+    valid_ratio = args.valid_ratio
+    data_file_path = args.data_file_path
+    graph_file_path = args.graph_file_path
+    steps_per_day = args.steps_per_day

     # read data
-    data = np.load(args.data_file_path)['data']
-    data = data[..., C]
-    print("Data shape: {0}".format(data.shape))
+    data = np.load(data_file_path)["data"]
+    data = data[..., target_channel]
+    print("raw time series shape: {0}".format(data.shape))

-    L, N, F = data.shape
-    num_samples = L - (history_seq_len + future_seq_len) + 1
+    l, n, f = data.shape
+    num_samples = l - (history_seq_len + future_seq_len) + 1
     train_num_short = round(num_samples * train_ratio)
     valid_num_short = round(num_samples * valid_ratio)
-    test_num_short = num_samples - train_num_short - valid_num_short
-    print("train_num_short:{0}".format(train_num_short))
-    print("valid_num_short:{0}".format(valid_num_short))
-    print("test_num_short:{0}".format(test_num_short))
+    test_num_short = num_samples - train_num_short - valid_num_short
+    print("number of training samples:{0}".format(train_num_short))
+    print("number of validation samples:{0}".format(valid_num_short))
+    print("number of test samples:{0}".format(test_num_short))

-    index_list = []
+    index_list = []
     for t in range(history_seq_len, num_samples + history_seq_len):
         index = (t-history_seq_len, t, t+future_seq_len)
         index_list.append(index)
+
     train_index = index_list[:train_num_short]
     valid_index = index_list[train_num_short: train_num_short + valid_num_short]
-    test_index = index_list[train_num_short + valid_num_short: train_num_short + valid_num_short + test_num_short]
-
+    test_index = index_list[train_num_short +
+                            valid_num_short: train_num_short + valid_num_short + test_num_short]
+
     scaler = standard_transform
     data_norm = scaler(data, output_dir, train_index)

     # add external feature
     feature_list = [data_norm]
-    if add_time_in_day:
-        # numerical time_in_day
-        time_ind = [i%args.steps_per_day / args.steps_per_day for i in range(data_norm.shape[0])]
-        time_ind = np.array(time_ind)
-        time_in_day = np.tile(time_ind, [1, N, 1]).transpose((2, 1, 0))
-        feature_list.append(time_in_day)
-    if add_day_in_week:
-        # numerical day_in_week
-        day_in_week = [(i // args.steps_per_day)%7 for i in range(data_norm.shape[0])]
-        day_in_week = np.array(day_in_week)
-        day_in_week = np.tile(day_in_week, [1, N, 1]).transpose((2, 1, 0))
-        feature_list.append(day_in_week)
+    if add_time_of_day:
+        # numerical time_of_day
+        tod = [i % steps_per_day /
+               steps_per_day for i in range(data_norm.shape[0])]
+        tod = np.array(tod)
+        tod_tiled = np.tile(tod, [1, n, 1]).transpose((2, 1, 0))
+        feature_list.append(tod_tiled)
+
+    if add_day_of_week:
+        # numerical day_of_week
+        dow = [(i // steps_per_day) % 7 for i in range(data_norm.shape[0])]
+        dow = np.array(dow)
+        dow_tiled = np.tile(dow, [1, n, 1]).transpose((2, 1, 0))
+        feature_list.append(dow_tiled)

     processed_data = np.concatenate(feature_list, axis=-1)

     # dump data
     index = {}
-    index['train'] = train_index
-
index['valid'] = valid_index - index['test'] = test_index - pickle.dump(index, open(output_dir + "/index.pkl", "wb")) + index["train"] = train_index + index["valid"] = valid_index + index["test"] = test_index + with open(output_dir + "/index_in{0}_out{1}.pkl".format(history_seq_len, future_seq_len), "wb") as f: + pickle.dump(index, f) data = {} - data['processed_data'] = processed_data - pickle.dump(data, open(output_dir + "/data.pkl", "wb")) + data["processed_data"] = processed_data + with open(output_dir + "/data.pkl", "wb") as f: + pickle.dump(data, f) # copy adj - if os.path.exists(args.graph_file_path): - shutil.copyfile(args.graph_file_path, output_dir + '/adj_mx.pkl') # copy models + if os.path.exists(graph_file_path): + # copy + shutil.copyfile(graph_file_path, output_dir + "/adj_mx.pkl") else: - generate_adj_PEMS03() - shutil.copyfile(args.graph_file_path, output_dir + '/adj_mx.pkl') # copy models + # generate and copy + generate_adj_pems03() + shutil.copyfile(graph_file_path, output_dir + "/adj_mx.pkl") + if __name__ == "__main__": - history_seq_len = 12 # sliding window size for generating history sequence and target sequence - future_seq_len = 12 - - train_ratio = 0.6 - valid_ratio = 0.2 - C = [0] # selected channels - steps_per_day = 288 # 5min - - name = "PEMS03" - dow = True # if add day_of_week feature - output_dir = 'datasets/' + name - data_file_path = 'datasets/raw_data/{0}/{1}.npz'.format(name, name) - graph_file_path = 'datasets/raw_data/{0}/adj_{1}.pkl'.format(name, name) - - parser = argparse.ArgumentParser() - parser.add_argument("--output_dir", type=str, default=output_dir, help="Output directory.") - parser.add_argument("--data_file_path", type=str, default=data_file_path, help="Raw traffic readings.") - parser.add_argument("--graph_file_path", type=str, default=graph_file_path, help="Raw traffic readings.") - parser.add_argument("--history_seq_len", type=int, default=history_seq_len, help="Sequence Length.") - parser.add_argument("--future_seq_len", type=int, default=future_seq_len, help="Sequence Length.") - parser.add_argument("--steps_per_day", type=int, default=steps_per_day, help="Sequence Length.") - parser.add_argument("--dow", type=bool, default=dow, help='Add feature day_of_week.') - parser.add_argument("--C", type=list, default=C, help='Selected channels.') - parser.add_argument("--train_ratio", type=float, default=train_ratio, help='Train ratio') - parser.add_argument("--valid_ratio", type=float, default=valid_ratio, help='Validate ratio.') - - args = parser.parse_args() - if os.path.exists(args.output_dir): - reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? 
(y/n)')).lower().strip() - if reply[0] != 'y': exit + # sliding window size for generating history sequence and target sequence + HISTORY_SEQ_LEN = 12 + FUTURE_SEQ_LEN = 12 + + TRAIN_RATIO = 0.6 + VALID_RATIO = 0.2 + TARGET_CHANNEL = [0] # target channel(s) + STEPS_PER_DAY = 288 + + DATASET_NAME = "PEMS03" + TOD = True # if add time_of_day feature + DOW = True # if add day_of_week feature + OUTPUT_DIR = "datasets/" + DATASET_NAME + DATA_FILE_PATH = "datasets/raw_data/{0}/{0}.npz".format(DATASET_NAME) + GRAPH_FILE_PATH = "datasets/raw_data/{0}/adj_{0}.pkl".format(DATASET_NAME) + + parser = argparse.ArgumentParser() + parser.add_argument("--output_dir", type=str, + default=OUTPUT_DIR, help="Output directory.") + parser.add_argument("--data_file_path", type=str, + default=DATA_FILE_PATH, help="Raw traffic readings.") + parser.add_argument("--graph_file_path", type=str, + default=GRAPH_FILE_PATH, help="Raw traffic readings.") + parser.add_argument("--history_seq_len", type=int, + default=HISTORY_SEQ_LEN, help="Sequence Length.") + parser.add_argument("--future_seq_len", type=int, + default=FUTURE_SEQ_LEN, help="Sequence Length.") + parser.add_argument("--steps_per_day", type=int, + default=STEPS_PER_DAY, help="Sequence Length.") + parser.add_argument("--tod", type=bool, default=TOD, + help="Add feature time_of_day.") + parser.add_argument("--dow", type=bool, default=DOW, + help="Add feature day_of_week.") + parser.add_argument("--target_channel", type=list, + default=TARGET_CHANNEL, help="Selected channels.") + parser.add_argument("--train_ratio", type=float, + default=TRAIN_RATIO, help="Train ratio") + parser.add_argument("--valid_ratio", type=float, + default=VALID_RATIO, help="Validate ratio.") + args_metr = parser.parse_args() + + # print args + print("-"*(20+45+5)) + for key, value in sorted(vars(args_metr).items()): + print("|{0:>20} = {1:<45}|".format(key, str(value))) + print("-"*(20+45+5)) + + if os.path.exists(args_metr.output_dir): + reply = str(input( + f"{args_metr.output_dir} exists. Do you want to overwrite it? 
(y/n)")).lower().strip() + if reply[0] != "y": + sys.exit(0) else: - os.makedirs(args.output_dir) - generate_data(args) + os.makedirs(args_metr.output_dir) + generate_data(args_metr) diff --git a/scripts/data_preparation/PEMS04/generate_adj_mx.py b/scripts/data_preparation/PEMS04/generate_adj_mx.py index 27dcd4d9..47d97ffc 100644 --- a/scripts/data_preparation/PEMS04/generate_adj_mx.py +++ b/scripts/data_preparation/PEMS04/generate_adj_mx.py @@ -1,170 +1,84 @@ import os -import numpy as np import csv import pickle -def get_adjacency_matrix(distance_df_filename, num_of_vertices, id_filename=None): - ''' - Parameters - ---------- - distance_df_filename: str, path of the csv file contains edges information - - num_of_vertices: int, the number of vertices - - Returns - ---------- - A: np.ndarray, adjacency matrix - - ''' - if 'npy' in distance_df_filename: - adj_mx = np.load(distance_df_filename) - return adj_mx, None - else: - A = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序 - if id_filename: - with open(id_filename, 'r') as f: - id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))} # 把节点id(idx)映射成从0开始的索引 - with open(distance_df_filename, 'r') as f: - f.readline() # 略过表头那一行 - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[id_dict[i], id_dict[j]] = 1 - distaneA[id_dict[i], id_dict[j]] = distance - return A, distaneA - else: # distance file中的id直接从0开始 - with open(distance_df_filename, 'r') as f: - f.readline() - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[i, j] = 1 - distaneA[i, j] = distance - return A, distaneA - -def get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, id_filename=None): - ''' - Parameters - ---------- - distance_df_filename: str, path of the csv file contains edges information - num_of_vertices: int, the number of vertices - - Returns - ---------- - A: np.ndarray, adjacency matrix - ''' - if 'npy' in distance_df_filename: - adj_mx = np.load(distance_df_filename) - return adj_mx, None - else: - A = np.zeros((int(num_of_vertices), int(num_of_vertices)), dtype=np.float32) - distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序 - if id_filename: - with open(id_filename, 'r') as f: - id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))} # 把节点id(idx)映射成从0开始的索引 - with open(distance_df_filename, 'r') as f: - f.readline() # 略过表头那一行 - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[id_dict[i], id_dict[j]] = 1 - A[id_dict[j], id_dict[i]] = 1 - distaneA[id_dict[i], id_dict[j]] = distance - distaneA[id_dict[j], id_dict[i]] = distance - return A, distaneA - else: # distance file中的id直接从0开始 - with open(distance_df_filename, 'r') as f: - f.readline() - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[i, j] = 1 - A[j, i] = 1 - distaneA[i, j] = distance - distaneA[j, i] = distance - return A, distaneA +import numpy as np -def get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, 
id_filename=None): - ''' - Parameters - ---------- - distance_df_filename: str, path of the csv file contains edges information +def get_adjacency_matrix(distance_df_filename: str, num_of_vertices: int, id_filename: str = None) -> tuple: + """Generate adjacency matrix. - num_of_vertices: int, the number of vertices + Args: + distance_df_filename (str): path of the csv file contains edges information + num_of_vertices (int): number of vertices + id_filename (str, optional): id filename. Defaults to None. - Returns - ---------- - A: np.ndarray, adjacency matrix + Returns: + tuple: two adjacency matrix. + np.array: connectivity-based adjacency matrix A (A[i, j]=0 or A[i, j]=1) + np.array: distance-based adjacency matrix A + """ - ''' - if 'npy' in distance_df_filename: + if "npy" in distance_df_filename: adj_mx = np.load(distance_df_filename) return adj_mx, None else: - A = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序 + adjacency_matrix_connectivity = np.zeros((int(num_of_vertices), int( + num_of_vertices)), dtype=np.float32) + adjacency_matrix_distance = np.zeros((int(num_of_vertices), int(num_of_vertices)), + dtype=np.float32) if id_filename: - with open(id_filename, 'r') as f: - id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))} # 把节点id(idx)映射成从0开始的索引 - with open(distance_df_filename, 'r') as f: - f.readline() # 略过表头那一行 + # the id in the distance file does not start from 0, so it needs to be remapped + with open(id_filename, "r") as f: + id_dict = {int(i): idx for idx, i in enumerate( + f.read().strip().split("\n"))} # map node idx to 0-based index (start from 0) + with open(distance_df_filename, "r") as f: + f.readline() # omit the first line reader = csv.reader(f) for row in reader: if len(row) != 3: continue i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[id_dict[i], id_dict[j]] = 1 - A[id_dict[j], id_dict[i]] = 1 - distaneA[id_dict[i], id_dict[j]] = distance - distaneA[id_dict[j], id_dict[i]] = distance - return A, distaneA - else: # distance file中的id直接从0开始 - with open(distance_df_filename, 'r') as f: + adjacency_matrix_connectivity[id_dict[i], id_dict[j]] = 1 + adjacency_matrix_connectivity[id_dict[j], id_dict[i]] = 1 + adjacency_matrix_distance[id_dict[i], + id_dict[j]] = distance + adjacency_matrix_distance[id_dict[j], + id_dict[i]] = distance + return adjacency_matrix_connectivity, adjacency_matrix_distance + else: + # ids in distance file start from 0 + with open(distance_df_filename, "r") as f: f.readline() reader = csv.reader(f) for row in reader: if len(row) != 3: continue i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[i, j] = 1 - A[j, i] = 1 - distaneA[i, j] = distance - distaneA[j, i] = distance - return A, distaneA + adjacency_matrix_connectivity[i, j] = 1 + adjacency_matrix_connectivity[j, i] = 1 + adjacency_matrix_distance[i, j] = distance + adjacency_matrix_distance[j, i] = distance + return adjacency_matrix_connectivity, adjacency_matrix_distance + -def generate_adj_PEMS04(): - direction = True +def generate_adj_pems04(): distance_df_filename, num_of_vertices = "datasets/raw_data/PEMS04/PEMS04.csv", 307 - if os.path.exists(distance_df_filename.split(".")[0] + ".txt"): - id_filename = distance_df_filename.split(".")[0] + ".txt" + if os.path.exists(distance_df_filename.split(".", maxsplit=1)[0] + ".txt"): + id_filename = 
distance_df_filename.split(".", maxsplit=1)[0] + ".txt" else: id_filename = None - if direction: - adj_mx, distance_mx = get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, id_filename=id_filename) - else: - adj_mx, distance_mx = get_adjacency_matrix(distance_df_filename, num_of_vertices, id_filename=id_filename) - # TODO: the self loop is missing + adj_mx, distance_mx = get_adjacency_matrix( + distance_df_filename, num_of_vertices, id_filename=id_filename) + # the self loop is missing add_self_loop = False if add_self_loop: + print("adding self loop to adjacency matrices.") adj_mx = adj_mx + np.identity(adj_mx.shape[0]) distance_mx = distance_mx + np.identity(distance_mx.shape[0]) - pickle.dump(adj_mx, open("datasets/raw_data/PEMS04/adj_PEMS04.pkl", 'wb')) - pickle.dump(distance_mx, open("datasets/raw_data/PEMS04/adj_PEMS04_distance.pkl", 'wb')) + else: + print("kindly note that there is no self loop in adjacency matrices.") + with open("datasets/raw_data/PEMS04/adj_PEMS04.pkl", "wb") as f: + pickle.dump(adj_mx, f) + with open("datasets/raw_data/PEMS04/adj_PEMS04_distance.pkl", "wb") as f: + pickle.dump(distance_mx, f) diff --git a/scripts/data_preparation/PEMS04/generate_training_data.py b/scripts/data_preparation/PEMS04/generate_training_data.py index 653616f6..8ad3e771 100644 --- a/scripts/data_preparation/PEMS04/generate_training_data.py +++ b/scripts/data_preparation/PEMS04/generate_training_data.py @@ -1,167 +1,163 @@ -import argparse -import pickle +import os +import sys import shutil +import pickle +import argparse + import numpy as np -import os -from generate_adj_mx import generate_adj_PEMS04 - -""" -PEMS04 dataset (traffic flow dataset) default settings: - - normalization: - standard norm - - dataset division: - 6:2:2 - - windows size: - 12 - - features: - traffic flow - --traffic occupy--(not used) - --traffic speed--(not used) - time in day - day in week - - target: - predicting the traffic speed -""" - -def standard_transform(data: np.array, output_dir: str, train_index: list) -> np.array: - """standard normalization. +from generate_adj_mx import generate_adj_pems04 +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../../..")) +from basicts.data.transform import standard_transform - Args: - data (np.array): raw time series data. - output_dir (str): output dir path. - train_index (list): train index. - Returns: - np.array: normalized raw time series data. - """ - # data: L, N, C - data_train = data[:train_index[-1][1], ...] - - mean, std = data_train[..., 0].mean(), data_train[..., 0].std() - - print("mean (training data):", mean) - print("std (training data):", std) - scaler = {} - scaler['func'] = standard_re_transform.__name__ - scaler['args'] = {"mean":mean, "std":std} - pickle.dump(scaler, open(output_dir + "/scaler.pkl", 'wb')) - - def normalize(x): - return (x - mean) / std - - data_norm = normalize(data) - return data_norm - -def standard_re_transform(x, **kwargs): - mean, std = kwargs['mean'], kwargs['std'] - x = x * std - x = x + mean - return x - -def generate_data(args): - """preprocess and generate train/valid/test datasets. +def generate_data(args: argparse.Namespace): + """Preprocess and generate train/valid/test datasets. + Default settings of PEMS04 dataset: + - Normalization method: standard norm. + - Dataset division: 6:2:2. + - Window size: history 12, future 12. 
+        - Channels (features): three channels [traffic flow, time of day, day of week]
+        - Target: predict the traffic flow of the future 12 time steps.

     Args:
-        args (Namespace): args for processing data.
+        args (argparse): configurations of preprocessing
     """
-    C = args.C
-    future_seq_len = args.future_seq_len
+
+    target_channel = args.target_channel
+    future_seq_len = args.future_seq_len
     history_seq_len = args.history_seq_len
-    add_time_in_day = True
-    add_day_in_week = args.dow
+    add_time_of_day = args.tod
+    add_day_of_week = args.dow
     output_dir = args.output_dir
+    train_ratio = args.train_ratio
+    valid_ratio = args.valid_ratio
+    data_file_path = args.data_file_path
+    graph_file_path = args.graph_file_path
+    steps_per_day = args.steps_per_day

     # read data
-    data = np.load(args.data_file_path)['data']
-    data = data[..., C]
-    print("Data shape: {0}".format(data.shape))
+    data = np.load(data_file_path)["data"]
+    data = data[..., target_channel]
+    print("raw time series shape: {0}".format(data.shape))

-    L, N, F = data.shape
-    num_samples = L - (history_seq_len + future_seq_len) + 1
+    l, n, f = data.shape
+    num_samples = l - (history_seq_len + future_seq_len) + 1
     train_num_short = round(num_samples * train_ratio)
     valid_num_short = round(num_samples * valid_ratio)
-    test_num_short = num_samples - train_num_short - valid_num_short
-    print("train_num_short:{0}".format(train_num_short))
-    print("valid_num_short:{0}".format(valid_num_short))
-    print("test_num_short:{0}".format(test_num_short))
+    test_num_short = num_samples - train_num_short - valid_num_short
+    print("number of training samples:{0}".format(train_num_short))
+    print("number of validation samples:{0}".format(valid_num_short))
+    print("number of test samples:{0}".format(test_num_short))

-    index_list = []
+    index_list = []
     for t in range(history_seq_len, num_samples + history_seq_len):
         index = (t-history_seq_len, t, t+future_seq_len)
         index_list.append(index)
+
     train_index = index_list[:train_num_short]
     valid_index = index_list[train_num_short: train_num_short + valid_num_short]
-    test_index = index_list[train_num_short + valid_num_short: train_num_short + valid_num_short + test_num_short]
-
+    test_index = index_list[train_num_short +
+                            valid_num_short: train_num_short + valid_num_short + test_num_short]
+
     scaler = standard_transform
     data_norm = scaler(data, output_dir, train_index)

     # add external feature
     feature_list = [data_norm]
-    if add_time_in_day:
-        # numerical time_in_day
-        time_ind = [i%args.steps_per_day / args.steps_per_day for i in range(data_norm.shape[0])]
-        time_ind = np.array(time_ind)
-        time_in_day = np.tile(time_ind, [1, N, 1]).transpose((2, 1, 0))
-        feature_list.append(time_in_day)
-    if add_day_in_week:
-        # numerical day_in_week
-        day_in_week = [(i // args.steps_per_day)%7 for i in range(data_norm.shape[0])]
-        day_in_week = np.array(day_in_week)
-        day_in_week = np.tile(day_in_week, [1, N, 1]).transpose((2, 1, 0))
-        feature_list.append(day_in_week)
+    if add_time_of_day:
+        # numerical time_of_day
+        tod = [i % steps_per_day /
+               steps_per_day for i in range(data_norm.shape[0])]
+        tod = np.array(tod)
+        tod_tiled = np.tile(tod, [1, n, 1]).transpose((2, 1, 0))
+        feature_list.append(tod_tiled)
+
+    if add_day_of_week:
+        # numerical day_of_week
+        dow = [(i // steps_per_day) % 7 for i in range(data_norm.shape[0])]
+        dow = np.array(dow)
+        dow_tiled = np.tile(dow, [1, n, 1]).transpose((2, 1, 0))
+        feature_list.append(dow_tiled)

     processed_data = np.concatenate(feature_list, axis=-1)

     # dump data
     index = {}
-    index['train'] = train_index
-
index['valid'] = valid_index - index['test'] = test_index - pickle.dump(index, open(output_dir + "/index.pkl", "wb")) + index["train"] = train_index + index["valid"] = valid_index + index["test"] = test_index + with open(output_dir + "/index_in{0}_out{1}.pkl".format(history_seq_len, future_seq_len), "wb") as f: + pickle.dump(index, f) data = {} - data['processed_data'] = processed_data - pickle.dump(data, open(output_dir + "/data.pkl", "wb")) + data["processed_data"] = processed_data + with open(output_dir + "/data.pkl", "wb") as f: + pickle.dump(data, f) # copy adj if os.path.exists(args.graph_file_path): - shutil.copyfile(args.graph_file_path, output_dir + '/adj_mx.pkl') # copy models + # copy + shutil.copyfile(args.graph_file_path, output_dir + "/adj_mx.pkl") else: - generate_adj_PEMS04() - shutil.copyfile(args.graph_file_path, output_dir + '/adj_mx.pkl') # copy models + # generate and copy + generate_adj_pems04() + shutil.copyfile(graph_file_path, output_dir + "/adj_mx.pkl") + if __name__ == "__main__": - history_seq_len = 12 # sliding window size for generating history sequence and target sequence - future_seq_len = 12 - - train_ratio = 0.6 - valid_ratio = 0.2 - C = [0] # selected channels - steps_per_day = 288 # 5min - - name = "PEMS04" - dow = True # if add day_of_week feature - output_dir = 'datasets/' + name - data_file_path = 'datasets/raw_data/{0}/{1}.npz'.format(name, name) - graph_file_path = 'datasets/raw_data/{0}/adj_{1}.pkl'.format(name, name) - - parser = argparse.ArgumentParser() - parser.add_argument("--output_dir", type=str, default=output_dir, help="Output directory.") - parser.add_argument("--data_file_path", type=str, default=data_file_path, help="Raw traffic readings.") - parser.add_argument("--graph_file_path", type=str, default=graph_file_path, help="Raw traffic readings.") - parser.add_argument("--history_seq_len", type=int, default=history_seq_len, help="Sequence Length.") - parser.add_argument("--future_seq_len", type=int, default=future_seq_len, help="Sequence Length.") - parser.add_argument("--steps_per_day", type=int, default=steps_per_day, help="Sequence Length.") - parser.add_argument("--dow", type=bool, default=dow, help='Add feature day_of_week.') - parser.add_argument("--C", type=list, default=C, help='Selected channels.') - parser.add_argument("--train_ratio", type=float, default=train_ratio, help='Train ratio') - parser.add_argument("--valid_ratio", type=float, default=valid_ratio, help='Validate ratio.') - - args = parser.parse_args() - if os.path.exists(args.output_dir): - reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? 
(y/n)')).lower().strip() - if reply[0] != 'y': exit + # sliding window size for generating history sequence and target sequence + HISTORY_SEQ_LEN = 12 + FUTURE_SEQ_LEN = 12 + + TRAIN_RATIO = 0.6 + VALID_RATIO = 0.2 + TARGET_CHANNEL = [0] # target channel(s) + STEPS_PER_DAY = 288 + + DATASET_NAME = "PEMS04" + TOD = True # if add time_of_day feature + DOW = True # if add day_of_week feature + OUTPUT_DIR = "datasets/" + DATASET_NAME + DATA_FILE_PATH = "datasets/raw_data/{0}/{0}.npz".format(DATASET_NAME) + GRAPH_FILE_PATH = "datasets/raw_data/{0}/adj_{0}.pkl".format(DATASET_NAME) + + parser = argparse.ArgumentParser() + parser.add_argument("--output_dir", type=str, + default=OUTPUT_DIR, help="Output directory.") + parser.add_argument("--data_file_path", type=str, + default=DATA_FILE_PATH, help="Raw traffic readings.") + parser.add_argument("--graph_file_path", type=str, + default=GRAPH_FILE_PATH, help="Raw traffic readings.") + parser.add_argument("--history_seq_len", type=int, + default=HISTORY_SEQ_LEN, help="Sequence Length.") + parser.add_argument("--future_seq_len", type=int, + default=FUTURE_SEQ_LEN, help="Sequence Length.") + parser.add_argument("--steps_per_day", type=int, + default=STEPS_PER_DAY, help="Sequence Length.") + parser.add_argument("--tod", type=bool, default=TOD, + help="Add feature time_of_day.") + parser.add_argument("--dow", type=bool, default=DOW, + help="Add feature day_of_week.") + parser.add_argument("--target_channel", type=list, + default=TARGET_CHANNEL, help="Selected channels.") + parser.add_argument("--train_ratio", type=float, + default=TRAIN_RATIO, help="Train ratio") + parser.add_argument("--valid_ratio", type=float, + default=VALID_RATIO, help="Validate ratio.") + args_metr = parser.parse_args() + + # print args + print("-"*(20+45+5)) + for key, value in sorted(vars(args_metr).items()): + print("|{0:>20} = {1:<45}|".format(key, str(value))) + print("-"*(20+45+5)) + + if os.path.exists(args_metr.output_dir): + reply = str(input( + f"{args_metr.output_dir} exists. Do you want to overwrite it? 
(y/n)")).lower().strip() + if reply[0] != "y": + sys.exit(0) else: - os.makedirs(args.output_dir) - generate_data(args) + os.makedirs(args_metr.output_dir) + generate_data(args_metr) diff --git a/scripts/data_preparation/PEMS07/generate_adj_mx.py b/scripts/data_preparation/PEMS07/generate_adj_mx.py index ae9f5c71..e307a013 100644 --- a/scripts/data_preparation/PEMS07/generate_adj_mx.py +++ b/scripts/data_preparation/PEMS07/generate_adj_mx.py @@ -1,170 +1,84 @@ import os -import numpy as np import csv import pickle -def get_adjacency_matrix(distance_df_filename, num_of_vertices, id_filename=None): - ''' - Parameters - ---------- - distance_df_filename: str, path of the csv file contains edges information - - num_of_vertices: int, the number of vertices - - Returns - ---------- - A: np.ndarray, adjacency matrix - - ''' - if 'npy' in distance_df_filename: - adj_mx = np.load(distance_df_filename) - return adj_mx, None - else: - A = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序 - if id_filename: - with open(id_filename, 'r') as f: - id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))} # 把节点id(idx)映射成从0开始的索引 - with open(distance_df_filename, 'r') as f: - f.readline() # 略过表头那一行 - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[id_dict[i], id_dict[j]] = 1 - distaneA[id_dict[i], id_dict[j]] = distance - return A, distaneA - else: # distance file中的id直接从0开始 - with open(distance_df_filename, 'r') as f: - f.readline() - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[i, j] = 1 - distaneA[i, j] = distance - return A, distaneA - -def get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, id_filename=None): - ''' - Parameters - ---------- - distance_df_filename: str, path of the csv file contains edges information - num_of_vertices: int, the number of vertices - - Returns - ---------- - A: np.ndarray, adjacency matrix - ''' - if 'npy' in distance_df_filename: - adj_mx = np.load(distance_df_filename) - return adj_mx, None - else: - A = np.zeros((int(num_of_vertices), int(num_of_vertices)), dtype=np.float32) - distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序 - if id_filename: - with open(id_filename, 'r') as f: - id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))} # 把节点id(idx)映射成从0开始的索引 - with open(distance_df_filename, 'r') as f: - f.readline() # 略过表头那一行 - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[id_dict[i], id_dict[j]] = 1 - A[id_dict[j], id_dict[i]] = 1 - distaneA[id_dict[i], id_dict[j]] = distance - distaneA[id_dict[j], id_dict[i]] = distance - return A, distaneA - else: # distance file中的id直接从0开始 - with open(distance_df_filename, 'r') as f: - f.readline() - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[i, j] = 1 - A[j, i] = 1 - distaneA[i, j] = distance - distaneA[j, i] = distance - return A, distaneA +import numpy as np -def get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, 
id_filename=None): - ''' - Parameters - ---------- - distance_df_filename: str, path of the csv file contains edges information +def get_adjacency_matrix(distance_df_filename: str, num_of_vertices: int, id_filename: str = None) -> tuple: + """Generate adjacency matrix. - num_of_vertices: int, the number of vertices + Args: + distance_df_filename (str): path of the csv file contains edges information + num_of_vertices (int): number of vertices + id_filename (str, optional): id filename. Defaults to None. - Returns - ---------- - A: np.ndarray, adjacency matrix + Returns: + tuple: two adjacency matrix. + np.array: connectivity-based adjacency matrix A (A[i, j]=0 or A[i, j]=1) + np.array: distance-based adjacency matrix A + """ - ''' - if 'npy' in distance_df_filename: + if "npy" in distance_df_filename: adj_mx = np.load(distance_df_filename) return adj_mx, None else: - A = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序 + adjacency_matrix_connectivity = np.zeros((int(num_of_vertices), int( + num_of_vertices)), dtype=np.float32) + adjacency_matrix_distance = np.zeros((int(num_of_vertices), int(num_of_vertices)), + dtype=np.float32) if id_filename: - with open(id_filename, 'r') as f: - id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))} # 把节点id(idx)映射成从0开始的索引 - with open(distance_df_filename, 'r') as f: - f.readline() # 略过表头那一行 + # the id in the distance file does not start from 0, so it needs to be remapped + with open(id_filename, "r") as f: + id_dict = {int(i): idx for idx, i in enumerate( + f.read().strip().split("\n"))} # map node idx to 0-based index (start from 0) + with open(distance_df_filename, "r") as f: + f.readline() # omit the first line reader = csv.reader(f) for row in reader: if len(row) != 3: continue i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[id_dict[i], id_dict[j]] = 1 - A[id_dict[j], id_dict[i]] = 1 - distaneA[id_dict[i], id_dict[j]] = distance - distaneA[id_dict[j], id_dict[i]] = distance - return A, distaneA - else: # distance file中的id直接从0开始 - with open(distance_df_filename, 'r') as f: + adjacency_matrix_connectivity[id_dict[i], id_dict[j]] = 1 + adjacency_matrix_connectivity[id_dict[j], id_dict[i]] = 1 + adjacency_matrix_distance[id_dict[i], + id_dict[j]] = distance + adjacency_matrix_distance[id_dict[j], + id_dict[i]] = distance + return adjacency_matrix_connectivity, adjacency_matrix_distance + else: + # ids in distance file start from 0 + with open(distance_df_filename, "r") as f: f.readline() reader = csv.reader(f) for row in reader: if len(row) != 3: continue i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[i, j] = 1 - A[j, i] = 1 - distaneA[i, j] = distance - distaneA[j, i] = distance - return A, distaneA + adjacency_matrix_connectivity[i, j] = 1 + adjacency_matrix_connectivity[j, i] = 1 + adjacency_matrix_distance[i, j] = distance + adjacency_matrix_distance[j, i] = distance + return adjacency_matrix_connectivity, adjacency_matrix_distance + -def generate_adj_PEMS07(): - direction = True +def generate_adj_pems07(): distance_df_filename, num_of_vertices = "datasets/raw_data/PEMS07/PEMS07.csv", 883 - if os.path.exists(distance_df_filename.split(".")[0] + ".txt"): - id_filename = distance_df_filename.split(".")[0] + ".txt" + if os.path.exists(distance_df_filename.split(".", maxsplit=1)[0] + ".txt"): + id_filename = 
distance_df_filename.split(".", maxsplit=1)[0] + ".txt" else: id_filename = None - if direction: - adj_mx, distance_mx = get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, id_filename=id_filename) - else: - adj_mx, distance_mx = get_adjacency_matrix(distance_df_filename, num_of_vertices, id_filename=id_filename) - # TODO: the self loop is missing + adj_mx, distance_mx = get_adjacency_matrix( + distance_df_filename, num_of_vertices, id_filename=id_filename) + # the self loop is missing add_self_loop = False if add_self_loop: + print("adding self loop to adjacency matrices.") adj_mx = adj_mx + np.identity(adj_mx.shape[0]) distance_mx = distance_mx + np.identity(distance_mx.shape[0]) - pickle.dump(adj_mx, open("datasets/raw_data/PEMS07/adj_PEMS07.pkl", 'wb')) - pickle.dump(distance_mx, open("datasets/raw_data/PEMS07/adj_PEMS07_distance.pkl", 'wb')) + else: + print("kindly note that there is no self loop in adjacency matrices.") + with open("datasets/raw_data/PEMS07/adj_PEMS07.pkl", "wb") as f: + pickle.dump(adj_mx, f) + with open("datasets/raw_data/PEMS07/adj_PEMS07_distance.pkl", "wb") as f: + pickle.dump(distance_mx, f) diff --git a/scripts/data_preparation/PEMS07/generate_training_data.py b/scripts/data_preparation/PEMS07/generate_training_data.py index 86995a41..3d490aee 100644 --- a/scripts/data_preparation/PEMS07/generate_training_data.py +++ b/scripts/data_preparation/PEMS07/generate_training_data.py @@ -1,167 +1,163 @@ -import argparse -import pickle +import os +import sys import shutil +import pickle +import argparse + import numpy as np -import os -from generate_adj_mx import generate_adj_PEMS07 - -""" -PEMS07 dataset (traffic flow dataset) default settings: - - normalization: - standard norm - - dataset division: - 6:2:2 - - windows size: - 12 - - features: - traffic flow - --traffic occupy--(not used) - --traffic speed--(not used) - time in day - day in week - - target: - predicting the traffic speed -""" - -def standard_transform(data: np.array, output_dir: str, train_index: list) -> np.array: - """standard normalization. +from generate_adj_mx import generate_adj_pems07 +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../../..")) +from basicts.data.transform import standard_transform - Args: - data (np.array): raw time series data. - output_dir (str): output dir path. - train_index (list): train index. - Returns: - np.array: normalized raw time series data. - """ - # data: L, N, C - data_train = data[:train_index[-1][1], ...] - - mean, std = data_train[..., 0].mean(), data_train[..., 0].std() - - print("mean (training data):", mean) - print("std (training data):", std) - scaler = {} - scaler['func'] = standard_re_transform.__name__ - scaler['args'] = {"mean":mean, "std":std} - pickle.dump(scaler, open(output_dir + "/scaler.pkl", 'wb')) - - def normalize(x): - return (x - mean) / std - - data_norm = normalize(data) - return data_norm - -def standard_re_transform(x, **kwargs): - mean, std = kwargs['mean'], kwargs['std'] - x = x * std - x = x + mean - return x - -def generate_data(args): - """preprocess and generate train/valid/test datasets. +def generate_data(args: argparse.Namespace): + """Preprocess and generate train/valid/test datasets. + Default settings of PEMS07 dataset: + - Normalization method: standard norm. + - Dataset division: 6:2:2. + - Window size: history 12, future 12. 
diff --git a/scripts/data_preparation/PEMS07/generate_training_data.py b/scripts/data_preparation/PEMS07/generate_training_data.py
index 86995a41..3d490aee 100644
--- a/scripts/data_preparation/PEMS07/generate_training_data.py
+++ b/scripts/data_preparation/PEMS07/generate_training_data.py
@@ -1,167 +1,163 @@
-import argparse
-import pickle
+import os
+import sys
 import shutil
+import pickle
+import argparse
+
 import numpy as np
-import os
-from generate_adj_mx import generate_adj_PEMS07
-
-"""
-PEMS07 dataset (traffic flow dataset) default settings:
-    - normalization:
-        standard norm
-    - dataset division:
-        6:2:2
-    - windows size:
-        12
-    - features:
-        traffic flow
-        --traffic occupy--(not used)
-        --traffic speed--(not used)
-        time in day
-        day in week
-    - target:
-        predicting the traffic speed
-"""
-
-def standard_transform(data: np.array, output_dir: str, train_index: list) -> np.array:
-    """standard normalization.
+from generate_adj_mx import generate_adj_pems07
+# TODO: remove this when basicts can be installed via pip
+sys.path.append(os.path.abspath(__file__ + "/../../../.."))
+from basicts.data.transform import standard_transform
 
-    Args:
-        data (np.array): raw time series data.
-        output_dir (str): output dir path.
-        train_index (list): train index.
 
-    Returns:
-        np.array: normalized raw time series data.
-    """
-    # data: L, N, C
-    data_train = data[:train_index[-1][1], ...]
-
-    mean, std = data_train[..., 0].mean(), data_train[..., 0].std()
-
-    print("mean (training data):", mean)
-    print("std (training data):", std)
-    scaler = {}
-    scaler['func'] = standard_re_transform.__name__
-    scaler['args'] = {"mean": mean, "std": std}
-    pickle.dump(scaler, open(output_dir + "/scaler.pkl", 'wb'))
-
-    def normalize(x):
-        return (x - mean) / std
-
-    data_norm = normalize(data)
-    return data_norm
-
-def standard_re_transform(x, **kwargs):
-    mean, std = kwargs['mean'], kwargs['std']
-    x = x * std
-    x = x + mean
-    return x
-
-def generate_data(args):
-    """preprocess and generate train/valid/test datasets.
+def generate_data(args: argparse.Namespace):
+    """Preprocess and generate train/valid/test datasets.
 
+    Default settings of the PEMS07 dataset:
+        - Normalization method: standard norm.
+        - Dataset division: 6:2:2.
+        - Window size: history 12, future 12.
+        - Channels (features): three channels [traffic flow, time of day, day of week].
+        - Target: predict the traffic flow of the future 12 time steps.
 
     Args:
-        args (Namespace): args for processing data.
+        args (argparse.Namespace): configurations of the preprocessing
     """
-    C = args.C
-    future_seq_len = args.future_seq_len
+
+    target_channel = args.target_channel
+    future_seq_len = args.future_seq_len
     history_seq_len = args.history_seq_len
-    add_time_in_day = True
-    add_day_in_week = args.dow
+    add_time_of_day = args.tod
+    add_day_of_week = args.dow
     output_dir = args.output_dir
+    train_ratio = args.train_ratio
+    valid_ratio = args.valid_ratio
+    data_file_path = args.data_file_path
+    graph_file_path = args.graph_file_path
+    steps_per_day = args.steps_per_day
 
     # read data
-    data = np.load(args.data_file_path)['data']
-    data = data[..., C]
-    print("Data shape: {0}".format(data.shape))
+    data = np.load(data_file_path)["data"]
+    data = data[..., target_channel]
+    print("raw time series shape: {0}".format(data.shape))
 
-    L, N, F = data.shape
-    num_samples = L - (history_seq_len + future_seq_len) + 1
+    l, n, f = data.shape
+    num_samples = l - (history_seq_len + future_seq_len) + 1
     train_num_short = round(num_samples * train_ratio)
     valid_num_short = round(num_samples * valid_ratio)
-    test_num_short = num_samples - train_num_short - valid_num_short
-    print("train_num_short:{0}".format(train_num_short))
-    print("valid_num_short:{0}".format(valid_num_short))
-    print("test_num_short:{0}".format(test_num_short))
+    test_num_short = num_samples - train_num_short - valid_num_short
+    print("number of training samples:{0}".format(train_num_short))
+    print("number of validation samples:{0}".format(valid_num_short))
+    print("number of test samples:{0}".format(test_num_short))
 
-    index_list = []
+    index_list = []
     for t in range(history_seq_len, num_samples + history_seq_len):
         index = (t-history_seq_len, t, t+future_seq_len)
         index_list.append(index)
+
     train_index = index_list[:train_num_short]
     valid_index = index_list[train_num_short: train_num_short + valid_num_short]
-    test_index = index_list[train_num_short + valid_num_short: train_num_short + valid_num_short + test_num_short]
-
+    test_index = index_list[train_num_short +
+                            valid_num_short: train_num_short + valid_num_short + test_num_short]
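To make the (t - history, t, t + future) windowing concrete, a tiny worked example with toy lengths (not the real dataset sizes):

    history_seq_len, future_seq_len, l = 2, 2, 7
    num_samples = l - (history_seq_len + future_seq_len) + 1   # 4

    index_list = []
    for t in range(history_seq_len, num_samples + history_seq_len):
        index_list.append((t - history_seq_len, t, t + future_seq_len))

    print(index_list)   # [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
    # sample 0: history = data[0:2], target = data[2:4], and so on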
 
     scaler = standard_transform
     data_norm = scaler(data, output_dir, train_index)
 
     # add external feature
     feature_list = [data_norm]
-    if add_time_in_day:
-        # numerical time_in_day
-        time_ind = [i%args.steps_per_day / args.steps_per_day for i in range(data_norm.shape[0])]
-        time_ind = np.array(time_ind)
-        time_in_day = np.tile(time_ind, [1, N, 1]).transpose((2, 1, 0))
-        feature_list.append(time_in_day)
-    if add_day_in_week:
-        # numerical day_in_week
-        day_in_week = [(i // args.steps_per_day)%7 for i in range(data_norm.shape[0])]
-        day_in_week = np.array(day_in_week)
-        day_in_week = np.tile(day_in_week, [1, N, 1]).transpose((2, 1, 0))
-        feature_list.append(day_in_week)
+    if add_time_of_day:
+        # numerical time_of_day
+        tod = [i % steps_per_day /
+               steps_per_day for i in range(data_norm.shape[0])]
+        tod = np.array(tod)
+        tod_tiled = np.tile(tod, [1, n, 1]).transpose((2, 1, 0))
+        feature_list.append(tod_tiled)
+
+    if add_day_of_week:
+        # numerical day_of_week
+        dow = [(i // steps_per_day) % 7 for i in range(data_norm.shape[0])]
+        dow = np.array(dow)
+        dow_tiled = np.tile(dow, [1, n, 1]).transpose((2, 1, 0))
+        feature_list.append(dow_tiled)
 
     processed_data = np.concatenate(feature_list, axis=-1)
 
     # dump data
     index = {}
-    index['train'] = train_index
-    index['valid'] = valid_index
-    index['test'] = test_index
-    pickle.dump(index, open(output_dir + "/index.pkl", "wb"))
+    index["train"] = train_index
+    index["valid"] = valid_index
+    index["test"] = test_index
+    with open(output_dir + "/index_in{0}_out{1}.pkl".format(history_seq_len, future_seq_len), "wb") as f:
+        pickle.dump(index, f)
 
     data = {}
-    data['processed_data'] = processed_data
-    pickle.dump(data, open(output_dir + "/data.pkl", "wb"))
+    data["processed_data"] = processed_data
+    with open(output_dir + "/data.pkl", "wb") as f:
+        pickle.dump(data, f)
 
     # copy adj
     if os.path.exists(args.graph_file_path):
-        shutil.copyfile(args.graph_file_path, output_dir + '/adj_mx.pkl')  # copy models
+        # copy
+        shutil.copyfile(args.graph_file_path, output_dir + "/adj_mx.pkl")
     else:
-        generate_adj_PEMS07()
-        shutil.copyfile(args.graph_file_path, output_dir + '/adj_mx.pkl')  # copy models
+        # generate and copy
+        generate_adj_pems07()
+        shutil.copyfile(graph_file_path, output_dir + "/adj_mx.pkl")
+
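On the consumer side, a sketch of assembling one training sample from the dumped files (paths follow the script's defaults; with the default window sizes the index file is named index_in12_out12.pkl):

    import pickle

    OUTPUT_DIR = "datasets/PEMS07"

    with open(OUTPUT_DIR + "/data.pkl", "rb") as f:
        processed = pickle.load(f)["processed_data"]   # shape: (L, N, C)
    with open(OUTPUT_DIR + "/index_in12_out12.pkl", "rb") as f:
        index = pickle.load(f)

    hist_start, tgt_start, tgt_end = index["train"][0]
    history = processed[hist_start:tgt_start]   # model input, 12 steps
    future = processed[tgt_start:tgt_end]       # prediction target, 12 steps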
 
 if __name__ == "__main__":
-    history_seq_len = 12    # sliding window size for generating history sequence and target sequence
-    future_seq_len = 12
-
-    train_ratio = 0.6
-    valid_ratio = 0.2
-    C = [0]                 # selected channels
-    steps_per_day = 288     # 5min
-
-    name = "PEMS07"
-    dow = True              # if add day_of_week feature
-    output_dir = 'datasets/' + name
-    data_file_path = 'datasets/raw_data/{0}/{1}.npz'.format(name, name)
-    graph_file_path = 'datasets/raw_data/{0}/adj_{1}.pkl'.format(name, name)
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--output_dir", type=str, default=output_dir, help="Output directory.")
-    parser.add_argument("--data_file_path", type=str, default=data_file_path, help="Raw traffic readings.")
-    parser.add_argument("--graph_file_path", type=str, default=graph_file_path, help="Raw traffic readings.")
-    parser.add_argument("--history_seq_len", type=int, default=history_seq_len, help="Sequence Length.")
-    parser.add_argument("--future_seq_len", type=int, default=future_seq_len, help="Sequence Length.")
-    parser.add_argument("--steps_per_day", type=int, default=steps_per_day, help="Sequence Length.")
-    parser.add_argument("--dow", type=bool, default=dow, help='Add feature day_of_week.')
-    parser.add_argument("--C", type=list, default=C, help='Selected channels.')
-    parser.add_argument("--train_ratio", type=float, default=train_ratio, help='Train ratio')
-    parser.add_argument("--valid_ratio", type=float, default=valid_ratio, help='Validate ratio.')
-
-    args = parser.parse_args()
-    if os.path.exists(args.output_dir):
-        reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? (y/n)')).lower().strip()
-        if reply[0] != 'y': exit
+    # sliding window size for generating history sequence and target sequence
+    HISTORY_SEQ_LEN = 12
+    FUTURE_SEQ_LEN = 12
+
+    TRAIN_RATIO = 0.6
+    VALID_RATIO = 0.2
+    TARGET_CHANNEL = [0]  # target channel(s)
+    STEPS_PER_DAY = 288
+
+    DATASET_NAME = "PEMS07"
+    TOD = True  # whether to add the time_of_day feature
+    DOW = True  # whether to add the day_of_week feature
+    OUTPUT_DIR = "datasets/" + DATASET_NAME
+    DATA_FILE_PATH = "datasets/raw_data/{0}/{0}.npz".format(DATASET_NAME)
+    GRAPH_FILE_PATH = "datasets/raw_data/{0}/adj_{0}.pkl".format(DATASET_NAME)
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--output_dir", type=str,
+                        default=OUTPUT_DIR, help="Output directory.")
+    parser.add_argument("--data_file_path", type=str,
+                        default=DATA_FILE_PATH, help="Raw traffic readings.")
+    parser.add_argument("--graph_file_path", type=str,
+                        default=GRAPH_FILE_PATH, help="Adjacency matrix file path.")
+    parser.add_argument("--history_seq_len", type=int,
                        default=HISTORY_SEQ_LEN, help="History sequence length.")
+    parser.add_argument("--future_seq_len", type=int,
+                        default=FUTURE_SEQ_LEN, help="Future sequence length.")
+    parser.add_argument("--steps_per_day", type=int,
+                        default=STEPS_PER_DAY, help="Number of time steps per day.")
+    parser.add_argument("--tod", type=bool, default=TOD,
+                        help="Add feature time_of_day.")
+    parser.add_argument("--dow", type=bool, default=DOW,
+                        help="Add feature day_of_week.")
+    parser.add_argument("--target_channel", type=list,
+                        default=TARGET_CHANNEL, help="Selected channels.")
+    parser.add_argument("--train_ratio", type=float,
+                        default=TRAIN_RATIO, help="Train ratio.")
+    parser.add_argument("--valid_ratio", type=float,
+                        default=VALID_RATIO, help="Validation ratio.")
+    args_metr = parser.parse_args()
+
+    # print args
+    print("-"*(20+45+5))
+    for key, value in sorted(vars(args_metr).items()):
+        print("|{0:>20} = {1:<45}|".format(key, str(value)))
+    print("-"*(20+45+5))
+
+    if os.path.exists(args_metr.output_dir):
+        reply = str(input(
+            f"{args_metr.output_dir} exists. Do you want to overwrite it? (y/n) ")).lower().strip()
+        if reply[0] != "y":
+            sys.exit(0)
     else:
-        os.makedirs(args.output_dir)
-    generate_data(args)
+        os.makedirs(args_metr.output_dir)
+    generate_data(args_metr)
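One caveat about the flag definitions above: argparse's type=bool does not parse negatives the way it appears to, because bool("False") is truthy, so --tod False would still enable the feature (type=list has a similar pitfall: it splits the argument string into individual characters). A minimal sketch of a safer converter; the str2bool name is ours, not part of the script:

    import argparse

    def str2bool(value: str) -> bool:
        # parse textual booleans; note that bool("False") would be True
        if value.lower() in ("yes", "true", "t", "1"):
            return True
        if value.lower() in ("no", "false", "f", "0"):
            return False
        raise argparse.ArgumentTypeError("boolean value expected, got %r" % value)

    parser = argparse.ArgumentParser()
    parser.add_argument("--tod", type=str2bool, default=True,
                        help="Add feature time_of_day.")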
(y/n)")).lower().strip() + if reply[0] != "y": + sys.exit(0) else: - os.makedirs(args.output_dir) - generate_data(args) + os.makedirs(args_metr.output_dir) + generate_data(args_metr) diff --git a/scripts/data_preparation/PEMS08/generate_adj_mx.py b/scripts/data_preparation/PEMS08/generate_adj_mx.py index 1f9fd2ff..1484b46e 100644 --- a/scripts/data_preparation/PEMS08/generate_adj_mx.py +++ b/scripts/data_preparation/PEMS08/generate_adj_mx.py @@ -1,170 +1,84 @@ -import numpy as np +import os import csv import pickle -import os - -def get_adjacency_matrix(distance_df_filename, num_of_vertices, id_filename=None): - ''' - Parameters - ---------- - distance_df_filename: str, path of the csv file contains edges information - - num_of_vertices: int, the number of vertices - - Returns - ---------- - A: np.ndarray, adjacency matrix - ''' - if 'npy' in distance_df_filename: - adj_mx = np.load(distance_df_filename) - return adj_mx, None - else: - A = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序 - if id_filename: - with open(id_filename, 'r') as f: - id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))} # 把节点id(idx)映射成从0开始的索引 - with open(distance_df_filename, 'r') as f: - f.readline() # 略过表头那一行 - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[id_dict[i], id_dict[j]] = 1 - distaneA[id_dict[i], id_dict[j]] = distance - return A, distaneA - else: # distance file中的id直接从0开始 - with open(distance_df_filename, 'r') as f: - f.readline() - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[i, j] = 1 - distaneA[i, j] = distance - return A, distaneA - -def get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, id_filename=None): - ''' - Parameters - ---------- - distance_df_filename: str, path of the csv file contains edges information - num_of_vertices: int, the number of vertices - - Returns - ---------- - A: np.ndarray, adjacency matrix - ''' - if 'npy' in distance_df_filename: - adj_mx = np.load(distance_df_filename) - return adj_mx, None - else: - A = np.zeros((int(num_of_vertices), int(num_of_vertices)), dtype=np.float32) - distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)), - dtype=np.float32) - # distance file中的id并不是从0开始的 所以要进行重新的映射;id_filename是节点的顺序 - if id_filename: - with open(id_filename, 'r') as f: - id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))} # 把节点id(idx)映射成从0开始的索引 - with open(distance_df_filename, 'r') as f: - f.readline() # 略过表头那一行 - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[id_dict[i], id_dict[j]] = 1 - A[id_dict[j], id_dict[i]] = 1 - distaneA[id_dict[i], id_dict[j]] = distance - distaneA[id_dict[j], id_dict[i]] = distance - return A, distaneA - else: # distance file中的id直接从0开始 - with open(distance_df_filename, 'r') as f: - f.readline() - reader = csv.reader(f) - for row in reader: - if len(row) != 3: - continue - i, j, distance = int(row[0]), int(row[1]), float(row[2]) - A[i, j] = 1 - A[j, i] = 1 - distaneA[i, j] = distance - distaneA[j, i] = distance - return A, distaneA +import numpy as np -def get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, 
-def get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, id_filename=None):
-    '''
-    Parameters
-    ----------
-    distance_df_filename: str, path of the csv file contains edges information
+def get_adjacency_matrix(distance_df_filename: str, num_of_vertices: int, id_filename: str = None) -> tuple:
+    """Generate adjacency matrix.
 
-    num_of_vertices: int, the number of vertices
+    Args:
+        distance_df_filename (str): path of the CSV file that contains the edge information
+        num_of_vertices (int): number of vertices
+        id_filename (str, optional): id filename. Defaults to None.
 
-    Returns
-    ----------
-    A: np.ndarray, adjacency matrix
+    Returns:
+        tuple: two adjacency matrices.
+            np.array: connectivity-based adjacency matrix A (A[i, j] = 0 or A[i, j] = 1)
+            np.array: distance-based adjacency matrix A
+    """
 
-    '''
-    if 'npy' in distance_df_filename:
+    if "npy" in distance_df_filename:
         adj_mx = np.load(distance_df_filename)
         return adj_mx, None
     else:
-        A = np.zeros((int(num_of_vertices), int(num_of_vertices)),
-                     dtype=np.float32)
-        distaneA = np.zeros((int(num_of_vertices), int(num_of_vertices)),
-                            dtype=np.float32)
-        # the ids in the distance file do not start from 0, so they must be remapped; id_filename gives the node order
+        adjacency_matrix_connectivity = np.zeros((int(num_of_vertices), int(
+            num_of_vertices)), dtype=np.float32)
+        adjacency_matrix_distance = np.zeros((int(num_of_vertices), int(num_of_vertices)),
+                                             dtype=np.float32)
         if id_filename:
-            with open(id_filename, 'r') as f:
-                id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))}  # map node ids to indices starting from 0
-            with open(distance_df_filename, 'r') as f:
-                f.readline()  # skip the header line
+            # the ids in the distance file do not start from 0, so they need to be remapped
+            with open(id_filename, "r") as f:
+                id_dict = {int(i): idx for idx, i in enumerate(
+                    f.read().strip().split("\n"))}  # map each node id to a 0-based index
+            with open(distance_df_filename, "r") as f:
+                f.readline()  # omit the header line
                 reader = csv.reader(f)
                 for row in reader:
                     if len(row) != 3:
                         continue
                     i, j, distance = int(row[0]), int(row[1]), float(row[2])
-                    A[id_dict[i], id_dict[j]] = 1
-                    A[id_dict[j], id_dict[i]] = 1
-                    distaneA[id_dict[i], id_dict[j]] = distance
-                    distaneA[id_dict[j], id_dict[i]] = distance
-            return A, distaneA
-        else:  # the ids in the distance file already start from 0
-            with open(distance_df_filename, 'r') as f:
+                    adjacency_matrix_connectivity[id_dict[i], id_dict[j]] = 1
+                    adjacency_matrix_connectivity[id_dict[j], id_dict[i]] = 1
+                    adjacency_matrix_distance[id_dict[i],
+                                              id_dict[j]] = distance
+                    adjacency_matrix_distance[id_dict[j],
+                                              id_dict[i]] = distance
+            return adjacency_matrix_connectivity, adjacency_matrix_distance
+        else:
+            # the ids in the distance file already start from 0
+            with open(distance_df_filename, "r") as f:
                 f.readline()
                 reader = csv.reader(f)
                 for row in reader:
                     if len(row) != 3:
                         continue
                     i, j, distance = int(row[0]), int(row[1]), float(row[2])
-                    A[i, j] = 1
-                    A[j, i] = 1
-                    distaneA[i, j] = distance
-                    distaneA[j, i] = distance
-            return A, distaneA
+                    adjacency_matrix_connectivity[i, j] = 1
+                    adjacency_matrix_connectivity[j, i] = 1
+                    adjacency_matrix_distance[i, j] = distance
+                    adjacency_matrix_distance[j, i] = distance
+            return adjacency_matrix_connectivity, adjacency_matrix_distance
+
distance_df_filename.split(".", maxsplit=1)[0] + ".txt" else: id_filename = None - if direction: - adj_mx, distance_mx = get_adjacency_matrix_2direction(distance_df_filename, num_of_vertices, id_filename=id_filename) - else: - adj_mx, distance_mx = get_adjacency_matrix(distance_df_filename, num_of_vertices, id_filename=id_filename) - # TODO: the self loop is missing + adj_mx, distance_mx = get_adjacency_matrix( + distance_df_filename, num_of_vertices, id_filename=id_filename) + # the self loop is missing add_self_loop = False if add_self_loop: + print("adding self loop to adjacency matrices.") adj_mx = adj_mx + np.identity(adj_mx.shape[0]) distance_mx = distance_mx + np.identity(distance_mx.shape[0]) - pickle.dump(adj_mx, open("datasets/raw_data/PEMS08/adj_PEMS08.pkl", 'wb')) - pickle.dump(distance_mx, open("datasets/raw_data/PEMS08/adj_PEMS08_distance.pkl", 'wb')) + else: + print("kindly note that there is no self loop in adjacency matrices.") + with open("datasets/raw_data/PEMS08/adj_PEMS08.pkl", "wb") as f: + pickle.dump(adj_mx, f) + with open("datasets/raw_data/PEMS08/adj_PEMS08_distance.pkl", "wb") as f: + pickle.dump(distance_mx, f) diff --git a/scripts/data_preparation/PEMS08/generate_training_data.py b/scripts/data_preparation/PEMS08/generate_training_data.py index 4ce76e88..c7786a3e 100644 --- a/scripts/data_preparation/PEMS08/generate_training_data.py +++ b/scripts/data_preparation/PEMS08/generate_training_data.py @@ -1,167 +1,163 @@ -import argparse -import pickle +import os +import sys import shutil +import pickle +import argparse + import numpy as np -import os -from generate_adj_mx import generate_adj_PEMS08 - -""" -PEMS08 dataset (traffic flow dataset) default settings: - - normalization: - standard norm - - dataset division: - 6:2:2 - - windows size: - 12 - - features: - traffic flow - --traffic occupy--(not used) - --traffic speed--(not used) - time in day - day in week - - target: - predicting the traffic speed -""" - -def standard_transform(data: np.array, output_dir: str, train_index: list) -> np.array: - """standard normalization. +from generate_adj_mx import generate_adj_pems08 +# TODO: remove it when basicts can be installed by pip +sys.path.append(os.path.abspath(__file__ + "/../../../..")) +from basicts.data.transform import standard_transform - Args: - data (np.array): raw time series data. - output_dir (str): output dir path. - train_index (list): train index. - Returns: - np.array: normalized raw time series data. - """ - # data: L, N, C - data_train = data[:train_index[-1][1], ...] - - mean, std = data_train[..., 0].mean(), data_train[..., 0].std() - - print("mean (training data):", mean) - print("std (training data):", std) - scaler = {} - scaler['func'] = standard_re_transform.__name__ - scaler['args'] = {"mean":mean, "std":std} - pickle.dump(scaler, open(output_dir + "/scaler.pkl", 'wb')) - - def normalize(x): - return (x - mean) / std - - data_norm = normalize(data) - return data_norm - -def standard_re_transform(x, **kwargs): - mean, std = kwargs['mean'], kwargs['std'] - x = x * std - x = x + mean - return x - -def generate_data(args): - """preprocess and generate train/valid/test datasets. +def generate_data(args: argparse.Namespace): + """Preprocess and generate train/valid/test datasets. + Default settings of PEMS08 dataset: + - Normalization method: standard norm. + - Dataset division: 6:2:2. + - Window size: history 12, future 12. 
diff --git a/scripts/data_preparation/PEMS08/generate_training_data.py b/scripts/data_preparation/PEMS08/generate_training_data.py
index 4ce76e88..c7786a3e 100644
--- a/scripts/data_preparation/PEMS08/generate_training_data.py
+++ b/scripts/data_preparation/PEMS08/generate_training_data.py
@@ -1,167 +1,163 @@
-import argparse
-import pickle
+import os
+import sys
 import shutil
+import pickle
+import argparse
+
 import numpy as np
-import os
-from generate_adj_mx import generate_adj_PEMS08
-
-"""
-PEMS08 dataset (traffic flow dataset) default settings:
-    - normalization:
-        standard norm
-    - dataset division:
-        6:2:2
-    - windows size:
-        12
-    - features:
-        traffic flow
-        --traffic occupy--(not used)
-        --traffic speed--(not used)
-        time in day
-        day in week
-    - target:
-        predicting the traffic speed
-"""
-
-def standard_transform(data: np.array, output_dir: str, train_index: list) -> np.array:
-    """standard normalization.
+from generate_adj_mx import generate_adj_pems08
+# TODO: remove this when basicts can be installed via pip
+sys.path.append(os.path.abspath(__file__ + "/../../../.."))
+from basicts.data.transform import standard_transform
 
-    Args:
-        data (np.array): raw time series data.
-        output_dir (str): output dir path.
-        train_index (list): train index.
 
-    Returns:
-        np.array: normalized raw time series data.
-    """
-    # data: L, N, C
-    data_train = data[:train_index[-1][1], ...]
-
-    mean, std = data_train[..., 0].mean(), data_train[..., 0].std()
-
-    print("mean (training data):", mean)
-    print("std (training data):", std)
-    scaler = {}
-    scaler['func'] = standard_re_transform.__name__
-    scaler['args'] = {"mean": mean, "std": std}
-    pickle.dump(scaler, open(output_dir + "/scaler.pkl", 'wb'))
-
-    def normalize(x):
-        return (x - mean) / std
-
-    data_norm = normalize(data)
-    return data_norm
-
-def standard_re_transform(x, **kwargs):
-    mean, std = kwargs['mean'], kwargs['std']
-    x = x * std
-    x = x + mean
-    return x
-
-def generate_data(args):
-    """preprocess and generate train/valid/test datasets.
+def generate_data(args: argparse.Namespace):
+    """Preprocess and generate train/valid/test datasets.
 
+    Default settings of the PEMS08 dataset:
+        - Normalization method: standard norm.
+        - Dataset division: 6:2:2.
+        - Window size: history 12, future 12.
+        - Channels (features): three channels [traffic flow, time of day, day of week].
+        - Target: predict the traffic flow of the future 12 time steps.
 
     Args:
-        args (Namespace): args for processing data.
+        args (argparse.Namespace): configurations of the preprocessing
    """
-    C = args.C
-    future_seq_len = args.future_seq_len
+
+    target_channel = args.target_channel
+    future_seq_len = args.future_seq_len
     history_seq_len = args.history_seq_len
-    add_time_in_day = True
-    add_day_in_week = args.dow
+    add_time_of_day = args.tod
+    add_day_of_week = args.dow
     output_dir = args.output_dir
+    train_ratio = args.train_ratio
+    valid_ratio = args.valid_ratio
+    data_file_path = args.data_file_path
+    graph_file_path = args.graph_file_path
+    steps_per_day = args.steps_per_day
 
     # read data
-    data = np.load(args.data_file_path)['data']
-    data = data[..., C]
-    print("Data shape: {0}".format(data.shape))
+    data = np.load(data_file_path)["data"]
+    data = data[..., target_channel]
+    print("raw time series shape: {0}".format(data.shape))
 
-    L, N, F = data.shape
-    num_samples = L - (history_seq_len + future_seq_len) + 1
+    l, n, f = data.shape
+    num_samples = l - (history_seq_len + future_seq_len) + 1
     train_num_short = round(num_samples * train_ratio)
     valid_num_short = round(num_samples * valid_ratio)
-    test_num_short = num_samples - train_num_short - valid_num_short
-    print("train_num_short:{0}".format(train_num_short))
-    print("valid_num_short:{0}".format(valid_num_short))
-    print("test_num_short:{0}".format(test_num_short))
+    test_num_short = num_samples - train_num_short - valid_num_short
+    print("number of training samples:{0}".format(train_num_short))
+    print("number of validation samples:{0}".format(valid_num_short))
+    print("number of test samples:{0}".format(test_num_short))
 
-    index_list = []
+    index_list = []
     for t in range(history_seq_len, num_samples + history_seq_len):
         index = (t-history_seq_len, t, t+future_seq_len)
         index_list.append(index)
+
     train_index = index_list[:train_num_short]
     valid_index = index_list[train_num_short: train_num_short + valid_num_short]
-    test_index = index_list[train_num_short + valid_num_short: train_num_short + valid_num_short + test_num_short]
-
+    test_index = index_list[train_num_short +
+                            valid_num_short: train_num_short + valid_num_short + test_num_short]
 
     scaler = standard_transform
     data_norm = scaler(data, output_dir, train_index)
 
     # add external feature
     feature_list = [data_norm]
-    if add_time_in_day:
-        # numerical time_in_day
-        time_ind = [i%args.steps_per_day / args.steps_per_day for i in range(data_norm.shape[0])]
-        time_ind = np.array(time_ind)
-        time_in_day = np.tile(time_ind, [1, N, 1]).transpose((2, 1, 0))
-        feature_list.append(time_in_day)
-    if add_day_in_week:
-        # numerical day_in_week
-        day_in_week = [(i // args.steps_per_day)%7 for i in range(data_norm.shape[0])]
-        day_in_week = np.array(day_in_week)
-        day_in_week = np.tile(day_in_week, [1, N, 1]).transpose((2, 1, 0))
-        feature_list.append(day_in_week)
-
+    if add_time_of_day:
+        # numerical time_of_day
+        tod = [i % steps_per_day /
+               steps_per_day for i in range(data_norm.shape[0])]
+        tod = np.array(tod)
+        tod_tiled = np.tile(tod, [1, n, 1]).transpose((2, 1, 0))
+        feature_list.append(tod_tiled)
+
+    if add_day_of_week:
+        # numerical day_of_week
+        dow = [(i // steps_per_day) % 7 for i in range(data_norm.shape[0])]
+        dow = np.array(dow)
+        dow_tiled = np.tile(dow, [1, n, 1]).transpose((2, 1, 0))
+        feature_list.append(dow_tiled)
+
     processed_data = np.concatenate(feature_list, axis=-1)
 
     # dump data
     index = {}
-    index['train'] = train_index
-    index['valid'] = valid_index
-    index['test'] = test_index
-    pickle.dump(index, open(output_dir + "/index.pkl", "wb"))
+    index["train"] = train_index
+    index["valid"] = valid_index
+    index["test"] = test_index
+    with open(output_dir + "/index_in{0}_out{1}.pkl".format(history_seq_len, future_seq_len), "wb") as f:
+        pickle.dump(index, f)
 
     data = {}
-    data['processed_data'] = processed_data
-    pickle.dump(data, open(output_dir + "/data.pkl", "wb"))
+    data["processed_data"] = processed_data
+    with open(output_dir + "/data.pkl", "wb") as f:
+        pickle.dump(data, f)
 
     # copy adj
     if os.path.exists(args.graph_file_path):
-        shutil.copyfile(args.graph_file_path, output_dir + '/adj_mx.pkl')  # copy models
+        # copy
+        shutil.copyfile(args.graph_file_path, output_dir + "/adj_mx.pkl")
     else:
-        generate_adj_PEMS08()
-        shutil.copyfile(args.graph_file_path, output_dir + '/adj_mx.pkl')  # copy models
+        # generate and copy
+        generate_adj_pems08()
+        shutil.copyfile(graph_file_path, output_dir + "/adj_mx.pkl")
+
 
 if __name__ == "__main__":
-    history_seq_len = 12    # sliding window size for generating history sequence and target sequence
-    future_seq_len = 12
-
-    train_ratio = 0.6
-    valid_ratio = 0.2
-    C = [0]                 # selected channels
-    steps_per_day = 288     # 5min
-
-    name = "PEMS08"
-    dow = True              # if add day_of_week feature
-    output_dir = 'datasets/' + name
-    data_file_path = 'datasets/raw_data/{0}/{1}.npz'.format(name, name)
-    graph_file_path = 'datasets/raw_data/{0}/adj_{1}.pkl'.format(name, name)
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--output_dir", type=str, default=output_dir, help="Output directory.")
-    parser.add_argument("--data_file_path", type=str, default=data_file_path, help="Raw traffic readings.",)
-    parser.add_argument("--graph_file_path", type=str, default=graph_file_path, help="Raw traffic readings.",)
-    parser.add_argument("--history_seq_len", type=int, default=history_seq_len, help="Sequence Length.")
-    parser.add_argument("--future_seq_len", type=int, default=future_seq_len, help="Sequence Length.")
-    parser.add_argument("--steps_per_day", type=int, default=steps_per_day, help="Sequence Length.")
-    parser.add_argument("--dow", type=bool, default=dow, help='Add feature day_of_week.')
-    parser.add_argument("--C", type=list, default=C, help='Selected channels.')
-    parser.add_argument("--train_ratio", type=float, default=train_ratio, help='Train ratio')
-    parser.add_argument("--valid_ratio", type=float, default=valid_ratio, help='Validate ratio.')
-
-    args = parser.parse_args()
-    if os.path.exists(args.output_dir):
-        reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? (y/n)')).lower().strip()
-        if reply[0] != 'y': exit
+    # sliding window size for generating history sequence and target sequence
+    HISTORY_SEQ_LEN = 12
+    FUTURE_SEQ_LEN = 12
+
+    TRAIN_RATIO = 0.6
+    VALID_RATIO = 0.2
+    TARGET_CHANNEL = [0]  # target channel(s)
+    STEPS_PER_DAY = 288
+
+    DATASET_NAME = "PEMS08"
+    TOD = True  # whether to add the time_of_day feature
+    DOW = True  # whether to add the day_of_week feature
+    OUTPUT_DIR = "datasets/" + DATASET_NAME
+    DATA_FILE_PATH = "datasets/raw_data/{0}/{0}.npz".format(DATASET_NAME)
+    GRAPH_FILE_PATH = "datasets/raw_data/{0}/adj_{0}.pkl".format(DATASET_NAME)
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--output_dir", type=str,
+                        default=OUTPUT_DIR, help="Output directory.")
+    parser.add_argument("--data_file_path", type=str,
+                        default=DATA_FILE_PATH, help="Raw traffic readings.")
+    parser.add_argument("--graph_file_path", type=str,
+                        default=GRAPH_FILE_PATH, help="Adjacency matrix file path.")
+    parser.add_argument("--history_seq_len", type=int,
+                        default=HISTORY_SEQ_LEN, help="History sequence length.")
+    parser.add_argument("--future_seq_len", type=int,
+                        default=FUTURE_SEQ_LEN, help="Future sequence length.")
+    parser.add_argument("--steps_per_day", type=int,
+                        default=STEPS_PER_DAY, help="Number of time steps per day.")
+    parser.add_argument("--tod", type=bool, default=TOD,
+                        help="Add feature time_of_day.")
+    parser.add_argument("--dow", type=bool, default=DOW,
+                        help="Add feature day_of_week.")
+    parser.add_argument("--target_channel", type=list,
+                        default=TARGET_CHANNEL, help="Selected channels.")
+    parser.add_argument("--train_ratio", type=float,
+                        default=TRAIN_RATIO, help="Train ratio.")
+    parser.add_argument("--valid_ratio", type=float,
+                        default=VALID_RATIO, help="Validation ratio.")
+    args_metr = parser.parse_args()
+
+    # print args
+    print("-"*(20+45+5))
+    for key, value in sorted(vars(args_metr).items()):
+        print("|{0:>20} = {1:<45}|".format(key, str(value)))
+    print("-"*(20+45+5))
+
+    if os.path.exists(args_metr.output_dir):
+        reply = str(input(
+            f"{args_metr.output_dir} exists. Do you want to overwrite it? (y/n) ")).lower().strip()
+        if reply[0] != "y":
+            sys.exit(0)
     else:
-        os.makedirs(args.output_dir)
-    generate_data(args)
+        os.makedirs(args_metr.output_dir)
+    generate_data(args_metr)
(y/n)")).lower().strip() + if reply[0] != "y": + sys.exit(0) else: - os.makedirs(args.output_dir) - generate_data(args) + os.makedirs(args_metr.output_dir) + generate_data(args_metr) diff --git a/scripts/data_preparation/all.sh b/scripts/data_preparation/all.sh new file mode 100755 index 00000000..43f7506f --- /dev/null +++ b/scripts/data_preparation/all.sh @@ -0,0 +1,7 @@ +#!/bin/bash +python scripts/data_preparation/METR-LA/generate_training_data.py +python scripts/data_preparation/PEMS-BAY/generate_training_data.py +python scripts/data_preparation/PEMS03/generate_training_data.py +python scripts/data_preparation/PEMS04/generate_training_data.py +python scripts/data_preparation/PEMS07/generate_training_data.py +python scripts/data_preparation/PEMS08/generate_training_data.py diff --git a/scripts/node2vec/generate_node_embeddings.py b/scripts/node2vec/generate_node_embeddings.py deleted file mode 100644 index af436ea3..00000000 --- a/scripts/node2vec/generate_node_embeddings.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Code ref: https://github.com/zhengchuanpan/GMAN/blob/master/METR/node2vec/generateSE.py -""" - -import os -import sys -sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))) -import argparse -import node2vec -import networkx as nx -from gensim.models import Word2Vec -from basicts.utils.serialization import load_pkl - -def generate_node_embeddings(args): - try: - # METR and PEMS_BAY - _, _, adj_mx = load_pkl("datasets/{0}/adj_mx.pkl".format(args.dataset_name)) - except: - # PEMS0X - adj_mx = load_pkl("datasets/{0}/adj_mx.pkl".format(args.dataset_name)) - - nx_G = nx.from_numpy_array(adj_mx, create_using=nx.DiGraph()) - G = node2vec.Graph(nx_G, args.is_directed, args.p, args.q) - G.preprocess_transition_probs() - walks = G.simulate_walks(args.num_walks, args.walk_length) - - walks = [list(map(str, walk)) for walk in walks] - - model = Word2Vec(walks, vector_size = args.vector_size, window = 10, min_count=0, sg=1, workers = 8, epochs = args.epochs) - model.wv.save_word2vec_format("datasets/{0}/node2vec_emb.txt".format(args.dataset_name)) - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--dataset_name", type=str, default='METR-LA', help='dataset name.') - parser.add_argument("--is_directed", type=bool, default=True, help="direct graph.") - parser.add_argument("--p", type=int, default=2, help="p in node2vec.",) - parser.add_argument("--q", type=int, default=1, help="q in node2vec.",) - parser.add_argument("--num_walks", type=int, default=100, help="number of walks..",) - parser.add_argument("--vector_size", type=int, default=64, help='dimension of node vector.') - parser.add_argument("--walk_length", type=int, default=80, help='walk length.') - parser.add_argument("--epochs", type=int, default=1000, help='epochs') - args = parser.parse_args() - print(args) - generate_node_embeddings(args) diff --git a/scripts/node2vec/node2vec.py b/scripts/node2vec/node2vec.py deleted file mode 100644 index c03f312a..00000000 --- a/scripts/node2vec/node2vec.py +++ /dev/null @@ -1,157 +0,0 @@ -''' -Aditya Grover and Jure Leskovec. node2vec: Scalable Feature Learning for Networks. In KDD, 2016. 
-
-    def simulate_walks(self, num_walks, walk_length):
-        '''
-        Repeatedly simulate random walks from each node.
-        '''
-        G = self.G
-        walks = []
-        nodes = list(G.nodes())
-        print('Walk iteration:')
-        for walk_iter in range(num_walks):
-            print(str(walk_iter+1), '/', str(num_walks))
-            random.shuffle(nodes)
-            for node in nodes:
-                walks.append(self.node2vec_walk(walk_length=walk_length, start_node=node))
-
-        return walks
-
-    def get_alias_edge(self, src, dst):
-        '''
-        Get the alias edge setup lists for a given edge.
-        '''
-        G = self.G
-        p = self.p
-        q = self.q
-
-        unnormalized_probs = []
-        for dst_nbr in sorted(G.neighbors(dst)):
-            if dst_nbr == src:
-                unnormalized_probs.append(G[dst][dst_nbr]['weight']/p)
-            elif G.has_edge(dst_nbr, src):
-                unnormalized_probs.append(G[dst][dst_nbr]['weight'])
-            else:
-                unnormalized_probs.append(G[dst][dst_nbr]['weight']/q)
-        norm_const = sum(unnormalized_probs)
-        normalized_probs = [float(u_prob)/norm_const for u_prob in unnormalized_probs]
-
-        return alias_setup(normalized_probs)
-
-    def preprocess_transition_probs(self):
-        '''
-        Preprocessing of transition probabilities for guiding the random walks.
-        '''
-        G = self.G
-        is_directed = self.is_directed
-
-        alias_nodes = {}
-        for node in G.nodes():
-            unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted(G.neighbors(node))]
-            norm_const = sum(unnormalized_probs)
-            normalized_probs = [float(u_prob)/norm_const for u_prob in unnormalized_probs]
-            alias_nodes[node] = alias_setup(normalized_probs)
-
-        alias_edges = {}
-        triads = {}
-
-        if is_directed:
-            for edge in G.edges():
-                alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
-        else:
-            for edge in G.edges():
-                alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
-                alias_edges[(edge[1], edge[0])] = self.get_alias_edge(edge[1], edge[0])
-
-        self.alias_nodes = alias_nodes
-        self.alias_edges = alias_edges
-
-        return
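Since this module is being removed, a toy sketch for orientation of how Graph was driven (a three-node directed cycle; edge weights are required because get_alias_edge reads G[dst][nbr]['weight']):

    import networkx as nx

    nx_g = nx.DiGraph()
    nx_g.add_weighted_edges_from([(0, 1, 1.0), (1, 2, 1.0), (2, 0, 1.0)])

    g = Graph(nx_g, is_directed=True, p=2, q=1)
    g.preprocess_transition_probs()     # build alias tables for nodes and edges
    walks = g.simulate_walks(num_walks=10, walk_length=5)
    print(walks[0])                     # on this cycle, e.g. [0, 1, 2, 0, 1]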
-
-
-def alias_setup(probs):
-    '''
-    Compute utility lists for non-uniform sampling from discrete distributions.
-    Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
-    for details
-    '''
-    K = len(probs)
-    q = np.zeros(K)
-    J = np.zeros(K, dtype=np.int)
-
-    smaller = []
-    larger = []
-    for kk, prob in enumerate(probs):
-        q[kk] = K*prob
-        if q[kk] < 1.0:
-            smaller.append(kk)
-        else:
-            larger.append(kk)
-
-    while len(smaller) > 0 and len(larger) > 0:
-        small = smaller.pop()
-        large = larger.pop()
-
-        J[small] = large
-        q[large] = q[large] + q[small] - 1.0
-        if q[large] < 1.0:
-            smaller.append(large)
-        else:
-            larger.append(large)
-
-    return J, q
-
-def alias_draw(J, q):
-    '''
-    Draw sample from a non-uniform discrete distribution using alias sampling.
-    '''
-    K = len(J)
-
-    kk = int(np.floor(np.random.rand()*K))
-    if np.random.rand() < q[kk]:
-        return kk
-    else:
-        return J[kk]
-
\ No newline at end of file
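The alias tables make each draw O(1) after an O(K) setup. A short frequency check against the input distribution (note the deleted code uses the np.int alias, which was removed in NumPy 1.24, so this sketch assumes np.int has been replaced by int on modern NumPy):

    import numpy as np

    probs = [0.1, 0.2, 0.3, 0.4]
    J, q = alias_setup(probs)           # build alias and probability tables once

    counts = np.zeros(len(probs))
    for _ in range(100000):
        counts[alias_draw(J, q)] += 1   # each draw costs O(1)

    print(counts / counts.sum())        # should be close to [0.1, 0.2, 0.3, 0.4]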
diff --git a/test/test_data.py b/test/test_data.py
deleted file mode 100644
index 855be374..00000000
--- a/test/test_data.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import numpy as np
-import pickle as pkl
-name = 'METR-LA'
-name = 'PEMS-BAY'
-
-data = pkl.load(open('datasets/{0}/data.pkl'.format(name), 'rb'))  # 34272, 207, 3
-args = pkl.load(open('datasets/{0}/args.pkl'.format(name), 'rb'))
-index = pkl.load(open('datasets/{0}/index.pkl'.format(name), 'rb'))
diff --git a/test/test_dataset.py b/test/test_dataset.py
deleted file mode 100644
index 8cb92522..00000000
--- a/test/test_dataset.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-import sys
-sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
-from operator import index
-from basicts.data.base_dataset import BaseDataset
-
-dataset_name = 'METR-LA'
-raw_file_path = 'datasets/{0}/data.pkl'.format(dataset_name)
-index_file_path = 'datasets/{0}/index.pkl'.format(dataset_name)
-mode = 'train'
-dataset = BaseDataset(raw_file_path, index_file_path, mode)
-for _ in dataset:
-    data = _
-    a = 1
-    break
\ No newline at end of file
diff --git a/test/test_model.py b/test/test_model.py
deleted file mode 100644
index 50e24692..00000000
--- a/test/test_model.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import torch
-import torch.nn as nn
-import os
-import sys
-sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
-
-from basicts.archs.Stat_arch import SimpleMovingAverage, AutoRegressive, VectorAutoRegression
-from basicts.archs.MTGNN_arch import MTGNN
-sma = SimpleMovingAverage(12, 12, 12)
-data = torch.randn(64, 12, 207, 3)
-pred = sma(data)
-
-wma = AutoRegressive(12, 12, 12)
-data = torch.randn(64, 12, 207, 3)
-pred = wma(data)
-
-var = VectorAutoRegression(12, 12, 12, 207)
-data = torch.randn(64, 12, 207, 3)
-pred = var(data)
-a = 1
\ No newline at end of file
diff --git a/tests/test_all.py b/tests/test_all.py
new file mode 100644
index 00000000..2e919d31
--- /dev/null
+++ b/tests/test_all.py
@@ -0,0 +1,19 @@
+import os
+import time
+import sys
+
+from utils import test
+
+sys.path.append(os.path.abspath(__file__ + "/../.."))
+
+if __name__ == "__main__":
+    MODELS = os.listdir("examples")
+    MODELS.remove("run.py")
+    DATASETS = os.listdir("datasets")
+    DATASETS.remove("raw_data")
+    DATASETS.remove("README.md")
+
+    with open("test_all_" + time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()) + ".log", 'w') as f:
+        for model in MODELS:
+            for dataset in DATASETS:
+                test(model, dataset, f)
diff --git a/tests/test_specific.py b/tests/test_specific.py
new file mode 100644
index 00000000..0fdf47f6
--- /dev/null
+++ b/tests/test_specific.py
@@ -0,0 +1,38 @@
+import os
+import time
+import sys
+
+from utils import test
+
+sys.path.append(os.path.abspath(__file__ + "/../.."))
+
+if __name__ == "__main__":
+    MODELS = os.listdir("examples")
+    MODELS.remove("run.py")
+
+    DATASETS = os.listdir("datasets")
+    DATASETS.remove("raw_data")
+    DATASETS.remove("README.md")
+
+    print("Currently supported models: {0}".format(MODELS))
+    print("Currently supported datasets: {0}".format(DATASETS))
+
+    reply_models = str(input("Please select the names of the models to test, separated by commas: "))
+    if reply_models == "":
+        reply_models = MODELS
+    else:
+        reply_models = reply_models.strip().replace(" ", "").split(",")
+
+    reply_datasets = str(input("Please select the names of the datasets to test, separated by commas: "))
+    if reply_datasets == "":
+        reply_datasets = DATASETS
+    else:
+        reply_datasets = reply_datasets.strip().replace(" ", "").split(",")
+
+    print("Models to test: {0}".format(reply_models))
+    print("Datasets to test: {0}".format(reply_datasets))
+
+    with open("test_specific_" + time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()) + ".log", 'w') as f:
+        for model in reply_models:
+            for dataset in reply_datasets:
+                test(model, dataset, f)
diff --git a/tests/utils.py b/tests/utils.py
new file mode 100644
index 00000000..2d86251c
--- /dev/null
+++ b/tests/utils.py
@@ -0,0 +1,26 @@
+import logging
+import traceback
+
+import easytorch
+from easytorch import launch_training
+
+
+def test(model, dataset, exception_log_file):
+    CFG = __import__("examples.{0}.{0}_{1}".format(model, dataset),
+                     fromlist=["examples.{0}.{0}_{1}".format(model, dataset)]).CFG
+    # CFG.TRAIN.NUM_EPOCHS = 1
+    # CFG.ENV.SEED = seed
+    print(("*" * 60 + "{0:>10}" + "@{1:<10}" + "*" * 60).format(model, dataset))
+    try:
+        launch_training(CFG, "0")
+    except Exception:
+        exception_log_file.write("\n" + "*" * 60 + "{0:>10}@{1:<22}".format(
+            model, dataset + " test failed.") + "*" * 60 + "\n")
+        traceback.print_exc(limit=1, file=exception_log_file)
+    # safely delete all the handlers of the 'easytorch-training' logger and re-add them, so as to get the correct log file path.
+    logger = logging.getLogger("easytorch-training")
+    for h in logger.handlers:
+        h.close()
+    logger.handlers = []
+    easytorch.utils.logging.logger_initialized.remove("easytorch-training")
+
+    print("*" * 141)
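The __import__(name, fromlist=[...]) idiom in utils.test is just a way to get the leaf module back instead of the top-level package; importlib expresses the same thing more readably. A sketch of the equivalent (the "GWNet" model name below is hypothetical):

    import importlib

    def load_cfg(model: str, dataset: str):
        # equivalent to the __import__(..., fromlist=[...]) call in utils.test
        module = importlib.import_module("examples.{0}.{0}_{1}".format(model, dataset))
        return module.CFG

    # load_cfg("GWNet", "PEMS07") would import examples/GWNet/GWNet_PEMS07.py
    # and return its CFG object.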