diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..6be1e24
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,38 @@
+# Git
+.git
+.gitignore
+.github
+
+# Docker
+.dockerignore
+
+# IDE
+.idea
+.vscode
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+**/__pycache__/
+*.pyc
+*.pyo
+*.pyd
+.Python
+*.py[cod]
+*$py.class
+.pytest_cache/
+.mypy_cache/
+
+# poetry
+.venv
+
+# C extensions
+*.so
+
+# Virtual environment
+.venv
+venv
+
+.DS_Store
+.AppleDouble
+.LSOverride
+._*
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..7f578f1
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,24 @@
+# Check http://editorconfig.org for more information
+# This is the main config file for this project:
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+[*.{py,pyi}]
+indent_style = space
+indent_size = 4
+
+[Makefile]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+[*.{diff,patch}]
+trim_trailing_whitespace = false
diff --git a/.github/stale.yml b/.github/stale.yml
new file mode 100644
index 0000000..dc90e5a
--- /dev/null
+++ b/.github/stale.yml
@@ -0,0 +1,17 @@
+# Number of days of inactivity before an issue becomes stale
+daysUntilStale: 60
+# Number of days of inactivity before a stale issue is closed
+daysUntilClose: 7
+# Issues with these labels will never be considered stale
+exemptLabels:
+ - pinned
+ - security
+# Label to use when marking an issue as stale
+staleLabel: wontfix
+# Comment to post when marking an issue as stale. Set to `false` to disable
+markComment: >
+ This issue has been automatically marked as stale because it has not had
+ recent activity. It will be closed if no further activity occurs. Thank you
+ for your contributions.
+# Comment to post when closing a stale issue. Set to `false` to disable
+closeComment: false
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..bdd7677
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,42 @@
+---
+name: 🐛 Bug report
+about: If something isn't working 🔧
+title: ''
+labels: bug
+assignees:
+---
+
+## 🐛 Bug Report
+
+
+
+## 🔬 How To Reproduce
+
+Steps to reproduce the behavior:
+
+1. ...
+
+### Code sample
+
+
+
+### Environment
+
+* OS: [e.g. Linux / Windows / macOS]
+* Python version, get it with:
+
+```bash
+python --version
+```
+
+### Screenshots
+
+
+
+## 📈 Expected behavior
+
+
+
+## 📎 Additional context
+
+
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..8f2da54
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,3 @@
+# Configuration: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository
+
+blank_issues_enabled: false
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..c387120
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,23 @@
+---
+name: 🚀 Feature request
+about: Suggest an idea for this project 🏖
+title: ''
+labels: enhancement
+assignees:
+---
+
+## 🚀 Feature Request
+
+
+
+## 🔈 Motivation
+
+
+
+## 🛰 Alternatives
+
+
+
+## 📎 Additional context
+
+
diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md
new file mode 100644
index 0000000..8378d51
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/question.md
@@ -0,0 +1,25 @@
+---
+name: ❓ Question
+about: Ask a question about this project 🎓
+title: ""
+labels: question
+assignees:
+---
+
+## Checklist
+
+
+
+- [ ] I've searched the project's [`issues`](https://github.com/ezhang7423/cfpi/issues?q=is%3Aissue).
+
+## ❓ Question
+
+
+
+How can I [...]?
+
+Is it possible to [...]?
+
+## 📎 Additional context
+
+
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 0000000..804b334
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,28 @@
+## Description
+
+
+
+## Related Issue
+
+
+
+## Type of Change
+
+
+
+- [ ] 📚 Examples / docs / tutorials / dependencies update
+- [ ] 🔧 Bug fix (non-breaking change which fixes an issue)
+- [ ] 🥂 Improvement (non-breaking change which improves an existing feature)
+- [ ] 🚀 New feature (non-breaking change which adds functionality)
+- [ ] 💥 Breaking change (fix or feature that would cause existing functionality to change)
+- [ ] 🔐 Security fix
+
+## Checklist
+
+
+
+- [ ] I've read the [`CODE_OF_CONDUCT.md`](https://github.com/ezhang7423/cfpi/blob/master/CODE_OF_CONDUCT.md) document.
+- [ ] I've read the [`CONTRIBUTING.md`](https://github.com/ezhang7423/cfpi/blob/master/CONTRIBUTING.md) guide.
+- [ ] I've updated the code style using `make codestyle`.
+- [ ] I've written tests for all new methods and classes that I created.
+- [ ] I've written the docstring in Google format for all the methods and classes that I used.
diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml
new file mode 100644
index 0000000..0ce0984
--- /dev/null
+++ b/.github/release-drafter.yml
@@ -0,0 +1,28 @@
+# Release drafter configuration https://github.com/release-drafter/release-drafter#configuration
+# Emojis were chosen to match the https://gitmoji.carloscuesta.me/
+
+name-template: "v$NEXT_PATCH_VERSION"
+tag-template: "v$NEXT_PATCH_VERSION"
+
+categories:
+ - title: ":rocket: Features"
+ labels: [enhancement, feature]
+ - title: ":wrench: Fixes & Refactoring"
+ labels: [bug, refactoring, bugfix, fix]
+ - title: ":package: Build System & CI/CD"
+ labels: [build, ci, testing]
+ - title: ":boom: Breaking Changes"
+ labels: [breaking]
+ - title: ":pencil: Documentation"
+ labels: [documentation]
+ - title: ":arrow_up: Dependencies updates"
+ labels: [dependencies]
+
+template: |
+  ## What's Changed
+
+ $CHANGES
+
+ ## :busts_in_silhouette: List of contributors
+
+ $CONTRIBUTORS
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000..d210e28
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,52 @@
+name: build
+
+on: [push, pull_request]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ["3.8"]
+
+ steps:
+ - name: noop
+ run: echo 'done'
+
+ # - uses: actions/checkout@v2
+ # - name: Set up Python ${{ matrix.python-version }}
+ # uses: actions/setup-python@v2.2.2
+ # with:
+ # python-version: ${{ matrix.python-version }}
+
+ # - name: Install poetry
+ # run: make poetry-download
+
+ # - name: Set up cache
+ # uses: actions/cache@v2.1.6
+ # with:
+ # path: .venv
+ # key: venv-${{ matrix.python-version }}-${{ hashFiles('pyproject.toml') }}-${{ hashFiles('poetry.lock') }}
+ # - name: Install dependencies # TODO fix
+ # run: |
+ # poetry config virtualenvs.in-project true
+ # pip install swig numpy==1.24.4 patchelf "cython<3" lockfile
+ # pip install mujoco-py==1.50.1.68
+ # poetry install
+
+ # - name: Run style checks
+ # run: |
+ # make check-codestyle
+
+ # - name: Run tests
+ # run: |
+ # make test
+
+ # - name: Publish
+ # continue-on-error: true
+ # run: |
+ # poetry publish --build -u ezipe -p ${{ secrets.PYPI_PASSWORD }}
+
+ # - name: Run safety checks
+ # run: |
+ # make check-safety
diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml
new file mode 100644
index 0000000..a1f6e89
--- /dev/null
+++ b/.github/workflows/greetings.yml
@@ -0,0 +1,16 @@
+name: Greetings
+
+on: [pull_request, issues]
+
+jobs:
+ greeting:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/first-interaction@v1
+ with:
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
+ pr-message: 'Hello @${{ github.actor }}, thank you for submitting a PR! We will respond as soon as possible.'
+ issue-message: |
+ Hello @${{ github.actor }}, thank you for your interest in our work!
+
+ If this is a bug report, please provide screenshots and **minimum viable code to reproduce your issue**, otherwise we can not help you.
diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml
new file mode 100644
index 0000000..f55b27f
--- /dev/null
+++ b/.github/workflows/release-drafter.yml
@@ -0,0 +1,16 @@
+name: Release Drafter
+
+on:
+ push:
+ # branches to consider in the event; optional, defaults to all
+ branches:
+ - master
+
+jobs:
+ update_release_draft:
+ runs-on: ubuntu-latest
+ steps:
+ # Drafts your next Release notes as Pull Requests are merged into "master"
+ - uses: release-drafter/release-drafter@v5.15.0
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..c051ee7
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,642 @@
+# Created by https://www.gitignore.io/api/osx,python,pycharm,windows,visualstudio,visualstudiocode
+# Edit at https://www.gitignore.io/?templates=osx,python,pycharm,windows,visualstudio,visualstudiocode
+*/*/mjkey.txt
+**/.DS_STORE
+**/*.pyc
+**/*.swp
+rlkit/conf_private.py
+MANIFEST
+*.egg-info
+\.idea/
+__pycache__
+data/
+wandb/
+tmp/
+models
+*.pt
+viskit
+replay-buffers
+viskit
+*.png
+test.*
+*.png
+**.pt
+**conf_private.py
+*.npy
+data
+tmp
+**conf_*
+
+### OSX ###
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+### PyCharm ###
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# Generated files
+.idea/**/contentModel.xml
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+# *.iml
+# *.ipr
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+
+# Android studio 3.1+ serialized cache file
+.idea/caches/build_file_checksums.ser
+
+### PyCharm Patch ###
+# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
+
+# *.iml
+# modules.xml
+# .idea/misc.xml
+# *.ipr
+
+# Sonarlint plugin
+.idea/**/sonarlint/
+
+# SonarQube Plugin
+.idea/**/sonarIssues.xml
+
+# Markdown Navigator plugin
+.idea/**/markdown-navigator.xml
+.idea/**/markdown-navigator/
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# pyenv
+.python-version
+
+# poetry
+.venv
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# Mr Developer
+.mr.developer.cfg
+.project
+.pydevproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# Plugins
+.secrets.baseline
+
+### VisualStudioCode ###
+.vscode/*
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+
+### VisualStudioCode Patch ###
+# Ignore all local history of files
+.history
+
+### Windows ###
+# Windows thumbnail cache files
+Thumbs.db
+Thumbs.db:encryptable
+ehthumbs.db
+ehthumbs_vista.db
+
+# Dump file
+*.stackdump
+
+# Folder config file
+[Dd]esktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msix
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+
+### VisualStudio ###
+## Ignore Visual Studio temporary files, build results, and
+## files generated by popular Visual Studio add-ons.
+##
+## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
+
+# User-specific files
+*.rsuser
+*.suo
+*.user
+*.userosscache
+*.sln.docstates
+
+# User-specific files (MonoDevelop/Xamarin Studio)
+*.userprefs
+
+# Mono auto generated files
+mono_crash.*
+
+# Build results
+[Dd]ebug/
+[Dd]ebugPublic/
+[Rr]elease/
+[Rr]eleases/
+x64/
+x86/
+[Aa][Rr][Mm]/
+[Aa][Rr][Mm]64/
+bld/
+[Bb]in/
+[Oo]bj/
+[Ll]og/
+
+# Visual Studio 2015/2017 cache/options directory
+.vs/
+# Uncomment if you have tasks that create the project's static files in wwwroot
+#wwwroot/
+
+# Visual Studio 2017 auto generated files
+Generated\ Files/
+
+# MSTest test Results
+[Tt]est[Rr]esult*/
+[Bb]uild[Ll]og.*
+
+# NUnit
+*.VisualState.xml
+TestResult.xml
+nunit-*.xml
+
+# Build Results of an ATL Project
+[Dd]ebugPS/
+[Rr]eleasePS/
+dlldata.c
+
+# Benchmark Results
+BenchmarkDotNet.Artifacts/
+
+# .NET Core
+project.lock.json
+project.fragment.lock.json
+artifacts/
+
+# StyleCop
+StyleCopReport.xml
+
+# Files built by Visual Studio
+*_i.c
+*_p.c
+*_h.h
+*.ilk
+*.obj
+*.iobj
+*.pch
+*.pdb
+*.ipdb
+*.pgc
+*.pgd
+*.rsp
+*.sbr
+*.tlb
+*.tli
+*.tlh
+*.tmp
+*.tmp_proj
+*_wpftmp.csproj
+*.log
+*.vspscc
+*.vssscc
+.builds
+*.pidb
+*.svclog
+*.scc
+
+# Chutzpah Test files
+_Chutzpah*
+
+# Visual C++ cache files
+ipch/
+*.aps
+*.ncb
+*.opendb
+*.opensdf
+*.sdf
+*.cachefile
+*.VC.db
+*.VC.VC.opendb
+
+# Visual Studio profiler
+*.psess
+*.vsp
+*.vspx
+*.sap
+
+# Visual Studio Trace Files
+*.e2e
+
+# TFS 2012 Local Workspace
+$tf/
+
+# Guidance Automation Toolkit
+*.gpState
+
+# ReSharper is a .NET coding add-in
+_ReSharper*/
+*.[Rr]e[Ss]harper
+*.DotSettings.user
+
+# JustCode is a .NET coding add-in
+.JustCode
+
+# TeamCity is a build add-in
+_TeamCity*
+
+# DotCover is a Code Coverage Tool
+*.dotCover
+
+# AxoCover is a Code Coverage Tool
+.axoCover/*
+!.axoCover/settings.json
+
+# Visual Studio code coverage results
+*.coverage
+*.coveragexml
+
+# NCrunch
+_NCrunch_*
+.*crunch*.local.xml
+nCrunchTemp_*
+
+# MightyMoose
+*.mm.*
+AutoTest.Net/
+
+# Web workbench (sass)
+.sass-cache/
+
+# Installshield output folder
+[Ee]xpress/
+
+# DocProject is a documentation generator add-in
+DocProject/buildhelp/
+DocProject/Help/*.HxT
+DocProject/Help/*.HxC
+DocProject/Help/*.hhc
+DocProject/Help/*.hhk
+DocProject/Help/*.hhp
+DocProject/Help/Html2
+DocProject/Help/html
+
+# Click-Once directory
+publish/
+
+# Publish Web Output
+*.[Pp]ublish.xml
+*.azurePubxml
+# Note: Comment the next line if you want to checkin your web deploy settings,
+# but database connection strings (with potential passwords) will be unencrypted
+*.pubxml
+*.publishproj
+
+# Microsoft Azure Web App publish settings. Comment the next line if you want to
+# checkin your Azure Web App publish settings, but sensitive information contained
+# in these scripts will be unencrypted
+PublishScripts/
+
+# NuGet Packages
+*.nupkg
+# NuGet Symbol Packages
+*.snupkg
+# The packages folder can be ignored because of Package Restore
+**/[Pp]ackages/*
+# except build/, which is used as an MSBuild target.
+!**/[Pp]ackages/build/
+# Uncomment if necessary however generally it will be regenerated when needed
+#!**/[Pp]ackages/repositories.config
+# NuGet v3's project.json files produces more ignorable files
+*.nuget.props
+*.nuget.targets
+
+# Microsoft Azure Build Output
+csx/
+*.build.csdef
+
+# Microsoft Azure Emulator
+ecf/
+rcf/
+
+# Windows Store app package directories and files
+AppPackages/
+BundleArtifacts/
+Package.StoreAssociation.xml
+_pkginfo.txt
+*.appx
+*.appxbundle
+*.appxupload
+
+# Visual Studio cache files
+# files ending in .cache can be ignored
+*.[Cc]ache
+# but keep track of directories ending in .cache
+!?*.[Cc]ache/
+
+# Others
+ClientBin/
+~$*
+*~
+*.dbmdl
+*.dbproj.schemaview
+*.jfm
+*.pfx
+*.publishsettings
+orleans.codegen.cs
+
+# Including strong name files can present a security risk
+# (https://github.com/github/gitignore/pull/2483#issue-259490424)
+#*.snk
+
+# Since there are multiple workflows, uncomment next line to ignore bower_components
+# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
+#bower_components/
+
+# RIA/Silverlight projects
+Generated_Code/
+
+# Backup & report files from converting an old project file
+# to a newer Visual Studio version. Backup files are not needed,
+# because we have git ;-)
+_UpgradeReport_Files/
+Backup*/
+UpgradeLog*.XML
+UpgradeLog*.htm
+ServiceFabricBackup/
+*.rptproj.bak
+
+# SQL Server files
+*.mdf
+*.ldf
+*.ndf
+
+# Business Intelligence projects
+*.rdl.data
+*.bim.layout
+*.bim_*.settings
+*.rptproj.rsuser
+*- [Bb]ackup.rdl
+*- [Bb]ackup ([0-9]).rdl
+*- [Bb]ackup ([0-9][0-9]).rdl
+
+# Microsoft Fakes
+FakesAssemblies/
+
+# GhostDoc plugin setting file
+*.GhostDoc.xml
+
+# Node.js Tools for Visual Studio
+.ntvs_analysis.dat
+node_modules/
+
+# Visual Studio 6 build log
+*.plg
+
+# Visual Studio 6 workspace options file
+*.opt
+
+# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
+*.vbw
+
+# Visual Studio LightSwitch build output
+**/*.HTMLClient/GeneratedArtifacts
+**/*.DesktopClient/GeneratedArtifacts
+**/*.DesktopClient/ModelManifest.xml
+**/*.Server/GeneratedArtifacts
+**/*.Server/ModelManifest.xml
+_Pvt_Extensions
+
+# Paket dependency manager
+.paket/paket.exe
+paket-files/
+
+# FAKE - F# Make
+.fake/
+
+# CodeRush personal settings
+.cr/personal
+
+# Python Tools for Visual Studio (PTVS)
+*.pyc
+
+# Cake - Uncomment if you are using it
+# tools/**
+# !tools/packages.config
+
+# Tabs Studio
+*.tss
+
+# Telerik's JustMock configuration file
+*.jmconfig
+
+# BizTalk build output
+*.btp.cs
+*.btm.cs
+*.odx.cs
+*.xsd.cs
+
+# OpenCover UI analysis results
+OpenCover/
+
+# Azure Stream Analytics local run output
+ASALocalRun/
+
+# MSBuild Binary and Structured Log
+*.binlog
+
+# NVidia Nsight GPU debugger configuration file
+*.nvuser
+
+# MFractors (Xamarin productivity tool) working folder
+.mfractor/
+
+# Local History for Visual Studio
+.localhistory/
+
+# BeatPulse healthcheck temp database
+healthchecksdb
+
+# Backup folder for Package Reference Convert tool in Visual Studio 2017
+MigrationBackup/
+
+# End of https://www.gitignore.io/api/osx,python,pycharm,windows,visualstudio,visualstudiocode
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..93355e1
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "checkpoints"]
+ path = checkpoints
+ url = https://huggingface.co/datasets/ezipe/cfpi/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..85d266f
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,36 @@
+default_language_version:
+ python: python3.8
+
+default_stages: [commit, push]
+
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v2.5.0
+ hooks:
+ - id: check-yaml
+ - id: end-of-file-fixer
+ exclude: LICENSE
+
+ - repo: local
+ hooks:
+ - id: pyupgrade
+ name: pyupgrade
+ entry: poetry run pyupgrade --py38-plus
+ types: [python]
+ language: system
+
+ - repo: local
+ hooks:
+ - id: isort
+ name: isort
+ entry: poetry run isort --settings-path pyproject.toml
+ types: [python]
+ language: system
+
+ - repo: local
+ hooks:
+ - id: black
+ name: black
+ entry: poetry run black --config pyproject.toml
+ types: [python]
+ language: system
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..145d385
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,76 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ete@ucsb.edu. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..16e880e
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,47 @@
+# How to contribute
+
+## Dependencies
+
+We use `poetry` to manage the [dependencies](https://github.com/python-poetry/poetry).
+If you don't have `poetry`, you should install with `make poetry-download`.
+
+To install dependencies and prepare [`pre-commit`](https://pre-commit.com/) hooks you would need to run `install` command:
+
+```bash
+make install
+make pre-commit-install
+```
+
+To activate your `virtualenv` run `poetry shell`.
+
+## Codestyle
+
+After installation you may execute code formatting.
+
+```bash
+make codestyle
+```
+
+### Checks
+
+Many checks are configured for this project. Command `make check-codestyle` will check black, isort and darglint.
+The `make check-safety` command will look at the security of your code.
+
+Command `make lint` applies all checks.
+
+### Before submitting
+
+Before submitting your code please do the following steps:
+
+1. Add any changes you want
+1. Add tests for the new changes
+1. Edit documentation if you have changed something significant
+1. Run `make codestyle` to format your changes.
+1. Run `make lint` to ensure that types, security and docstrings are okay.
+
+## Other help
+
+You can contribute by spreading a word about this library.
+It would also be a huge contribution to write
+a short article on how you are using this project.
+You can also share your best practices with us.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..a840eff
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+Copyright (c) 2023 ezhang7423
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..62ae0b5
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,145 @@
+.ONESHELL:
+#* Variables
+SHELL := bash
+PYTHON := python
+PYTHONPATH := `pwd`
+CONDA := conda
+CONDA_ACTIVATE = source $$(conda info --base)/etc/profile.d/conda.sh ; conda activate ; conda activate
+
+#* Docker variables
+IMAGE := cfpi
+VERSION := latest
+
+#* Poetry
+.PHONY: poetry-download
+poetry-download:
+ curl -sSL https://install.python-poetry.org | $(PYTHON) -
+
+.PHONY: poetry-remove
+poetry-remove:
+ curl -sSL https://install.python-poetry.org | $(PYTHON) - --uninstall
+
+#* Installation
+.PHONY: install
+install:
+ ! type -P poetry &> /dev/null && curl -sSL https://install.python-poetry.org | python3 -
+ ! type -P $(CONDA) &> /dev/null && { echo "Please install conda (https://docs.conda.io/en/latest/miniconda.html)"; exit 1; }
+
+ # install cfpi conda environment
+ $(CONDA) create -n cfpi python=3.8 -y
+ $(CONDA_ACTIVATE) cfpi
+
+ @if [ -z "$(NO_DATA)" ]; then\
+ echo "Downloading data...";\
+ git submodule update --init --recursive ./checkpoints;\
+ fi
+
+ type python
+
+ pip3 install torch torchvision torchaudio
+ pip install swig numpy==1.24.4
+
+ # install mujoco-py dependencies https://github.com/openai/mujoco-py/issues/627
+ conda install -y -c conda-forge mesa-libgl-cos7-x86_64
+ conda install -y -c conda-forge glfw
+ conda install -y -c conda-forge mesalib
+ conda install -y -c menpo glfw3
+	export CPATH=$$CONDA_PREFIX/include
+	cp /usr/lib64/libGL.so.1 $$CONDA_PREFIX/lib/
+	ln -s $$CONDA_PREFIX/lib/libGL.so.1 $$CONDA_PREFIX/lib/libGL.so
+ export PYTHON_KEYRING_BACKEND=keyring.backends.null.Keyring
+ pip install patchelf "cython<3"
+ pip install cffi lockfile glfw imageio
+ pip install mujoco-py==1.50.1.68
+
+ poetry lock -n && poetry export --without-hashes > requirements.txt
+ poetry install -n
+ # -poetry run mypy --install-types --non-interactive ./
+
+.PHONY: pre-commit-install
+pre-commit-install:
+ poetry run pre-commit install
+
+#* Formatters
+.PHONY: codestyle
+codestyle:
+ poetry run pyupgrade --exit-zero-even-if-changed --py38-plus **/*.py
+ poetry run isort --settings-path pyproject.toml ./
+ poetry run black --config pyproject.toml ./
+
+.PHONY: formatting
+formatting: codestyle
+
+#* Linting
+.PHONY: test
+test:
+ PYTHONPATH=$(PYTHONPATH) poetry run pytest -c pyproject.toml --cov-report=html --cov=cfpi tests/
+
+.PHONY: check-codestyle
+check-codestyle:
+ poetry run isort --diff --check-only --settings-path pyproject.toml ./
+ poetry run black --diff --check --config pyproject.toml ./
+ poetry run darglint --verbosity 2 cfpi tests
+
+.PHONY: mypy
+mypy:
+ poetry run mypy --config-file pyproject.toml ./
+
+.PHONY: check-safety
+check-safety:
+ poetry check
+ poetry run safety check --full-report
+ poetry run bandit -ll --recursive cfpi tests
+
+.PHONY: lint
+lint: test check-codestyle mypy check-safety
+
+.PHONY: update-dev-deps
+update-dev-deps:
+ poetry add -D bandit@latest darglint@latest "isort[colors]@latest" mypy@latest pre-commit@latest pydocstyle@latest pylint@latest pytest@latest pyupgrade@latest safety@latest coverage@latest coverage-badge@latest pytest-html@latest pytest-cov@latest
+ poetry add -D --allow-prereleases black@latest
+
+#* Docker
+# Example: make docker-build VERSION=latest
+# Example: make docker-build IMAGE=some_name VERSION=0.1.0
+.PHONY: docker-build
+docker-build:
+ @echo Building docker $(IMAGE):$(VERSION) ...
+ docker build \
+ -t $(IMAGE):$(VERSION) . \
+ -f ./docker/Dockerfile --no-cache
+
+# Example: make docker-remove VERSION=latest
+# Example: make docker-remove IMAGE=some_name VERSION=0.1.0
+.PHONY: docker-remove
+docker-remove:
+ @echo Removing docker $(IMAGE):$(VERSION) ...
+ docker rmi -f $(IMAGE):$(VERSION)
+
+#* Cleaning
+.PHONY: pycache-remove
+pycache-remove:
+ find . | grep -E "(__pycache__|\.pyc|\.pyo$$)" | xargs rm -rf
+
+.PHONY: dsstore-remove
+dsstore-remove:
+ find . | grep -E ".DS_Store" | xargs rm -rf
+
+.PHONY: mypycache-remove
+mypycache-remove:
+ find . | grep -E ".mypy_cache" | xargs rm -rf
+
+.PHONY: ipynbcheckpoints-remove
+ipynbcheckpoints-remove:
+ find . | grep -E ".ipynb_checkpoints" | xargs rm -rf
+
+.PHONY: pytestcache-remove
+pytestcache-remove:
+ find . | grep -E ".pytest_cache" | xargs rm -rf
+
+.PHONY: build-remove
+build-remove:
+ rm -rf build/
+
+.PHONY: cleanup
+cleanup: pycache-remove dsstore-remove mypycache-remove ipynbcheckpoints-remove pytestcache-remove
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..84d35af
--- /dev/null
+++ b/README.md
@@ -0,0 +1,406 @@
+# cfpi
+
+
+
+[![Build status](https://github.com/cfpi-icml23/code/actions/workflows/build.yml/badge.svg)](https://github.com/ezhang7423/cfpi/actions?query=workflow%3Abuild)
+[![Dependencies Status](https://img.shields.io/badge/dependencies-up%20to%20date-brightgreen.svg)](https://github.com/ezhang7423/cfpi/pulls?utf8=%E2%9C%93&q=is%3Apr%20author%3Aapp%2Fdependabot)
+[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
+[![Security: bandit](https://img.shields.io/badge/security-bandit-green.svg)](https://github.com/PyCQA/bandit)
+[![Pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white)](https://github.com/ezhang7423/cfpi/blob/master/.pre-commit-config.yaml)
+[![Semantic Versions](https://img.shields.io/badge/%20%20%F0%9F%93%A6%F0%9F%9A%80-semantic--versions-e10079.svg)](https://github.com/ezhang7423/cfpi/releases)
+[![License](https://img.shields.io/github/license/ezhang7423/cfpi)](https://github.com/ezhang7423/cfpi/blob/master/LICENSE)
+
+Offline Reinforcement Learning with Closed-Form Policy Improvement Operators
+
+
+
+## โ Installation
+
+We require [Mambaforge](https://github.com/conda-forge/miniforge#mambaforge) (a faster drop-in replacement of conda) or [conda](https://docs.conda.io/en/latest/miniconda.html). Mambaforge is recommended. To install, simply run
+
+```bash
+make install
+```
+
+If you'd like to install without downloading data, run
+
+```bash
+make NO_DATA=1 install
+```
+You'll then need to install [mujoco 210](https://github.com/deepmind/mujoco/releases/tag/2.1.0) to ~/.mujoco/mujoco210/ and add the following to your `.bashrc`: `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/nvidia:$HOME/.mujoco/mujoco210/bin`.
+
+
+## Get Started
+
+```
+# example run
+cfpi bc
+
+# help
+cfpi --help
+
+ Usage: cfpi [OPTIONS] ALGORITHM:{bc|mg|reverse_kl|sarsa_iqn|sg} VARIANT
+
+โญโ Arguments โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโฎ
+โ * algorithm ALGORITHM:{bc|mg|reverse_kl|sar Specify algorithm to run. Find โ
+โ sa_iqn|sg} all supported algorithms in โ
+โ ./cfpi/variants/SUPPORTED_ALGOโฆ โ
+โ [default: None] โ
+โ [required] โ
+โ * variant      TEXT    Specify which variant of the algorithm to run.       โ
+โ                        Find all supported variants in the                   โ
+โ                        ./cfpi/variants/<algorithm>/variant.py file          โ
+```
+
+### Makefile usage
+
+[`Makefile`](https://github.com/ezhang7423/cfpi/blob/master/Makefile) contains a lot of functions for faster development.
+
+
+1. Download and remove Poetry
+
+
+To download and install Poetry run:
+
+```bash
+make poetry-download
+```
+
+To uninstall
+
+```bash
+make poetry-remove
+```
+
+
+
+
+
+2. Install all dependencies and pre-commit hooks
+
+
+Install requirements:
+
+```bash
+make install
+```
+
+Pre-commit hooks can be installed after `git init` via
+
+```bash
+make pre-commit-install
+```
+
+
+
+
+
+3. Codestyle
+
+
+Automatic formatting uses `pyupgrade`, `isort` and `black`.
+
+```bash
+make codestyle
+
+# or use synonym
+make formatting
+```
+
+Codestyle checks only, without rewriting files:
+
+```bash
+make check-codestyle
+```
+
+> Note: `check-codestyle` uses `isort`, `black` and `darglint` library
+
+Update all dev libraries to the latest version using one command
+
+```bash
+make update-dev-deps
+```
+
+
+4. Code security
+
+
+```bash
+make check-safety
+```
+
+This command launches `Poetry` integrity checks as well as identifies security issues with `Safety` and `Bandit`.
+
+```bash
+make check-safety
+```
+
+
+
+
+
+
+
+
+5. Type checks
+
+
+Run `mypy` static type checker
+
+```bash
+make mypy
+```
+
+
+
+
+
+6. Tests with coverage badges
+
+
+Run `pytest`
+
+```bash
+make test
+```
+
+
+
+
+
+7. All linters
+
+
+Of course there is a command to ~~rule~~ run all linters in one:
+
+```bash
+make lint
+```
+
+the same as:
+
+```bash
+make test && make check-codestyle && make mypy && make check-safety
+```
+
+
+
+
+
+8. Docker
+
+
+```bash
+make docker-build
+```
+
+which is equivalent to:
+
+```bash
+make docker-build VERSION=latest
+```
+
+Remove docker image with
+
+```bash
+make docker-remove
+```
+
+More information [about docker](https://github.com/ezhang7423/cfpi/tree/master/docker).
+
+
+
+
+
+9. Cleanup
+
+Delete pycache files
+
+```bash
+make pycache-remove
+```
+
+Remove package build
+
+```bash
+make build-remove
+```
+
+Delete .DS_STORE files
+
+```bash
+make dsstore-remove
+```
+
+Remove .mypycache
+
+```bash
+make mypycache-remove
+```
+
+Or to remove all above run:
+
+```bash
+make cleanup
+```
+
+
+
+
+### Poetry
+
+Want to know more about Poetry? Check [its documentation](https://python-poetry.org/docs/).
+
+
+Details about Poetry
+
+
+Poetry's [commands](https://python-poetry.org/docs/cli/#commands) are very intuitive and easy to learn, like:
+
+- `poetry add numpy@latest`
+- `poetry run pytest`
+- `poetry publish --build`
+
+etc
+
+
+
+
+### Building and releasing
+
+Building a new version of the application contains steps:
+
+- Bump the version of your package `poetry version <version>`. You can pass the new version explicitly, or a rule such as `major`, `minor`, or `patch`. For more details, refer to the [Semantic Versions](https://semver.org/) standard.
+- Make a commit to `GitHub`.
+- Create a `GitHub release`.
+- And... publish ๐ `poetry publish --build`
+
+## ๐ฏ What's next
+
+- Add support for deterministic CFPI
+- Add support for VAE-CFPI
+
+## ๐ก License
+
+[![License](https://img.shields.io/github/license/ezhang7423/cfpi)](https://github.com/ezhang7423/cfpi/blob/master/LICENSE)
+
+This project is licensed under the terms of the `MIT` license. See [LICENSE](https://github.com/ezhang7423/cfpi/blob/master/LICENSE) for more details.
+
+## ๐ Citation
+
+```bibtex
+@misc{li2022offline,
+ title={Offline Reinforcement Learning with Closed-Form Policy Improvement Operators},
+ author={Jiachen Li and Edwin Zhang and Ming Yin and Qinxun Bai and Yu-Xiang Wang and William Yang Wang},
+ journal={ICML},
+ year={2023}}
+```
+
+# ๐ Credits
+
+This project would not be possible without the following wonderful prior work.
+
+Optimistic Actor Critic gave inspiration to our
+method,
+D4RL
+provides the dataset and benchmark for evaluating the performance of our agent, and
+RLkit offered a strong RL framework
+for building our code from.
+
+Template: [`python-package-template`](https://github.com/TezRomacH/python-package-template)
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..a98308c
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,27 @@
+# Security
+
+## ๐ Reporting Security Issues
+
+> Do not open issues that might have security implications!
+> It is critical that security related issues are reported privately so we have time to address them before they become public knowledge.
+
+Vulnerabilities can be reported by emailing core members:
+
+- ezhang7423 [ete@ucsb.edu](mailto:ete@ucsb.edu)
+
+Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
+
+- Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
+- Full paths of source file(s) related to the manifestation of the issue
+- The location of the affected source code (tag/branch/commit or direct URL)
+- Any special configuration required to reproduce the issue
+- Environment (e.g. Linux / Windows / macOS)
+- Step-by-step instructions to reproduce the issue
+- Proof-of-concept or exploit code (if possible)
+- Impact of the issue, including how an attacker might exploit the issue
+
+This information will help us triage your report more quickly.
+
+## Preferred Languages
+
+We prefer all communications to be in English.
diff --git a/cfpi/__init__.py b/cfpi/__init__.py
new file mode 100644
index 0000000..ca3ede1
--- /dev/null
+++ b/cfpi/__init__.py
@@ -0,0 +1,34 @@
+# type: ignore[attr-defined]
+"""Offline Reinforcement Learning with Closed-Form Policy Improvement Operators"""
+
+from importlib import metadata as importlib_metadata
+import pickle
+class RenamingUnpickler(pickle.Unpickler):
+ def find_class(self, module, name):
+
+ if module == 'rlkit.core.pythonplusplus':
+ module = 'eztils.torch'
+
+ if module == 'rlkit.envs.wrappers':
+ module = 'cfpi.envs'
+
+ if 'rlkit.torch' in module:
+ module = module.replace('rlkit.torch', 'cfpi.pytorch')
+
+ if 'rlkit' in module:
+ module = module.replace('rlkit', 'cfpi')
+
+
+ return super().find_class(module, name)
+
+pickle.Unpickler = RenamingUnpickler
+
+def get_version() -> str:
+ try:
+ return importlib_metadata.version(__name__)
+ except importlib_metadata.PackageNotFoundError: # pragma: no cover
+ return "unknown"
+
+
+version: str = get_version()
+__version__ = version
diff --git a/cfpi/__main__.py b/cfpi/__main__.py
new file mode 100644
index 0000000..66100f4
--- /dev/null
+++ b/cfpi/__main__.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python3
+import os
+
+os.environ["D4RL_SUPPRESS_IMPORT_ERROR"] = "1"
+from typing_extensions import Annotated
+
+import glob
+import importlib
+import importlib.util
+import inspect
+import json
+import site
+from enum import Enum
+
+import typer
+from eztils import bold, datestr, dict_to_safe_json, red
+from pydantic import BaseSettings
+from typer import Argument, Option
+
+from cfpi.conf import GridSearch, Parallel
+from cfpi.variants import SUPPORTED_ALGORITHMS
+
+app: typer.Typer = typer.Typer(name="cfpi", no_args_is_help=True)
+
+
+def find_attr(module, attr_substr):
+ return [attr for attr in user_defined_attrs(module) if attr_substr in attr]
+
+
+def user_defined_attrs(
+ cls,
+ excluded: list = None,
+):
+ excluded = excluded or ["Base", "frozen_enforced_dataclass"]
+ return [
+ attr for attr in dir(cls) if not attr.startswith("__") and attr not in excluded
+ ]
+
+
+def user_defined_attrs_dict(cls, excluded: list = None, string=False):
+ excluded = excluded or ["Base", "frozen_enforced_dataclass"]
+ return {
+ k: str(v) if string else v
+ for k, v in cls.__dict__.items()
+ if not k.startswith("__") and k not in excluded
+ }
+
+
+def load_experiment(variant, alg: str):
+ from cfpi.variants.base import FuncWrapper
+
+ variant_module = importlib.import_module(f"cfpi.variants.{alg}.variant")
+ variant: BaseSettings = getattr(variant_module, variant)
+
+ variant_dict = variant().dict()
+ for k in variant_dict:
+ if isinstance(variant_dict[k], FuncWrapper):
+ variant_dict[k] = variant_dict[k].f # unwrap the functions and class
+ if isinstance(variant_dict[k], dict): # maybe do this recursively in the future
+ for k2 in variant_dict[k]:
+ if isinstance(variant_dict[k][k2], FuncWrapper):
+ variant_dict[k][k2] = variant_dict[k][k2].f
+
+ return variant_dict
+
+
+def print_section(name, content):
+ from cfpi.core.logging import SEPARATOR
+
+ bold(name.upper() + ":", "\n")
+ print(content, SEPARATOR)
+
+
+def list_to_dict(l):
+ return {i: i for i in l}
+
+
+@app.command()
+def main(
+ algorithm: Annotated[
+ Enum("Algorithm", list_to_dict(user_defined_attrs(SUPPORTED_ALGORITHMS))),
+ Argument(
+ help="Specify algorithm to run. Find all supported algorithms in ./cfpi/variants/SUPPORTED_ALGORITHMS.py",
+ autocompletion=lambda: user_defined_attrs(SUPPORTED_ALGORITHMS),
+ ),
+ ],
+ variant: Annotated[
+ str,
+ Option(
+ help="Specify which variant of the algorithm to run. Find all supported variant in ./cfpi/variants/.py",
+ ),
+ ] = 'VanillaVariant',
+ parallel: Annotated[
+ Enum("Parallel", list_to_dict(user_defined_attrs(Parallel))),
+ Option(
+ help="Run multiple versions of the algorithm on different environments and seeds.",
+ autocompletion=lambda: user_defined_attrs(Parallel),
+ ),
+ ] = None,
+ gridsearch: Annotated[
+ Enum("GridSearch", list_to_dict(user_defined_attrs(GridSearch))),
+ Option(
+ help="Do a gridsearch. Only supported when parallel is also enabled",
+ autocompletion=lambda: user_defined_attrs(GridSearch),
+ ),
+ ] = None,
+ dry: Annotated[
+ bool,
+ Option(
+ help="Just print the variant and pipeline.",
+ ),
+ ] = False,
+):
+ algorithm = algorithm.value
+ parallel = parallel.value if parallel else None
+ gridsearch = gridsearch.value if gridsearch else None
+
+ import torch
+
+ torch.multiprocessing.set_start_method("spawn")
+ from cfpi import conf
+ from cfpi.conf import GridSearch, Parallel
+ from cfpi.launchers import (
+ run_hyperparameters,
+ run_parallel_pipeline_here,
+ run_pipeline_here,
+ )
+
+ # remove mujoco locks
+ for l in glob.glob(f"{site.getsitepackages()[0]}/mujoco_py/generated/*lock"):
+ print(l)
+ os.remove(l)
+
+ variant = load_experiment(variant, algorithm)
+
+ if dry:
+ print_section("time", datestr())
+ pipeline = variant["pipeline"]
+ print_section("variant", json.dumps(dict_to_safe_json(variant), indent=2))
+ print_section("pipeline", pipeline.composition)
+
+ if gridsearch:
+ if dry:
+ print_section(
+ "gridsearch args",
+ inspect.getsource(getattr(GridSearch, gridsearch)),
+ )
+ else:
+ run_hyperparameters(
+ getattr(Parallel, parallel),
+ variant,
+ hyperparameters=(getattr(GridSearch, gridsearch))["gridsearch_values"],
+ )
+ return
+
+ if parallel:
+ if dry:
+ print_section(
+ "parallel args",
+ inspect.getsource(getattr(Parallel, parallel)),
+ )
+ else:
+ run_parallel_pipeline_here(getattr(Parallel, parallel), variant)
+ return
+
+ if dry:
+ red("Debug mode: ", conf.DEBUG)
+ red("Root dir", conf.Log.rootdir)
+ return
+
+ run_pipeline_here(
+ variant=variant,
+ snapshot_mode=variant.get("snapshot_mode", "gap_and_last"),
+ snapshot_gap=variant.get("snapshot_gap", 100),
+ gpu_id=variant.get("gpu_id", 0),
+ )
+
+
+if __name__ == "__main__":
+ app()
diff --git a/cfpi/checkpoint_config.py b/cfpi/checkpoint_config.py
new file mode 100644
index 0000000..b2c7d2e
--- /dev/null
+++ b/cfpi/checkpoint_config.py
@@ -0,0 +1,208 @@
+from typing import Dict, List, Optional
+import socket
+from cfpi.variants.base import BaseModel
+
+
+
+class CheckpointParam(BaseModel):
+ envs: List[str]
+ seeds: List[int]
+ path: str
+ key: Optional[str] = None
+ file: Optional[str] = None
+ itrs: Optional[Dict[str, List[int]]] = None
+ validation_optimal_epochs: Optional[Dict[str, int]] = None
+
+class Q_IQN(CheckpointParam):
+ envs: List[str] = [
+ "halfcheetah-medium-expert-v2",
+ "halfcheetah-medium-replay-v2",
+ "halfcheetah-medium-v2",
+ "hopper-medium-expert-v2",
+ "hopper-medium-replay-v2",
+ "hopper-medium-v2",
+ "walker2d-medium-expert-v2",
+ "walker2d-medium-replay-v2",
+ "walker2d-medium-v2",
+ "antmaze-umaze-v0",
+ "antmaze-umaze-diverse-v0",
+ "antmaze-medium-diverse-v0",
+ "antmaze-medium-play-v0",
+ ]
+ seeds: List[int] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ path: str = "q-iqn"
+ key: Optional[str] = "trainer/qfs"
+ file: Optional[str] = None
+ itrs: Optional[Dict[str, List[int]]] = {
+ "hopper-medium-expert-v2": [50, 100, 150, 200, 250, 300, 350, 400],
+ "halfcheetah-medium-expert-v2": [50, 100, 150, 200, 250, 300, 350, 400],
+ "hopper-medium-v2": [50, 100, 150, 200, 250, 300, 350, 400],
+ "halfcheetah-medium-v2": [50, 100, 150, 200, 250, 300, 350, 400],
+ "walker2d-medium-expert-v2": [50, 100, 150, 200, 250, 300, 350, 400],
+ "hopper-medium-replay-v2": [50, 100, 150, 200, 250, 300, 350, 400],
+ "walker2d-medium-replay-v2": [
+ 100,
+ 200,
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1200,
+ 1300,
+ 1400,
+ 1500,
+ ],
+ "halfcheetah-medium-replay-v2": [
+ 100,
+ 200,
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1200,
+ 1300,
+ 1400,
+ 1500,
+ ],
+ "walker2d-medium-v2": [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000],
+ "antmaze-umaze-v0": [100, 200, 300, 400, 500],
+ "antmaze-umaze-diverse-v0": [100, 200, 300, 400, 500],
+ "antmaze-medium-diverse-v0": [100, 200, 300, 400, 500],
+ "antmaze-medium-play-v0": [100, 200, 300, 400, 500],
+ }
+ validation_optimal_epochs: Optional[Dict[str, int]] = {
+ "halfcheetah-medium-expert-v2": 400,
+ "halfcheetah-medium-replay-v2": 1500,
+ "halfcheetah-medium-v2": 200,
+ "hopper-medium-expert-v2": 400,
+ "hopper-medium-replay-v2": 300,
+ "hopper-medium-v2": 400,
+ "walker2d-medium-expert-v2": 400,
+ "walker2d-medium-replay-v2": 1100,
+ "walker2d-medium-v2": 700,
+ "antmaze-umaze-v0": 500,
+ "antmaze-umaze-diverse-v0": 500,
+ "antmaze-medium-diverse-v0": 500,
+ "antmaze-medium-play-v0": 500,
+ }
+
+
+class Q_IQL(CheckpointParam):
+ envs: List[str] = [
+ "antmaze-umaze-v0",
+ "antmaze-umaze-diverse-v0",
+ "antmaze-medium-diverse-v0",
+ "antmaze-medium-play-v0",
+ "antmaze-large-play-v0",
+ "antmaze-large-diverse-v0",
+ ]
+ seeds: List[int] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ path: str = "q-iql-models"
+ key: Optional[str] = None
+ file: Optional[str] = None
+ itrs: Optional[Dict[str, List[int]]] = None
+ validation_optimal_epochs: Optional[Dict[str, int]] = None
+
+
+class SG(CheckpointParam):
+ envs: List[str] = [
+ "antmaze-umaze-v0",
+ "antmaze-umaze-diverse-v0",
+ "antmaze-medium-diverse-v0",
+ "antmaze-medium-play-v0",
+ "halfcheetah-medium-expert-v2",
+ "halfcheetah-medium-replay-v2",
+ "halfcheetah-medium-v2",
+ "hopper-medium-expert-v2",
+ "hopper-medium-replay-v2",
+ "hopper-medium-v2",
+ "walker2d-medium-expert-v2",
+ "walker2d-medium-replay-v2",
+ "walker2d-medium-v2",
+ ]
+ seeds: List[int] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ path: str = "sg"
+ key: Optional[str] = "trainer/policy"
+ file: Optional[str] = None
+ itrs: Optional[Dict[str, List[int]]] = None
+ validation_optimal_epochs: Optional[Dict[str, int]] = None
+
+class MG4(CheckpointParam):
+ envs: List[str] = [
+ "antmaze-umaze-v0",
+ "antmaze-umaze-diverse-v0",
+ "antmaze-medium-diverse-v0",
+ "antmaze-medium-play-v0",
+ "walker2d-medium-expert-v2",
+ "hopper-medium-expert-v2",
+ "halfcheetah-medium-expert-v2",
+ "hopper-medium-replay-v2",
+ "halfcheetah-medium-replay-v2",
+ "walker2d-medium-replay-v2",
+ "hopper-medium-v2",
+ "halfcheetah-medium-v2",
+ "walker2d-medium-v2",
+ ]
+ seeds: List[int] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ path: str = "mg-4"
+ key: Optional[str] = "trainer/policy"
+ file: Optional[str] = "params.pt"
+ itrs: Optional[Dict[str, List[int]]] = None
+ validation_optimal_epochs: Optional[Dict[str, int]] = None
+
+
+
+class MG8:
+ envs: List[str] = [
+ "walker2d-medium-expert-v2",
+ "hopper-medium-expert-v2",
+ "halfcheetah-medium-expert-v2",
+ "hopper-medium-replay-v2",
+ "halfcheetah-medium-replay-v2",
+ "walker2d-medium-replay-v2",
+ "hopper-medium-v2",
+ "halfcheetah-medium-v2",
+ "walker2d-medium-v2",
+ ]
+ seeds: List[int] = range(10)
+ path: str = "mg-8-no-normalize" # normalized
+ key:Optional[str] = "trainer/policy"
+ file: Optional[str] = "params.pt"
+
+
+class MG8_WITHOUT_NORMALIZE(CheckpointParam):
+ envs: List[str] = [
+ "antmaze-umaze-v0",
+ "antmaze-umaze-diverse-v0",
+ "antmaze-medium-diverse-v0",
+ "antmaze-medium-play-v0",
+ "antmaze-large-play-v0",
+ "antmaze-large-diverse-v0",
+ ]
+ seeds: List[int] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+ path: str = "mg-8"
+ key: Optional[str] = "trainer/policy"
+
+
+class MG12_WITHOUT_NORMALIZE:
+ envs: List[str] = [
+ "antmaze-umaze-v0",
+ "antmaze-umaze-diverse-v0",
+ "antmaze-medium-diverse-v0",
+ "antmaze-medium-play-v0",
+ "antmaze-large-play-v0",
+ "antmaze-large-diverse-v0",
+ ]
+ seeds: List[int] = list(range(10))
+ path: str = "mg-12-no-normalize"
+ key: Optional[str] = "trainer/policy"
diff --git a/cfpi/conf.py b/cfpi/conf.py
new file mode 100644
index 0000000..fb49ad8
--- /dev/null
+++ b/cfpi/conf.py
@@ -0,0 +1,201 @@
+from typing import Dict, List
+
+import os
+from os.path import abspath, join
+
+
+import cfpi
+from cfpi.variants.base import BaseModel
+from cfpi import checkpoint_config
+CheckpointParams = checkpoint_config
+
+"""
+Debug mode will
+* skip confirmation when replacing directories
+* change the data directory to ./tmp
+* turn off wandb logging
+"""
+try:
+ from cfpi.conf_private import DEBUG
+except ImportError:
+ DEBUG = False
+
+DEBUG = True #!!
+DISPLAY_WELCOME = True
+
+
+class LogModel(BaseModel):
+ repo_dir: str = abspath(join(os.path.dirname(cfpi.__file__), os.pardir))
+ try:
+ from cfpi.conf_private import rootdir
+
+ rootdir: str = rootdir
+ except ImportError:
+ rootdir: str = repo_dir
+ basedir: str = join(rootdir, "tmp" if DEBUG else "data")
+
+
+Log = LogModel()
+
+ENSEMBLE_MODEL_ROOT: str = join(Log.rootdir, "models")
+CHECKPOINT_PATH: str = join(Log.rootdir, "checkpoints")
+
+class GridSearch:
+ """The closest thing to a namespace in python"""
+
+ class Base(BaseModel):
+ gridsearch_values: Dict[str, List]
+
+ class Testing(Base):
+ gridsearch_values: Dict[str, List] = {"delta": [1, 2], "beta_LB": [0]}
+
+ class Full(Base):
+ gridsearch_values: Dict[str, List] = {
+ "delta": [
+ -0.0,
+ 0.1417768376957354,
+ 0.4590436050264207,
+ 0.6680472308365775,
+ 1.5517556536555206,
+ ],
+ "beta_LB": [0.1, 0.5, 2],
+ }
+
+ class Mg(Base):
+ gridsearch_values: Dict[str, List] = {"beta_LB": [0.1, 1.0]}
+
+ class Pac(Base):
+ gridsearch_values: Dict[str, List] = {"beta_LB": [1.0, 0.1]}
+
+ class EnsembleSize(Base):
+ gridsearch_values: Dict[str, List] = {"ensemble_size": [1, 2, 5, 10]}
+
+ class QTrainedEpochs(Base):
+ gridsearch_values: Dict[str, List] = {
+ "q_trained_epochs": [1000, 1250, 1500, 1750, 2000]
+ }
+
+ class KFold(Base):
+ gridsearch_values: Dict[str, List] = {"fold_idx": [1, 2, 3]}
+
+ class TrqDelta(Base):
+ gridsearch_values: Dict[str, List] = {"delta": [0.01, 0.001, 0.0001]}
+
+ class ReverseKl(Base):
+ gridsearch_values: Dict[str, List] = {"alpha": [0.03, 0.1, 0.3, 1.0, 3.0, 10.0]}
+
+ class Deltas(Base):
+ gridsearch_values: Dict[str, List] = {
+ "delta_range": [
+ [0.0, 0.0],
+ [0.1, 0.1],
+ [0.2, 0.2],
+ [0.3, 0.3],
+ [0.4, 0.4],
+ [0.5, 0.5],
+ [0.6, 0.6],
+ [0.7, 0.7],
+ [0.8, 0.8],
+ [1.0, 1.0],
+ [1.5, 1.5],
+ [2.0, 2.0],
+ [0.0, 1.0],
+ [0.5, 1.0],
+ [0.8, 1.2],
+ [1.0, 1.5],
+ [1, 2],
+ ]
+ }
+
+ class FineGrainedDeltas(Base):
+ gridsearch_values: Dict[str, List] = {
+ "delta": [[0.5], [0.75], [1.0], [1.25], [1.5], [1.75], [2.0], [2.25]]
+ }
+
+ class DetDeltas(Base):
+ gridsearch_values: Dict[str, List] = {
+ "delta_range": [[0.5, 1.0], [0.25, 0.5], [0.0, 1.5], [1.0, 2.0]]
+ }
+
+ class CqlDeltas(Base):
+ gridsearch_values: Dict[str, List] = {
+ "delta_range": [[0.1, 0.1], [0.2, 0.5], [1.0, 1.0], [1.5, 1.5]]
+ }
+
+ class EasyBcq(Base):
+ gridsearch_values: Dict[str, List] = {
+ "num_candidate_actions": [2, 5, 10, 20, 50, 100]
+ }
+
+
+def lrange(i):
+ return list(range(i))
+
+
+class Parallel:
+ class Base(BaseModel):
+ seeds: List[int]
+ envs: List[str]
+
+ class Single(Base):
+ seeds: List[int] = lrange(1)
+ envs: List[str] = [
+ "hopper-medium-v2",
+ "walker2d-medium-v2",
+ "halfcheetah-medium-v2",
+ ]
+
+ class Wide(Base):
+ seeds: List[int] = lrange(10)
+ envs: List[str] = [
+ "hopper-medium-replay-v2",
+ "walker2d-medium-replay-v2",
+ "halfcheetah-medium-replay-v2",
+ "hopper-medium-v2",
+ "walker2d-medium-v2",
+ "halfcheetah-medium-v2",
+ "hopper-medium-expert-v2",
+ "walker2d-medium-expert-v2",
+ "halfcheetah-medium-expert-v2",
+ ]
+
+ class MediumReplay(Base):
+ seeds: List[int] = lrange(10)
+ envs: List[str] = [
+ "hopper-medium-replay-v2",
+ "walker2d-medium-replay-v2",
+ "halfcheetah-medium-replay-v2",
+ ]
+
+ class MediumExpert(Base):
+ seeds: List[int] = lrange(10)
+ envs: List[str] = [
+ "hopper-medium-expert-v2",
+ "walker2d-medium-expert-v2",
+ "halfcheetah-medium-expert-v2",
+ ]
+
+ class AntMaze(Base):
+ seeds: List[int] = lrange(5)
+ envs: List[str] = [
+ "antmaze-umaze-v0",
+ "antmaze-umaze-diverse-v0",
+ "antmaze-medium-diverse-v0",
+ "antmaze-medium-play-v0",
+ "antmaze-large-play-v0",
+ "antmaze-large-diverse-v0",
+ ]
+
+
+class WandbModel(BaseModel):
+ is_on: bool = not DEBUG # Whether or not to use wandb
+ entity: str = "mdsac"
+ project: str = "cfpi"
+
+
+Wandb = WandbModel()
+
+try:
+ from cfpi.conf_private import *
+except ImportError:
+ pass
diff --git a/cfpi/core/__init__.py b/cfpi/core/__init__.py
new file mode 100644
index 0000000..bde5875
--- /dev/null
+++ b/cfpi/core/__init__.py
@@ -0,0 +1,3 @@
+"""
+General classes, functions, utilities that are used throughout cfpi.
+"""
diff --git a/cfpi/core/gtimer.py b/cfpi/core/gtimer.py
new file mode 100644
index 0000000..d4e0c7e
--- /dev/null
+++ b/cfpi/core/gtimer.py
@@ -0,0 +1,8 @@
+import portalocker
+from gtimer import *
+from gtimer import stamp as st
+
+
+def stamp(*args, **kwargs):
+ with portalocker.Lock(f"/tmp/{args[0]}"):
+ st(*args, unique=False, **kwargs)
diff --git a/cfpi/core/logging/__init__.py b/cfpi/core/logging/__init__.py
new file mode 100644
index 0000000..85336f3
--- /dev/null
+++ b/cfpi/core/logging/__init__.py
@@ -0,0 +1,313 @@
+"""
+Based on rllab's logger.
+
+https://github.com/rll/rllab
+"""
+import csv
+import datetime
+import errno
+import json
+import os
+import os.path as osp
+import sys
+from collections import OrderedDict
+from contextlib import contextmanager
+from enum import Enum
+
+import dateutil.tz
+import portalocker
+import torch
+
+import wandb
+from cfpi import conf
+from cfpi.core.logging.tabulate import tabulate
+
+SEPARATOR = "\n\n-----------------\n\n"
+
+
+def add_prefix(log_dict: OrderedDict, prefix: str, divider=""):
+ with_prefix = OrderedDict()
+ for key, val in log_dict.items():
+ with_prefix[prefix + divider + key] = val
+ return with_prefix
+
+
+class TerminalTablePrinter:
+ def __init__(self):
+ self.headers = None
+ self.tabulars = []
+
+ def print_tabular(self, new_tabular):
+ if self.headers is None:
+ self.headers = [x[0] for x in new_tabular]
+ else:
+ assert len(self.headers) == len(new_tabular)
+ self.tabulars.append([x[1] for x in new_tabular])
+ self.refresh()
+
+ def refresh(self):
+ import os
+
+ rows, _ = os.popen("stty size", "r").read().split()
+ tabulars = self.tabulars[-(int(rows) - 3) :]
+ sys.stdout.write("\x1b[2J\x1b[H")
+ sys.stdout.write(tabulate(tabulars, self.headers))
+ sys.stdout.write("\n")
+
+
+class MyEncoder(json.JSONEncoder):
+ def default(self, o):
+ from cfpi.launchers.pipeline import Pipeline
+
+ if isinstance(o, type) or isinstance(o, Pipeline):
+ return {"$class": o.__module__ + "." + o.__name__}
+ elif isinstance(o, Enum):
+ return {"$enum": o.__module__ + "." + o.__class__.__name__ + "." + o.name}
+ elif callable(o):
+ return {"$function": o.__module__ + "." + o.__name__}
+ return json.JSONEncoder.default(self, o)
+
+
+def mkdir_p(path):
+ try:
+ os.makedirs(path)
+ except OSError as exc: # Python >2.5
+ if exc.errno == errno.EEXIST and os.path.isdir(path):
+ pass
+ else:
+ raise
+
+
+class Logger:
+ def __init__(self):
+ self._prefixes = []
+ self._prefix_str = ""
+
+ self._tabular_prefixes = []
+ self._tabular_prefix_str = ""
+
+ self._tabular = []
+ self._tabular_keys = {}
+
+ self._text_outputs = []
+ self._tabular_outputs = []
+
+ self._text_fds = {}
+ self._tabular_fds = {}
+ self._tabular_header_written = set()
+
+ self._snapshot_dir = None
+ self._snapshot_mode = "all"
+ self._snapshot_gap = 1
+
+ self._log_tabular_only = False
+ self.table_printer = TerminalTablePrinter()
+
+ def reset(self):
+ self.__init__()
+
+ def _add_output(self, file_name, arr, fds, mode="a"):
+ if file_name not in arr:
+ mkdir_p(os.path.dirname(file_name))
+ arr.append(file_name)
+ fds[file_name] = open(file_name, mode)
+
+ def push_prefix(self, prefix):
+ self._prefixes.append(prefix)
+ self._prefix_str = "".join(self._prefixes)
+
+ def add_text_output(self, file_name):
+ self._add_output(file_name, self._text_outputs, self._text_fds, mode="a")
+
+ def add_tabular_output(self, file_name, relative_to_snapshot_dir=False):
+ if relative_to_snapshot_dir:
+ file_name = osp.join(self._snapshot_dir, file_name)
+ self._add_output(file_name, self._tabular_outputs, self._tabular_fds, mode="w")
+ self._tabular_keys[file_name] = None
+
+ def set_snapshot_dir(self, dir_name):
+ self._snapshot_dir = dir_name
+
+ def get_snapshot_dir(
+ self,
+ ):
+ return self._snapshot_dir
+
+ def set_snapshot_mode(self, mode):
+ self._snapshot_mode = mode
+
+ def set_snapshot_gap(self, gap):
+ self._snapshot_gap = gap
+
+ def set_log_tabular_only(self, log_tabular_only):
+ self._log_tabular_only = log_tabular_only
+
+ def log(self, s, with_prefix=True, with_timestamp=True):
+ out = s
+ if with_prefix:
+ out = self._prefix_str + out
+ if with_timestamp:
+ now = datetime.datetime.now(dateutil.tz.tzlocal())
+ timestamp = now.strftime("%Y-%m-%d %H:%M:%S.%f %Z")
+ out = f"{timestamp} | {out}"
+ if not self._log_tabular_only:
+ # Also log to stdout
+ print(out)
+ for fd in list(self._text_fds.values()):
+ fd.write(out + "\n")
+ fd.flush()
+ sys.stdout.flush()
+
+ def record_tabular(self, key, val):
+ self._tabular.append((self._tabular_prefix_str + str(key), str(val)))
+
+ def record_dict(self, d, prefix=None):
+ if prefix is not None:
+ self.push_tabular_prefix(prefix)
+ for k, v in d.items():
+ self.record_tabular(k, v)
+ if prefix is not None:
+ self.pop_tabular_prefix()
+
+ def push_tabular_prefix(self, key):
+ self._tabular_prefixes.append(key)
+ self._tabular_prefix_str = "".join(self._tabular_prefixes)
+
+ def pop_tabular_prefix(
+ self,
+ ):
+ del self._tabular_prefixes[-1]
+ self._tabular_prefix_str = "".join(self._tabular_prefixes)
+
+ def get_table_dict(
+ self,
+ ):
+ return dict(self._tabular)
+
+ @contextmanager
+ def prefix(self, key):
+ self.push_prefix(key)
+ try:
+ yield
+ finally:
+ self.pop_prefix()
+
+ def log_variant(self, log_file, variant_data):
+ mkdir_p(os.path.dirname(log_file))
+ with open(log_file, "w") as f:
+ json.dump(variant_data, f, indent=2, sort_keys=True, cls=MyEncoder)
+
+ def dump_tabular(self, *args, **kwargs):
+ wh = kwargs.pop("write_header", None)
+ if len(self._tabular) > 0:
+ if self._log_tabular_only:
+ self.table_printer.print_tabular(self._tabular)
+ else:
+ for line in tabulate(self._tabular).split("\n"):
+ self.log(line, *args, **kwargs)
+ tabular_dict = dict(self._tabular)
+ # Also write to the csv files
+ for filename, tabular_fd in list(self._tabular_fds.items()):
+ # Only saves keys in first iteration to CSV!
+ # (But every key is printed out in text)
+ itr0_keys = self._tabular_keys.get(filename)
+ if itr0_keys is None:
+ itr0_keys = list(sorted(tabular_dict.keys()))
+ self._tabular_keys[filename] = itr0_keys
+ else:
+ prev_keys = set(itr0_keys)
+ curr_keys = set(tabular_dict.keys())
+ if curr_keys != prev_keys:
+ print("Warning: CSV key mismatch")
+ print("extra keys in 0th iter", prev_keys - curr_keys)
+ print("extra keys in cur iter", curr_keys - prev_keys)
+
+ writer = csv.DictWriter(
+ tabular_fd,
+ fieldnames=itr0_keys,
+ extrasaction="ignore",
+ )
+ if wh or (
+ wh is None and tabular_fd not in self._tabular_header_written
+ ):
+ writer.writeheader()
+ self._tabular_header_written.add(tabular_fd)
+ writer.writerow(tabular_dict)
+ tabular_fd.flush()
+ del self._tabular[:]
+
+ def pop_prefix(
+ self,
+ ):
+ del self._prefixes[-1]
+ self._prefix_str = "".join(self._prefixes)
+
+ def save_itr_params(self, itr, params):
+ if self._snapshot_dir:
+ if self._snapshot_mode == "all":
+ file_name = osp.join(self._snapshot_dir, "itr_%d.pt" % (itr + 1))
+ torch.save(params, file_name)
+ elif self._snapshot_mode == "last":
+ # override previous params
+ file_name = osp.join(self._snapshot_dir, "params.pt")
+ torch.save(params, file_name)
+ elif self._snapshot_mode == "gap":
+ if (itr + 1) % self._snapshot_gap == 0:
+ file_name = osp.join(self._snapshot_dir, "itr_%d.pt" % (itr + 1))
+ torch.save(params, file_name)
+ elif self._snapshot_mode == "gap_and_last":
+ if (itr + 1) % self._snapshot_gap == 0:
+ file_name = osp.join(self._snapshot_dir, "itr_%d.pt" % (itr + 1))
+ torch.save(params, file_name)
+ file_name = osp.join(self._snapshot_dir, "params.pt")
+ torch.save(params, file_name)
+ elif self._snapshot_mode == "none":
+ pass
+ else:
+ raise NotImplementedError
+
+
+def wlog(*args, **kwargs):
+ if conf.Wandb.is_on:
+ with portalocker.Lock(f"/tmp/wandb_log_lock_{os.getlogin()}"):
+ wandb.log(*args, **kwargs)
+
+
+class WandbLogger(Logger):
+ def __init__(self, blacklist=None, highlight=None):
+ super().__init__()
+ if blacklist is None:
+ blacklist = ["Epoch", "epoch", "eval/Average Returns"]
+ if highlight is None:
+ highlight = {
+ "eval/Returns Mean": "Eval Returns Mean",
+ "eval/Returns Std": "Eval Returns Std",
+ "expl/Returns Mean": "Expl Returns Mean",
+ "expl/Returns Std": "Expl Returns Std",
+ }
+ self.blacklist = blacklist
+ self.highlight = highlight
+
+ def dump_tabular(self, *args, **kwargs):
+ logs = {k: float(v) for k, v in self.get_table_dict().items()}
+
+ for b in self.blacklist:
+ logs.pop(b, None)
+ for old_key, new_key in self.highlight.items():
+ try:
+ logs[new_key] = logs.pop(old_key)
+ except KeyError:
+ continue
+
+ wlog(logs, commit=True)
+ super().dump_tabular(*args, **kwargs)
+
+ def set_offline_rl(self):
+ self.highlight = {
+ "eval/normalized_score": "Eval Normalized Score",
+ # "eval/path length Mean": "Eval Path length",
+ "eval/Returns Mean": "Eval Returns Mean",
+ }
+
+
+logger = WandbLogger()
diff --git a/cfpi/core/logging/eval_util.py b/cfpi/core/logging/eval_util.py
new file mode 100644
index 0000000..98dfc1b
--- /dev/null
+++ b/cfpi/core/logging/eval_util.py
@@ -0,0 +1,74 @@
+"""
+Common evaluation utilities.
+"""
+
+from collections import OrderedDict
+
+import numpy as np
+from eztils import create_stats_ordered_dict, list_of_dicts__to__dict_of_lists
+
+
def get_generic_path_information(paths, stat_prefix=""):
    """Compute an OrderedDict of summary statistics over rollout paths.

    Each path is a dict with at least "rewards" and "actions"; optional
    "env_infos"/"agent_infos" entries are summarized by their first, last,
    and all per-step values. Returns an empty dict for an empty path list.
    """
    statistics = OrderedDict()
    if not paths:
        return statistics

    returns = [sum(path["rewards"]) for path in paths]
    rewards = np.vstack([path["rewards"] for path in paths])
    statistics.update(
        create_stats_ordered_dict("Rewards", rewards, stat_prefix=stat_prefix)
    )
    statistics.update(
        create_stats_ordered_dict("Returns", returns, stat_prefix=stat_prefix)
    )

    # 1-D per-path action arrays are concatenated; 2-D ones stacked row-wise.
    first_actions = paths[0]["actions"]
    stack = np.hstack if len(first_actions.shape) == 1 else np.vstack
    actions = stack([path["actions"] for path in paths])
    statistics.update(
        create_stats_ordered_dict("Actions", actions, stat_prefix=stat_prefix)
    )

    statistics["Num Paths"] = len(paths)
    statistics[stat_prefix + "Average Returns"] = get_average_returns(paths)

    for info_key in ["env_infos", "agent_infos"]:
        if info_key not in paths[0]:
            continue
        # Per path: list of per-step dicts -> dict of per-step lists.
        all_infos = [list_of_dicts__to__dict_of_lists(p[info_key]) for p in paths]
        # NOTE(review): keys are taken from the first path only — assumes
        # every path reports the same info keys; verify against collectors.
        for k in all_infos[0].keys():
            final_ks = np.array([info[k][-1] for info in all_infos])
            first_ks = np.array([info[k][0] for info in all_infos])
            all_ks = np.concatenate([info[k] for info in all_infos])
            statistics.update(
                create_stats_ordered_dict(
                    stat_prefix + k,
                    final_ks,
                    stat_prefix=f"{info_key}/final/",
                )
            )
            statistics.update(
                create_stats_ordered_dict(
                    stat_prefix + k,
                    first_ks,
                    stat_prefix=f"{info_key}/initial/",
                )
            )
            statistics.update(
                create_stats_ordered_dict(
                    stat_prefix + k,
                    all_ks,
                    stat_prefix=f"{info_key}/",
                )
            )

    return statistics
+
+
def get_average_returns(paths):
    """Mean over paths of the undiscounted return (sum of rewards per path)."""
    per_path_returns = [sum(path["rewards"]) for path in paths]
    return np.mean(per_path_returns)
diff --git a/cfpi/core/logging/tabulate.py b/cfpi/core/logging/tabulate.py
new file mode 100644
index 0000000..d4f4ec0
--- /dev/null
+++ b/cfpi/core/logging/tabulate.py
@@ -0,0 +1,914 @@
+# Taken from John's code
+
+"""Pretty-print tabular data."""
+
+
+import re
+from collections import namedtuple
+from itertools import zip_longest
+from platform import python_version_tuple
+
+if python_version_tuple()[0] < "3":
+ from functools import partial
+
+ _none_type = type(None)
+ _int_type = int
+ _float_type = float
+ _text_type = str
+ _binary_type = str
+else:
+ from functools import partial, reduce
+
+ _none_type = type(None)
+ _int_type = int
+ _float_type = float
+ _text_type = str
+ _binary_type = bytes
+
+
+__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
+__version__ = "0.7.2"
+
+
+Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
+
+
+DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
+
+
# A table structure is supposed to be:
#
#     --- lineabove ---------
#         headerrow
#     --- linebelowheader ---
#         datarow
#     --- linebetweenrows ---
#     ... (more datarows) ...
#     --- linebetweenrows ---
#         last datarow
#     --- linebelow ---------
+#
+# TableFormat's line* elements can be
+#
+# - either None, if the element is not used,
+# - or a Line tuple,
+# - or a function: [col_widths], [col_alignments] -> string.
+#
+# TableFormat's *row elements can be
+#
+# - either None, if the element is not used,
+# - or a DataRow tuple,
+# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
+#
+# padding (an integer) is the amount of white space around data values.
+#
+# with_header_hide:
+#
+# - either None, to display all table elements unconditionally,
+# - or a list of elements not to be displayed if the table has column headers.
+#
+TableFormat = namedtuple(
+ "TableFormat",
+ [
+ "lineabove",
+ "linebelowheader",
+ "linebetweenrows",
+ "linebelow",
+ "headerrow",
+ "datarow",
+ "padding",
+ "with_header_hide",
+ ],
+)
+
+
+def _pipe_segment_with_colons(align, colwidth):
+ """Return a segment of a horizontal line with optional colons which
+ indicate column's alignment (as in `pipe` output format)."""
+ w = colwidth
+ if align in ["right", "decimal"]:
+ return ("-" * (w - 1)) + ":"
+ elif align == "center":
+ return ":" + ("-" * (w - 2)) + ":"
+ elif align == "left":
+ return ":" + ("-" * (w - 1))
+ else:
+ return "-" * w
+
+
def _pipe_line_with_colons(colwidths, colaligns):
    """Render a full `pipe`-format separator line with alignment colons."""
    parts = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
    return "|{}|".format("|".join(parts))
+
+
+def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
+ alignment = {
+ "left": "",
+ "right": 'align="right"| ',
+ "center": 'align="center"| ',
+ "decimal": 'align="right"| ',
+ }
+ # hard-coded padding _around_ align attribute and value together
+ # rather than padding parameter which affects only the value
+ values_with_attrs = [
+ " " + alignment.get(a, "") + c + " " for c, a in zip(cell_values, colaligns)
+ ]
+ colsep = separator * 2
+ return (separator + colsep.join(values_with_attrs)).rstrip()
+
+
+def _latex_line_begin_tabular(colwidths, colaligns):
+ alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
+ tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
+ print("hi!", tabular_columns_fmt)
+ return "\\begin{tabular}{" + tabular_columns_fmt + "}\n\\hline"
+
+
+_table_formats = {
+ "simple": TableFormat(
+ lineabove=Line("", "-", " ", ""),
+ linebelowheader=Line("", "-", " ", ""),
+ linebetweenrows=None,
+ linebelow=Line("", "-", " ", ""),
+ headerrow=DataRow("", " ", ""),
+ datarow=DataRow("", " ", ""),
+ padding=0,
+ with_header_hide=["lineabove", "linebelow"],
+ ),
+ "plain": TableFormat(
+ lineabove=None,
+ linebelowheader=None,
+ linebetweenrows=None,
+ linebelow=None,
+ headerrow=DataRow("", " ", ""),
+ datarow=DataRow("", " ", ""),
+ padding=0,
+ with_header_hide=None,
+ ),
+ "grid": TableFormat(
+ lineabove=Line("+", "-", "+", "+"),
+ linebelowheader=Line("+", "=", "+", "+"),
+ linebetweenrows=Line("+", "-", "+", "+"),
+ linebelow=Line("+", "-", "+", "+"),
+ headerrow=DataRow("|", "|", "|"),
+ datarow=DataRow("|", "|", "|"),
+ padding=1,
+ with_header_hide=None,
+ ),
+ "pipe": TableFormat(
+ lineabove=_pipe_line_with_colons,
+ linebelowheader=_pipe_line_with_colons,
+ linebetweenrows=None,
+ linebelow=None,
+ headerrow=DataRow("|", "|", "|"),
+ datarow=DataRow("|", "|", "|"),
+ padding=1,
+ with_header_hide=["lineabove"],
+ ),
+ "orgtbl": TableFormat(
+ lineabove=None,
+ linebelowheader=Line("|", "-", "+", "|"),
+ linebetweenrows=None,
+ linebelow=None,
+ headerrow=DataRow("|", "|", "|"),
+ datarow=DataRow("|", "|", "|"),
+ padding=1,
+ with_header_hide=None,
+ ),
+ "rst": TableFormat(
+ lineabove=Line("", "=", " ", ""),
+ linebelowheader=Line("", "=", " ", ""),
+ linebetweenrows=None,
+ linebelow=Line("", "=", " ", ""),
+ headerrow=DataRow("", " ", ""),
+ datarow=DataRow("", " ", ""),
+ padding=0,
+ with_header_hide=None,
+ ),
+ "mediawiki": TableFormat(
+ lineabove=Line(
+ '{| class="wikitable" style="text-align: left;"',
+ "",
+ "",
+ "\n|+ \n|-",
+ ),
+ linebelowheader=Line("|-", "", "", ""),
+ linebetweenrows=Line("|-", "", "", ""),
+ linebelow=Line("|}", "", "", ""),
+ headerrow=partial(_mediawiki_row_with_attrs, "!"),
+ datarow=partial(_mediawiki_row_with_attrs, "|"),
+ padding=0,
+ with_header_hide=None,
+ ),
+ "latex": TableFormat(
+ lineabove=_latex_line_begin_tabular,
+ linebelowheader=Line("\\hline", "", "", ""),
+ linebetweenrows=None,
+ linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
+ headerrow=DataRow("", "&", "\\\\"),
+ datarow=DataRow("", "&", "\\\\"),
+ padding=1,
+ with_header_hide=None,
+ ),
+ "tsv": TableFormat(
+ lineabove=None,
+ linebelowheader=None,
+ linebetweenrows=None,
+ linebelow=None,
+ headerrow=DataRow("", "\t", ""),
+ datarow=DataRow("", "\t", ""),
+ padding=0,
+ with_header_hide=None,
+ ),
+}
+
+
+tabulate_formats = list(sorted(_table_formats.keys()))
+
+
+_invisible_codes = re.compile("\x1b\\[\\d*m") # ANSI color codes
+_invisible_codes_bytes = re.compile(b"\x1b\\[\\d*m") # ANSI color codes
+
+
def simple_separated_format(separator):
    """Build a minimal TableFormat whose columns are joined by `separator`,
    with no rules, borders, or padding (e.g. pass "\\t" for TSV)."""
    row = DataRow("", separator, "")
    return TableFormat(
        None,
        None,
        None,
        None,
        headerrow=row,
        datarow=row,
        padding=0,
        with_header_hide=None,
    )
+
+
+def _isconvertible(conv, string):
+ try:
+ conv(string)
+ return True
+ except ValueError:
+ return False
+
+
def _isnumber(string):
    """True when the value parses as a float ("123", "123.45", "1e3");
    False otherwise ("spam")."""
    return _isconvertible(float, string)
+
+
def _isint(string):
    """True for actual ints and for str/bytes values that parse as int
    ("123" yes, "123.45" no)."""
    if type(string) is int:
        return True
    return isinstance(string, (_binary_type, _text_type)) and _isconvertible(
        int, string
    )
+
+
def _type(string, has_invisible=True):
    """Classify a cell value as the least generic of type(None), int,
    float, bytes, or str.

    ANSI color codes are stripped first (when `has_invisible`), so a
    colorized number still classifies as numeric. Objects exposing
    .isoformat (datetime/date/time) are treated as text.
    """
    if has_invisible and isinstance(string, (_text_type, _binary_type)):
        string = _strip_invisible(string)

    if string is None:
        return _none_type
    if hasattr(string, "isoformat"):  # datetime.datetime, date, and time
        return _text_type
    if _isint(string):
        return int
    if _isnumber(string):
        return float
    if isinstance(string, _binary_type):
        return _binary_type
    return _text_type
+
+
def _afterpoint(string):
    """Number of symbols after the decimal point (or after the exponent
    marker for forms like "123e45"); -1 for non-numbers and integers."""
    if not _isnumber(string):
        return -1  # not a number
    if _isint(string):
        return -1
    pos = string.rfind(".")
    if pos < 0:
        # No decimal point: fall back to the exponent marker, if any.
        pos = string.lower().rfind("e")
    return len(string) - pos - 1 if pos >= 0 else -1
+
+
def _padleft(width, s, has_invisible=True):
    """Right-justify `s` to `width` visible characters (flush right).

    ANSI color codes count as zero width when `has_invisible`.
    """
    iwidth = width
    if has_invisible:
        iwidth += len(s) - len(_strip_invisible(s))
    return ("{0:>%ds}" % iwidth).format(s)
+
+
def _padright(width, s, has_invisible=True):
    """Left-justify `s` to `width` visible characters (flush left).

    ANSI color codes count as zero width when `has_invisible`.
    """
    iwidth = width
    if has_invisible:
        iwidth += len(s) - len(_strip_invisible(s))
    return ("{0:<%ds}" % iwidth).format(s)
+
+
def _padboth(width, s, has_invisible=True):
    """Center `s` within `width` visible characters.

    ANSI color codes count as zero width when `has_invisible`.
    """
    iwidth = width
    if has_invisible:
        iwidth += len(s) - len(_strip_invisible(s))
    return ("{0:^%ds}" % iwidth).format(s)
+
+
def _strip_invisible(s):
    """Drop ANSI color escape codes from a str or bytes value."""
    pattern = _invisible_codes if isinstance(s, _text_type) else _invisible_codes_bytes
    return re.sub(pattern, "", s)
+
+
def _visible_width(s):
    """Printed width of `s`, ignoring ANSI color codes; non-strings are
    measured by the length of their str() form."""
    if isinstance(s, (_text_type, _binary_type)):
        return len(_strip_invisible(s))
    return len(_text_type(s))
+
+
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
    """Pad every string in a column to a common width per `alignment`.

    "decimal" lines numbers up on the decimal point before flushing right;
    a falsy alignment disables padding and returns the strings untouched.
    """
    if alignment == "decimal":
        # Right-pad each number so decimal points line up, then flush right.
        # Note: decimal alignment intentionally does not strip the strings.
        decimals = [_afterpoint(s) for s in strings]
        maxdecimals = max(decimals)
        strings = [s + " " * (maxdecimals - decs) for s, decs in zip(strings, decimals)]
        padfn = _padleft
    elif alignment == "right":
        strings = [s.strip() for s in strings]
        padfn = _padleft
    elif alignment == "center":
        strings = [s.strip() for s in strings]
        padfn = _padboth
    elif not alignment:
        return strings
    else:
        # Default / "left" alignment.
        strings = [s.strip() for s in strings]
        padfn = _padright

    width_fn = _visible_width if has_invisible else len
    maxwidth = max(max(map(width_fn, strings)), minwidth)
    return [padfn(maxwidth, s, has_invisible) for s in strings]
+
+
def _more_generic(type1, type2):
    """Return the more general of two cell types, ordered
    none < int < float < bytes < str (unknown types count as str)."""
    rank = {_none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4}
    by_rank = {0: _none_type, 1: int, 2: float, 3: _binary_type, 4: _text_type}
    return by_rank[max(rank.get(type1, 4), rank.get(type2, 4))]
+
+
def _column_type(strings, has_invisible=True):
    """The least generic type that every value in the column converts to
    (int for all-int columns, float if any decimals, str otherwise)."""
    return reduce(_more_generic, (_type(s, has_invisible) for s in strings), int)
+
+
def _format(val, valtype, floatfmt, missingval=""):
    """Format one cell value according to its column type.

    None becomes `missingval`; floats honor `floatfmt`; bytes are decoded
    as ASCII; ints, text, and anything else use plain str() conversion.
    """
    if val is None:
        return missingval
    if valtype is _binary_type:
        return _text_type(val, "ascii")
    if valtype is float:
        return format(float(val), floatfmt)
    # int, text, and unknown types all fall through to string conversion.
    return f"{val}"
+
+
def _align_header(header, alignment, width):
    """Pad a single header cell to `width` per the column alignment;
    a falsy alignment leaves the header unpadded."""
    if alignment == "left":
        return _padright(width, header)
    if alignment == "center":
        return _padboth(width, header)
    if not alignment:
        return f"{header}"
    # "right", "decimal", or anything else: flush right.
    return _padleft(width, header)
+
+
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists, and a list of headers.

    Supported tabular data types:

    * list-of-lists or another iterable of iterables

    * list of named tuples (usually used with headers="keys")

    * 2D NumPy arrays

    * NumPy record arrays (usually used with headers="keys")

    * dict of iterables (usually used with headers="keys")

    * pandas.DataFrame (usually used with headers="keys")

    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".

    Returns a (rows, headers) pair where rows is a list of lists and
    headers is a (possibly empty) list of strings.
    """

    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict: .values is a bound method
            keys = list(tabular_data.keys())
            rows = list(
                zip_longest(*list(tabular_data.values()))
            )  # columns have to be transposed
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = list(tabular_data.keys())
            vals = tabular_data.values  # values matrix doesn't need to be transposed
            names = tabular_data.index
            # Prepend the index value to each row so it shows as a column.
            rows = [[v] + list(row) for v, row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")

        if headers == "keys":
            headers = list(map(_text_type, keys))  # headers should be strings

    else:  # a regular iterable of iterables, or a NumPy array
        rows = list(tabular_data)

        if (
            headers == "keys"
            and hasattr(tabular_data, "dtype")
            and getattr(tabular_data.dtype, "names")
        ):
            # numpy record array: field names become the headers
            headers = tabular_data.dtype.names
        elif (
            headers == "keys"
            and len(rows) > 0
            and isinstance(rows[0], tuple)
            and hasattr(rows[0], "_fields")
        ):  # namedtuple: use its field names
            headers = list(map(_text_type, rows[0]._fields))
        elif headers == "keys" and len(rows) > 0:  # keys are column indices
            headers = list(map(_text_type, list(range(len(rows[0])))))

    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))  # headers should be strings
        rows = rows[1:]

    headers = list(headers)
    rows = list(map(list, rows))

    # pad with empty headers for initial columns if necessary
    # (fewer headers than columns => headers name the *last* columns)
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [""] * (ncols - nhs) + headers

    return rows, headers
+
+
def tabulate(
    tabular_data,
    headers=(),
    tablefmt="simple",
    floatfmt="g",
    numalign="decimal",
    stralign="left",
    missingval="",
):
    """Format a fixed-width plain-text table.

    :param tabular_data: a list of lists (or other iterable of iterables),
        a list of namedtuples, a dict of iterables, a 2-D NumPy array, a
        NumPy record array, or a pandas.DataFrame.
    :param headers: an explicit list of column headers, or "firstrow" to
        take headers from the first data row, or "keys" to use dict keys /
        record field names / column indices.  Fix: the default was a shared
        mutable ``[]``; it is now ``()`` — behaviorally identical but
        immune to accidental cross-call mutation.
    :param tablefmt: a key of `tabulate_formats` ("simple", "plain",
        "grid", "pipe", "orgtbl", "rst", "mediawiki", "latex", "tsv") or a
        TableFormat instance; unknown names fall back to "simple".
    :param floatfmt: format specification applied to float columns.
    :param numalign: alignment for numeric columns ("right", "center",
        "left", "decimal", or None to disable).
    :param stralign: alignment for string columns.
    :param missingval: string substituted for None cells.
    :returns: the formatted table as a single string.
    """

    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # Optimization: look for ANSI control codes once, and enable the slower
    # invisible-character-aware width functions only if a code is found.
    plain_text = "\n".join(
        ["\t".join(map(_text_type, headers))]
        + ["\t".join(map(_text_type, row)) for row in list_of_lists]
    )
    has_invisible = re.search(_invisible_codes, plain_text)
    width_fn = _visible_width if has_invisible else len

    # Convert all cell values to strings, one inferred type per column.
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [
        [_format(v, ct, floatfmt, missingval) for v in c]
        for c, ct in zip(cols, coltypes)
    ]

    # Align columns: numeric columns use numalign, the rest stralign.
    aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
    minwidths = [width_fn(h) + 2 for h in headers] if headers else [0] * len(cols)
    cols = [
        _align_column(c, a, minw, has_invisible)
        for c, a, minw in zip(cols, aligns, minwidths)
    ]

    if headers:
        # Align headers to the (possibly wider) aligned column contents.
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, cols)]
        headers = [
            _align_header(h, a, minw) for h, a, minw in zip(headers, aligns, minwidths)
        ]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns)
+
+
+def _build_simple_row(padded_cells, rowfmt):
+ "Format row according to DataRow format without padding."
+ begin, sep, end = rowfmt
+ return (begin + sep.join(padded_cells) + end).rstrip()
+
+
+def _build_row(padded_cells, colwidths, colaligns, rowfmt):
+ "Return a string which represents a row of data cells."
+ if not rowfmt:
+ return None
+ if hasattr(rowfmt, "__call__"):
+ return rowfmt(padded_cells, colwidths, colaligns)
+ else:
+ return _build_simple_row(padded_cells, rowfmt)
+
+
+def _build_line(colwidths, colaligns, linefmt):
+ "Return a string which represents a horizontal line."
+ if not linefmt:
+ return None
+ if hasattr(linefmt, "__call__"):
+ return linefmt(colwidths, colaligns)
+ else:
+ begin, fill, sep, end = linefmt
+ cells = [fill * w for w in colwidths]
+ return _build_simple_row(cells, (begin, sep, end))
+
+
+def _pad_row(cells, padding):
+ if cells:
+ pad = " " * padding
+ padded_cells = [pad + cell + pad for cell in cells]
+ return padded_cells
+ else:
+ return cells
+
+
def _format_table(fmt, headers, rows, colwidths, colaligns):
    """Assemble the final multi-line table string from a TableFormat and
    pre-formatted headers/rows with their column widths and alignments."""
    # Elements listed in with_header_hide are suppressed when headers exist.
    hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
    pad = fmt.padding

    padded_widths = [w + 2 * pad for w in colwidths]
    padded_headers = _pad_row(headers, pad)
    padded_rows = [_pad_row(r, pad) for r in rows]

    lines = []

    def add_rule(linefmt):
        lines.append(_build_line(padded_widths, colaligns, linefmt))

    def add_data_row(cells):
        lines.append(_build_row(cells, padded_widths, colaligns, fmt.datarow))

    if fmt.lineabove and "lineabove" not in hidden:
        add_rule(fmt.lineabove)

    if padded_headers:
        lines.append(_build_row(padded_headers, padded_widths, colaligns, fmt.headerrow))
        if fmt.linebelowheader and "linebelowheader" not in hidden:
            add_rule(fmt.linebelowheader)

    if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # Every row except the last is followed by a separator line.
        for r in padded_rows[:-1]:
            add_data_row(r)
            add_rule(fmt.linebetweenrows)
        add_data_row(padded_rows[-1])
    else:
        for r in padded_rows:
            add_data_row(r)

    if fmt.linebelow and "linebelow" not in hidden:
        add_rule(fmt.linebelow)

    return "\n".join(lines)
diff --git a/cfpi/core/multiprocessing.py b/cfpi/core/multiprocessing.py
new file mode 100644
index 0000000..fb16ffe
--- /dev/null
+++ b/cfpi/core/multiprocessing.py
@@ -0,0 +1,27 @@
+import multiprocessing
+import multiprocessing.pool
+from multiprocessing.context import SpawnContext
+
+import torch
+
+
class NoDaemonProcess(torch.multiprocessing.Process):
    """Process whose daemon flag is pinned to False, so it is allowed to
    spawn child processes of its own (Pool workers set daemon=True)."""

    @property
    def daemon(self):
        # Always report non-daemonic regardless of what was assigned.
        return False

    @daemon.setter
    def daemon(self, value):
        # Silently ignore attempts to make this process daemonic.
        pass
+
+
class NoDaemonContext(SpawnContext):
    # Spawn context whose process factory produces non-daemonic processes,
    # which lets pool workers create their own subprocesses (nested pools).
    Process = NoDaemonProcess
+
+
+# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool
+# because the latter is only a wrapper function, not a proper class.
class NestablePool(multiprocessing.pool.Pool):
    """Pool whose workers are non-daemonic, so submitted tasks may spawn
    subprocesses (plain Pool workers are daemonic and cannot)."""

    def __init__(self, *args, **kwargs):
        # Force our non-daemonic spawn context, overriding any caller value.
        kwargs["context"] = NoDaemonContext()
        super().__init__(*args, **kwargs)
diff --git a/cfpi/core/rl_algorithm.py b/cfpi/core/rl_algorithm.py
new file mode 100644
index 0000000..b62e6cf
--- /dev/null
+++ b/cfpi/core/rl_algorithm.py
@@ -0,0 +1,253 @@
+import abc
+from collections import OrderedDict
+
+import cfpi.core.gtimer as gt
+from cfpi.core.logging import eval_util, logger
+from cfpi.data_management.replay_buffer import ReplayBuffer
+from cfpi.samplers.path_collector import MdpPathCollector
+
+
def _get_epoch_timings():
    """Collect per-stamp gtimer durations for the most recent iteration,
    plus the epoch sum and cumulative total, as an OrderedDict of
    'time/<name> (s)' entries."""
    itr_times = gt.get_times().stamps.itrs
    times = OrderedDict()
    epoch_total = 0
    for name in sorted(itr_times):
        elapsed = itr_times[name][-1]  # most recent iteration only
        epoch_total += elapsed
        times[f"time/{name} (s)"] = elapsed
    times["time/epoch (s)"] = epoch_total
    times["time/total (s)"] = gt.get_times().total
    return times
+
+
class BaseRLAlgorithm(metaclass=abc.ABCMeta):
    """Common skeleton for RL algorithms.

    Owns the trainer, exploration/evaluation environments and collectors,
    and the replay buffer; implements end-of-epoch snapshotting and
    logging.  Subclasses implement `_train` and `training_mode`.

    Bugfix: the per-epoch logging hook was defined as `log_additonal`
    (misspelled) while `_log_stats` called `self.log_additional`, so any
    subclass that did not define the correctly spelled method crashed with
    AttributeError at the end of every epoch.  The hook is now defined as
    `log_additional`; the old misspelling is kept as an alias for
    backward compatibility.
    """

    def __init__(
        self,
        trainer,
        exploration_env,
        evaluation_env,
        exploration_data_collector: MdpPathCollector,
        evaluation_data_collector: MdpPathCollector,
        replay_buffer: ReplayBuffer,
    ):
        self.trainer = trainer
        self.expl_env = exploration_env
        self.eval_env = evaluation_env
        self.expl_data_collector = exploration_data_collector
        self.eval_data_collector = evaluation_data_collector
        self.replay_buffer = replay_buffer
        self._start_epoch = 0

        # Callbacks invoked at the end of every epoch as f(algorithm, epoch).
        self.post_epoch_funcs = []

    def train(self, start_epoch=0):
        """Entry point: run training beginning at `start_epoch`."""
        self._start_epoch = start_epoch
        self._train()

    def _train(self):
        """
        Train model.  Subclasses implement the actual training loop.
        """
        raise NotImplementedError("_train must be implemented by inherited class")

    def _begin_epoch(self, epoch):
        """Hook called before each epoch; no-op by default."""
        pass

    def _end_epoch(self, epoch):
        """Save a snapshot, log stats, and notify all components the epoch ended."""
        snapshot = self._get_snapshot()
        logger.save_itr_params(epoch - self._start_epoch, snapshot)
        gt.stamp("saving")
        self._log_stats(epoch)

        self.expl_data_collector.end_epoch(epoch)
        self.eval_data_collector.end_epoch(epoch)
        self.replay_buffer.end_epoch(epoch)
        self.trainer.end_epoch(epoch)

        for post_epoch_func in self.post_epoch_funcs:
            post_epoch_func(self, epoch)

    def _get_snapshot(self):
        """Collect a checkpoint dict, namespacing each component's state."""
        snapshot = {}
        for k, v in self.trainer.get_snapshot().items():
            snapshot["trainer/" + k] = v
        for k, v in self.expl_data_collector.get_snapshot().items():
            snapshot["exploration/" + k] = v
        for k, v in self.eval_data_collector.get_snapshot().items():
            snapshot["evaluation/" + k] = v
        for k, v in self.replay_buffer.get_snapshot().items():
            snapshot["replay_buffer/" + k] = v
        return snapshot

    def record_exploration(self):
        """Log exploration diagnostics and path statistics under 'expl/'."""
        logger.record_dict(self.expl_data_collector.get_diagnostics(), prefix="expl/")
        expl_paths = self.expl_data_collector.get_epoch_paths()
        if hasattr(self.expl_env, "get_diagnostics"):
            logger.record_dict(
                self.expl_env.get_diagnostics(expl_paths),
                prefix="expl/",
            )
        logger.record_dict(
            eval_util.get_generic_path_information(expl_paths),
            prefix="expl/",
        )

    def log_additional(self, epoch):
        """Hook for subclasses to log extra per-epoch stats; no-op by default."""
        return

    # Backward-compatible alias for the previously misspelled hook name.
    log_additonal = log_additional

    def _log_stats(self, epoch):
        """Log replay-buffer, trainer, exploration, and evaluation stats."""
        logger.log(f"Epoch {epoch} finished", with_timestamp=True)
        logger.record_dict({"epoch": epoch})

        # Replay Buffer
        logger.record_dict(
            self.replay_buffer.get_diagnostics(), prefix="replay_buffer/"
        )

        # Trainer
        logger.record_dict(self.trainer.get_diagnostics(), prefix="trainer/")

        # Exploration
        self.record_exploration()

        # Evaluation
        logger.record_dict(
            self.eval_data_collector.get_diagnostics(),
            prefix="eval/",
        )
        eval_paths = self.eval_data_collector.get_epoch_paths()
        if hasattr(self.eval_env, "get_diagnostics"):
            logger.record_dict(
                self.eval_env.get_diagnostics(eval_paths),
                prefix="eval/",
            )
        logger.record_dict(
            eval_util.get_generic_path_information(eval_paths),
            prefix="eval/",
        )
        self.log_additional(epoch)

        # Misc / timing
        gt.stamp("logging")
        logger.record_dict(_get_epoch_timings())
        logger.record_tabular("Epoch", epoch)
        logger.dump_tabular(with_prefix=False, with_timestamp=False)

    @abc.abstractmethod
    def training_mode(self, mode):
        """
        Set training mode to `mode`.
        :param mode: If True, training will happen (e.g. set the dropout
        probabilities to not all ones).
        """
+
+
class BatchRLAlgorithm(BaseRLAlgorithm, metaclass=abc.ABCMeta):
    """Epoch-based algorithm alternating evaluation sampling, exploration
    sampling, and batch training from the replay buffer.

    Epoch numbering: epochs < 0 are offline (newly collected paths are
    NOT added to the replay buffer), epochs >= 0 are online.
    """

    def __init__(
        self,
        trainer,
        exploration_env,
        evaluation_env,
        exploration_data_collector: MdpPathCollector,
        evaluation_data_collector: MdpPathCollector,
        replay_buffer: ReplayBuffer,
        batch_size,
        max_path_length,
        num_epochs,
        num_eval_steps_per_epoch,
        num_expl_steps_per_train_loop,
        num_trains_per_train_loop,
        num_train_loops_per_epoch=1,
        min_num_steps_before_training=0,
        start_epoch=0,  # negative epochs are offline, positive epochs are online
    ):
        super().__init__(
            trainer,
            exploration_env,
            evaluation_env,
            exploration_data_collector,
            evaluation_data_collector,
            replay_buffer,
        )
        # Schedule hyperparameters for sampling and gradient updates.
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.num_epochs = num_epochs
        self.num_eval_steps_per_epoch = num_eval_steps_per_epoch
        self.num_trains_per_train_loop = num_trains_per_train_loop
        self.num_train_loops_per_epoch = num_train_loops_per_epoch
        self.num_expl_steps_per_train_loop = num_expl_steps_per_train_loop
        self.min_num_steps_before_training = min_num_steps_before_training
        self._start_epoch = start_epoch

    def train(self):
        """Negative epochs are offline, positive epochs are online"""
        for self.epoch in gt.timed_for(
            range(self._start_epoch, self.num_epochs),
            save_itrs=True,
        ):
            # Offline epochs never push new paths into the replay buffer.
            self.offline_rl = self.epoch < 0
            self._begin_epoch(self.epoch)
            self._train()
            self._end_epoch(self.epoch)

    def _train(self):
        # One-time warm start of the buffer at the first online epoch.
        if self.epoch == 0 and self.min_num_steps_before_training > 0:
            init_expl_paths = self.expl_data_collector.collect_new_paths(
                self.max_path_length,
                self.min_num_steps_before_training,
                discard_incomplete_paths=False,
            )
            if not self.offline_rl:
                self.replay_buffer.add_paths(init_expl_paths)
            self.expl_data_collector.end_epoch(-1)

        # Evaluation rollouts happen before any training this epoch.
        self.eval_data_collector.collect_new_paths(
            self.max_path_length,
            self.num_eval_steps_per_epoch,
            discard_incomplete_paths=True,
        )
        gt.stamp("evaluation sampling")

        for _ in range(self.num_train_loops_per_epoch):
            new_expl_paths = self.expl_data_collector.collect_new_paths(
                self.max_path_length,
                self.num_expl_steps_per_train_loop,
                discard_incomplete_paths=False,
            )
            gt.stamp("exploration sampling")

            if not self.offline_rl:
                self.replay_buffer.add_paths(new_expl_paths)
            gt.stamp("data storing")

            # Gradient updates on random minibatches from the buffer.
            self.training_mode(True)
            for _ in range(self.num_trains_per_train_loop):
                train_data = self.replay_buffer.random_batch(self.batch_size)
                self.trainer.train(train_data)
            gt.stamp("training")
            self.training_mode(False)
+
+
class Trainer(metaclass=abc.ABCMeta):
    """Minimal trainer interface consumed by BaseRLAlgorithm."""

    @abc.abstractmethod
    def train(self, data):
        # Perform one training update on a batch of data.
        pass

    def end_epoch(self, epoch):
        # Hook for per-epoch cleanup; no-op by default.
        pass

    def get_snapshot(self):
        # Objects to persist in checkpoints; none by default.
        return {}

    def get_diagnostics(self):
        # Scalars to log each epoch; none by default.
        return {}
diff --git a/cfpi/data_management/__init__.py b/cfpi/data_management/__init__.py
new file mode 100644
index 0000000..7f70f69
--- /dev/null
+++ b/cfpi/data_management/__init__.py
@@ -0,0 +1,35 @@
+import pickle
+
+import numpy as np
+import torch
+from torch import nn as nn
+
PICKLE = "pickle"
NUMPY = "numpy"
JOBLIB = "joblib"
TORCH = "torch"


def load_file(local_path, file_type=None):
    """Load a serialized object from disk, inferring the format when possible.

    :param local_path: path to a .npy, .pkl, .joblib, or .pt file.
    :param file_type: optional explicit format (one of NUMPY, PICKLE, JOBLIB,
        TORCH); inferred from the file extension when None.
    :return: the deserialized object.
    :raises ValueError: if file_type is None and the extension is unknown.
    """
    if file_type is None:
        extension = local_path.split(".")[-1]
        if extension == "npy":
            file_type = NUMPY
        elif extension == "pkl":
            file_type = PICKLE
        elif extension == "joblib":
            file_type = JOBLIB
        elif extension == "pt":
            file_type = TORCH
        else:
            raise ValueError("Could not infer file type.")
    if file_type == NUMPY:
        # Context manager closes the handle promptly (the previous bare
        # open() leaked the file descriptor).
        with open(local_path, "rb") as f:
            obj = np.load(f, allow_pickle=True)
    elif file_type == TORCH:
        obj = torch.load(local_path)
    else:
        # JOBLIB dumps are handled by pickle here as well.
        with open(local_path, "rb") as f:
            obj = pickle.load(f)
    print("loaded", local_path)
    return obj
diff --git a/cfpi/data_management/env_replay_buffer.py b/cfpi/data_management/env_replay_buffer.py
new file mode 100644
index 0000000..d70db32
--- /dev/null
+++ b/cfpi/data_management/env_replay_buffer.py
@@ -0,0 +1,118 @@
+import eztils.torch as ptu
+import torch
+from gym.spaces import Discrete
+
+from cfpi.data_management.simple_replay_buffer import SimpleReplayBuffer
+from cfpi.envs import get_dim
+
+
class EnvReplayBuffer(SimpleReplayBuffer):
    """Replay buffer whose storage dimensions come from a gym environment.

    Discrete actions are one-hot encoded before storage.
    """

    def __init__(self, max_replay_buffer_size, env, env_info_sizes=None):
        """
        :param max_replay_buffer_size: buffer capacity.
        :param env: environment whose spaces size the storage tensors.
        :param env_info_sizes: optional {info_key: size}; falls back to
            env.info_sizes when present, else an empty dict.
        """
        self.env = env
        self._ob_space = env.observation_space
        self._action_space = env.action_space

        if env_info_sizes is None:
            env_info_sizes = getattr(env, "info_sizes", dict())

        super().__init__(
            max_replay_buffer_size=max_replay_buffer_size,
            observation_dim=get_dim(self._ob_space),
            action_dim=get_dim(self._action_space),
            env_info_sizes=env_info_sizes,
        )

    def add_sample(
        self, observation, action, reward, terminal, next_observation, **kwargs
    ):
        """Store one transition, one-hot encoding discrete actions."""
        if isinstance(self._action_space, Discrete):
            encoded_action = ptu.zeros(self._action_dim)
            encoded_action[action] = 1
        else:
            encoded_action = action
        return super().add_sample(
            observation=observation,
            action=encoded_action,
            reward=reward,
            next_observation=next_observation,
            terminal=terminal,
            **kwargs,
        )
+
+
class EnvReplayBufferNextAction(EnvReplayBuffer):
    """Env replay buffer that additionally stores the action taken at the
    next state (needed for SARSA-style targets)."""

    def __init__(self, max_replay_buffer_size, env, env_info_sizes=None):
        super().__init__(max_replay_buffer_size, env, env_info_sizes)
        # Parallel storage for a_{t+1}, indexed like the other tensors.
        self._next_actions = ptu.zeros(
            (max_replay_buffer_size, get_dim(self._action_space))
        )

    def add_sample(
        self,
        observation,
        action,
        reward,
        next_observation,
        next_action,
        terminal,
        **kwargs,
    ):
        """Store one transition plus the next action.

        Discrete next actions are one-hot encoded, mirroring
        EnvReplayBuffer.add_sample.
        """
        if isinstance(self._action_space, Discrete):
            new_next_action = ptu.zeros(self._action_dim)
            new_next_action[next_action] = 1
        else:
            new_next_action = next_action

        # Bug fix: store the (possibly one-hot encoded) new_next_action —
        # previously the raw index was written, so Discrete action spaces
        # stored a scalar where a one-hot row was expected.
        # Must happen before super().add_sample(), which advances _top.
        self._next_actions[self._top] = new_next_action
        return super().add_sample(
            observation, action, reward, terminal, next_observation, **kwargs
        )

    def random_batch(self, batch_size):
        """Sample a batch that also contains next_actions."""
        indices = ptu.randperm(self._size, dtype=torch.long)[:batch_size]
        batch = dict(
            observations=self._observations[indices],
            actions=self._actions[indices],
            rewards=self._rewards[indices],
            terminals=self._terminals[indices],
            next_observations=self._next_obs[indices],
            next_actions=self._next_actions[indices],  #! only diff
        )
        for key in self._env_info_keys:
            assert key not in batch.keys()
            batch[key] = self._env_infos[key][indices]
        return batch
+
+
class EnvReplayBufferNextActionNewAction(EnvReplayBufferNextAction):
    """Buffer variant exposing an extra new_next_actions tensor.

    Intended to be bulk-loaded by the hdf5 loaders, so per-step insertion
    is disabled.
    """

    def __init__(self, max_replay_buffer_size, env, env_info_sizes=None):
        super().__init__(max_replay_buffer_size, env, env_info_sizes)
        self._new_next_actions = ptu.zeros(
            (max_replay_buffer_size, get_dim(self._action_space))
        )

    def add_sample(self, *args, **kwargs):
        """Unsupported: populate this buffer with an offline loader instead."""
        raise NotImplementedError

    def random_batch(self, batch_size):
        """Sample a batch including next_actions and new_next_actions."""
        chosen = ptu.randperm(self._size, dtype=torch.long)[:batch_size]
        batch = {
            "observations": self._observations[chosen],
            "actions": self._actions[chosen],
            "rewards": self._rewards[chosen],
            "terminals": self._terminals[chosen],
            "next_observations": self._next_obs[chosen],
            "next_actions": self._next_actions[chosen],
            "new_next_actions": self._new_next_actions[chosen],
        }
        for info_key in self._env_info_keys:
            assert info_key not in batch
            batch[info_key] = self._env_infos[info_key][chosen]
        return batch
diff --git a/cfpi/data_management/hdf5_path_loader.py b/cfpi/data_management/hdf5_path_loader.py
new file mode 100644
index 0000000..82a6f8b
--- /dev/null
+++ b/cfpi/data_management/hdf5_path_loader.py
@@ -0,0 +1,101 @@
+import d4rl
+import eztils.torch as ptu
+import numpy as np
+from torch import nn as nn
+
+
def d4rl_qlearning_dataset_with_next_actions(env):
    """Build a d4rl qlearning dataset with an added "next_actions" key.

    next_actions[t] is actions[t + 1]; the final transition is dropped from
    every key so all arrays stay the same length.
    """
    ds = d4rl.qlearning_dataset(env)
    # Shift actions back by one step; the roll wraps the first action to the
    # end, which is why the last entry of every array is discarded below.
    ds["next_actions"] = np.roll(ds["actions"], -1, axis=0)
    ds = {k: v[:-1] for k, v in ds.items()}

    return ds
+
+
def d4rl_qlearning_dataset_with_next_actions_new_actions(env):
    """Like d4rl_qlearning_dataset_with_next_actions, plus a
    "new_next_actions" key initialized as a copy of "next_actions"."""
    dataset = d4rl_qlearning_dataset_with_next_actions(env)
    dataset["new_next_actions"] = dataset["next_actions"].copy()

    return dataset
+
+
def load_hdf5(dataset, replay_buffer):
    """Copy a d4rl-style qlearning dataset into a replay buffer in place.

    :param dataset: dict with "observations", "actions", "next_observations",
        "rewards", "terminals" arrays of equal length.
    :param replay_buffer: buffer whose storage tensors are overwritten.
    """
    _obs = dataset["observations"]
    N = _obs.shape[0]
    assert (
        replay_buffer._max_replay_buffer_size >= N
    ), "dataset does not fit in replay buffer"

    _actions = dataset["actions"]
    _next_obs = dataset["next_observations"]
    _rew = dataset["rewards"][:N]
    _done = dataset["terminals"][:N]

    replay_buffer._observations[:N] = ptu.to_torch(_obs[:N])
    replay_buffer._next_obs[:N] = ptu.to_torch(_next_obs[:N])
    replay_buffer._actions[:N] = ptu.to_torch(_actions[:N])
    # Rewards/terminals are stored as (N, 1) columns.
    replay_buffer._rewards[:N] = ptu.to_torch(np.expand_dims(_rew, 1)[:N])
    replay_buffer._terminals[:N] = ptu.to_torch(np.expand_dims(_done, 1)[:N])
    # NOTE(review): _size is set to N - 1, so the final transition is never
    # sampled — presumably deliberate (its next_action can be invalid after
    # the roll in the dataset builders); confirm intent.
    replay_buffer._size = N - 1
    replay_buffer._top = replay_buffer._size
+
+
def load_hdf5_next_actions(dataset, replay_buffer):
    """Load a dataset via load_hdf5, then copy over the next_actions array."""
    load_hdf5(dataset, replay_buffer)
    n = dataset["observations"].shape[0]

    replay_buffer._next_actions[:n] = ptu.to_torch(dataset["next_actions"][:n])
+
+
def load_hdf5_next_actions_new_actions(dataset, replay_buffer):
    """Load a dataset with next_actions, then copy over new_next_actions."""
    load_hdf5_next_actions(dataset, replay_buffer)
    n = dataset["observations"].shape[0]

    replay_buffer._new_next_actions[:n] = ptu.to_torch(
        dataset["new_next_actions"][:n]
    )
+
+
def load_hdf5_next_actions_and_val_data(
    dataset, replay_buffer, train_raio=0.95, fold_idx=1
):
    """Split a dataset into train/validation and load only the train split.

    :param dataset: qlearning dataset that already contains "next_actions".
    :param replay_buffer: buffer (with _next_actions storage) filled in place.
    :param train_raio: fraction of transitions used for training.
        NOTE(review): misspelling of "train_ratio"; kept as-is for
        keyword-argument backward compatibility.
    :param fold_idx: number of permutations drawn; only the last is used, so
        this acts as a crude fold selector — presumably relying on a globally
        seeded RNG for reproducibility, confirm at call sites.
    :return: (replay_buffer, val_observations, val_actions)
    """
    _obs = dataset["observations"]
    _actions = dataset["actions"]
    _next_obs = dataset["next_observations"]
    _next_actions = dataset["next_actions"]
    _rew = dataset["rewards"]
    _done = dataset["terminals"]

    N = _obs.shape[0]
    assert (
        replay_buffer._max_replay_buffer_size >= N
    ), "dataset does not fit in replay buffer"

    # Sanity check: next_actions must be the actions shifted back one step.
    assert np.array_equal(
        _next_actions[: N - 1],
        _actions[1:N],
    )

    # rng = np.random.default_rng()
    for _ in range(fold_idx):
        indices = np.random.permutation(N)
    tran_indices, val_indices = np.split(indices, [int(N * train_raio)])

    size = len(tran_indices)
    replay_buffer._observations[:size] = ptu.to_torch(_obs[tran_indices])
    replay_buffer._next_obs[:size] = ptu.to_torch(_next_obs[tran_indices])
    replay_buffer._actions[:size] = ptu.to_torch(_actions[tran_indices])
    replay_buffer._rewards[:size] = ptu.to_torch(np.expand_dims(_rew[tran_indices], 1))
    replay_buffer._terminals[:size] = ptu.to_torch(
        np.expand_dims(_done[tran_indices], 1)
    )
    replay_buffer._next_actions[:size] = ptu.to_torch(_next_actions[tran_indices])

    # NOTE(review): as in load_hdf5, the last loaded transition is excluded
    # from sampling (_size = size - 1) — confirm this is intentional.
    replay_buffer._size = size - 1
    replay_buffer._top = replay_buffer._size

    val_observations = ptu.to_torch(_obs[val_indices])
    val_actions = ptu.to_torch(_actions[val_indices])

    return replay_buffer, val_observations, val_actions
diff --git a/cfpi/data_management/replay_buffer.py b/cfpi/data_management/replay_buffer.py
new file mode 100644
index 0000000..76a462e
--- /dev/null
+++ b/cfpi/data_management/replay_buffer.py
@@ -0,0 +1,86 @@
+import abc
+
+
class ReplayBuffer(metaclass=abc.ABCMeta):
    """
    A class used to save and replay data.
    """

    @abc.abstractmethod
    def add_sample(
        self, observation, action, reward, next_observation, terminal, **kwargs
    ):
        """
        Add a transition tuple.
        """

    @abc.abstractmethod
    def terminate_episode(self):
        """
        Let the replay buffer know that the episode has terminated in case some
        special book-keeping has to happen.
        :return:
        """

    def add_path(self, path):
        """
        Add a path to the replay buffer, one transition at a time.

        This default implementation naively goes through every step, but you
        may want to optimize this.

        NOTE: You should NOT call "terminate_episode" after calling add_path;
        this method handles the episode termination itself.

        :param path: Dict like one outputted by cfpi.samplers.util.rollout
        """
        steps = zip(
            path["observations"],
            path["actions"],
            path["rewards"],
            path["next_observations"],
            path["terminals"],
            path["agent_infos"],
            path["env_infos"],
        )
        for obs, action, reward, next_obs, terminal, agent_info, env_info in steps:
            self.add_sample(
                observation=obs,
                action=action,
                reward=reward,
                next_observation=next_obs,
                terminal=terminal,
                agent_info=agent_info,
                env_info=env_info,
            )
        self.terminate_episode()

    def add_paths(self, paths):
        """Add every path in an iterable of paths."""
        for path in paths:
            self.add_path(path)

    @abc.abstractmethod
    def random_batch(self, batch_size):
        """
        Return a batch of size `batch_size`.
        :param batch_size:
        :return:
        """

    def get_diagnostics(self):
        """Return logging diagnostics; empty by default."""
        return {}

    def get_snapshot(self):
        """Return objects to checkpoint; empty by default."""
        return {}

    def end_epoch(self, epoch):
        """Per-epoch hook; no-op by default."""
        return
diff --git a/cfpi/data_management/simple_replay_buffer.py b/cfpi/data_management/simple_replay_buffer.py
new file mode 100644
index 0000000..7eb186a
--- /dev/null
+++ b/cfpi/data_management/simple_replay_buffer.py
@@ -0,0 +1,101 @@
+from collections import OrderedDict
+
+import eztils.torch as ptu
+import torch
+
+from cfpi.data_management.replay_buffer import ReplayBuffer
+
+
class SimpleReplayBuffer(ReplayBuffer):
    """Fixed-capacity replay buffer backed by preallocated torch tensors.

    Writes wrap around once capacity is reached (ring buffer).
    """

    def __init__(
        self,
        max_replay_buffer_size,
        observation_dim,
        action_dim,
        env_info_sizes,
    ):
        """
        :param max_replay_buffer_size: ring-buffer capacity.
        :param observation_dim: flat observation size.
        :param action_dim: flat action size.
        :param env_info_sizes: mapping env_info key -> per-step value size.
        """
        self._observation_dim = observation_dim
        self._action_dim = action_dim
        self._max_replay_buffer_size = max_replay_buffer_size

        capacity = max_replay_buffer_size
        self._observations = ptu.zeros((capacity, observation_dim))
        # Storing next observations separately duplicates memory, but frees
        # all other code from reasoning about episode boundaries.
        self._next_obs = ptu.zeros((capacity, observation_dim))
        self._actions = ptu.zeros((capacity, action_dim))
        # Everything is kept 2D so consumers can rely on the shapes.
        self._rewards = ptu.zeros((capacity, 1))
        # _terminals[i] == 1 iff a terminal was received at step i.
        self._terminals = ptu.zeros((capacity, 1), dtype=torch.uint8)
        # _env_infos[key][i] mirrors env_info[key] at step i.
        self._env_infos = {
            key: ptu.zeros((capacity, size)) for key, size in env_info_sizes.items()
        }
        self._env_info_keys = list(env_info_sizes.keys())

        self._top = 0  # next write position
        self._size = 0  # number of valid entries

    def add_sample(
        self,
        observation,
        action,
        reward,
        next_observation,
        terminal,
        env_info,
        **kwargs,
    ):
        """Write one transition at the current head and advance it."""
        idx = self._top
        self._observations[idx] = observation
        self._actions[idx] = action
        self._rewards[idx] = reward
        self._terminals[idx] = terminal
        self._next_obs[idx] = next_observation

        for key in self._env_info_keys:
            self._env_infos[key][idx] = env_info[key]
        self._advance()

    def terminate_episode(self):
        """No episode-level bookkeeping is needed for this buffer."""
        pass

    def clear(self):
        """Reset the buffer to empty without freeing storage."""
        self._top = 0
        self._size = 0
        self._episode_starts = []
        self._cur_episode_start = 0

    def _advance(self):
        """Move the write head forward, wrapping at capacity."""
        self._top = (self._top + 1) % self._max_replay_buffer_size
        if self._size < self._max_replay_buffer_size:
            self._size += 1

    def random_batch(self, batch_size):
        """Sample (without replacement) a batch of stored transitions."""
        chosen = ptu.randperm(self._size, dtype=torch.long)[:batch_size]

        batch = {
            "observations": self._observations[chosen],
            "actions": self._actions[chosen],
            "rewards": self._rewards[chosen],
            "terminals": self._terminals[chosen],
            "next_observations": self._next_obs[chosen],
        }
        for key in self._env_info_keys:
            assert key not in batch
            batch[key] = self._env_infos[key][chosen]
        return batch

    def rebuild_env_info_dict(self, idx):
        """Reconstruct the env_info dict for a single step."""
        return {key: self._env_infos[key][idx] for key in self._env_info_keys}

    def batch_env_info_dict(self, indices):
        """Reconstruct env_info values for a batch of steps."""
        return {key: self._env_infos[key][indices] for key in self._env_info_keys}

    def num_steps_can_sample(self):
        """Number of transitions currently available for sampling."""
        return self._size

    def get_diagnostics(self):
        """Report the current fill level for logging."""
        return OrderedDict([("size", self._size)])
diff --git a/cfpi/envs.py b/cfpi/envs.py
new file mode 100644
index 0000000..9aa6894
--- /dev/null
+++ b/cfpi/envs.py
@@ -0,0 +1,153 @@
+"""
+This file provides a more uniform interface to gym.make(env_id) that handles
+imports and normalization
+"""
+
+import gym
+import numpy as np
+from eztils import bold
+from gym import Env
+from gym.spaces import Box, Discrete, Tuple
+
+
def env_producer(
    env_id=None,
    env_class=None,
    env_kwargs=None,
    normalize_env=True,
    seed=None,
    d4rl=False,
):
    """Create, and optionally seed and normalize, an environment.

    Exactly one of env_id / env_class must be supplied. When d4rl is True,
    the TimeLimit wrapper is stripped and normalization must be disabled.
    """
    assert env_id or env_class
    if env_class and env_kwargs is None:
        env_kwargs = {}

    if d4rl:
        bold("Importing D4RL...")
        assert normalize_env is False
        # Imported for its side effect of registering d4rl envs with gym —
        # presumably; confirm no other use.
        import d4rl

        env = gym.make(env_id).env  # Unwrap TimeLimit on Environment
    elif env_class:
        env = env_class(**env_kwargs)
    elif env_id:
        env = gym.make(env_id)

    if seed is not None:
        env.seed(seed)
    if normalize_env:
        env = NormalizedBoxEnv(env)

    return env
+
+
def get_dim(space):
    """Return the flat dimensionality of a gym space."""
    if isinstance(space, Box):
        return space.low.size
    if isinstance(space, Discrete):
        return space.n
    if isinstance(space, Tuple):
        # Tuples flatten to the sum of their member dimensions.
        return sum(get_dim(sub) for sub in space.spaces)
    if hasattr(space, "flat_dim"):
        return space.flat_dim
    raise TypeError(f"Unknown space: {space}")
+
+
class ProxyEnv(Env):
    """Env wrapper that forwards everything to a wrapped environment.

    Unknown attribute lookups fall through to the wrapped env via
    __getattr__, so subclasses only override what they change.
    """

    def __init__(self, wrapped_env):
        self._wrapped_env = wrapped_env
        self.action_space = self._wrapped_env.action_space
        self.observation_space = self._wrapped_env.observation_space

    @property
    def wrapped_env(self):
        return self._wrapped_env

    def reset(self, **kwargs):
        return self._wrapped_env.reset(**kwargs)

    def step(self, action):
        return self._wrapped_env.step(action)

    def render(self, *args, **kwargs):
        return self._wrapped_env.render(*args, **kwargs)

    @property
    def horizon(self):
        return self._wrapped_env.horizon

    def terminate(self):
        # Not all envs implement terminate; forward only when present.
        if hasattr(self.wrapped_env, "terminate"):
            self.wrapped_env.terminate()

    def __getattr__(self, attr):
        # Guard against infinite recursion during unpickling, when
        # _wrapped_env may not have been restored yet.
        if attr == "_wrapped_env":
            raise AttributeError()
        return getattr(self._wrapped_env, attr)

    def __getstate__(self):
        """
        This is useful to override in case the wrapped env has some funky
        __getstate__ that doesn't play well with overriding __getattr__.

        The main problematic case is/was gym's EzPickle serialization scheme.
        :return:
        """
        return self.__dict__

    def __setstate__(self, state):
        self.__dict__.update(state)

    def __str__(self):
        return f"{type(self).__name__}({self.wrapped_env})"
+
+
class NormalizedBoxEnv(ProxyEnv):
    """
    Normalize action to in [-1, 1].

    Optionally normalize observations and scale reward.
    """

    def __init__(
        self,
        env,
        reward_scale=1.0,
        obs_mean=None,
        obs_std=None,
    ):
        ProxyEnv.__init__(self, env)
        # Observation normalization is on iff a mean or std was supplied.
        self._should_normalize = not (obs_mean is None and obs_std is None)
        if self._should_normalize:
            obs_mean = (
                np.zeros_like(env.observation_space.low)
                if obs_mean is None
                else np.array(obs_mean)
            )
            obs_std = (
                np.ones_like(env.observation_space.low)
                if obs_std is None
                else np.array(obs_std)
            )
        self._reward_scale = reward_scale
        self._obs_mean = obs_mean
        self._obs_std = obs_std
        upper = np.ones(self._wrapped_env.action_space.shape)
        self.action_space = Box(-1 * upper, upper)

    def _apply_normalize_obs(self, obs):
        # Epsilon guards against division by zero for constant dimensions.
        return (obs - self._obs_mean) / (self._obs_std + 1e-8)

    def step(self, action):
        low = self._wrapped_env.action_space.low
        high = self._wrapped_env.action_space.high
        # Affinely map [-1, 1] into [low, high], then clip for safety.
        unscaled = low + (action + 1.0) * 0.5 * (high - low)
        unscaled = np.clip(unscaled, low, high)

        next_obs, reward, done, info = self._wrapped_env.step(unscaled)
        if self._should_normalize:
            next_obs = self._apply_normalize_obs(next_obs)
        return next_obs, reward * self._reward_scale, done, info

    def __str__(self):
        return "Normalized: %s" % self._wrapped_env
diff --git a/cfpi/external/onestep.py b/cfpi/external/onestep.py
new file mode 100644
index 0000000..64cfb17
--- /dev/null
+++ b/cfpi/external/onestep.py
@@ -0,0 +1,264 @@
+import math
+from numbers import Number
+
+import torch
+import torch.distributions as D
+from torch import nn
+from torch.distributions import Distribution, constraints
+from torch.distributions.utils import broadcast_all
+
+## bridge functions between their repo and ours
+
+
def soft_clamp(x, low, high):
    """Smoothly squash x into (low, high) by rescaling tanh(x)."""
    squashed = torch.tanh(x)
    return low + 0.5 * (high - low) * (squashed + 1)
+
+
class GaussMLP(nn.Module):
    """MLP head producing a Gaussian-family action distribution."""

    def __init__(self, state_dim, action_dim, width, depth, dist_type):
        super().__init__()
        # The net emits both mean and log-std, hence 2 * action_dim outputs.
        self.net = MLP(
            input_shape=(state_dim), output_dim=2 * action_dim, width=width, depth=depth
        )
        self.log_std_bounds = (-5.0, 0.0)
        self.mu_bounds = (-1.0, 1.0)
        self.dist_type = dist_type

    def forward(self, s):
        """Return the action distribution for a batch of states."""
        flat = torch.flatten(s, start_dim=1)
        mu, log_std = self.net(flat).chunk(2, dim=-1)

        # Soft-clamp keeps both heads in sane, differentiable ranges.
        mu = soft_clamp(mu, *self.mu_bounds)
        log_std = soft_clamp(log_std, *self.log_std_bounds)
        std = log_std.exp()

        if self.dist_type == "normal":
            return D.Normal(mu, std)
        if self.dist_type == "trunc":
            return TruncatedNormal(mu, std)
        if self.dist_type == "squash":
            return SquashedNormal(mu, std)
        raise TypeError("Expected dist_type to be 'normal', 'trunc', or 'squash'")
+
+
+#! q_network.PY
+
+
class QMLP(nn.Module):
    """MLP Q-function over concatenated (state, action) inputs."""

    def __init__(self, state_dim, action_dim, width, depth):
        super().__init__()
        self.net = MLP(
            input_shape=(state_dim + action_dim), output_dim=1, width=width, depth=depth
        )

    def forward(self, s, a):
        """Return Q(s, a) with one value per batch row."""
        flat_s = torch.flatten(s, start_dim=1)
        flat_a = torch.flatten(a, start_dim=1)
        return self.net(torch.cat([flat_s, flat_a], axis=-1))
+
+
+#! utils.PY
+
+
class MLP(nn.Module):
    """Flatten-then-feedforward ReLU network.

    :param input_shape: int or shape tuple; the flattened input size is the
        product of its entries.
    :param output_dim: size of the final linear layer.
    :param width: hidden layer width.
    :param depth: number of hidden layers (>= 1).
    """

    def __init__(self, input_shape, output_dim, width, depth):
        super().__init__()
        # .prod() yields a 0-d tensor; nn.Linear expects a plain int, so
        # cast explicitly instead of passing the tensor through.
        input_dim = int(torch.tensor(input_shape).prod().item())
        layers = [
            nn.Linear(input_dim, width),
            # nn.LayerNorm(width), nn.Tanh()]
            nn.ReLU(),
        ]
        for _ in range(depth - 1):
            layers.append(nn.Linear(width, width))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(width, output_dim))
        self.net = nn.Sequential(nn.Flatten(), *layers)

    def forward(self, x):
        """Apply the network; x is flattened past the batch dimension."""
        return self.net(x)
+
+
# Precomputed constants for the truncated-normal math below.
CONST_SQRT_2 = math.sqrt(2)
CONST_INV_SQRT_2PI = 1 / math.sqrt(2 * math.pi)
CONST_INV_SQRT_2 = 1 / math.sqrt(2)
CONST_LOG_INV_SQRT_2PI = math.log(CONST_INV_SQRT_2PI)
CONST_LOG_SQRT_2PI_E = 0.5 * math.log(2 * math.pi * math.e)
+
+
class TruncatedStandardNormal(Distribution):
    """
    Truncated Standard Normal distribution
    https://github.com/toshas/torch_truncnorm
    https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    """

    arg_constraints = {
        "a": constraints.real,
        "b": constraints.real,
    }
    support = constraints.real
    has_rsample = True

    def __init__(self, a, b, eps=1e-8, validate_args=None):
        # a, b are the lower/upper truncation bounds in standard-normal units.
        self.a, self.b = broadcast_all(a, b)
        if isinstance(a, Number) and isinstance(b, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.a.size()
        super().__init__(batch_shape, validate_args=validate_args)
        if self.a.dtype != self.b.dtype:
            raise ValueError("Truncation bounds types are different")
        # Smallest/largest uniform draws used by rsample; keeping them
        # strictly inside (0, 1) avoids infinities from the inverse CDF.
        self._dtype_min_gt_0 = torch.tensor(torch.finfo(self.a.dtype).eps).type_as(
            self.a
        )
        self._dtype_max_lt_1 = torch.tensor(1 - torch.finfo(self.a.dtype).eps).type_as(
            self.a
        )
        self._big_phi_a = self._big_phi(self.a)
        self._big_phi_b = self._big_phi(self.b)
        # _Z is the untruncated normal's mass inside [a, b]; clamped so the
        # log and divisions below stay finite.
        self._Z = (self._big_phi_b - self._big_phi_a).clamp_min(eps)
        self._log_Z = self._Z.log()

    @property
    def mean(self):
        # Mean of a truncated standard normal: (phi(a) - phi(b)) / Z.
        return -(self._little_phi(self.b) - self._little_phi(self.a)) / self._Z

    @property
    def auc(self):
        # Area under the untruncated density between the bounds.
        return self._Z

    @staticmethod
    def _little_phi(x):
        # Standard normal pdf.
        return (-(x**2) * 0.5).exp() * CONST_INV_SQRT_2PI

    @staticmethod
    def _big_phi(x):
        # Standard normal cdf.
        return 0.5 * (1 + (x * CONST_INV_SQRT_2).erf())

    @staticmethod
    def _inv_big_phi(x):
        # Inverse standard normal cdf.
        return CONST_SQRT_2 * (2 * x - 1).erfinv()

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return ((self._big_phi(value) - self._big_phi_a) / self._Z).clamp(0, 1)

    def icdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return self._inv_big_phi(self._big_phi_a + value * self._Z)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # log N(value | 0, 1) - log Z: the renormalized truncated density.
        return CONST_LOG_INV_SQRT_2PI - self._log_Z - (value**2) * 0.5

    def rsample(self, sample_shape=torch.Size()):
        # Reparameterized inverse-CDF sampling from a uniform draw.
        shape = self._extended_shape(sample_shape)
        p = torch.empty(shape).uniform_(self._dtype_min_gt_0, self._dtype_max_lt_1)
        p = p.type_as(self.a)
        return self.icdf(p)
+
+
class TruncatedNormal(TruncatedStandardNormal):
    """
    Truncated Normal distribution
    https://github.com/toshas/torch_truncnorm
    https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    """

    arg_constraints = {
        "loc": constraints.real,
        "scale": constraints.positive,
        "a": constraints.real,
        "b": constraints.real,
    }
    support = constraints.real
    has_rsample = True

    def __init__(self, loc, scale, a=-1, b=1, eps=1e-8, validate_args=None):
        # loc/scale shift and scale the distribution; a/b are the truncation
        # bounds in *data* units (default (-1, 1), matching tanh-style actions).
        self.loc, self.scale, self.a, self.b = broadcast_all(loc, scale, a, b)
        # Convert the bounds into standard-normal units for the base class.
        a_standard = (a - self.loc) / self.scale
        b_standard = (b - self.loc) / self.scale
        super().__init__(a_standard, b_standard, eps=eps, validate_args=validate_args)
        self._log_scale = self.scale.log()

    @property
    def mean(self):
        # De-standardize the base class's mean.
        mean = super().mean
        return mean * self.scale + self.loc

    def _to_std_rv(self, value):
        # Map a data-space value into standard-normal units.
        if self._validate_args:
            self._validate_sample(value)
        return (value - self.loc) / self.scale

    def _from_std_rv(self, value):
        # Map a standard-normal-space value back into data units.
        if self._validate_args:
            self._validate_sample(value)
        return value * self.scale + self.loc

    def cdf(self, value):
        return super().cdf(self._to_std_rv(value))

    def icdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return self._from_std_rv(super().icdf(value))

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # Change-of-variables: subtract log|scale| from the standard density.
        return super().log_prob(self._to_std_rv(value)) - self._log_scale
+
+
+from torch.distributions import Normal, TanhTransform, transformed_distribution
+
+
class SquashedNormal(transformed_distribution.TransformedDistribution):
    """Normal distribution squashed through tanh, supported on (-1, 1).

    :param loc: mean of the underlying (pre-squash) Normal.
    :param scale: standard deviation of the underlying Normal.
    """

    def __init__(self, loc, scale):
        self.loc = loc
        self.scale = scale

        self.base_dist = Normal(loc, scale)
        # cache_size=1 lets log_prob reuse the pre-tanh value of the most
        # recent (r)sample instead of recovering it with atanh, which is
        # numerically unstable near +/-1.
        transforms = [TanhTransform(cache_size=1)]
        super().__init__(self.base_dist, transforms)

    @property
    def mean(self):
        # tanh of the base mean — a convenient point estimate rather than
        # the exact expectation of the squashed variable.
        mu = self.loc
        for tr in self.transforms:
            mu = tr(mu)
        return mu
+
+
def t_dtype_single(x):
    """Map double-precision dtypes to their single-precision counterparts."""
    demotions = {torch.int64: torch.int32, torch.float64: torch.float32}
    return demotions.get(x.dtype, x.dtype)
+
+
def torch_single_precision(x):
    """Convert x to a single-precision torch.Tensor.

    Non-tensors are wrapped via torch.tensor first; int64/float64 become
    int32/float32, everything else keeps its dtype.
    """
    # isinstance (rather than type(x) != torch.Tensor) also accepts
    # torch.Tensor subclasses such as nn.Parameter.
    if not isinstance(x, torch.Tensor):
        x = torch.tensor(x)
    return x.type(t_dtype_single(x))
+
+
def mode(mixed_dist):
    """Return the mean of the most probable mixture component, per batch row."""
    weights = mixed_dist.mixture_distribution.probs
    best_component = torch.argmax(weights, dim=1)
    component_means = mixed_dist.component_distribution.mean
    rows = range(component_means.shape[0])
    # Pick each row's winning component mean.
    return component_means[rows, best_component]
diff --git a/cfpi/launchers/__init__.py b/cfpi/launchers/__init__.py
new file mode 100644
index 0000000..1fc3fb0
--- /dev/null
+++ b/cfpi/launchers/__init__.py
@@ -0,0 +1,453 @@
+"""
+This file contains 'launchers', which are self-contained functions that take
+in a configuration dictionary and runs a full experiment. The dictionary configures the
+experiment. Examples include run_pipeline_here, run_parallel_pipeline_here, and run_hyperparameters
+
+It is important that the functions are completely self-contained (i.e. they
+import their own modules) so that they can be serialized.
+"""
+import copy
+import datetime
+import itertools
+import os
+import os.path as osp
+import pickle
+import random
+import shutil
+import sys
+import time
+import traceback
+from copy import deepcopy
+from pathlib import Path
+
+import torch
+from eztils import bold, green, query_yes_no, red
+from eztils.git import generate_snapshot
+from eztils.torch import seed_everything, set_gpu_mode
+from tqdm import tqdm
+
+import wandb
+from cfpi import conf
+from cfpi.core.logging import logger
+from cfpi.core.multiprocessing import NestablePool
+
+from .pipeline import Pipelines
+
+
def print_welcome():
    """Print the project banner once per process; later calls are no-ops."""
    if not conf.DISPLAY_WELCOME:
        return
    logo = r"""
โโโโโโโโโโโโโโโ โโโโโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโ โโโโโโโโโโโโโโโโโโ
    """
    icon = r"""
       ,_-=(!7(7/zs_.
    .='  ' .`/,/!(=)Zm.
  .._,,._..  ,-`- `,\ ` -` -`\\7//WW.
 ,v=~/.-,-\- -!|V-s.)iT-|s|\-.'   `///mK%.
v!`i!-.e]-g`bT/i(/[=.Z/m)K(YNYi..   /-]i44M.
v`/,`|v]-DvLcfZ/eV/iDLN\D/ZK@%8W[Z..   `/d!Z8m
//,c\(2(X/NYNY8]ZZ/bZd\()/\7WY%WKKW)   -'|(][%4.
,\\i\c(e)WX@WKKZKDKWMZ8(b5/ZK8]Z7%ffVM,   -.Y!bNMi
/-iit5N)KWG%%8%%%%W8%ZWM(8YZvD)XN(@.  [   \]!/GXW[
/ ))G8\NMN%W%%%%%%%%%%8KK@WZKYK*ZG5KMi,-   vi[NZGM[
i\!(44Y8K%8%%%**~YZYZ@%%%%%4KWZ/PKN)ZDZ7   c=//WZK%!
,\v\YtMZW8W%%f`,`.t/bNZZK%%W%%ZXb*K(K5DZ   -c\\/KM48
-|c5PbM4DDW%f  v./c\[tMY8W%PMW%D@KW)Gbf   -/(=ZZKM8[
2(N8YXWK85@K   -'c|K4/KKK%@  V%@@WD8e~  .//ct)8ZK%8`
=)b%]Nd)@KM[  !'\cG!iWYK%%|   !M@KZf    -c\))ZDKW%`
YYKWZGNM4/Pb  '-VscP4]b@W%     'Mf`   -L\///KM(%W!
!KKW4ZK/W7)Z. '/cttbY)DKW%     -`  .',\v)K(5KW%%f
'W)KWKZZg)Z2/,!/L(-DYYb54%  ,,`, -\-/v(((KK5WW%f
 \M4NDDKZZ(e!/\7vNTtZd)8\Mi!\-,-/i-v((tKNGN%W%%
 'M8M88(Zd))///((|D\tDY\\KK-`/-i(=)KtNNN@W%%%@%[
  !8%@KW5KKN4///s(\Pd!ROBY8/=2(/4ZdzKD%K%%%M8@%%
   '%%%W%dGNtPK(c\/2\[Z(ttNYZ2NZW8W8K%%%%YKM%M%%.
     *%%W%GW5@/%!e]_tZdY()v)ZXMZW%W%%%*5Y]K%ZK%8[
      '*%%%%8%8WK\)[/ZmZ/Zi]!/M%%%%@f\ \Y/NNMK%%!
        'VM%%%%W%WN5Z/Gt5/b)((cV@f`  - |cZbMKW%%|
           'V*M%%%WZ/ZG\t5((+)L'-,,/  -)X(NWW%%
                `~`MZ/DZGNZG5(((\,    ,t\\Z)KW%@
                   'M8K%8GN8\5(5///]i!v\K)85W%%f
                     YWWKKKKWZ8G54X/GGMeK@WM8%@
                      !M8%8%48WG@KWYbW%WWW%%%@
                        VM%WKWK%8K%%8WWWW%%%@`
                          ~*%%%%%%W%%%%%%%@~
                             ~*MM%%%%%%@f`
    """
    print(icon)
    print(logo)
    if conf.DEBUG:
        print("\n\nDebug mode on!\n\n")
    # Flip the flag so subsequent calls in this process stay quiet.
    conf.DISPLAY_WELCOME = False
+
+
+"""
+Run experiment
+"""
+
+
def run_pipeline_here(
    variant,
    use_gpu=True,
    gpu_id=0,
    # Logger params:
    snapshot_mode="gap_and_last",
    snapshot_gap=100,
    git_infos=None,
    script_name=None,
    base_log_dir=None,
    force_randomize_seed=False,
    parallel=False,
    **setup_logger_kwargs,
):
    """
    Run an experiment locally without any serialization.
    This will add the 'log_dir' key to variant, and set variant['version'] to 'normal' if isn't already set.

    experiments. Note that one experiment may correspond to multiple seeds,.
    :param seed: Seed used for this experiment.
    :param use_gpu: Run with GPU. By default False.
    :param script_name: Name of the running script
    :param parallel: True when invoked from a worker process (suppresses the
        welcome banner, which the parent already printed).
    :return: result of Pipelines.run_pipeline on success, else None.
    """

    if not parallel:
        print_welcome()

    start = datetime.datetime.today()
    try:
        seed = variant.get("seed")
        algorithm = variant.get("algorithm")

        # Draw a fresh seed when forced or when the variant did not pin one.
        if force_randomize_seed or seed is None:
            seed = random.randint(0, 100000)
            variant["seed"] = seed
        logger.reset()

        actual_log_dir = setup_logger(
            algorithm=algorithm,
            variant=variant,
            seed=seed,
            snapshot_mode=snapshot_mode,
            snapshot_gap=snapshot_gap,
            base_log_dir=base_log_dir,
            git_infos=git_infos,
            script_name=script_name,
            parallel=parallel,
            env_id=variant["env_id"],
            **setup_logger_kwargs,
        )

        seed_everything(seed)
        set_gpu_mode(use_gpu, gpu_id)

        # Persist the launch arguments so the run can be reproduced later.
        run_experiment_here_kwargs = dict(
            variant=variant,
            seed=seed,
            use_gpu=use_gpu,
            algorithm=algorithm,
            snapshot_mode=snapshot_mode,
            snapshot_gap=snapshot_gap,
            git_infos=git_infos,
            parallel=parallel,
            script_name=script_name,
            base_log_dir=base_log_dir,
            **setup_logger_kwargs,
        )
        with open(actual_log_dir + "/experiment.pkl", "wb") as handle:
            pickle.dump(
                dict(run_experiment_here_kwargs=run_experiment_here_kwargs),
                handle,
                protocol=pickle.HIGHEST_PROTOCOL,
            )

        variant["log_dir"] = actual_log_dir

        result = Pipelines.run_pipeline(variant)
        # Bug fix: this success message used to sit after the except block,
        # so the early return above skipped it on success and it printed
        # after swallowed crashes instead.
        green("Successfully finished")
        return result

    except Exception as e:
        exception_name, _, __ = sys.exc_info()
        if (
            exception_name is not None
            and not issubclass(exception_name, KeyboardInterrupt)
            and not issubclass(exception_name, FileExistsError)
        ):
            red(  # this doesn't get called in when running as a spawned process...why?
                f'{variant.get("algorithm")} seed: {variant.get("seed")} env_id: {variant.get("env_id")} started at {start.strftime("%I:%M %p %a %b %y")}, has crashed'
            )
            print(traceback.format_exc(), flush=True)
            sys.stdout.flush()
            sys.stderr.flush()
            if conf.Wandb.is_on:
                if wandb.run is not None:
                    wandb.alert(
                        title="Experiment Crash",
                        text=f'{variant.get("algorithm")} started at {start.strftime("%I:%M %p %a %b %y")}, has crashed',
                        level="ERROR",
                    )
        if conf.DEBUG:
            raise e
+
+
+"""
+Multiprocessing
+"""
+
+
def run_hyperparameters(parallel_cls, variant, hyperparameters: dict):
    """Grid-search: launch every (seed, env, hyperparameter-combo) experiment."""
    print_welcome()

    if hyperparameters is None:
        raise Exception("No Hyperparameters given")

    all_experiment_combinations = []
    for kwarg_vals in list(itertools.product(*hyperparameters.values())):
        tag_parts = []
        trainer_kwargs = copy.deepcopy(variant["trainer_kwargs"])
        v = deepcopy(variant)
        # Apply this combination both at the top level and inside
        # trainer_kwargs, and build a short directory tag from it.
        for kw, val in zip(hyperparameters.keys(), kwarg_vals):
            tag_parts.append(f"{kw[0]}={val}")
            trainer_kwargs[kw] = val
            v[kw] = val
        v["trainer_kwargs"] = trainer_kwargs
        v["__gridsearch"] = "-".join(tag_parts)
        all_experiment_combinations += list(
            itertools.product(
                parallel_cls.seeds,
                parallel_cls.envs,
                (v,),
            )
        )

    pool_run(list(enumerate(all_experiment_combinations)))
+
+
def run_parallel_pipeline_here(parallel_cls, variant):
    """Launch one experiment per (seed, env) pair defined on parallel_cls."""
    print_welcome()
    combos = itertools.product(
        parallel_cls.seeds,
        parallel_cls.envs,
        (variant,),
    )
    pool_run(list(enumerate(combos)))
+
+
def pool_run(experiment_combinations):
    """Run experiments in a process pool, two workers per visible GPU.

    :param experiment_combinations: list of (index, (seed, env_id, variant)).
    """
    with torch.multiprocessing.Manager() as manager:
        # Shared dict mapping worker process id -> assigned GPU id.
        d = manager.dict()
        with NestablePool(torch.cuda.device_count() * 2) as p:
            # list(...) drains the iterator so tqdm renders progress.
            list(
                tqdm(
                    p.imap_unordered(
                        parallel_run_experiment_here_wrapper,
                        [(d, e) for e in experiment_combinations],
                    ),
                    total=len(experiment_combinations),
                )
            )
+
+
def parallel_run_experiment_here_wrapper(experiment_tuple):
    """A wrapper around run_experiment_here that uses just a single argument to work with multiprocessing pool map."""
    d, (i, (seed, env_id, variant)) = experiment_tuple

    cp = torch.multiprocessing.current_process().ident
    start = time.time()
    # Wait (up to 30s) for this worker's process ident to become available.
    while cp is None:
        cp = torch.multiprocessing.current_process().ident
        time.sleep(1)
        if time.time() - start > 30:
            raise Exception("Couldn't get current process id!")
        # time out after thirty seconds
    bold(f"Running env_id: {env_id}, seed: {seed} with process {cp}")
    if torch.cuda.is_available():
        # Assign each worker a GPU round-robin on first use, then stick to it.
        if d.get(cp) is None:
            gpu_id = int(i % torch.cuda.device_count())
            d[cp] = gpu_id
        else:
            gpu_id = d[cp]
    else:
        gpu_id = None
    # Deep-copy so per-worker mutations don't leak into other experiments.
    variant = deepcopy(variant)
    variant["seed"] = seed
    variant["env_id"] = env_id
    run_pipeline_here(
        variant=variant,
        gpu_id=gpu_id,
        parallel=True,
        snapshot_mode=variant["snapshot_mode"],
        snapshot_gap=variant["snapshot_gap"],
    )
+
+
+"""
+Logging
+"""
+
+
+def create_log_dir(
+    algorithm,
+    env_id,
+    variant,
+    version="normal",
+    seed=0,
+    parallel=False,
+    base_log_dir=None,
+):
+    """
+    Creates and returns a unique log directory.
+
+    The layout is base/<algorithm>/<version>/<env_id>[/<gridsearch-tag>]/<seed>,
+    where the gridsearch tag comes from variant["__gridsearch"] when present.
+
+    :param algorithm: All experiments with this prefix will have log
+        directories be under this directory.
+    :param env_id: Environment name, one path component of the log dir.
+    :param variant: Experiment config; only "__gridsearch" is read here.
+    :param version: Sub-directory between algorithm and env_id.
+    :param seed: Final path component.
+    :param parallel: If True, never prompt -- raise FileExistsError on clashes.
+    :param base_log_dir: The directory where all log should be saved.
+    :return: the created directory as a string.
+    """
+    if variant.get("__gridsearch"):
+        log_dir = (
+            Path(base_log_dir or conf.Log.basedir)
+            / algorithm
+            / version
+            / env_id
+            / (variant.get("__gridsearch")).replace(" ", "_")
+            / str(seed)
+        )
+    else:
+        log_dir = (
+            Path(base_log_dir or conf.Log.basedir)
+            / algorithm
+            / version
+            / env_id
+            / str(seed)
+        )
+
+    if osp.exists(log_dir):
+        red(f"This experiment already exists: {log_dir}")
+        if parallel:
+            print("Exiting")
+            raise FileExistsError
+            # NOTE(review): raising here has been observed to hang the pool
+            # worker -- TODO investigate
+
+        if conf.DEBUG or query_yes_no(
+            "Would you like to replace the existing directory?"
+        ):
+            bold("Replacing this directory...")
+            shutil.rmtree(log_dir)
+            os.makedirs(log_dir, exist_ok=True)
+            bold("Replaced")
+        else:
+            print("Not replacing, exiting now")
+            raise FileExistsError
+    else:
+        green(f"Running experiment in: {log_dir}")
+        os.makedirs(log_dir, exist_ok=False)
+    return str(log_dir)
+
+
+def setup_logger(
+    algorithm="default",
+    env_id=None,
+    variant=None,
+    text_log_file="debug.log",
+    variant_log_file="variant.json",
+    tabular_log_file="progress.csv",
+    snapshot_mode="last",
+    snapshot_gap=1,
+    log_tabular_only=False,
+    git_infos=None,
+    script_name=None,
+    wandb_entity=conf.Wandb.entity,
+    wandb_project=conf.Wandb.project,
+    parallel=False,
+    **create_log_dir_kwargs,
+):
+    """
+    Set up logger to have some reasonable default settings.
+
+    Will save log output to
+
+        basedir/<algorithm>/<version>/<env_id>/<seed>
+
+    (see create_log_dir; a gridsearch tag is inserted before the seed when
+    variant["__gridsearch"] is set).
+
+    :param algorithm: The sub-directory for this specific experiment.
+    :param env_id: Environment name; used in the log path and wandb group.
+    :param variant: Experiment config dict; serialized to variant_log_file.
+    :param text_log_file: File name for plain-text log output.
+    :param variant_log_file: File name for the JSON-serialized variant.
+    :param tabular_log_file: File name for CSV progress output.
+    :param snapshot_mode: Forwarded to logger.set_snapshot_mode.
+    :param snapshot_gap: Forwarded to logger.set_snapshot_gap.
+    :param log_tabular_only: Forwarded to logger.set_log_tabular_only.
+    :param git_infos: NOTE(review): not used in this function body -- confirm
+        whether any caller still relies on it.
+    :param script_name: If set, save the script name to this.
+    :param parallel: If True, redirect this process's stdout/stderr into files
+        inside the log dir (handles stay open for the process lifetime).
+    :return: the log directory path.
+    """
+    if variant.get("version") is None:
+        variant["version"] = "normal"
+
+    log_dir = create_log_dir(
+        algorithm,
+        env_id,
+        variant,
+        version=variant["version"],
+        parallel=parallel,
+        **create_log_dir_kwargs,
+    )
+    if parallel:
+        sys.stdout = open(osp.join(log_dir, "stdout.out"), "a")
+        sys.stderr = open(osp.join(log_dir, "stderr.out"), "a")
+
+    if conf.Wandb.is_on:
+        wandb_group = f"{algorithm}-{variant['version']}-{env_id}"
+        if variant.get("__gridsearch"):
+            wandb_name = f"seed-{variant['seed']}-hp-{variant.get('__gridsearch')}"
+        else:
+            wandb_name = f"seed-{variant['seed']}"
+
+        wandb.init(
+            project=wandb_project,
+            entity=wandb_entity,
+            group=wandb_group,
+            name=wandb_name,
+            config=variant,
+            reinit=True,
+        )
+        wandb.run.log_code(os.path.join(conf.Log.repo_dir, "src"))
+
+    if variant is not None:
+        variant_log_path = osp.join(log_dir, variant_log_file)
+        logger.log_variant(variant_log_path, variant)
+
+    tabular_log_path = osp.join(log_dir, tabular_log_file)
+    text_log_path = osp.join(log_dir, text_log_file)
+
+    logger.add_text_output(text_log_path)
+    logger.add_tabular_output(tabular_log_path)
+    logger.set_snapshot_dir(log_dir)
+    logger.set_snapshot_mode(snapshot_mode)
+    logger.set_snapshot_gap(snapshot_gap)
+    logger.set_log_tabular_only(log_tabular_only)
+    exp_name = log_dir.split("/")[-1]
+    logger.push_prefix("[%s] " % exp_name)
+    bold("Backing up folder...")
+    generate_snapshot(conf.Log.repo_dir, log_dir, exclude=['checkpoints'])
+    bold("Backed up!")
+    if script_name is not None:
+        with open(osp.join(log_dir, "script_name.txt"), "w") as f:
+            f.write(script_name)
+    return log_dir
diff --git a/cfpi/launchers/pipeline.py b/cfpi/launchers/pipeline.py
new file mode 100644
index 0000000..86042e2
--- /dev/null
+++ b/cfpi/launchers/pipeline.py
@@ -0,0 +1,144 @@
+from typing import Callable, List
+
+import copy
+import inspect
+
+from cfpi import conf
+from cfpi.pytorch.torch_rl_algorithm import Trainer
+
+
+class PipelineCtx:
+    """Mutable bag of state threaded through every pipeline step.
+
+    NOTE(review): any attribute passed via **kwargs whose name collides with a
+    default below (e.g. variant, trainer_cls) is immediately overwritten to
+    None -- the runner assigns variant/trainer_cls after construction instead.
+    """
+
+    def __init__(self, **kwargs) -> None:
+        for kw in kwargs:
+            setattr(self, kw, kwargs[kw])
+        # These ones cannot be modified
+        self.variant = None
+        self.trainer_cls = None
+        # feel free to add more
+        self.eval_env = None
+        self.dataset = None
+        self.qfs = []
+        self.target_qfs = []
+        self.policy = None
+        self.obs_mean = None
+        self.obs_std = None
+        self.trainer = None
+        self.eval_policy = None
+        self.eval_path_collector = None
+        self.replay_buffer = None
+        self.algorithm = None
+
+
+class Pipeline:
+    """A named, ordered list of step callables, each taking a PipelineCtx."""
+
+    def __init__(self, name, pipeline) -> None:
+        self.name: str = name
+
+        self.pipeline: List[Callable] = pipeline
+
+    @classmethod
+    def from_(cls, previous_pipeline, name):
+        """Create a new Pipeline named `name` with a deep copy of another pipeline's steps."""
+        return cls(name, copy.deepcopy(previous_pipeline.pipeline))
+
+    def delete(self, func_name):
+        """Remove the first step whose __name__ is `func_name`; warn if absent."""
+        found = None
+        for i, f in enumerate(self.pipeline):
+            if f.__name__ == func_name:
+                found = i
+                break
+        if found is None:
+            # bug fix: message previously said "replace" for the delete operation
+            print(f"Failed to delete {func_name} in {self.name}")
+        else:
+            del self.pipeline[found]
+
+    def replace(self, func_name, new_func):
+        """Swap the first step whose __name__ is `func_name` for `new_func`; warn if absent."""
+        found = False
+        for i, f in enumerate(self.pipeline):
+            if f.__name__ == func_name:
+                found = True
+                self.pipeline[i] = new_func
+                break
+
+        if not found:
+            print(f"Failed to replace {func_name} in {self.name}")
+
+    @property
+    def composition(self):
+        """Source code of every step, concatenated -- useful for debugging."""
+        return "\n\n".join([inspect.getsource(f) for f in self.pipeline])
+
+    def __getitem__(self, index):
+        return self.pipeline[index]
+
+    def __str__(self) -> str:
+        # bug fix: the original f-string was f":\n" with no placeholder,
+        # silently dropping the pipeline's name from its string form
+        return f"{self.name}:\n" + ",\n".join(
+            [f.__name__ for f in self.pipeline]
+        )
+
+    @property
+    def __name__(self):
+        return str(self)
+
+
+from .pipeline_pieces import (
+ create_algorithm,
+ create_dataset_next_actions,
+ create_eval_env,
+ create_eval_path_collector,
+ create_pac_eval_policy,
+ create_replay_buffer,
+ create_trainer,
+ load_checkpoint_iqn_q,
+ load_checkpoint_policy,
+ load_demos,
+ offline_init,
+ optionally_normalize_dataset,
+ pac_sanity_check,
+ train,
+)
+
+
+class Pipelines:
+    """Namespace holding canonical pipelines plus the runner that executes them."""
+
+    @staticmethod
+    def run_pipeline(variant, ctx: PipelineCtx = None, silent=True):
+        """Execute variant["pipeline"] step by step against `ctx`.
+
+        When `ctx` is None a fresh PipelineCtx is built, which additionally
+        requires variant["trainer_cls"].
+        """
+        try:
+            pipeline: Pipeline = variant["pipeline"]
+        except KeyError:
+            raise Exception("Please add a pipeline to your variant!")
+
+        if ctx is None:
+            try:
+                trainer_cls: Trainer = variant["trainer_cls"]
+            except KeyError:
+                # bug fix: message was "Please add a to your variant!" --
+                # the missing key name made the error unactionable
+                raise Exception("Please add a trainer_cls to your variant!")
+
+            ctx = PipelineCtx()
+            ctx.variant = variant
+            ctx.trainer_cls = trainer_cls
+
+        # print variant and pipeline
+        if not silent:
+            print(pipeline)
+            if conf.DEBUG:
+                print(pipeline.composition)
+
+        for f in pipeline:
+            f(ctx)
+
+    offline_zerostep_pac_pipeline = Pipeline(  # Don't need any training
+        "ZeroStepPacExperiment",
+        [
+            pac_sanity_check,
+            offline_init,
+            create_eval_env,
+            create_dataset_next_actions,
+            optionally_normalize_dataset,
+            load_checkpoint_iqn_q,
+            load_checkpoint_policy,
+            create_trainer,
+            create_pac_eval_policy,
+            create_eval_path_collector,
+            create_replay_buffer,
+            load_demos,
+            create_algorithm,
+            train,
+        ],
+    )
diff --git a/cfpi/launchers/pipeline_pieces.py b/cfpi/launchers/pipeline_pieces.py
new file mode 100644
index 0000000..67616d0
--- /dev/null
+++ b/cfpi/launchers/pipeline_pieces.py
@@ -0,0 +1,319 @@
+import copy
+import inspect
+import os.path as osp
+
+import eztils.torch as ptu
+import torch
+
+from cfpi import conf
+from cfpi.core.logging import logger
+from cfpi.data_management.env_replay_buffer import (
+ EnvReplayBuffer,
+ EnvReplayBufferNextAction,
+ EnvReplayBufferNextActionNewAction,
+)
+from cfpi.data_management.hdf5_path_loader import (
+ d4rl_qlearning_dataset_with_next_actions,
+ load_hdf5,
+ load_hdf5_next_actions,
+ load_hdf5_next_actions_new_actions,
+)
+from cfpi.envs import NormalizedBoxEnv, env_producer
+from cfpi.launchers.pipeline import PipelineCtx
+from cfpi.policies.gaussian_policy import GaussianPolicy, UnnormalizeGaussianPolicy
+from cfpi.policies.stochastic import MakeCfpiDeterministic, MakeDeterministic
+from cfpi.pytorch.networks.mlp import ConcatMlp, QuantileMlp
+from cfpi.pytorch.torch_rl_algorithm import OfflineTorchBatchRLAlgorithm
+from cfpi.samplers.path_collector import MdpPathCollector
+
+
+def offline_init(_):
+    """Switch the global logger into offline-RL mode (ctx argument unused; kept for the step signature)."""
+    logger.set_offline_rl()
+
+
+def pac_sanity_check(ctx):
+    """Validate that the variant is a supported PAC configuration before any work is done.
+
+    Requires a d4rl env, a checkpoint_params name declared on conf.CheckpointParams
+    (other than the plain "Q"), and a seed/env combination for which both the
+    chosen checkpoint and the IQN Q checkpoints exist.
+    """
+    assert ctx.variant["d4rl"]
+    # assert ctx.variant["algorithm_kwargs"]["zero_step"]
+    # assert ctx.variant["normalize_env"]
+    # assert ctx.variant["IQN"]
+    assert (
+        ctx.variant["checkpoint_params"]
+        in user_defined_attrs_dict(conf.CheckpointParams).keys()
+    )
+    assert ctx.variant["checkpoint_params"] != "Q"
+
+    params: conf.CheckpointParams.CheckpointParam = getattr(conf.CheckpointParams, ctx.variant["checkpoint_params"])()
+    assert ctx.variant["seed"] in params.seeds
+    assert ctx.variant["env_id"] in params.envs, ctx.variant["env_id"]
+    assert ctx.variant["seed"] in conf.CheckpointParams.Q_IQN().seeds
+    assert ctx.variant["env_id"] in conf.CheckpointParams.Q_IQN().envs
+
+
+def create_eval_env(ctx: PipelineCtx):
+    """Construct the evaluation environment (unnormalized; normalization is a separate step)."""
+    d4rl = ctx.variant["d4rl"]
+
+    ctx.eval_env = env_producer(
+        env_id=ctx.variant["env_id"],
+        d4rl=d4rl,
+        seed=ctx.variant["seed"],
+        normalize_env=False,
+    )
+
+
+def create_dataset(ctx: PipelineCtx):
+    """Load the raw offline dataset straight from the (d4rl) eval env."""
+    ctx.dataset = ctx.eval_env.get_dataset()
+
+
+def create_dataset_next_actions(ctx: PipelineCtx):
+    """Load a q-learning dataset augmented with next_actions; shift antmaze rewards by -1."""
+    ctx.dataset = d4rl_qlearning_dataset_with_next_actions(ctx.eval_env)
+    if "antmaze" in ctx.variant["env_id"]:
+        ctx.dataset["rewards"] -= 1
+
+
+def optionally_normalize_dataset(ctx: PipelineCtx):
+    """Record observation statistics and, if variant["normalize_env"], normalize
+    the env, observations, and all action fields in place.
+
+    Observations are standardized; actions are rescaled from the env's action
+    box to roughly [-1, 1].
+    """
+    # Stats are always recorded, even when normalization itself is skipped.
+    ctx.obs_mean = ctx.dataset["observations"].mean(0)
+    ctx.obs_std = ctx.dataset["observations"].std(0)
+
+    if not ctx.variant["normalize_env"]:
+        return
+
+    # NOTE(review): mean/std are recomputed here instead of reusing
+    # ctx.obs_mean/ctx.obs_std -- same values, just redundant work.
+    ctx.eval_env = NormalizedBoxEnv(
+        ctx.eval_env,
+        obs_mean=ctx.dataset["observations"].mean(0),
+        obs_std=ctx.dataset["observations"].std(0),
+    )
+
+    ctx.dataset["observations"] = (ctx.dataset["observations"] - ctx.obs_mean) / (
+        ctx.obs_std + 1e-8
+    )
+    ctx.dataset["next_observations"] = (
+        ctx.dataset["next_observations"] - ctx.obs_mean
+    ) / (ctx.obs_std + 1e-8)
+    action_space = ctx.eval_env._wrapped_env.action_space
+
+    rg = (action_space.high - action_space.low) / 2
+    center = (action_space.high + action_space.low) / 2
+    ctx.dataset["actions"] = (ctx.dataset["actions"] - center) / rg
+
+    if "next_actions" in ctx.dataset:
+        ctx.dataset["next_actions"] = (ctx.dataset["next_actions"] - center) / rg
+    if "new_next_actions" in ctx.dataset:
+        ctx.dataset["new_next_actions"] = (
+            ctx.dataset["new_next_actions"] - center
+        ) / rg
+
+
+def create_q(ctx: PipelineCtx):
+    """Build twin Q-networks and their targets from variant["qf_class"]/["qf_kwargs"]."""
+    obs_dim = ctx.eval_env.observation_space.low.size
+    action_dim = ctx.eval_env.action_space.low.size
+
+    qf1 = ctx.variant["qf_class"](
+        input_size=obs_dim + action_dim, output_size=1, **ctx.variant["qf_kwargs"]
+    )
+    qf2 = ctx.variant["qf_class"](
+        input_size=obs_dim + action_dim, output_size=1, **ctx.variant["qf_kwargs"]
+    )
+
+    target_qf1 = ctx.variant["qf_class"](
+        input_size=obs_dim + action_dim, output_size=1, **ctx.variant["qf_kwargs"]
+    )
+    target_qf2 = ctx.variant["qf_class"](
+        input_size=obs_dim + action_dim, output_size=1, **ctx.variant["qf_kwargs"]
+    )
+
+    ctx.qfs = [qf1, qf2]
+    ctx.target_qfs = [target_qf1, target_qf2]
+
+
+def create_policy(ctx: PipelineCtx):
+    """Build the policy network from variant["policy_class"]/["policy_kwargs"]."""
+    obs_dim = ctx.eval_env.observation_space.low.size
+    action_dim = ctx.eval_env.action_space.low.size
+
+    ctx.policy = ctx.variant["policy_class"](
+        obs_dim=obs_dim, action_dim=action_dim, **ctx.variant["policy_kwargs"]
+    )
+
+
+def create_trainer(ctx: PipelineCtx):
+    """Instantiate ctx.trainer_cls, wiring each constructor arg from a matching
+    ctx attribute first and falling back to variant["trainer_kwargs"]."""
+    arg_names = inspect.getfullargspec(ctx.trainer_cls.__init__).args
+    arg_names.remove("self")
+
+    passed_args = {}
+    for arg in arg_names:
+        try:
+            passed_args[arg] = getattr(ctx, arg)
+        except AttributeError:
+            # not on ctx -- take it from trainer_kwargs if provided (None means
+            # "let the constructor default apply")
+            if ctx.variant["trainer_kwargs"].get(arg) is not None:
+                passed_args[arg] = ctx.variant["trainer_kwargs"][arg]
+    ctx.trainer = ctx.trainer_cls(**passed_args)
+
+
+def create_eval_policy(ctx: PipelineCtx):
+    """Evaluation policy = deterministic (MLE) version of the learned policy."""
+    ctx.eval_policy = MakeDeterministic(ctx.policy)
+
+
+def create_pac_eval_policy(ctx: PipelineCtx):
+    """Evaluation policy that queries the trainer's CFPI action directly."""
+    ctx.eval_policy = MakeCfpiDeterministic(ctx.trainer)
+
+
+def create_eval_path_collector(ctx: PipelineCtx):
+    """Path collector over the eval env using variant["rollout_fn"]."""
+    ctx.eval_path_collector = MdpPathCollector(
+        ctx.eval_env,
+        ctx.eval_policy,
+        rollout_fn=ctx.variant["rollout_fn"],
+    )
+
+
+def create_replay_buffer(ctx: PipelineCtx):
+    """Replay buffer of variant["replay_buffer_class"] sized by variant["replay_buffer_size"]."""
+    ctx.replay_buffer = ctx.variant["replay_buffer_class"](
+        ctx.variant["replay_buffer_size"],
+        ctx.eval_env,
+    )
+
+
+def create_algorithm(ctx: PipelineCtx):
+    """Assemble the offline batch RL algorithm from the pieces built by earlier steps."""
+    ctx.algorithm = OfflineTorchBatchRLAlgorithm(
+        trainer=ctx.trainer,
+        evaluation_env=ctx.eval_env,
+        evaluation_data_collector=ctx.eval_path_collector,
+        replay_buffer=ctx.replay_buffer,
+        **ctx.variant["algorithm_kwargs"],
+    )
+
+
+def load_demos(ctx: PipelineCtx):
+    """Fill the replay buffer from ctx.dataset, dispatching on the buffer type.
+
+    The post-load asserts verify that stored next_actions line up with actions
+    shifted by one step.
+    """
+    if isinstance(ctx.replay_buffer, EnvReplayBufferNextActionNewAction):
+        load_hdf5_next_actions_new_actions(ctx.dataset, ctx.replay_buffer)
+        assert torch.equal(
+            ctx.replay_buffer._next_actions[: ctx.replay_buffer._size - 1],
+            ctx.replay_buffer._actions[1 : ctx.replay_buffer._size],
+        )
+        assert torch.equal(
+            ctx.replay_buffer._new_next_actions[: ctx.replay_buffer._size - 1],
+            ctx.replay_buffer._actions[1 : ctx.replay_buffer._size],
+        )
+    elif isinstance(ctx.replay_buffer, EnvReplayBufferNextAction):
+        load_hdf5_next_actions(ctx.dataset, ctx.replay_buffer)
+        assert torch.equal(
+            ctx.replay_buffer._next_actions[: ctx.replay_buffer._size - 1],
+            ctx.replay_buffer._actions[1 : ctx.replay_buffer._size],
+        )
+
+    elif isinstance(ctx.replay_buffer, EnvReplayBuffer):
+        # Off policy
+        load_hdf5(ctx.dataset, ctx.replay_buffer)
+    else:
+        raise Exception("Unsupported replay buffer class", type(ctx.replay_buffer))
+
+
+def user_defined_attrs_dict(cls):
+    """Return {name: value} for every attribute of `cls` whose name does not start with '__'."""
+    return {k: v for k, v in cls.__dict__.items() if not k.startswith("__")}
+
+
+def load_checkpoint_iqn_q(ctx: PipelineCtx):
+    """Load pretrained IQN quantile Q-networks (and targets) for this env/seed
+    from the epoch that validated best, straight into ctx.qfs / ctx.target_qfs."""
+    q_params = conf.CheckpointParams.Q_IQN()
+
+    q_epoch = q_params.validation_optimal_epochs[ctx.variant["env_id"]]
+    params = torch.load(
+        osp.join(
+            conf.CHECKPOINT_PATH,
+            q_params.path,
+            ctx.variant["env_id"],
+            str(ctx.variant["seed"]),
+            f"itr_{q_epoch}.pt",
+        ),
+        map_location=ptu.DEVICE,
+    )
+    assert isinstance(params["trainer/qf1"], QuantileMlp)
+    assert isinstance(params["trainer/qf2"], QuantileMlp)
+
+    ctx.qfs = [params["trainer/qf1"], params["trainer/qf2"]]
+    ctx.target_qfs = [params["trainer/target_qf1"], params["trainer/target_qf2"]]
+
+
+def load_checkpoint_iql_q(ctx: PipelineCtx):
+    """Load IQL Q-network weights into ctx.qfs, then wrap each so it accepts
+    raw (unnormalized) observations."""
+
+    class UnnormalizeIQL(ConcatMlp):
+        # Wrapper that un-standardizes observations before the wrapped MLP runs.
+        def __init__(self, state_mean, state_std, mlp: ConcatMlp):
+            # Steals the trained mlp's state wholesale; NOTE(review): no
+            # super().__init__() call -- relies on the copied __dict__ carrying
+            # all nn.Module internals. Confirm this is intentional.
+            self.__dict__ = copy.deepcopy(mlp.__dict__)
+            self.state_mean = ptu.torch_ify(state_mean)
+            self.state_std = ptu.torch_ify(state_std)
+
+        def forward(self, obs, act, **kwargs):
+            unnormalized_obs = self.unnormalize(obs)
+            return super().forward(unnormalized_obs, act, **kwargs)
+
+        def unnormalize(self, obs):
+            return (obs * self.state_std) + self.state_mean
+
+    q_params = conf.CheckpointParams.Q_IQL()
+
+    params = torch.load(
+        osp.join(
+            conf.CHECKPOINT_PATH,
+            q_params.path,
+            ctx.variant["env_id"],
+            str(ctx.variant["seed"]),
+            "params.pt",
+        ),
+        map_location=ptu.DEVICE,
+    )
+
+    ctx.qfs[0].load_state_dict(params["trainer/qf1"])
+    ctx.qfs[1].load_state_dict(params["trainer/qf2"])
+
+    ctx.qfs[0] = UnnormalizeIQL(ctx.obs_mean, ctx.obs_std, ctx.qfs[0])
+    ctx.qfs[1] = UnnormalizeIQL(ctx.obs_mean, ctx.obs_std, ctx.qfs[1])
+
+
+def load_checkpoint_iql_policy(ctx: PipelineCtx):
+    """Load the IQL-trained Gaussian policy for this env/seed and wrap it to
+    accept raw (unnormalized) observations."""
+    iql_params = conf.CheckpointParams.Q_IQL()
+
+    params = torch.load(
+        osp.join(
+            conf.CHECKPOINT_PATH,
+            iql_params.path,
+            ctx.variant["env_id"],
+            str(ctx.variant["seed"]),
+            "params.pt",
+        ),
+        map_location=ptu.DEVICE,
+    )
+
+    obs_dim = ctx.eval_env.observation_space.low.size
+    action_dim = ctx.eval_env.action_space.low.size
+
+    # Architecture must match the checkpoint exactly for load_state_dict to work.
+    ctx.policy = GaussianPolicy(
+        [256, 256],
+        obs_dim,
+        action_dim,
+        std_architecture="values",
+        max_log_std=0,
+        min_log_std=-6,
+    )
+    ctx.policy.load_state_dict(params["trainer/policy"])
+    ctx.policy = UnnormalizeGaussianPolicy(ctx.obs_mean, ctx.obs_std, ctx.policy)
+
+
+def load_checkpoint_policy(ctx: PipelineCtx):
+    """Load the pretrained behavior policy selected by variant["checkpoint_params"]."""
+    params = getattr(conf.CheckpointParams, ctx.variant["checkpoint_params"])()
+
+    policy_path = ""  # NOTE(review): dead initialization; both branches below assign it
+    base = osp.join(
+        conf.CHECKPOINT_PATH,
+        params.path,
+        ctx.variant["env_id"],
+        str(ctx.variant["seed"]),
+    )
+    # policy_path = osp.join(base, "itr_1000.pt")
+
+    if osp.exists(osp.join(base, "itr_500.pt")):
+        policy_path = osp.join(base, "itr_500.pt")
+    else:
+        # NOTE(review): the "itr_-500.pt" fallback file name looks suspicious --
+        # confirm it matches what the checkpointing code actually writes
+        policy_path = osp.join(base, "itr_-500.pt")
+
+    ctx.policy = torch.load(policy_path, map_location="cpu")[params.key]
+
+
+def train(ctx: PipelineCtx):
+    """Final step: move the assembled algorithm to the configured device and run it."""
+    ctx.algorithm.to(ptu.DEVICE)
+    ctx.algorithm.train()
diff --git a/cfpi/policies/__init__.py b/cfpi/policies/__init__.py
new file mode 100644
index 0000000..3ac6d2f
--- /dev/null
+++ b/cfpi/policies/__init__.py
@@ -0,0 +1,35 @@
+import abc
+
+
+class Policy(metaclass=abc.ABCMeta):
+    """
+    General policy interface.
+    """
+
+    @abc.abstractmethod
+    def get_action(self, observation):
+        """
+        Compute a single action for one observation.
+
+        :param observation: a single (un-batched) observation
+        :return: action, debug_dictionary
+        """
+
+    def reset(self):
+        # No-op by default; stateful policies override to clear episode state.
+        pass
+
+
+from cfpi.policies.gaussian_policy import (
+ GaussianPolicy,
+ TanhGaussianMixturePolicy,
+ TanhGaussianPolicy,
+)
+from cfpi.policies.stochastic import MakeDeterministic, TorchStochasticPolicy
+
+__all__ = [
+ "Policy",
+ "TorchStochasticPolicy",
+ "MakeDeterministic",
+ "TanhGaussianPolicy",
+ "GaussianPolicy",
+ "TanhGaussianMixturePolicy",
+]
diff --git a/cfpi/policies/gaussian_policy.py b/cfpi/policies/gaussian_policy.py
new file mode 100644
index 0000000..38ba97a
--- /dev/null
+++ b/cfpi/policies/gaussian_policy.py
@@ -0,0 +1,403 @@
+import copy
+
+import eztils.torch as ptu
+import numpy as np
+import torch
+import torch.nn.functional as F
+from eztils.torch import (
+ GaussianMixture,
+ MultivariateDiagonalNormal,
+ TanhGaussianMixture,
+ TanhNormal,
+)
+from torch import nn as nn
+
+from cfpi.policies.stochastic import TorchStochasticPolicy
+from cfpi.pytorch.networks import Mlp
+
+# Clamp bounds applied to predicted log-std and (in log_prob) pre-squash means.
+LOG_SIG_MAX = 2
+LOG_SIG_MIN = -5  # this used to be 20
+MEAN_MIN = -9.0
+MEAN_MAX = 9.0
+
+
+# noinspection PyMethodOverriding
+class TanhGaussianPolicy(Mlp, TorchStochasticPolicy):
+ """
+ Usage:
+
+ ```
+ policy = TanhGaussianPolicy(...)
+ """
+
+ def __init__(
+ self, hidden_sizes, obs_dim, action_dim, std=None, init_w=1e-3, **kwargs
+ ):
+ super().__init__(
+ hidden_sizes,
+ input_size=obs_dim,
+ output_size=action_dim,
+ init_w=init_w,
+ **kwargs,
+ )
+ self.log_std = None
+ self.std = std
+ if std is None:
+ last_hidden_size = obs_dim
+ if len(hidden_sizes) > 0:
+ last_hidden_size = hidden_sizes[-1]
+ self.last_fc_log_std = nn.Linear(last_hidden_size, action_dim)
+ self.last_fc_log_std.weight.data.uniform_(-init_w, init_w)
+ self.last_fc_log_std.bias.data.uniform_(-init_w, init_w)
+ else:
+ self.log_std = np.log(std)
+ assert LOG_SIG_MIN <= self.log_std <= LOG_SIG_MAX
+
+ def forward(self, obs):
+ h = obs
+ for i, fc in enumerate(self.fcs):
+ h = self.hidden_activation(fc(h))
+ mean = self.last_fc(h)
+ if self.std is None:
+ log_std = self.last_fc_log_std(h)
+ log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)
+ std = torch.exp(log_std)
+ else:
+ std = (
+ torch.from_numpy(
+ np.array(
+ [
+ self.std,
+ ]
+ )
+ )
+ .float()
+ .to(ptu.device)
+ )
+
+ return TanhNormal(mean, std)
+
+ def log_prob(self, obs, actions):
+ raw_actions = ptu.atanh(actions)
+ h = obs
+ for i, fc in enumerate(self.fcs):
+ h = self.hidden_activation(fc(h))
+ mean = self.last_fc(h)
+ mean = torch.clamp(mean, MEAN_MIN, MEAN_MAX)
+ if self.std is None:
+ log_std = self.last_fc_log_std(h)
+ log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)
+ std = torch.exp(log_std)
+ else:
+ std = self.std
+ log_std = self.log_std
+
+ tanh_normal = TanhNormal(mean, std)
+ log_prob = tanh_normal.log_prob(value=actions, pre_tanh_value=raw_actions)
+ return log_prob
+
+
+class GaussianPolicy(Mlp, TorchStochasticPolicy):
+    """MLP policy producing a diagonal Gaussian with tanh-bounded mean.
+
+    The log-std is either fixed (`std`), a learned per-state head
+    (std_architecture="shared"), or free learned parameters shared across
+    states (std_architecture="values"); in the learned cases a sigmoid maps
+    the raw output into [min_log_std, max_log_std].
+    """
+
+    def __init__(
+        self,
+        hidden_sizes,
+        obs_dim,
+        action_dim,
+        std=None,
+        init_w=1e-3,
+        min_log_std=-6,
+        max_log_std=0,
+        std_architecture="shared",
+        **kwargs,
+    ):
+        super().__init__(
+            hidden_sizes,
+            input_size=obs_dim,
+            output_size=action_dim,
+            init_w=init_w,
+            output_activation=torch.tanh,
+            **kwargs,
+        )
+        self.min_log_std = min_log_std
+        self.max_log_std = max_log_std
+        self.log_std = None
+        self.std = std
+        self.std_architecture = std_architecture
+        if std is None:
+            if self.std_architecture == "shared":
+                # state-dependent log-std head
+                last_hidden_size = obs_dim
+                if len(hidden_sizes) > 0:
+                    last_hidden_size = hidden_sizes[-1]
+                self.last_fc_log_std = nn.Linear(last_hidden_size, action_dim)
+                self.last_fc_log_std.weight.data.uniform_(-init_w, init_w)
+                self.last_fc_log_std.bias.data.uniform_(-init_w, init_w)
+            elif self.std_architecture == "values":
+                # state-independent learned log-std logits
+                self.log_std_logits = nn.Parameter(
+                    ptu.zeros(action_dim, requires_grad=True)
+                )
+            else:
+                raise ValueError(self.std_architecture)
+        else:
+            self.log_std = np.log(std)
+            assert LOG_SIG_MIN <= self.log_std <= LOG_SIG_MAX
+
+    def forward(self, obs):
+        """Return a MultivariateDiagonalNormal(mean, std) conditioned on obs."""
+        h = obs
+        for i, fc in enumerate(self.fcs):
+            h = self.hidden_activation(fc(h))
+        preactivation = self.last_fc(h)
+        mean = self.output_activation(preactivation)
+        if self.std is None:
+            if self.std_architecture == "shared":
+                log_std = torch.sigmoid(self.last_fc_log_std(h))
+            elif self.std_architecture == "values":
+                log_std = torch.sigmoid(self.log_std_logits)
+            else:
+                raise ValueError(self.std_architecture)
+            # map sigmoid output into [min_log_std, max_log_std]
+            log_std = self.min_log_std + log_std * (self.max_log_std - self.min_log_std)
+            std = torch.exp(log_std)
+        else:
+            std = (
+                torch.from_numpy(
+                    np.array(
+                        [
+                            self.std,
+                        ]
+                    )
+                )
+                .float()
+                .to(ptu.device)
+            )
+
+        return MultivariateDiagonalNormal(mean, std)
+
+
+class TanhGaussianMixturePolicy(Mlp, TorchStochasticPolicy):
+    """MLP policy producing a tanh-squashed mixture of `num_gaussians` Gaussians.
+
+    The trunk outputs action_dim * num_gaussians means; separate heads produce
+    the per-component log-stds and the mixture weights (softmax with a
+    temperature, computed on a detached trunk so weight gradients do not flow
+    into the trunk).
+    """
+
+    def __init__(
+        self,
+        hidden_sizes,
+        obs_dim,
+        action_dim,
+        weight_temperature=1.0,
+        init_w=1e-3,
+        num_gaussians=2,
+        **kwargs,
+    ):
+        super().__init__(
+            hidden_sizes,
+            input_size=obs_dim,
+            output_size=action_dim * num_gaussians,
+            init_w=init_w,
+            **kwargs,
+        )
+        self.action_dim = action_dim
+        self.num_gaussians = num_gaussians
+        last_hidden_size = obs_dim
+        if len(hidden_sizes) > 0:
+            last_hidden_size = hidden_sizes[-1]
+
+        self.last_fc_log_std = nn.Linear(last_hidden_size, action_dim * num_gaussians)
+        self.last_fc_log_std.weight.data.uniform_(-init_w, init_w)
+        self.last_fc_log_std.bias.data.uniform_(-init_w, init_w)
+
+        self.last_fc_weights = nn.Linear(last_hidden_size, num_gaussians)
+        self.last_fc_weights.weight.data.uniform_(-init_w, init_w)
+        self.last_fc_weights.bias.data.uniform_(-init_w, init_w)
+        self.weight_temperature = weight_temperature
+
+    def log_prob(self, obs, actions):
+        """Log-density of `actions` (post-tanh) under the mixture at `obs`."""
+        raw_actions = ptu.atanh(actions)
+        h = obs
+        for i, fc in enumerate(self.fcs):
+            h = self.hidden_activation(fc(h))
+        mean = self.last_fc(h)
+        mean = torch.clamp(mean, MEAN_MIN, MEAN_MAX)
+
+        log_std = self.last_fc_log_std(h)
+        log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)
+        std = torch.exp(log_std)
+
+        # detached h: mixture weights don't backprop through the trunk
+        weights = F.softmax(
+            self.last_fc_weights(h.detach()) / self.weight_temperature,
+            dim=1,
+        )
+
+        mixture_weights = weights.reshape((-1, self.num_gaussians))
+        mixture_means = mean.reshape(
+            (
+                -1,
+                self.num_gaussians,
+                self.action_dim,
+            )
+        )
+        mixture_stds = std.reshape(
+            (
+                -1,
+                self.num_gaussians,
+                self.action_dim,
+            )
+        )
+
+        tanh_gmm = TanhGaussianMixture(mixture_means, mixture_stds, mixture_weights)
+        log_prob = tanh_gmm.log_prob(value=actions, pre_tanh_value=raw_actions)
+        return log_prob
+
+    def forward(self, obs):
+        """Return a TanhGaussianMixture conditioned on obs."""
+        h = obs
+        for i, fc in enumerate(self.fcs):
+            h = self.hidden_activation(fc(h))
+        mean = self.last_fc(h)
+        log_std = self.last_fc_log_std(h)
+        log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)
+        std = torch.exp(log_std)
+
+        weights = F.softmax(
+            self.last_fc_weights(h.detach()) / self.weight_temperature,
+            dim=1,
+        )
+
+        mixture_weights = weights.reshape((-1, self.num_gaussians))
+        mixture_means = mean.reshape(
+            (
+                -1,
+                self.num_gaussians,
+                self.action_dim,
+            )
+        )
+        mixture_stds = std.reshape(
+            (
+                -1,
+                self.num_gaussians,
+                self.action_dim,
+            )
+        )
+        return TanhGaussianMixture(mixture_means, mixture_stds, mixture_weights)
+
+
+class GaussianMixturePolicy(TanhGaussianMixturePolicy):
+    """Like TanhGaussianMixturePolicy, but the mixture itself is NOT squashed:
+    instead `output_activation` (default tanh) is applied to component means
+    and a plain GaussianMixture is returned."""
+
+    def __init__(
+        self,
+        hidden_sizes,
+        obs_dim,
+        action_dim,
+        weight_temperature=1,
+        init_w=0.001,
+        num_gaussians=2,
+        output_activation=torch.tanh,
+        **kwargs,
+    ):
+        super().__init__(
+            hidden_sizes,
+            obs_dim,
+            action_dim,
+            weight_temperature,
+            init_w,
+            num_gaussians,
+            **kwargs,
+        )
+        self.output_activation = output_activation
+
+    def log_prob(self, obs, actions):
+        """Log-density of `actions` under the (unsquashed) mixture at `obs`."""
+        h = obs
+        for i, fc in enumerate(self.fcs):
+            h = self.hidden_activation(fc(h))
+        mean = self.last_fc(h)
+        mean = torch.clamp(mean, MEAN_MIN, MEAN_MAX)
+        mean = self.output_activation(mean)
+
+        log_std = self.last_fc_log_std(h)
+        log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)
+        std = torch.exp(log_std)
+
+        # detached h: mixture weights don't backprop through the trunk
+        weights = F.softmax(
+            self.last_fc_weights(h.detach()) / self.weight_temperature,
+            dim=1,
+        )
+
+        mixture_weights = weights.reshape((-1, self.num_gaussians))
+        mixture_means = mean.reshape(
+            (
+                -1,
+                self.num_gaussians,
+                self.action_dim,
+            )
+        )
+        mixture_stds = std.reshape(
+            (
+                -1,
+                self.num_gaussians,
+                self.action_dim,
+            )
+        )
+        gmm = GaussianMixture(mixture_means, mixture_stds, mixture_weights)
+        log_prob = gmm.log_prob(value=actions)
+        return log_prob
+
+    def forward(self, obs):
+        """Return a GaussianMixture conditioned on obs."""
+        h = obs
+        for i, fc in enumerate(self.fcs):
+            h = self.hidden_activation(fc(h))
+        mean = self.last_fc(h)
+        mean = self.output_activation(mean)
+
+        log_std = self.last_fc_log_std(h)
+        log_std = torch.clamp(log_std, LOG_SIG_MIN, LOG_SIG_MAX)
+        std = torch.exp(log_std)
+
+        weights = F.softmax(
+            self.last_fc_weights(h.detach()) / self.weight_temperature,
+            dim=1,
+        )
+
+        mixture_weights = weights.reshape((-1, self.num_gaussians))
+        mixture_means = mean.reshape(
+            (
+                -1,
+                self.num_gaussians,
+                self.action_dim,
+            )
+        )
+        mixture_stds = std.reshape(
+            (
+                -1,
+                self.num_gaussians,
+                self.action_dim,
+            )
+        )
+        return GaussianMixture(mixture_means, mixture_stds, mixture_weights)
+
+
+class UnnormalizeTanhGaussianPolicy(TanhGaussianPolicy):
+    """Wraps a TanhGaussianPolicy trained on standardized observations so it
+    can be queried with raw observations (un-standardizes before forwarding)."""
+
+    def __init__(self, state_mean, state_std, policy: TanhGaussianPolicy):
+        # Steal the trained policy's whole state; deliberately no super().__init__().
+        self.__dict__ = copy.deepcopy(policy.__dict__)
+        self.state_mean = ptu.to_torch(state_mean)
+        self.state_std = ptu.to_torch(state_std)
+
+    def forward(self, obs):
+        obs = self.unnormalize(obs)
+        return super().forward(obs)
+
+    def log_prob(self, obs, actions):
+        # bug fix: TanhGaussianPolicy.log_prob requires (obs, actions); the
+        # previous override took only obs and every call raised TypeError.
+        obs = self.unnormalize(obs)
+        return super().log_prob(obs, actions)
+
+    def unnormalize(self, obs):
+        """Map standardized observations back to raw env scale."""
+        return (obs * self.state_std) + self.state_mean
+
+
+class UnnormalizeGaussianPolicy(GaussianPolicy):
+    """Wraps a GaussianPolicy trained on standardized observations so it can
+    be queried with raw observations (un-standardizes before forwarding)."""
+
+    def __init__(self, state_mean, state_std, policy: GaussianPolicy):
+        # Steal the trained policy's whole state; deliberately no super().__init__().
+        self.__dict__ = copy.deepcopy(policy.__dict__)
+        self.state_mean = ptu.to_torch(state_mean)
+        self.state_std = ptu.to_torch(state_std)
+
+    def forward(self, obs):
+        obs = self.unnormalize(obs)
+        return super().forward(obs)
+
+    def log_prob(self, obs):
+        # NOTE(review): GaussianPolicy defines no log_prob in this file, so the
+        # super() call's target (and its expected arity) is unclear -- verify
+        # this override is ever exercised and that it should not also take
+        # `actions` like the Tanh variants do.
+        obs = self.unnormalize(obs)
+        return super().log_prob(obs)
+
+    def unnormalize(self, obs):
+        """Map standardized observations back to raw env scale."""
+        return (obs * self.state_std) + self.state_mean
diff --git a/cfpi/policies/simple.py b/cfpi/policies/simple.py
new file mode 100644
index 0000000..7451046
--- /dev/null
+++ b/cfpi/policies/simple.py
@@ -0,0 +1,108 @@
+import torch
+import torch.nn.functional as F
+from eztils.torch import randn, to_np, to_torch
+from torch import nn
+
+from cfpi.policies import Policy
+from cfpi.pytorch.networks import Mlp
+from cfpi.pytorch.normalizer import TorchFixedNormalizer
+
+
+def elem_or_tuple_to_numpy(elem_or_tuple):
+    """Convert a tensor -- or a tuple of tensors -- to numpy, preserving tuple shape."""
+    if isinstance(elem_or_tuple, tuple):
+        return tuple(to_np(x) for x in elem_or_tuple)
+    else:
+        return to_np(elem_or_tuple)
+
+
+def eval_np(module, *args, **kwargs):
+    """
+    Eval this module with a numpy interface
+    Same as a call to __call__ except all Variable input/outputs are
+    replaced with numpy equivalents.
+    Assumes the output is either a single object or a tuple of objects.
+    """
+    torch_args = tuple(to_torch(x) for x in args)
+    torch_kwargs = {k: to_torch(v) for k, v in kwargs.items()}
+    outputs = module(*torch_args, **torch_kwargs)
+    return elem_or_tuple_to_numpy(outputs)
+
+
+class MlpPolicy(Mlp, Policy):
+    """
+    A simpler interface for creating policies.
+
+    Deterministic MLP policy with an optional observation normalizer applied
+    before the network.
+    """
+
+    def __init__(self, *args, obs_normalizer: TorchFixedNormalizer = None, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.obs_normalizer = obs_normalizer
+
+    def forward(self, obs, **kwargs):
+        if self.obs_normalizer:
+            obs = self.obs_normalizer.normalize(obs)
+        return super().forward(obs, **kwargs)
+
+    def get_action(self, obs_np):
+        """Single un-batched observation -> (action, empty debug dict)."""
+        actions = self.get_actions(obs_np[None])
+        return actions[0, :], {}
+
+    def get_actions(self, obs):
+        """Batched numpy observations -> batched numpy actions."""
+        return eval_np(self, obs)
+
+
+class TanhMlpPolicy(MlpPolicy):
+    """
+    A helper class since most policies have a tanh output activation.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, output_activation=torch.tanh, **kwargs)
+
+
+# Vanilla Variational Auto-Encoder
+# Adapted from https://github.com/sfujim/BCQ/blob/master/continuous_BCQ/BCQ.py
+# Vanilla Variational Auto-Encoder
+# Adapted from https://github.com/sfujim/BCQ/blob/master/continuous_BCQ/BCQ.py
+class VAEPolicy(nn.Module, Policy):
+    """Conditional VAE over actions given states (BCQ-style generative policy)."""
+
+    def __init__(self, state_dim, action_dim, hidden_dim, latent_dim, max_action):
+        super().__init__()
+        # encoder: (state, action) -> latent mean/log_std
+        self.e1 = nn.Linear(state_dim + action_dim, hidden_dim)
+        self.e2 = nn.Linear(hidden_dim, hidden_dim)
+
+        self.mean = nn.Linear(hidden_dim, latent_dim)
+        self.log_std = nn.Linear(hidden_dim, latent_dim)
+
+        # decoder: (state, latent) -> action
+        self.d1 = nn.Linear(state_dim + latent_dim, hidden_dim)
+        self.d2 = nn.Linear(hidden_dim, hidden_dim)
+        self.d3 = nn.Linear(hidden_dim, action_dim)
+
+        self.max_action = max_action
+        self.latent_dim = latent_dim
+
+    def forward(self, state, action):
+        """Encode (state, action), sample a latent, decode; returns (reconstruction, mean, std)."""
+        z = F.relu(self.e1(torch.cat([state, action], 1)))
+        z = F.relu(self.e2(z))
+
+        mean = self.mean(z)
+        # Clamped for numerical stability
+        log_std = self.log_std(z).clamp(-4, 15)
+        std = torch.exp(log_std)
+        # reparameterization trick
+        z = mean + std * torch.randn_like(std)
+
+        u = self.decode(state, z)
+
+        return u, mean, std
+
+    def decode(self, state, z=None):
+        """Decode an action for `state`; samples a clipped latent when z is None."""
+        # When sampling from the VAE, the latent vector is clipped to [-0.5, 0.5]
+        if z is None:
+            z = randn((state.shape[0], self.latent_dim)).clamp(-0.5, 0.5)
+
+        a = F.relu(self.d1(torch.cat([state, z], 1)))
+        a = F.relu(self.d2(a))
+        return self.max_action * torch.tanh(self.d3(a))
+
+    def get_action(self, obs_np):
+        """Single un-batched observation -> (action, empty debug dict)."""
+        actions = self.get_actions(obs_np[None])
+        return actions[0, :], {}
+
+    def get_actions(self, obs):
+        """Batched numpy observations -> decoded numpy actions."""
+        return eval_np(self.decode, obs)
diff --git a/cfpi/policies/stochastic.py b/cfpi/policies/stochastic.py
new file mode 100644
index 0000000..4814887
--- /dev/null
+++ b/cfpi/policies/stochastic.py
@@ -0,0 +1,55 @@
+import abc
+
+from eztils.torch import Delta, to_torch
+
+from cfpi.policies import Policy
+from cfpi.policies.simple import elem_or_tuple_to_numpy
+from cfpi.pytorch.networks.distribution_generator import DistributionGenerator
+
+
+class TorchStochasticPolicy(DistributionGenerator, Policy, metaclass=abc.ABCMeta):
+    """Policy base that samples actions from the distribution produced by forward().
+
+    Bridges the numpy Policy interface to a torch DistributionGenerator.
+    """
+
+    def get_action(self, obs_np, time_step=None):
+        """Single un-batched observation -> (action, empty debug dict)."""
+        actions = self.get_actions(obs_np[None], time_step)
+        return actions.squeeze(), {}
+
+    def get_actions(
+        self,
+        obs_np,
+        time_step=None,
+    ):
+        """Batched numpy observations -> sampled numpy actions."""
+        # time_step is only forwarded when given, so forward() signatures
+        # without a time_step parameter keep working
+        if time_step is None:
+            dist = self._get_dist_from_np(obs_np)
+        else:
+            dist = self._get_dist_from_np(obs_np, time_step)
+        actions = dist.sample()
+        return elem_or_tuple_to_numpy(actions)
+
+    def _get_dist_from_np(self, *args, **kwargs):
+        # convert numpy inputs to torch, then call forward()
+        torch_args = tuple(to_torch(x) for x in args)
+        torch_kwargs = {k: to_torch(v) for k, v in kwargs.items()}
+        dist = self(*torch_args, **torch_kwargs)
+        return dist
+
+
+class MakeDeterministic(TorchStochasticPolicy):
+    """Wraps a stochastic policy so sampling always returns its MLE action
+    (a Delta distribution at the underlying distribution's mode)."""
+
+    def __init__(
+        self,
+        action_distribution_generator: DistributionGenerator,
+    ):
+        super().__init__()
+        self._action_distribution_generator = action_distribution_generator
+
+    def forward(self, *args, **kwargs):
+        dist = self._action_distribution_generator.forward(*args, **kwargs)
+        return Delta(dist.mle_estimate())
+
+
+class MakeCfpiDeterministic(TorchStochasticPolicy):
+    """Deterministic policy that delegates action selection to the trainer's
+    get_cfpi_action (which returns a Delta distribution)."""
+
+    def __init__(self, trainer):
+        super().__init__()
+        self.trainer = trainer
+
+    def forward(self, *args, **kwargs):
+        # with torch.enable_grad():
+        dist: Delta = self.trainer.get_cfpi_action(*args, **kwargs)
+        return dist
diff --git a/cfpi/pytorch/__init__.py b/cfpi/pytorch/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cfpi/pytorch/algorithms/cfpi/deterministic.py b/cfpi/pytorch/algorithms/cfpi/deterministic.py
new file mode 100644
index 0000000..6423166
--- /dev/null
+++ b/cfpi/pytorch/algorithms/cfpi/deterministic.py
@@ -0,0 +1,91 @@
+"""
+PAC with a Q Lower Bound, with pretrained q and pi beta from one step repository.
+"""
+import os.path as osp
+
+import eztils.torch as ptu
+import torch
+from eztils.torch import Delta, TanhDelta
+
+from cfpi import conf
+from cfpi.launchers.pipeline import Pipeline, PipelineCtx, Pipelines
+from cfpi.pytorch.algorithms.cfpi.single_gaussian import SG_CFPI_Trainer
+
+
+class Deterministic_CFPI_Trainer(SG_CFPI_Trainer):
+    """CFPI with a deterministic pretrained behavior policy.
+
+    The behavior policy emits a single action per observation; the one-step
+    improvement shifts it along the gradient of the lower-bounded Q
+    estimate, constrained by the trust-region radius in ``self.delta``.
+    """
+
+    def get_cfpi_action(self, obs) -> TanhDelta:
+        behavior_action = self.behavior_policy(obs)
+        # A zero trust region means no improvement step: act with the
+        # behavior action as-is.
+        if self.delta_range == [0.0, 0.0]:
+            return Delta(behavior_action)
+
+        self.sample_delta()
+        # Only the IQN-based lower bound is supported by this trainer.
+        assert self.iqn
+        return self.compute_cfpi_action(obs, behavior_action)
+
+    def compute_cfpi_action(self, obs, behavior_action: torch.Tensor):
+        """One gradient-based improvement step in pre-tanh action space.
+
+        Returns a TanhDelta at the proposal with the highest lower-bounded Q.
+        """
+        # * preliminaries
+        pre_tanh_mu_beta = ptu.atanh(behavior_action)
+        batch_size = obs.shape[0]
+        pre_tanh_mu_beta.requires_grad_()
+        mu_beta = torch.tanh(pre_tanh_mu_beta)
+
+        # * calculate gradient of q lower bound w.r.t action
+        # Get the lower bound of the Q estimate
+        q_LB = self.calc_q_LB(obs, mu_beta)
+        # Obtain the gradient of q_LB wrt to action
+        # with action evaluated at mu_proposal
+        grad = torch.autograd.grad(q_LB.sum(), pre_tanh_mu_beta)[
+            0
+        ]  #! note: this doesn't work on cpu.
+
+        assert grad is not None
+        assert pre_tanh_mu_beta.shape == grad.shape
+
+        # * calculate proposals
+        # Set Sigma_beta as an identity matrix
+        Sigma_beta = torch.ones_like(grad)
+
+        denom = self.get_shift_denominator(grad, Sigma_beta)
+
+        # [batch_size, num_deltas, action_dim]
+        delta_mu = torch.sqrt(2 * self.delta) * (grad / denom).unsqueeze(1)
+
+        mu_proposal = pre_tanh_mu_beta + delta_mu
+        tanh_mu_proposal = torch.tanh(mu_proposal).reshape(
+            batch_size * self.num_delta, -1
+        )
+
+        # * get the lower bounded q
+        obs_exp = obs.repeat_interleave(self.num_delta, dim=0)
+        q_LB = self.calc_q_LB(obs_exp, tanh_mu_proposal)
+        q_LB = q_LB.reshape(batch_size, self.num_delta)
+
+        # * argmax the proposals: keep, per batch row, the proposal whose
+        # lower-bounded Q is largest.
+        select_idx = q_LB.argmax(1)
+        selected = mu_proposal[torch.arange(len(select_idx)), select_idx]
+        return TanhDelta(selected)
+
+
+def deterministic_sanity_check(ctx: PipelineCtx):
+    """Ensure this pipeline only runs with deterministic ('DET') checkpoints."""
+    assert ctx.variant["checkpoint_params"] == "DET"
+
+
+def load_checkpoint_policy(ctx: PipelineCtx):
+    """Load the pretrained behavior policy for the configured env/seed.
+
+    Checkpoint layout: <CHECKPOINT_PATH>/<params.path>/<env_id>/<seed>/<params.file>.
+    """
+    params = getattr(conf.CheckpointParams, ctx.variant["checkpoint_params"])
+    base = osp.join(
+        conf.CHECKPOINT_PATH,
+        params.path,
+        ctx.variant["env_id"],
+        str(ctx.variant["seed"]),
+    )
+
+    policy_path = osp.join(base, params.file)
+    # Loaded on CPU; presumably moved to the training device by a later
+    # pipeline step — TODO confirm.
+    ctx.policy = torch.load(policy_path, map_location="cpu")[params.key]
+
+
+# Deterministic pipeline: the zero-step PAC pipeline with the DET-only
+# sanity check prepended and the DET policy loader swapped in.
+DETBasePipeline = Pipeline.from_(
+    Pipelines.offline_zerostep_pac_pipeline, "DETBasePipeline"
+)
+DETBasePipeline.pipeline.insert(0, deterministic_sanity_check)
+DETBasePipeline.replace("load_checkpoint_policy", load_checkpoint_policy)
diff --git a/cfpi/pytorch/algorithms/cfpi/iql.py b/cfpi/pytorch/algorithms/cfpi/iql.py
new file mode 100644
index 0000000..c6e4811
--- /dev/null
+++ b/cfpi/pytorch/algorithms/cfpi/iql.py
@@ -0,0 +1,304 @@
+"""Torch implementation of Implicit Q-Learning (IQL)
+https://github.com/ikostrikov/implicit_q_learning
+"""
+
+from collections import OrderedDict
+
+import eztils.torch as ptu
+import numpy as np
+import torch
+import torch.optim as optim
+from eztils import create_stats_ordered_dict
+from torch import nn as nn
+
+from cfpi.core.logging import add_prefix
+from cfpi.pytorch.networks import LinearTransform
+from cfpi.pytorch.torch_rl_algorithm import TorchTrainer
+
+
+class IQL_CFPI_Trainer(TorchTrainer):
+ def __init__(
+ self,
+ env,
+ policy,
+ qf1,
+ qf2,
+ vf,
+ quantile=0.5,
+ target_qf1=None,
+ target_qf2=None,
+ buffer_policy=None,
+ z=None,
+ discount=0.99,
+ reward_scale=1.0,
+ policy_lr=1e-3,
+ qf_lr=1e-3,
+ policy_weight_decay=0,
+ q_weight_decay=0,
+ optimizer_class=optim.Adam,
+ policy_update_period=1,
+ q_update_period=1,
+ reward_transform_class=None,
+ reward_transform_kwargs=None,
+ terminal_transform_class=None,
+ terminal_transform_kwargs=None,
+ clip_score=None,
+ soft_target_tau=1e-2,
+ target_update_period=1,
+ beta=1.0,
+ ):
+ super().__init__()
+ self.env = env
+ self.policy = policy
+ self.qf1 = qf1
+ self.qf2 = qf2
+ self.target_qf1 = target_qf1
+ self.target_qf2 = target_qf2
+ self.soft_target_tau = soft_target_tau
+ self.target_update_period = target_update_period
+ self.vf = vf
+ self.z = z
+ self.buffer_policy = buffer_policy
+
+ self.optimizers = {}
+
+ self.policy_optimizer = optimizer_class(
+ self.policy.parameters(),
+ weight_decay=policy_weight_decay,
+ lr=policy_lr,
+ )
+ self.optimizers[self.policy] = self.policy_optimizer
+ self.qf1_optimizer = optimizer_class(
+ self.qf1.parameters(),
+ weight_decay=q_weight_decay,
+ lr=qf_lr,
+ )
+ self.qf2_optimizer = optimizer_class(
+ self.qf2.parameters(),
+ weight_decay=q_weight_decay,
+ lr=qf_lr,
+ )
+ self.vf_optimizer = optimizer_class(
+ self.vf.parameters(),
+ weight_decay=q_weight_decay,
+ lr=qf_lr,
+ )
+
+ if self.z:
+ self.z_optimizer = optimizer_class(
+ self.z.parameters(),
+ weight_decay=q_weight_decay,
+ lr=qf_lr,
+ )
+
+ self.discount = discount
+ self.reward_scale = reward_scale
+ self.eval_statistics = OrderedDict()
+ self._n_train_steps_total = 0
+ self._need_to_update_eval_statistics = True
+
+ self.q_update_period = q_update_period
+ self.policy_update_period = policy_update_period
+
+ self.reward_transform_class = reward_transform_class or LinearTransform
+ self.reward_transform_kwargs = reward_transform_kwargs or dict(m=1, b=0)
+ self.terminal_transform_class = terminal_transform_class or LinearTransform
+ self.terminal_transform_kwargs = terminal_transform_kwargs or dict(m=1, b=0)
+ self.reward_transform = self.reward_transform_class(
+ **self.reward_transform_kwargs
+ )
+ self.terminal_transform = self.terminal_transform_class(
+ **self.terminal_transform_kwargs
+ )
+
+ self.clip_score = clip_score
+ self.beta = beta
+ self.quantile = quantile
+
+ def train_from_torch(
+ self,
+ batch,
+ # train=True,
+ # pretrain=False,
+ ):
+ rewards = batch["rewards"]
+ terminals = batch["terminals"]
+ obs = batch["observations"]
+ actions = batch["actions"]
+ next_obs = batch["next_observations"]
+ if self.reward_transform:
+ rewards = self.reward_transform(rewards)
+
+ if self.terminal_transform:
+ terminals = self.terminal_transform(terminals)
+ """
+ Policy and Alpha Loss
+ """
+ dist = self.policy(obs)
+
+ """
+ QF Loss
+ """
+ q1_pred = self.qf1(obs, actions)
+ q2_pred = self.qf2(obs, actions)
+ target_vf_pred = self.vf(next_obs).detach()
+
+ q_target = (
+ self.reward_scale * rewards
+ + (1.0 - terminals) * self.discount * target_vf_pred
+ )
+ q_target = q_target.detach()
+ qf1_loss = self.qf_criterion(q1_pred, q_target)
+ qf2_loss = self.qf_criterion(q2_pred, q_target)
+
+ """
+ VF Loss
+ """
+ q_pred = torch.min(
+ self.target_qf1(obs, actions),
+ self.target_qf2(obs, actions),
+ ).detach()
+ vf_pred = self.vf(obs)
+ vf_err = vf_pred - q_pred
+ vf_sign = (vf_err > 0).float()
+ vf_weight = (1 - vf_sign) * self.quantile + vf_sign * (1 - self.quantile)
+ vf_loss = (vf_weight * (vf_err**2)).mean()
+
+ """
+ Policy Loss
+ """
+ policy_logpp = dist.log_prob(actions)
+
+ adv = q_pred - vf_pred
+ exp_adv = torch.exp(adv / self.beta)
+ if self.clip_score is not None:
+ exp_adv = torch.clamp(exp_adv, max=self.clip_score)
+
+ weights = exp_adv[:, 0].detach()
+ policy_loss = (-policy_logpp * weights).mean()
+
+ """
+ Update networks
+ """
+ if self._n_train_steps_total % self.q_update_period == 0:
+ self.qf1_optimizer.zero_grad()
+ qf1_loss.backward()
+ self.qf1_optimizer.step()
+
+ self.qf2_optimizer.zero_grad()
+ qf2_loss.backward()
+ self.qf2_optimizer.step()
+
+ self.vf_optimizer.zero_grad()
+ vf_loss.backward()
+ self.vf_optimizer.step()
+
+ if self._n_train_steps_total % self.policy_update_period == 0:
+ self.policy_optimizer.zero_grad()
+ policy_loss.backward()
+ self.policy_optimizer.step()
+
+ """
+ Soft Updates
+ """
+ if self._n_train_steps_total % self.target_update_period == 0:
+ ptu.soft_update_from_to(self.qf1, self.target_qf1, self.soft_target_tau)
+ ptu.soft_update_from_to(self.qf2, self.target_qf2, self.soft_target_tau)
+
+ """
+ Save some statistics for eval
+ """
+ if self._need_to_update_eval_statistics:
+ self._need_to_update_eval_statistics = False
+ """
+ Eval should set this to None.
+ This way, these statistics are only computed for one batch.
+ """
+ self.eval_statistics["QF1 Loss"] = np.mean(ptu.to_np(qf1_loss))
+ self.eval_statistics["QF2 Loss"] = np.mean(ptu.to_np(qf2_loss))
+ self.eval_statistics["Policy Loss"] = np.mean(ptu.to_np(policy_loss))
+ self.eval_statistics.update(
+ create_stats_ordered_dict(
+ "Q1 Predictions",
+ ptu.to_np(q1_pred),
+ )
+ )
+ self.eval_statistics.update(
+ create_stats_ordered_dict(
+ "Q2 Predictions",
+ ptu.to_np(q2_pred),
+ )
+ )
+ self.eval_statistics.update(
+ create_stats_ordered_dict(
+ "Q Targets",
+ ptu.to_np(q_target),
+ )
+ )
+ self.eval_statistics.update(
+ create_stats_ordered_dict(
+ "rewards",
+ ptu.to_np(rewards),
+ )
+ )
+ self.eval_statistics.update(
+ create_stats_ordered_dict(
+ "terminals",
+ ptu.to_np(terminals),
+ )
+ )
+ self.eval_statistics["replay_buffer_len"] = self.replay_buffer._size
+ policy_statistics = add_prefix(dist.get_diagnostics(), "policy/")
+ self.eval_statistics.update(policy_statistics)
+ self.eval_statistics.update(
+ create_stats_ordered_dict(
+ "Advantage Weights",
+ ptu.to_np(weights),
+ )
+ )
+ self.eval_statistics.update(
+ create_stats_ordered_dict(
+ "Advantage Score",
+ ptu.to_np(adv),
+ )
+ )
+
+ self.eval_statistics.update(
+ create_stats_ordered_dict(
+ "V1 Predictions",
+ ptu.to_np(vf_pred),
+ )
+ )
+ self.eval_statistics["VF Loss"] = np.mean(ptu.to_np(vf_loss))
+
+ self._n_train_steps_total += 1
+
+ def get_diagnostics(self):
+ stats = super().get_diagnostics()
+ stats.update(self.eval_statistics)
+ return stats
+
+ def end_epoch(self, epoch):
+ self._need_to_update_eval_statistics = True
+
+ @property
+ def networks(self):
+ nets = [
+ self.policy,
+ self.qf1,
+ self.qf2,
+ self.target_qf1,
+ self.target_qf2,
+ self.vf,
+ ]
+ return nets
+
+ def get_snapshot(self):
+ return dict(
+ policy=self.policy,
+ qf1=self.qf1,
+ qf2=self.qf2,
+ target_qf1=self.target_qf1,
+ target_qf2=self.target_qf2,
+ vf=self.vf,
+ )
diff --git a/cfpi/pytorch/algorithms/cfpi/mixture_gaussian.py b/cfpi/pytorch/algorithms/cfpi/mixture_gaussian.py
new file mode 100644
index 0000000..63e1175
--- /dev/null
+++ b/cfpi/pytorch/algorithms/cfpi/mixture_gaussian.py
@@ -0,0 +1,557 @@
+import math
+import os.path as osp
+
+import eztils.torch as ptu
+import numpy as np
+import torch
+import torch.optim as optim
+from eztils.torch import TanhDelta, TanhGaussianMixture
+from eztils import red
+from cfpi import conf
+from cfpi.data_management.hdf5_path_loader import (
+ d4rl_qlearning_dataset_with_next_actions_new_actions,
+)
+from cfpi.launchers.pipeline import Pipeline, PipelineCtx, Pipelines
+from cfpi.launchers.pipeline_pieces import (
+ create_eval_policy,
+ create_q,
+ load_checkpoint_iql_policy,
+ load_checkpoint_iql_q,
+ user_defined_attrs_dict,
+)
+from cfpi.pytorch.algorithms.cfpi.single_gaussian import SG_CFPI_Trainer
+from cfpi.pytorch.algorithms.q_training.sarsa_iqn import create_q_iqn
+
+
+class MG_CFPI_Trainer(SG_CFPI_Trainer):
+    """CFPI trainer for a Gaussian-mixture behavior policy.
+
+    Extends the single-Gaussian trainer with per-component ("max") and
+    moment-matched ("jensen") proposals; the improved action is the
+    proposal with the highest lower-bounded Q value.
+    """
+
+    def __init__(
+        self,
+        policy,
+        qfs,
+        target_qfs,
+        discount=0.99,
+        reward_scale=1,
+        policy_lr=0.001,
+        qf_lr=0.001,
+        optimizer_class=optim.Adam,
+        soft_target_tau=0.01,
+        target_update_period=1,
+        num_quantiles=8,
+        plotter=None,
+        render_eval_paths=False,
+        beta_LB=0.5,
+        trivial_threshold=0.05,
+        delta_range=None,
+        num_delta=None,
+        num_candidate_actions=10,
+        # new params
+        action_selection_mode="max_from_both",
+        use_max_lambda=False,
+        IQN=True,
+    ):
+        # Default to a zero trust region (no improvement step).
+        if delta_range is None:
+            delta_range = [0.0, 0.0]
+        super().__init__(
+            policy=policy,
+            qfs=qfs,
+            target_qfs=target_qfs,
+            discount=discount,
+            reward_scale=reward_scale,
+            policy_lr=policy_lr,
+            qf_lr=qf_lr,
+            optimizer_class=optimizer_class,
+            soft_target_tau=soft_target_tau,
+            target_update_period=target_update_period,
+            num_quantiles=num_quantiles,
+            plotter=plotter,
+            render_eval_paths=render_eval_paths,
+            delta_range=delta_range,
+            num_delta=num_delta,
+            beta_LB=beta_LB,
+            IQN=IQN,
+        )
+
+        assert action_selection_mode in ["jensen", "max", "max_from_both", "easy_bcq"]
+        self.action_selection_mode = action_selection_mode
+        self.use_max_lambda = use_max_lambda
+        self.num_candidate_actions = num_candidate_actions
+        # Mixture components whose weight falls below this are ignored.
+        self.trivial_threshold = trivial_threshold
+        print("action_selection mode:", action_selection_mode)
+
+    def get_cfpi_action(self, obs) -> TanhDelta:
+        """Return the one-step improved action distribution for obs."""
+        dist: TanhGaussianMixture = self.get_action_dist(obs)
+
+        if self.easy_bcq or self.action_selection_mode == "easy_bcq":
+            return self.get_easy_bcq_action(obs, dist)
+
+        # Zero trust region: pick the best component mean under the
+        # lower-bounded Q, skipping low-weight components.
+        if self.delta_range == [0.0, 0.0]:
+            batch_size = obs.shape[0]
+            obs_exp = obs.repeat_interleave(dist.num_gaussians, dim=0)
+            qfs = self.calc_q_LB(
+                obs_exp, dist.mean.reshape(batch_size * dist.num_gaussians, -1)
+            ).reshape(batch_size, dist.num_gaussians)
+            mask = dist.weights < self.trivial_threshold
+            qfs[mask] = -torch.inf
+
+            idx = torch.argmax(qfs, dim=1)
+            selected_actions = dist.normal_mean[torch.arange(len(idx)), idx]
+            return TanhDelta(selected_actions)
+
+        self.sample_delta()
+
+        if self.action_selection_mode == "jensen":
+            jensen_proposal, jensen_value = self.compute_jensen_proposal(obs, dist)
+            return TanhDelta(jensen_proposal)
+        elif self.action_selection_mode == "max":
+            max_proposal, max_value = self.compute_max_proposal(obs, dist)
+            return TanhDelta(max_proposal)
+        elif self.action_selection_mode == "max_from_both":
+            # Compute both proposal families and keep the better per row.
+            jensen_proposal, jensen_value = self.compute_jensen_proposal(obs, dist)
+            max_proposal, max_value = self.compute_max_proposal(obs, dist)
+
+            with torch.no_grad():
+                # [batch_size, 2, act_dim]
+                proposal = torch.cat(
+                    [max_proposal.unsqueeze(1), jensen_proposal.unsqueeze(1)], dim=1
+                )
+                # [batch_size, 2]
+                value = torch.cat(
+                    [max_value.unsqueeze(1), jensen_value.unsqueeze(1)], dim=1
+                )
+
+                idx = torch.argmax(value, dim=1)
+                selected_actions = proposal[torch.arange(len(idx)), idx]
+                if torch.any(torch.isnan(selected_actions)):
+                    red("not good, found nan actions")
+                    raise Exception("Action selection is NaN!")
+            return TanhDelta(selected_actions)
+        else:
+            raise NotImplementedError
+
+    def calc_log_p_mu(self, Sigma_beta):
+        # Log density of a diagonal Gaussian evaluated at its own mean.
+        return -0.5 * (2 * math.pi * Sigma_beta).prod(-1).log()
+
+    def compute_max_proposal(
+        self, obs, dist: TanhGaussianMixture
+    ) -> "tuple[torch.Tensor, torch.Tensor]":
+        """
+        Max proposals: improve each mixture component mean independently and
+        return (selected pre-tanh proposal, its lower-bounded Q value).
+        """
+        # * preliminaries
+        pre_tanh_mu_beta = dist.normal_mean
+        weights = dist.weights
+
+        num_gaussians = dist.num_gaussians
+        batch_size = obs.shape[0]
+
+        # * calculate delta. this is the m distance constraint. we require the Mahalanobis (m) distance to be <= this value.
+        # [batch_size, num_gaussian, act_dim]
+        Sigma_beta = torch.pow(dist.stddev, 2)
+        # [batch_size, num_gaussian]
+        log_weights = weights.log()
+        # [batch_size, num_gaussian]
+        log_p_mu = self.calc_log_p_mu(Sigma_beta)
+
+        if self.use_max_lambda:
+            pseudo_log_p_mu = (log_weights + log_p_mu).max(-1, keepdim=True)[0]
+        else:
+            pseudo_log_p_mu = (log_weights + log_p_mu).sum(-1, keepdim=True)
+
+        # [batch_size, num_delta, num_gaussian]
+        max_delta = 2 * (  #! refer to appendix in paper
+            self.delta + (log_weights - pseudo_log_p_mu + log_p_mu).unsqueeze(1)
+        ).clamp(min=0.0)
+
+        # * calculate gradient of q lower bound w.r.t action
+        pre_tanh_mu_beta.requires_grad_()
+        # [batch_size * num_gaussian, obs_dim]
+        obs_exp = obs.repeat_interleave(num_gaussians, dim=0)
+        # [batch_size * num_gaussian, act_dim]
+        mu_beta = torch.tanh(pre_tanh_mu_beta.reshape(-1, pre_tanh_mu_beta.shape[-1]))
+
+        # Get the lower bound of the Q estimate
+        # [batch_size * num_gaussian, 1, ensemble_size]
+        q_LB = self.calc_q_LB(obs_exp, mu_beta)
+        # [batch_size, num_gaussian]
+        q_LB = q_LB.reshape(-1, num_gaussians)
+
+        # Obtain the gradient of q_LB wrt to a
+        # with a evaluated at mu_proposal
+        grad = torch.autograd.grad(
+            q_LB.sum(), pre_tanh_mu_beta
+        )  #! this returns a tuple!!
+        # [batch_size, num_gaussian, act_dim]
+        grad = grad[0]
+
+        assert grad is not None
+        assert pre_tanh_mu_beta.shape == grad.shape
+
+        # * calculate proposals
+        denom = self.get_shift_denominator(grad, Sigma_beta)
+        # [batch_size, num_gaussians, action_dim]
+        direction = (torch.mul(Sigma_beta, grad) / denom).unsqueeze(1)
+
+        # [batch_size, num_delta, num_gaussians, action_dim]
+        delta_mu = torch.sqrt(2 * max_delta).unsqueeze(-1) * direction
+
+        mu_proposal = (pre_tanh_mu_beta.unsqueeze(1) + delta_mu).reshape(
+            batch_size, self.num_delta * num_gaussians, -1
+        )
+        # [batch_size * num_gaussians * num_delta, action_dim]
+        tanh_mu_proposal = torch.tanh(mu_proposal).reshape(
+            batch_size * self.num_delta * num_gaussians, -1
+        )
+
+        # * get the lower bounded q
+        assert self.num_delta == 1
+        obs_exp = obs.repeat_interleave(self.num_delta * num_gaussians, dim=0)
+        # obs_exp = obs.repeat_(self.num_delta * num_gaussians, 1)
+        q_LB = self.calc_q_LB(obs_exp, tanh_mu_proposal)
+        q_LB = q_LB.reshape(batch_size, num_gaussians * self.num_delta)
+        # mask low probabilities
+        q_LB[(weights.repeat(1, self.num_delta) < self.trivial_threshold)] = -torch.inf
+
+        # * argmax the proposals
+        max_value, idx = torch.max(q_LB, dim=1)
+        select_mu_proposal = mu_proposal[torch.arange(len(idx)), idx]
+
+        return select_mu_proposal, max_value
+
+    def compute_jensen_proposal(
+        self, obs, dist: TanhGaussianMixture
+    ) -> "tuple[torch.Tensor, torch.Tensor]":
+        """
+        Jensen proposal: improve the precision-weighted average of the
+        component means and return (selected pre-tanh proposal, its
+        lower-bounded Q value).
+        """
+        # * preliminaries
+        mean_per_comp = dist.normal_mean
+        weights = dist.weights
+        batch_size = obs.shape[0]
+        # Small epsilon keeps the division below numerically stable.
+        Sigma_beta = torch.pow(dist.stddev, 2) + 1e-6
+        normalized_factor = (weights.unsqueeze(-1) / Sigma_beta).sum(
+            1
+        )  #! this is "A" in the paper
+        pre_tanh_mu_bar = (weights.unsqueeze(-1) / Sigma_beta * mean_per_comp).sum(
+            1
+        ) / normalized_factor
+
+        # * calculate delta. this is the m distance constraint. we require the Mahalanobis (m) distance to be <= this value.
+        # [batch_size, num_gaussian]
+        # jensen_delta = -2 * self.tau + (weights * log_p_mu).sum(-1)
+        jensen_delta = self.delta  # this is flexible
+
+        # Obtain the change in mu
+        pseudo_delta = (
+            2 * jensen_delta
+            - (weights * (torch.pow(mean_per_comp, 2) / Sigma_beta).sum(-1))
+            .sum(1, keepdim=True)
+            .unsqueeze(1)
+            + (torch.pow(pre_tanh_mu_bar, 2) * normalized_factor)
+            .sum(1, keepdim=True)
+            .unsqueeze(1)
+        )
+        pre_tanh_mu_bar.requires_grad_()
+        mu_bar = torch.tanh(pre_tanh_mu_bar)
+
+        # Infeasible trust region everywhere: fall back to the averaged
+        # action with a -inf value so "max_from_both" will never pick it.
+        if torch.all(pseudo_delta < 0):
+            return mu_bar, ptu.ones(mu_bar.shape[0]) * -torch.inf
+
+        # * calculate gradient of q lower bound w.r.t action
+        q_LB = self.calc_q_LB(obs, mu_bar)
+        # Obtain the gradient of q_LB wrt to a
+        # with a evaluated at mu_proposal
+        grad = torch.autograd.grad(q_LB.sum(), pre_tanh_mu_bar)[0]
+
+        assert grad is not None
+        assert pre_tanh_mu_bar.shape == grad.shape
+
+        denom = self.get_shift_denominator(grad, 1 / normalized_factor)
+
+        numerator = torch.sqrt((pseudo_delta).clamp(min=0.0))
+        delta_mu = numerator * (
+            torch.mul(1 / normalized_factor, grad) / denom
+        ).unsqueeze(1)
+
+        # * calculate proposals
+        mu_proposal = pre_tanh_mu_bar.unsqueeze(1) + delta_mu
+        # NOTE(review): this first-order value estimate is overwritten by the
+        # exact argmax below — confirm whether it is intentionally kept.
+        jensen_value = (delta_mu * grad).sum(-1) + q_LB.squeeze(-1)
+
+        # * get the lower bounded q
+        obs_exp = obs.repeat(self.num_delta, 1)
+        q_LB = self.calc_q_LB(
+            obs_exp, torch.tanh(mu_proposal).reshape(batch_size * self.num_delta, -1)
+        )
+        q_LB = q_LB.reshape(batch_size, self.num_delta)
+        q_LB[(pseudo_delta <= -1e-10).squeeze(-1)] = -torch.inf
+
+        # * argmax the proposals
+        jensen_value, idx = torch.max(q_LB, dim=1)
+        select_mu_proposal = mu_proposal[torch.arange(len(idx)), idx]
+
+        # # * optionally check correctness (debug-only; requires cvxpy)
+        if False:
+            for i in range(self.num_delta):
+                if q_LB[0, i] == -torch.inf:
+                    continue
+                self.check_jensen_correctness(
+                    grad,
+                    mean_per_comp,
+                    Sigma_beta,
+                    weights,
+                    dist,
+                    mu_proposal[0, i],
+                    2 * jensen_delta[0, i],
+                    # -2 * self.delta[0, i] - (weights * self.calc_log_p_mu(Sigma_beta)).sum(-1),
+                )
+
+        return select_mu_proposal, jensen_value
+
+    def check_jensen_correctness(
+        self,
+        grad,
+        mean_per_comp,
+        Sigma_beta,
+        weights,
+        dist,
+        predicted_mu_proposal,
+        delta,
+    ):
+        """Cross-check the closed-form Jensen proposal against cvxpy.
+
+        Debug helper only; cvxpy is an optional dependency imported lazily.
+        """
+        print("checking jensen correctness...")
+        import cvxpy as cp
+
+        # Construct the problem.
+        grad_np = ptu.to_np(grad.squeeze())
+        mean_per_comp_np = ptu.to_np(mean_per_comp.squeeze())
+        Sigma_beta_np = ptu.to_np(Sigma_beta.squeeze())
+        weights_np = ptu.to_np(weights.squeeze())
+        num_comp = dist.num_gaussians
+
+        x = cp.Variable(grad_np.shape[0])
+        objective = cp.Minimize(-(grad_np @ x))
+        constraints = [
+            sum(
+                [
+                    sum((x - mean_per_comp_np[i]) ** 2 / Sigma_beta_np[i])
+                    * weights_np[i]
+                    for i in range(num_comp)
+                ]
+            )
+            <= delta.item()
+        ]
+        prob = cp.Problem(objective, constraints)
+
+        # The optimal objective value is returned by `prob.solve()`.
+        prob.solve()
+        # # The optimal value for x is stored in `x.value`.
+        if x.value is not None:  #! why is this none sometimes?
+            assert np.allclose(
+                ptu.to_np(predicted_mu_proposal), x.value, atol=1e-1
+            ), f"{predicted_mu_proposal} != {x.value}"
+        # The optimal Lagrange multiplier for a constraint is stored in
+        # `constraint.dual_value`.
+        # print(constraints[0].dual_value
+
+
+def mg_sanity_check(ctx: PipelineCtx):
+    """Mixture-Gaussian pipelines must not use single-Gaussian checkpoints."""
+    assert ctx.variant["checkpoint_params"] != "SG"
+
+
+# Base mixture-Gaussian pipeline: zero-step PAC plus the MG-only check.
+MGBasePipeline = Pipeline.from_(
+    Pipelines.offline_zerostep_pac_pipeline, "MGBasePipeline"
+)
+MGBasePipeline.pipeline.insert(0, mg_sanity_check)
+
+# * --------------------------------------------------
+
+
+def create_stochastic_eval_policy(ctx: PipelineCtx):
+    """Evaluate with the raw (sampling) behavior policy, i.e. BC-style eval."""
+    ctx.eval_policy = ctx.policy
+
+
+# BC-via-sampling variant: evaluate the stochastic policy directly.
+MGEvalBCPipeline = Pipeline.from_(MGBasePipeline, "MGEvalBCPipeline")
+MGEvalBCPipeline.replace("create_pac_eval_policy", create_stochastic_eval_policy)
+
+# * --------------------------------------------------
+
+# Variant that takes its Q functions from IQL checkpoints.
+MGIQLPipeline = Pipeline.from_(MGBasePipeline, "MGIQLPipeline")
+
+
+def iql_sanity_check(ctx):
+    """Validate variant settings for pipelines using IQL checkpoints."""
+    assert ctx.variant["d4rl"]
+    assert ctx.variant["algorithm_kwargs"]["zero_step"]
+    # if 'antmaze' in ctx.variant['env_id']:
+    #     assert ctx.variant["normalize_env"] == False
+    # else:
+    #     assert ctx.variant["normalize_env"] == True
+
+    assert not ctx.variant["IQN"]
+    assert (
+        ctx.variant["checkpoint_params"]
+        in user_defined_attrs_dict(conf.CheckpointParams).keys()
+    )
+    assert ctx.variant["checkpoint_params"] != "Q"
+
+    # NOTE(review): this instantiates the params class, while the sibling
+    # sanity checks use the class attributes directly — confirm both forms
+    # expose the same .seeds/.envs.
+    params: conf.CheckpointParams.CheckpointParam = getattr(conf.CheckpointParams, ctx.variant["checkpoint_params"])()
+    assert ctx.variant["seed"] in params.seeds
+    assert ctx.variant["env_id"] in params.envs, ctx.variant["env_id"]
+    assert ctx.variant["seed"] in conf.CheckpointParams.Q_IQL.seeds
+    assert ctx.variant["env_id"] in conf.CheckpointParams.Q_IQL.envs
+
+
+# Insert Q-network construction before the IQL weights are loaded into it.
+MGIQLPipeline.pipeline.insert(6, create_q)
+
+MGIQLPipeline.replace("pac_sanity_check", iql_sanity_check)
+MGIQLPipeline.replace("load_checkpoint_iqn_q", load_checkpoint_iql_q)
+
+
+# Pipeline where both the policy and the Q functions come from IQL checkpoints.
+AllIQLPipeline = Pipeline.from_(MGIQLPipeline, "AllIQLPipeline")
+AllIQLPipeline.delete("mg_sanity_check")
+AllIQLPipeline.replace("load_checkpoint_policy", load_checkpoint_iql_policy)
+
+# * --------------------------------------------------
+# MGPAC + IQL for ICLR rebuttal
+# Q functions are trained by the IQL with normalize_env = False
+# Policies are trained in our repos with normalize_env = False
+MGIQLAntMazePipeline = Pipeline.from_(MGIQLPipeline, "MGIQLAntMazePipeline")
+
+
+def mg_iql_antmaze_sanity_check(ctx):
+    """Validate variant settings for the AntMaze MGPAC+IQL pipelines."""
+    assert ctx.variant["d4rl"]
+    assert ctx.variant["algorithm_kwargs"]["zero_step"]
+    assert "antmaze" in ctx.variant["env_id"]
+    # AntMaze checkpoints were trained without env normalization.
+    assert ctx.variant["normalize_env"] is False
+
+    assert not ctx.variant["IQN"]
+    assert not ctx.variant["trainer_kwargs"]["IQN"]
+    assert (
+        ctx.variant["checkpoint_params"]
+        in user_defined_attrs_dict(conf.CheckpointParams).keys()
+    )
+    assert ctx.variant["checkpoint_params"] != "Q"
+
+    params = getattr(conf.CheckpointParams, ctx.variant["checkpoint_params"])
+    assert ctx.variant["seed"] in params.seeds
+    assert ctx.variant["env_id"] in params.envs, ctx.variant["env_id"]
+    assert ctx.variant["seed"] in conf.CheckpointParams.Q_IQL.seeds
+    assert ctx.variant["env_id"] in conf.CheckpointParams.Q_IQL.envs
+
+
+def load_checkpoint_antmaze_iql_q(ctx: PipelineCtx):
+    """Load IQL-trained twin Q weights (params.pt) into the fresh Q nets."""
+    q_params = conf.CheckpointParams.Q_IQL
+
+    params = torch.load(
+        osp.join(
+            conf.CHECKPOINT_PATH,
+            q_params.path,
+            ctx.variant["env_id"],
+            str(ctx.variant["seed"]),
+            "params.pt",
+        ),
+        map_location=ptu.device,
+    )
+
+    # Copy state dicts into the Q networks built earlier by create_q.
+    ctx.qfs[0].load_state_dict(params["trainer/qf1"])
+    ctx.qfs[1].load_state_dict(params["trainer/qf2"])
+
+
+def load_checkpoint_antmaze_iql_policy(ctx: PipelineCtx):
+ params = getattr(conf.CheckpointParams, ctx.variant["checkpoint_params"])
+
+ policy_path = ""
+ base = osp.join(
+ conf.CHECKPOINT_PATH,
+ params.path,
+ ctx.variant["env_id"],
+ str(ctx.variant["seed"]),
+ )
+
+ policy_path = osp.join(base, "itr_500.pt")
+ ctx.policy = torch.load(policy_path, map_location="cpu")[params.key]
+
+
+# Wire the AntMaze-specific sanity check and checkpoint loaders in.
+MGIQLAntMazePipeline.replace("iql_sanity_check", mg_iql_antmaze_sanity_check)
+MGIQLAntMazePipeline.replace("load_checkpoint_iql_q", load_checkpoint_antmaze_iql_q)
+MGIQLAntMazePipeline.replace(
+    "load_checkpoint_policy", load_checkpoint_antmaze_iql_policy
+)
+
+# BC via Sampling
+MGIQLAntMazeEvalBCPipeline = Pipeline.from_(
+    MGIQLAntMazePipeline, "MGIQLAntMazeEvalBCPipeline"
+)
+MGIQLAntMazeEvalBCPipeline.replace(
+    "create_pac_eval_policy", create_stochastic_eval_policy
+)
+
+# BC via the MLE action of the mixture.
+MGIQLAntMazeMleEvalBCPipeline = Pipeline.from_(
+    MGIQLAntMazePipeline, "MGIQLAntMazeMleEvalBCPipeline"
+)
+MGIQLAntMazeMleEvalBCPipeline.replace("create_pac_eval_policy", create_eval_policy)
+
+
+def load_checkpoint_antmaze_ensemble_iql_q(ctx: PipelineCtx):
+    """Load an ensemble IQL Q checkpoint (itr_700.pt) as whole modules.
+
+    Unlike load_checkpoint_antmaze_iql_q, this replaces ctx.qfs and
+    ctx.target_qfs entirely instead of loading state dicts.
+    """
+    q_params = conf.CheckpointParams.Q_IQL_ENSEMBLE
+
+    params = torch.load(
+        osp.join(
+            conf.CHECKPOINT_PATH,
+            q_params.path,
+            ctx.variant["env_id"],
+            str(ctx.variant["seed"]),
+            "itr_700.pt",
+        ),
+        map_location=ptu.device,
+    )
+    ctx.qfs = params["trainer/qfs"]
+    ctx.target_qfs = params["trainer/target_qfs"]
+
+
+# Ensemble-Q variant of the AntMaze pipeline.
+MGEnsembleIQLAntMazePipeline = Pipeline.from_(
+    MGIQLAntMazePipeline, "MGEnsembleIQLAntMazePipeline"
+)
+MGEnsembleIQLAntMazePipeline.replace(
+    "load_checkpoint_antmaze_iql_q", load_checkpoint_antmaze_ensemble_iql_q
+)
+
+
+# * --------------------------------------------------
+# Multi-step MGPAC on AntMaze for ICLR rebuttal
+# normalize_env = False
+# Policies are trained in our repos with normalize_env = False
+
+
+def multi_step_mg_antmaze_sanity_check(ctx):
+    """Validate variant settings for multi-step MGPAC on AntMaze (IQN Q)."""
+    assert ctx.variant["d4rl"]
+    assert "antmaze" in ctx.variant["env_id"]
+    # AntMaze checkpoints were trained without env normalization.
+    assert ctx.variant["normalize_env"] is False
+
+    assert ctx.variant["trainer_kwargs"]["IQN"]
+    assert (
+        ctx.variant["checkpoint_params"]
+        in user_defined_attrs_dict(conf.CheckpointParams).keys()
+    )
+
+    params = getattr(conf.CheckpointParams, ctx.variant["checkpoint_params"])
+    assert ctx.variant["seed"] in params.seeds
+    assert ctx.variant["env_id"] in params.envs, ctx.variant["env_id"]
+    assert ctx.variant["seed"] in conf.CheckpointParams.Q_IQL.seeds
+    assert ctx.variant["env_id"] in conf.CheckpointParams.Q_IQL.envs
+
+
+# Iterative (multi-step) MGPAC pipeline for AntMaze: Q is trained from
+# scratch with IQN instead of loaded from a checkpoint.
+# NOTE(review): "Iterativep" looks like a typo for "Iterative", but the name
+# is public and may be imported elsewhere, so it is kept unchanged.
+IterativepMGAntMazePipeline = Pipeline.from_(
+    MGBasePipeline, "IterativepMGAntMazePipeline"
+)
+IterativepMGAntMazePipeline.replace(
+    "pac_sanity_check", multi_step_mg_antmaze_sanity_check
+)
+IterativepMGAntMazePipeline.replace("load_checkpoint_iqn_q", create_q_iqn)
+IterativepMGAntMazePipeline.replace(
+    "load_checkpoint_policy", load_checkpoint_antmaze_iql_policy
+)
+
+
+def create_dataset_next_actions_new_actions(ctx: PipelineCtx):
+    """Build a D4RL Q-learning dataset augmented with next/new actions."""
+    ctx.dataset = d4rl_qlearning_dataset_with_next_actions_new_actions(ctx.eval_env)
+    # AntMaze reward shift: turn sparse 0/1 rewards into -1/0.
+    if "antmaze" in ctx.variant["env_id"]:
+        ctx.dataset["rewards"] -= 1
+
+
+# Multi-step variant that swaps in the augmented dataset builder above.
+MultiStepMGAntMazePipeline = Pipeline.from_(
+    MGBasePipeline, "MultiStepMGAntMazePipeline"
+)
+MultiStepMGAntMazePipeline.replace(
+    "create_dataset_next_actions", create_dataset_next_actions_new_actions
+)
diff --git a/cfpi/pytorch/algorithms/cfpi/single_gaussian.py b/cfpi/pytorch/algorithms/cfpi/single_gaussian.py
new file mode 100644
index 0000000..6b01ed5
--- /dev/null
+++ b/cfpi/pytorch/algorithms/cfpi/single_gaussian.py
@@ -0,0 +1,608 @@
+"""
+PAC with a Q Lower Bound, with pretrained q and pi beta from one step repository.
+"""
+
+from typing import List, Union
+
+import pickle
+from collections import OrderedDict
+from os import path as osp
+
+import eztils.torch as ptu
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from eztils import create_stats_ordered_dict
+from eztils.torch import (
+ Delta,
+ MultivariateDiagonalNormal,
+ TanhDelta,
+ TanhGaussianMixture,
+ TanhNormal,
+)
+
+from cfpi import conf
+from cfpi.launchers.pipeline import Pipeline, PipelineCtx, Pipelines
+from cfpi.policies.gaussian_policy import (
+ TanhGaussianPolicy,
+ UnnormalizeTanhGaussianPolicy,
+)
+from cfpi.pytorch.algorithms.q_training.sarsa_iqn import (
+ get_tau,
+ quantile_regression_loss,
+)
+from cfpi.pytorch.networks.mlp import ParallelMlp, QuantileMlp
+from cfpi.pytorch.torch_rl_algorithm import TorchTrainer
+
+
+class SG_CFPI_Trainer(TorchTrainer):
    def __init__(
        self,
        policy,
        qfs,
        target_qfs,
        discount=0.99,
        reward_scale=1,
        policy_lr=0.001,
        qf_lr=0.001,
        optimizer_class=optim.Adam,
        soft_target_tau=0.01,
        target_update_period=1,
        target_policy_noise=0.1,
        target_policy_noise_clip=0.5,
        num_quantiles=8,
        plotter=None,
        render_eval_paths=False,
        # NEW PARAMS
        beta_LB=0.5,
        easy_bcq=False,
        num_candidate_actions=10,
        delta_range=None,
        num_delta=None,
        IQN=True,
    ):
        """Single-Gaussian CFPI trainer.

        :param policy: pretrained behavior policy (pi beta).
        :param qfs: critics — either a list of QuantileMlp (IQN ensemble)
            or a single ParallelMlp with multiple heads.
        :param target_qfs: target networks mirroring `qfs`.
        :param beta_LB: weight on the ensemble std in the Q lower bound.
        :param easy_bcq: if True, act by BCQ-style argmax over sampled
            candidate actions instead of the CFPI gradient step.
        :param delta_range: [lo, hi] range the per-step deltas are drawn
            from; [0.0, 0.0] disables the CFPI correction entirely.
        :param num_delta: number of deltas sampled per action selection;
            defaults to 10x the range width.
        :param IQN: True for IQN critics (quantile regression loss),
            False for plain MSE critics.
        """
        super().__init__()
        if delta_range is None:
            delta_range = [0.0, 0.0]

        self.iqn = IQN
        self.policy = policy
        self.qfs: Union[List[QuantileMlp], ParallelMlp] = qfs
        self.target_qfs = target_qfs
        self.soft_target_tau = soft_target_tau
        self.target_update_period = target_update_period
        self.target_policy_noise = target_policy_noise
        self.target_policy_noise_clip = target_policy_noise_clip

        self.plotter = plotter
        self.render_eval_paths = render_eval_paths

        self.num_quantiles = num_quantiles
        if IQN:
            self.qf_criterion = quantile_regression_loss
        else:
            self.qf_criterion = nn.MSELoss()

        self.policy_optimizer = optimizer_class(
            self.policy.parameters(),
            lr=policy_lr,
        )
        # A single nn.Module critic gets one optimizer; a list of critics
        # gets one optimizer per member (used by train_from_torch).
        if isinstance(qfs, nn.Module):
            self.qfs_optimizer = optimizer_class(
                self.qfs.parameters(),
                lr=qf_lr,
            )
        elif isinstance(qfs, list):
            self.qf1_optimizer = optimizer_class(
                self.qfs[0].parameters(),
                lr=qf_lr,
            )
            self.qf2_optimizer = optimizer_class(
                self.qfs[1].parameters(),
                lr=qf_lr,
            )

        self.discount = discount
        self.reward_scale = reward_scale
        self._n_train_steps_total = 0
        self._need_to_update_eval_statistics = True
        self.eval_statistics = OrderedDict()

        # New params
        self.beta_LB = beta_LB
        # [1(for batch_size), num_delta, 1(for action_dim)]
        self.delta_range = delta_range

        assert (
            len(self.delta_range) == 2
        ), f"Delta range ({self.delta_range}) should be in the form of [lower_range, upper_range]!"

        if self.delta_range != [0.0, 0.0]:
            assert (
                self.delta_range[0] <= self.delta_range[1]
            ), f"Delta range ({self.delta_range}) should be in the form of [lower_range, upper_range]!"

        # Default: roughly 10 deltas per unit of range width; a degenerate
        # range collapses to a single delta.
        if num_delta is None:
            num_delta = int((self.delta_range[1] - self.delta_range[0]) * 10)

        if self.delta_range[1] == self.delta_range[0]:
            num_delta = 1

        self.num_delta = num_delta
        self.easy_bcq = easy_bcq
        self.num_candidate_actions = num_candidate_actions
        self.behavior_policy = self.policy  # for naming's sake
        self.max_action = 1.0

        print("-----------------")
        print("Delta:", delta_range)
        print("Num delta:", self.num_delta)
        print("beta_LB:", self.beta_LB)
        print("-----------------")
+
    def get_action_dist(self, obs):
        # Query the (pretrained) behavior policy for its action distribution.
        return self.behavior_policy(obs)
+
+ def get_iqn_outputs(self, obs, act, use_target=False):
+ qfs = self.target_qfs if use_target else self.qfs
+ res = ptu.zeros(obs.shape[0], len(qfs))
+ for i, iqn in enumerate(qfs):
+ res[:, i] = iqn.get_mean(obs, act)
+
+ return res
+
+ def get_shift_denominator(self, grad, sigma):
+ # The dividor is (g^T Sigma g) ** 0.5
+ # Sigma is diagonal, so this works out to be
+ # ( sum_{i=1}^k (g^(i))^2 (sigma^(i))^2 ) ** 0.5
+ return (
+ torch.sqrt(
+ torch.sum(torch.mul(torch.pow(grad, 2), sigma), dim=1, keepdim=True)
+ )
+ + 1e-7
+ )
+
+ def sample_delta(self):
+ """
+ Sample and set delta for this range.
+ """
+ self.delta = (
+ ptu.rand(self.num_delta) * (self.delta_range[1] - self.delta_range[0])
+ + self.delta_range[0]
+ ).reshape(1, self.num_delta, 1)
+
    def get_easy_bcq_action(self, obs, action_dist):
        """BCQ-style action selection: sample candidates from the behavior
        distribution and pick the one with the highest lower-bounded Q.

        Only supports a single observation (batch_size == 1).
        """
        batch_size = obs.shape[0]
        if isinstance(action_dist, TanhGaussianMixture):
            proposals = action_dist.sample(self.num_candidate_actions).squeeze()
        else:
            proposals = action_dist.sample_n(self.num_candidate_actions).reshape(
                batch_size * self.num_candidate_actions, -1
            )
        # Repeat the observation once per candidate so the critic can score
        # all proposals in a single forward pass.
        obs_exp = obs.repeat_interleave(self.num_candidate_actions, dim=0)
        assert batch_size == 1
        qfs = self.calc_q_LB(obs_exp, proposals).reshape(batch_size, -1)
        idx = torch.argmax(qfs, dim=1)
        selected_actions = proposals[idx].squeeze()
        return Delta(selected_actions)
+
+ def get_cfpi_action(self, obs) -> TanhDelta:
+ dist: TanhNormal = self.get_action_dist(obs)
+ is_train = obs.shape[0] > 1
+
+ if self.easy_bcq:
+ return self.get_easy_bcq_action(obs, dist)
+
+ if self.delta_range == [0.0, 0.0]:
+ if self.iqn:
+ return TanhDelta(dist.normal_mean)
+ else:
+ return Delta(dist.mean)
+
+ self.sample_delta()
+ if self.iqn:
+ return self.compute_cfpi_action(obs, dist, is_train)
+ else:
+ return self.compute_cfpi_action_iql(obs, dist)
+
+ def calc_q_LB(self, obs, act, use_target=False):
+ assert not use_target
+ if isinstance(self.qfs, list):
+ q = self.get_iqn_outputs(obs, act, use_target)
+
+ mu_q = q.mean(-1)
+ sigma_q = q.std(-1)
+ q_LB = mu_q - self.beta_LB * sigma_q
+ return q_LB
+ else:
+ qfs = self.target_qfs if use_target else self.qfs
+ sample_idxs = np.random.choice(10, 2, replace=False)
+ q = qfs(obs, act)
+ q_LB = q[..., sample_idxs].min(-1)
+ return qfs(obs, act).mean(-1)
+
    def compute_cfpi_action(self, obs, dist: TanhNormal, is_train: bool):
        """One-step CFPI correction for IQN critics.

        Shifts the behavior mean along the gradient of the lower-bounded Q
        by each sampled delta, then returns the best shifted proposal as a
        TanhDelta policy.
        """
        # * preliminaries

        pre_tanh_mu_beta = dist.normal_mean
        batch_size = obs.shape[0]
        pre_tanh_mu_beta.requires_grad_()
        mu_beta = torch.tanh(pre_tanh_mu_beta)

        # * calculate gradient of q lower bound w.r.t action
        # Get the lower bound of the Q estimate
        q_LB = self.calc_q_LB(obs, mu_beta, use_target=is_train)
        # Obtain the gradient of q_LB wrt to action
        # with action evaluated at mu_proposal
        grad = torch.autograd.grad(q_LB.sum(), pre_tanh_mu_beta)[
            0
        ]  #! note: this doesn't work on cpu.

        assert grad is not None
        assert pre_tanh_mu_beta.shape == grad.shape

        # * calculate proposals
        # Obtain Sigma_T (the covariance matrix of the normal distribution)
        Sigma_beta = torch.pow(dist.stddev, 2)

        denom = self.get_shift_denominator(grad, Sigma_beta)

        # Closed-form KL-constrained shift: sqrt(2 delta) * Sigma g / ||g||_Sigma
        # [batch_size, num_deltas, action_dim]
        delta_mu = torch.sqrt(2 * self.delta) * (
            torch.mul(Sigma_beta, grad) / denom
        ).unsqueeze(1)

        mu_proposal = pre_tanh_mu_beta + delta_mu
        tanh_mu_proposal = torch.tanh(mu_proposal).reshape(
            batch_size * self.num_delta, -1
        )

        # * get the lower bounded q
        obs_exp = obs.repeat_interleave(self.num_delta, dim=0)
        q_LB = self.calc_q_LB(obs_exp, tanh_mu_proposal)
        q_LB = q_LB.reshape(batch_size, self.num_delta)

        # * argmax the proposals
        select_idx = q_LB.argmax(1)
        selected = mu_proposal[torch.arange(len(select_idx)), select_idx]
        return TanhDelta(selected)
+
+ def compute_cfpi_action_iql(self, obs, dist: MultivariateDiagonalNormal):
+ # * preliminaries
+
+ mu_beta = dist.mean
+ batch_size = obs.shape[0]
+ mu_beta.requires_grad_()
+
+ # * calculate gradient of q lower bound w.r.t action
+ # Get the lower bound of the Q estimate
+ q_LB = self.calc_q_LB(obs, mu_beta)
+ # Obtain the gradient of q_LB wrt to a
+ # with a evaluated at mu_proposal
+ grad = torch.autograd.grad(q_LB.sum(), mu_beta)[
+ 0
+ ] #! note: this doesn't work on cpu.
+
+ assert grad is not None
+ assert mu_beta.shape == grad.shape
+
+ # * cacluate proposals
+ # Obtain Sigma_T (the covariance matrix of the normal distribution)
+ Sigma_beta = torch.pow(dist.stddev, 2)
+
+ denom = self.get_shift_denominator(grad, Sigma_beta)
+
+ # [batch_size, num_deltas, action_dim]
+ delta_mu = torch.sqrt(2 * self.delta) * (
+ torch.mul(Sigma_beta, grad) / denom
+ ).unsqueeze(1)
+
+ mu_proposal = torch.clamp(mu_beta + delta_mu, -1, 1)
+ mu_proposal_reshaped = torch.clamp(mu_proposal, -1, 1).reshape(
+ batch_size * self.num_delta, -1
+ )
+
+ # * get the lower bounded q
+ obs_exp = obs.repeat_interleave(self.num_delta, dim=0)
+ q_LB = self.calc_q_LB(obs_exp, mu_proposal_reshaped)
+ q_LB = q_LB.reshape(batch_size, self.num_delta)
+
+ # * argmax the proposals
+ select_idx = q_LB.argmax(1)
+ selected = mu_proposal[torch.arange(len(select_idx)), select_idx]
+ return Delta(selected)
+
    def train_from_torch(self, batch):
        """One gradient step on both critics from a torch batch.

        NOTE(review): this assumes `qfs` was passed as a list, since only
        then do qf1_optimizer/qf2_optimizer exist — confirm the
        single-ParallelMlp configuration never reaches this trainer.
        """
        # compute_loss returns (qfs_loss_tensor, stats_dict).
        losses, stats = self.compute_loss(
            batch,
            skip_statistics=not self._need_to_update_eval_statistics,
        )
        """
        Update networks
        """
        self.qf1_optimizer.zero_grad()
        self.qf2_optimizer.zero_grad()

        losses.backward()

        self.qf1_optimizer.step()
        self.qf2_optimizer.step()

        self._n_train_steps_total += 1

        self.try_update_target_networks()
        if self._need_to_update_eval_statistics:
            self.eval_statistics = stats
            # Compute statistics using only one batch per epoch
            self._need_to_update_eval_statistics = False
+
+ def try_update_target_networks(self):
+ if self._n_train_steps_total % self.target_update_period == 0:
+ self.update_target_networks()
+
+ def update_target_networks(self):
+ ptu.soft_update_from_to(self.qfs[0], self.target_qfs[0], self.soft_target_tau)
+ ptu.soft_update_from_to(self.qfs[1], self.target_qfs[1], self.soft_target_tau)
+
    def compute_loss(
        self,
        batch,
        skip_statistics=False,
    ):
        """Quantile-regression TD loss for the two IQN critics.

        Returns (qfs_loss, eval_statistics). For the first 20k steps the
        dataset's next actions are used as targets; afterwards targets come
        from `new_next_actions` if present, else from the CFPI policy with
        TD3-style clipped target noise.
        """
        rewards = batch["rewards"]
        terminals = batch["terminals"]
        obs = batch["observations"]
        actions = batch["actions"]
        next_obs = batch["next_observations"]
        next_actions = batch["next_actions"]

        """
        QF Loss
        """
        assert isinstance(self.qfs[0], QuantileMlp)
        assert isinstance(self.qfs[1], QuantileMlp)

        # Draw one set of quantile fractions for the current step and one
        # for the next step in a single call.
        batch_size = obs.shape[0]
        tau_hat_samples, presum_tau_samples = get_tau(
            batch_size * 2, self.num_quantiles
        )
        tau_hat, next_tau_hat = tau_hat_samples.reshape(2, batch_size, -1)
        presum_tau, next_presum_tau = presum_tau_samples.reshape(2, batch_size, -1)

        z1_pred = self.qfs[0](obs, actions, tau_hat)
        z2_pred = self.qfs[1](obs, actions, tau_hat)

        # Warm-up: bootstrap from the dataset's own next actions (SARSA-style).
        if self._n_train_steps_total < 20 * 1000:
            new_next_actions = next_actions
        else:
            try:
                new_next_actions = batch["new_next_actions"]
            except KeyError:
                next_dist = self.get_cfpi_action(next_obs)
                new_next_actions = next_dist.mean
            # TD3-style target policy smoothing: clipped Gaussian noise.
            noise = ptu.randn(new_next_actions.shape) * self.target_policy_noise
            noise = torch.clamp(
                noise, -self.target_policy_noise_clip, self.target_policy_noise_clip
            )
            new_next_actions = torch.clamp(
                new_next_actions + noise, -self.max_action, self.max_action
            )

        with torch.no_grad():
            target_z1_value = self.target_qfs[0](
                next_obs, new_next_actions, next_tau_hat
            )
            target_z2_value = self.target_qfs[1](
                next_obs, new_next_actions, next_tau_hat
            )

        # Clipped double-Q on the quantile values.
        target_z_value = torch.min(target_z1_value, target_z2_value)
        z_target = (
            self.reward_scale * rewards
            + (1.0 - terminals) * self.discount * target_z_value
        ).detach()

        qf1_loss = self.qf_criterion(z1_pred, z_target, tau_hat, next_presum_tau)
        qf2_loss = self.qf_criterion(z2_pred, z_target, tau_hat, next_presum_tau)
        qfs_loss = qf1_loss + qf2_loss

        """
        Save some statistics for eval
        """
        eval_statistics = OrderedDict()
        if not skip_statistics:
            with torch.no_grad():
                eval_statistics["QF1 Loss"] = ptu.to_np(qf1_loss)
                eval_statistics["QF2 Loss"] = ptu.to_np(qf2_loss)

                # Expected Q = probability-weighted sum over quantiles.
                q1_pred = (z1_pred * presum_tau).sum(-1)
                q2_pred = (z2_pred * presum_tau).sum(-1)
                q_preds = (q1_pred + q2_pred) / 2

                target_q_value = (target_z_value * next_presum_tau).sum(-1)

                eval_statistics.update(
                    create_stats_ordered_dict(
                        "Mean Q Predictions",
                        ptu.to_np(q_preds),
                    )
                )
                eval_statistics.update(
                    create_stats_ordered_dict(
                        "Mean Target Q Predictions",
                        ptu.to_np(target_q_value),
                    )
                )

        return qfs_loss, eval_statistics
+
+ def get_diagnostics(self):
+ stats = super().get_diagnostics()
+ stats.update(self.eval_statistics)
+ return stats
+
    def end_epoch(self, epoch):
        # Re-arm statistics collection so the next epoch logs one batch.
        self._need_to_update_eval_statistics = True
+
+ @property
+ def networks(self):
+ if isinstance(self.qfs, list):
+ return self.qfs + self.target_qfs + [self.policy]
+ else:
+ return [
+ self.policy,
+ self.qfs,
+ self.target_qfs,
+ ]
+
    @property
    def optimizers(self):
        # NOTE(review): self.qfs_optimizer only exists when `qfs` is a
        # single nn.Module; with a list of critics __init__ creates
        # qf1/qf2 optimizers instead — confirm this property is only used
        # in the single-module configuration.
        return [
            self.qfs_optimizer,
            self.policy_optimizer,
        ]
+
+ def get_snapshot(self):
+ return dict(
+ policy=self.policy,
+ qfs=self.qfs,
+ target_qfs=self.target_qfs,
+ )
+
+
def sg_sanity_check(ctx: PipelineCtx):
    # Single-Gaussian experiments must load the "SG" checkpoint family.
    assert ctx.variant["checkpoint_params"] == "SG"
+
+
# Base single-Gaussian pipeline: zero-step PAC with an SG sanity check
# prepended as the first pipeline step.
SGBasePipeline = Pipeline.from_(
    Pipelines.offline_zerostep_pac_pipeline, "SGBasePipeline"
)
SGBasePipeline.pipeline.insert(0, sg_sanity_check)
+
+# * --------------------------------------------------
+
+
+
+
def variable_epoch_load_checkpoint_policy(ctx: PipelineCtx):
    """Load a policy checkpoint from a specific training epoch.

    Reads ``itr_<epoch_no>.pt`` from the checkpoint directory for the
    current env/seed and, when the checkpoint family requires it, wraps
    the policy so it accepts unnormalized observations.
    """
    params = getattr(conf.CheckpointParams, ctx.variant["checkpoint_params"])

    # BUG FIX: the filename was built as f'itr_+{...}'.pt — a stray '+'
    # inside the name, and `.pt` was an attribute access on a str
    # (AttributeError at runtime) rather than part of the filename.
    checkpoint_file = osp.join(
        conf.CHECKPOINT_PATH,
        params.path,
        ctx.variant["env_id"],
        str(ctx.variant["seed"]),
        f'itr_{ctx.variant["epoch_no"]}.pt',
    )
    ctx.policy = torch.load(checkpoint_file, map_location="cpu")[params.key]

    if params.unnormalize:
        # These halfcheetah-medium-expert seeds were trained on raw
        # observations, so no unnormalization wrapper is needed.
        if (
            ctx.variant["env_id"] == "halfcheetah-medium-expert-v2"
            and ctx.variant["seed"] < 4
        ):
            pass
        else:
            ctx.policy = UnnormalizeTanhGaussianPolicy(
                ctx.obs_mean, ctx.obs_std, ctx.policy
            )
+
+
# Variant of the SG pipeline that loads the policy from a chosen epoch.
EpochBCExperiment = Pipeline.from_(SGBasePipeline, "EpochBCExperiment")
EpochBCExperiment.replace(
    "load_checkpoint_policy", variable_epoch_load_checkpoint_policy
)
+
+
+# * --------------------------------------------------
def load_gt_policy(d4rl_datset):
    """Rebuild a TanhGaussianPolicy from weights embedded in a D4RL dataset.

    The dataset stores flattened policy parameters under keys containing
    "bias"/"weight"; keys look like "<a>/<b>/fc0.weight", so the first two
    path components are dropped and the remainder joined with dots.
    """
    policy_dict = {k: v for k, v in d4rl_datset.items() if "bias" in k or "weight" in k}
    policy_dict = {
        ".".join(k.split("/")[2:]): ptu.torch_ify(v) for k, v in policy_dict.items()
    }
    # Two hidden layers plus mean and log-std heads, weight + bias each.
    assert len(policy_dict) == 8

    hidden_sz, obs_dim = policy_dict["fc0.weight"].shape
    act_dim, hidden_sz = policy_dict["last_fc.weight"].shape

    # FIX: use the hidden width read from the checkpoint instead of a
    # hard-coded [256, 256], so non-256-wide checkpoints also load.
    pi = TanhGaussianPolicy([hidden_sz, hidden_sz], obs_dim, act_dim)
    pi.load_state_dict(policy_dict)
    return pi
+
+
def load_ground_truth_policy(ctx: PipelineCtx):
    # Reconstruct the behavior policy from weights stored in the dataset.
    ctx.policy = load_gt_policy(ctx.dataset)
+
+
# SG pipeline variant that uses the ground-truth policy from the dataset.
GTExperiment = Pipeline.from_(SGBasePipeline, "GroundTruthExperiment")
GTExperiment.replace("load_checkpoint_policy", load_ground_truth_policy)
+
+
+# * --------------------------------------------------
def load_checkpoint_cql(ctx: PipelineCtx):
    """Load a pretrained CQL snapshot ("sac" entry) onto the context.

    FIX: uses a context manager so the checkpoint file handle is closed
    even if unpickling fails (the original `pickle.load(open(...))` leaked
    the handle).
    """
    checkpoint_file = osp.join(
        conf.CHECKPOINT_PATH,
        "cql-models",
        ctx.variant["env_id"],
        str(ctx.variant["seed"]),
        "model_989.pkl",
    )
    # NOTE(review): unpickling executes arbitrary code — only load
    # checkpoints from trusted sources.
    with open(checkpoint_file, "rb") as f:
        ctx.CQL = pickle.load(f)["sac"]
+
+
def load_cql_policy(ctx: PipelineCtx):
    """Adapt the CQL policy so it returns a TanhNormal distribution,
    matching the interface the CFPI trainer expects."""

    class TanhNormalWrapper(nn.Module):
        def __init__(self, policy) -> None:
            super().__init__()
            self.policy = policy

        def forward(self, *args, **kwargs):
            # The CQL policy returns (mu, std); wrap them as a TanhNormal.
            mu, std = self.policy(*args, **kwargs)
            return TanhNormal(mu, std)

    ctx.policy = TanhNormalWrapper(ctx.CQL.policy)
+
+
def load_cql_q(ctx: PipelineCtx):
    """Load the CQL checkpoint and register its two Q-functions."""
    load_checkpoint_cql(ctx)
    ctx.qfs.extend([ctx.CQL.qf1, ctx.CQL.qf2])
+
+
# SG pipeline variant that sources both policy and critics from CQL.
CQLExperiment = Pipeline.from_(SGBasePipeline, "CQLExperiment")
CQLExperiment.replace("load_checkpoint_policy", load_cql_policy)
CQLExperiment.replace("load_checkpoint_iqn_q", load_cql_q)
+
+
+# def create_q(ctx: PipelineCtx):
+# obs_dim = ctx.eval_env.observation_space.low.size
+# action_dim = ctx.eval_env.action_space.low.size
+
+# qf1 = ctx.variant["qf_class"](
+# input_size=obs_dim + action_dim, output_size=1, **ctx.variant["qf_kwargs"]
+# )
+# qf2 = ctx.variant["qf_class"](
+# input_size=obs_dim + action_dim, output_size=1, **ctx.variant["qf_kwargs"]
+# )
+
+# target_qf1 = ctx.variant["qf_class"](
+# input_size=obs_dim + action_dim, output_size=1, **ctx.variant["qf_kwargs"]
+# )
+# target_qf2 = ctx.variant["qf_class"](
+# input_size=obs_dim + action_dim, output_size=1, **ctx.variant["qf_kwargs"]
+# )
+
+# ctx.qfs = [qf1, qf2]
+# ctx.target_qfs = [target_qf1, target_qf2]
+
+# SGAntMazePipeline = Pipeline.from_(SGBasePipeline, "SGAntMazePipeline")
+# SGAntMazePipeline.pipeline.insert(4, create_q)
+# SGAntMazePipeline.replace("load_checkpoint_iqn_q", load_checkpoint_iql_q)
diff --git a/cfpi/pytorch/algorithms/cfpi/vae.py b/cfpi/pytorch/algorithms/cfpi/vae.py
new file mode 100644
index 0000000..4ffd048
--- /dev/null
+++ b/cfpi/pytorch/algorithms/cfpi/vae.py
@@ -0,0 +1,135 @@
+"""
+PAC with a Q Lower Bound, with pretrained q and pi beta from one step repository.
+"""
+import eztils.torch as ptu
+import torch
+import torch.optim as optim
+from eztils.torch import Delta
+
+from cfpi.launchers.pipeline import Pipeline, PipelineCtx, Pipelines
+from cfpi.pytorch.algorithms.cfpi.deterministic import load_checkpoint_policy
+from cfpi.pytorch.algorithms.cfpi.single_gaussian import SG_CFPI_Trainer
+
+
class Vae_CFPI_Trainer(SG_CFPI_Trainer):
    """CFPI trainer whose behavior policy is a VAE decoder.

    Candidate actions are decoded from the VAE; the best candidate by the
    lower-bounded Q is then locally improved along the Q gradient.
    """

    def __init__(
        self,
        policy,
        qfs,
        target_qfs,
        discount=0.99,
        reward_scale=1,
        policy_lr=0.001,
        qf_lr=0.001,
        optimizer_class=optim.Adam,
        soft_target_tau=0.01,
        target_update_period=1,
        plotter=None,
        render_eval_paths=False,
        beta_LB=0.5,
        delta_range=None,
        num_delta=None,
        num_candidate_actions=10,
        target_quantile=0.7,
        IQN=True,
    ):
        # BUG FIX: `target_quantile` was previously forwarded to
        # SG_CFPI_Trainer.__init__, which has no such parameter, so
        # constructing this trainer raised a TypeError. Keep it locally.
        super().__init__(
            policy=policy,
            qfs=qfs,
            target_qfs=target_qfs,
            discount=discount,
            reward_scale=reward_scale,
            policy_lr=policy_lr,
            qf_lr=qf_lr,
            optimizer_class=optimizer_class,
            soft_target_tau=soft_target_tau,
            target_update_period=target_update_period,
            plotter=plotter,
            render_eval_paths=render_eval_paths,
            beta_LB=beta_LB,
            delta_range=delta_range,
            num_delta=num_delta,
            IQN=IQN,
        )
        self.target_quantile = target_quantile
        self.num_candidate_actions = num_candidate_actions

    def get_cfpi_action(self, obs) -> Delta:
        """Pick the best VAE-decoded candidate, then optionally apply the
        CFPI gradient correction. Supports a single observation only."""
        assert obs.shape[0] == 1
        obs_rep = obs.repeat(self.num_candidate_actions, 1)
        behavior_actions = self.behavior_policy.decode(obs_rep)

        qfs = self.calc_q_LB(obs_rep, behavior_actions)
        idx = torch.argmax(qfs, dim=0)
        selected_actions = behavior_actions[idx]
        if self.delta_range == [0.0, 0.0]:
            return Delta(selected_actions)

        self.sample_delta()
        assert self.iqn
        return self.compute_cfpi_action(obs, selected_actions.reshape(1, -1))

    def compute_cfpi_action(self, obs, behavior_actions: torch.Tensor):
        """
        Max proposals
        """
        num_actions = behavior_actions.shape[0]
        # * preliminaries
        pre_tanh_mu_beta = ptu.atanh(behavior_actions)
        # * calculate gradient of q lower bound w.r.t action
        pre_tanh_mu_beta.requires_grad_()
        # [num_candidate_actions, obs_dim]
        obs_exp = obs.repeat(num_actions, 1)
        # [num_candidate_actions, act_dim]
        mu_beta = torch.tanh(pre_tanh_mu_beta)

        # Get the lower bound of the Q estimate
        # [num_candidate_actions, ensemble_size]
        q_LB = self.calc_q_LB(obs_exp, mu_beta)

        # Obtain the gradient of q_LB wrt to a
        # with a evaluated at mu_proposal
        grad = torch.autograd.grad(
            q_LB.sum(), pre_tanh_mu_beta
        )  #! this returns a tuple!!
        # [num_candidate_actions, act_dim]
        grad = grad[0]

        assert grad is not None
        assert pre_tanh_mu_beta.shape == grad.shape

        # * calculate proposals
        # Unit covariance: the VAE decoder gives point actions, not a
        # Gaussian, so Sigma is taken as the identity.
        Sigma_beta = torch.ones_like(grad)
        # [num_candidate_actions, ]
        denom = self.get_shift_denominator(grad, Sigma_beta)
        # [num_candidate_actions, 1, action_dim]
        direction = (torch.mul(Sigma_beta, grad) / denom).unsqueeze(1)

        # [num_candidate_actions, num_delta, action_dim]
        delta_mu = torch.sqrt(2 * self.delta) * direction
        # [num_candidate_actions * num_delta, action_dim]
        mu_proposal = (pre_tanh_mu_beta.unsqueeze(1) + delta_mu).reshape(
            self.num_delta * num_actions, -1
        )
        # [num_candidate_actions * num_delta, action_dim]
        tanh_mu_proposal = torch.tanh(mu_proposal)

        # * get the lower bounded q
        obs_exp = obs.repeat(self.num_delta * num_actions, 1)
        q_LB = self.calc_q_LB(obs_exp, tanh_mu_proposal)
        # * argmax the proposals
        _, idx = torch.max(q_LB, dim=0)
        select_mu_proposal = tanh_mu_proposal[idx]

        return Delta(select_mu_proposal)
+
+
def vae_sanity_check(ctx: PipelineCtx):
    # VAE experiments must load the "VAE" checkpoint family.
    assert ctx.variant["checkpoint_params"] == "VAE"
+
+
# Base VAE pipeline: zero-step PAC with the VAE sanity check prepended and
# the deterministic-policy loader swapped in.
VaeBasePipeline = Pipeline.from_(
    Pipelines.offline_zerostep_pac_pipeline, "VaeBasePipeline"
)
VaeBasePipeline.pipeline.insert(0, vae_sanity_check)
VaeBasePipeline.replace("load_checkpoint_policy", load_checkpoint_policy)
diff --git a/cfpi/pytorch/algorithms/policy_training/bc.py b/cfpi/pytorch/algorithms/policy_training/bc.py
new file mode 100644
index 0000000..5d13473
--- /dev/null
+++ b/cfpi/pytorch/algorithms/policy_training/bc.py
@@ -0,0 +1,226 @@
+"""
+Behavior Cloning Policy
+"""
+from typing import Tuple
+
+from collections import OrderedDict, namedtuple
+
+import eztils.torch as ptu
+import numpy as np
+import torch
+import torch.optim as optim
+from eztils import create_stats_ordered_dict
+
+from cfpi.core.logging import add_prefix
+from cfpi.data_management.hdf5_path_loader import load_hdf5_next_actions_and_val_data
+from cfpi.launchers.pipeline import Pipeline, PipelineCtx
+from cfpi.launchers.pipeline_pieces import (
+ create_algorithm,
+ create_dataset_next_actions,
+ create_eval_env,
+ create_eval_path_collector,
+ create_eval_policy,
+ create_policy,
+ create_replay_buffer,
+ create_trainer,
+ load_demos,
+ offline_init,
+ optionally_normalize_dataset,
+ train,
+)
+from cfpi.pytorch.torch_rl_algorithm import TorchTrainer
+from cfpi import conf
+
# Named container for the losses computed by BCTrainer.compute_loss.
BCLosses = namedtuple(
    "BCLosses",
    "policy_loss",
)
+
+
class BCTrainer(TorchTrainer):
    """Behavior cloning: maximize the policy's log-likelihood of dataset
    actions, with optional held-out validation log-prob tracking."""

    def __init__(self, policy, policy_lr=0.001, optimizer_class=optim.Adam, **kwargs):
        super().__init__()
        self.policy = policy

        self.policy_optimizer = optimizer_class(
            self.policy.parameters(),
            lr=policy_lr,
        )

        self._n_train_steps_total = 0
        self._need_to_update_eval_statistics = True

        # Optional validation data; populated via set_val_data.
        self.val_obs = None
        self.val_actions = None
        self.gt_val_log_prob = None

    def set_val_data(self, val_obs, val_actions, gt_val_log_prob):
        """Register validation (obs, actions) and the ground-truth
        policy's mean log-prob on them as a comparison baseline."""
        self.val_obs = val_obs
        self.val_actions = val_actions
        self.gt_val_log_prob = np.mean(ptu.to_np(gt_val_log_prob))

    def compute_loss(
        self,
        batch,
        skip_statistics=False,
    ) -> Tuple[BCLosses, OrderedDict]:
        """Negative log-likelihood loss; returns (BCLosses, statistics)."""
        obs = batch["observations"]
        actions = batch["actions"]

        """
        Policy Loss
        """
        dist = self.policy(obs)
        # log_pi of a fresh sample is only used for diagnostics below.
        _, log_pi = dist.sample_and_logprob()
        log_prob = self.policy.log_prob(obs, actions).unsqueeze(-1)
        policy_loss = -log_prob.mean()

        """
        Save some statistics for eval
        """
        eval_statistics = OrderedDict()
        if not skip_statistics:
            eval_statistics["Policy Loss"] = np.mean(ptu.to_np(policy_loss))

            eval_statistics.update(
                create_stats_ordered_dict(
                    "Log Pis",
                    ptu.to_np(log_pi),
                )
            )
            policy_statistics = add_prefix(dist.get_diagnostics(), "policy/")
            eval_statistics.update(policy_statistics)

            # Compare against the ground-truth policy on held-out data.
            if self.gt_val_log_prob is not None:
                with torch.no_grad():
                    pred_val_log_prob = self.policy.log_prob(
                        self.val_obs, self.val_actions
                    )
                eval_statistics["Pred Val Log Prob"] = np.mean(
                    ptu.to_np(pred_val_log_prob)
                )
                eval_statistics["GT Val Log Prob"] = self.gt_val_log_prob

                # kl_div = 0.5 * (
                #     (pred_val_Sigma / self.val_Sigma_gt - 1).sum(-1)
                #     + (self.val_Sigma_gt / pred_val_Sigma).prod(-1).log()
                #     + (self.val_Sigma_gt * (pred_val_mu - self.val_mu_gt) ** 2).sum(-1)
                # )

                # eval_statistics["Val KL Divergence"] = np.mean(ptu.to_np(policy_loss))

        loss = BCLosses(
            policy_loss=policy_loss,
        )

        return loss, eval_statistics

    def train_from_torch(self, batch):
        """One optimizer step on the BC loss."""
        losses, stats = self.compute_loss(
            batch,
            skip_statistics=not self._need_to_update_eval_statistics,
        )
        """
        Update networks
        """
        self.policy_optimizer.zero_grad()
        losses.policy_loss.backward()
        self.policy_optimizer.step()

        self._n_train_steps_total += 1

        if self._need_to_update_eval_statistics:
            self.eval_statistics = stats
            # Compute statistics using only one batch per epoch
            self._need_to_update_eval_statistics = False

    def get_diagnostics(self):
        """Merge base diagnostics with the per-epoch eval statistics."""
        stats = super().get_diagnostics()
        stats.update(self.eval_statistics)
        return stats

    def end_epoch(self, epoch):
        # Re-arm statistics collection so the next epoch logs one batch.
        self._need_to_update_eval_statistics = True

    @property
    def networks(self):
        return [
            self.policy,
        ]

    @property
    def optimizers(self):
        return [
            self.policy_optimizer,
        ]

    def get_snapshot(self):
        return dict(
            policy=self.policy,
        )
+
+
def bc_sanity(ctx):
    # BC runs are D4RL-only and must not normalize the environment.
    assert ctx.variant["d4rl"]
    assert ctx.variant["normalize_env"] is False
+
+
# NOTE: intentionally shadows the create_eval_policy imported from
# pipeline_pieces — for BC we evaluate the trained policy itself.
def create_eval_policy(ctx: PipelineCtx):
    ctx.eval_policy = ctx.policy
+
+
# Offline BC pipeline: sanity check, env/dataset setup, policy + trainer
# construction, then training. Step order matters — each step feeds the
# context consumed by the next.
BCPipeline = Pipeline(
    "offline_bc_pipeline",
    [
        bc_sanity,
        offline_init,
        create_eval_env,
        create_dataset_next_actions,
        optionally_normalize_dataset,
        create_policy,
        create_trainer,
        create_eval_policy,
        create_eval_path_collector,
        create_replay_buffer,
        create_algorithm,
        load_demos,
        train,
    ],
)
+
+
def load_demos_and_val_data(ctx: PipelineCtx):
    """Fill the replay buffer and prepare a held-out validation split.

    The validation obs/actions are un-normalized back to raw env scale so
    the pretrained reference policy (trained on raw data) can score them;
    its log-prob becomes the GT baseline for the BC trainer.
    """
    ctx.replay_buffer, val_obs, val_actions = load_hdf5_next_actions_and_val_data(
        ctx.dataset,
        ctx.replay_buffer,
        ctx.variant["train_ratio"],
        ctx.variant["fold_idx"],
    )

    # Action un-normalization: actions were scaled to [-1, 1] via the
    # action-space range/center.
    action_space = ctx.eval_env._wrapped_env.action_space
    rg = ptu.from_numpy(action_space.high - action_space.low) / 2
    center = ptu.from_numpy(action_space.high + action_space.low) / 2

    obs_mean = ptu.from_numpy(ctx.eval_env._obs_mean)
    obs_std = ptu.from_numpy(ctx.eval_env._obs_std)

    val_obs_unnormalized = val_obs * obs_std[None] + obs_mean[None]
    val_actions_unnormalized = val_actions * rg[None] + center[None]

    params = torch.load(
        f'{conf.CHECKPOINT_PATH}/bc/{ctx.variant["env_id"]}/{ctx.variant["seed"]}/params.pt',
        map_location="cpu",
    )

    with torch.no_grad():
        policy = params["trainer/policy"]
        policy.to(ptu.device)
        gt_val_log_prob = policy.log_prob(
            val_obs_unnormalized, val_actions_unnormalized
        )

    ctx.trainer.set_val_data(val_obs, val_actions, gt_val_log_prob)
+
+
# BC with held-out validation: same pipeline, but load_demos also prepares
# validation data and a ground-truth log-prob baseline.
BCWithValPipeline = Pipeline.from_(BCPipeline, "BCWithValPipeline")
BCWithValPipeline.replace("load_demos", load_demos_and_val_data)
\ No newline at end of file
diff --git a/cfpi/pytorch/algorithms/q_training/ensemble_iql.py b/cfpi/pytorch/algorithms/q_training/ensemble_iql.py
new file mode 100644
index 0000000..6318c9f
--- /dev/null
+++ b/cfpi/pytorch/algorithms/q_training/ensemble_iql.py
@@ -0,0 +1,373 @@
+"""Torch implementation of Ensemble Implicit Q-Learning (IQL) based off
+https://github.com/ikostrikov/implicit_q_learning
+"""
+
+from collections import OrderedDict
+
+import eztils.torch as ptu
+import numpy as np
+import torch
+import torch.optim as optim
+from eztils import create_stats_ordered_dict
+from torch import nn as nn
+
+from cfpi.core.logging import add_prefix
+from cfpi.launchers.pipeline import Pipeline, PipelineCtx
+from cfpi.launchers.pipeline_pieces import (
+ create_algorithm,
+ create_dataset_next_actions,
+ create_eval_env,
+ create_eval_path_collector,
+ create_eval_policy,
+ create_policy,
+ create_replay_buffer,
+ create_trainer,
+ load_demos,
+ offline_init,
+ optionally_normalize_dataset,
+ train,
+)
+from cfpi.policies.gaussian_policy import GaussianPolicy
+from cfpi.pytorch.networks import LinearTransform
+from cfpi.pytorch.networks.mlp import ParallelMlp
+from cfpi.pytorch.torch_rl_algorithm import TorchTrainer
+
+
+class EnsembleIQLTrainer(TorchTrainer):
    def __init__(
        self,
        policy,
        qfs,
        target_qfs,
        vfs,
        quantile=0.5,
        discount=0.99,
        reward_scale=1.0,
        policy_lr=1e-3,
        qf_lr=1e-3,
        policy_weight_decay=0,
        q_weight_decay=0,
        optimizer_class=optim.Adam,
        policy_update_period=1,
        q_update_period=1,
        reward_transform_class=None,
        reward_transform_kwargs=None,
        terminal_transform_class=None,
        terminal_transform_kwargs=None,
        clip_score=None,
        soft_target_tau=1e-2,
        target_update_period=1,
        beta=1.0,
    ):
        """Ensemble IQL trainer.

        :param qfs: ParallelMlp Q ensemble (heads paired for double-Q).
        :param vfs: ParallelMlp V ensemble (one head per Q pair).
        :param quantile: expectile used in the V loss (pinned to 0.9).
        :param beta: inverse temperature for advantage-weighted
            regression (pinned to 0.1).
        :param clip_score: cap on the AWR exponential weight (pinned 100).
        """
        super().__init__()

        # The asserts pin the configuration to the reference antmaze
        # finetune settings from:
        # https://github.com/rail-berkeley/rlkit/blob/master/examples/iql/antmaze_finetune.py
        assert reward_scale == 1.0
        assert policy_weight_decay == 0
        assert q_weight_decay == 0
        # NOTE: We set b = 0, as we already do the minus 1 in create_dataset_next_actions
        assert reward_transform_kwargs == dict(m=1, b=0)
        assert terminal_transform_kwargs is None
        assert quantile == 0.9
        assert clip_score == 100
        assert beta == 0.1

        self.policy = policy
        self.qfs = qfs
        self.target_qfs = target_qfs
        self.soft_target_tau = soft_target_tau
        self.target_update_period = target_update_period
        self.vfs = vfs

        self.optimizers = {}

        self.policy_optimizer = optimizer_class(
            self.policy.parameters(),
            weight_decay=policy_weight_decay,
            lr=policy_lr,
        )
        self.optimizers[self.policy] = self.policy_optimizer
        self.qfs_optimizer = optimizer_class(
            self.qfs.parameters(),
            lr=qf_lr,
        )
        self.vfs_optimizer = optimizer_class(
            self.vfs.parameters(),
            weight_decay=q_weight_decay,
            lr=qf_lr,
        )

        self.discount = discount
        self.reward_scale = reward_scale
        self.eval_statistics = OrderedDict()
        self._n_train_steps_total = 0
        self._need_to_update_eval_statistics = True

        self.q_update_period = q_update_period
        self.policy_update_period = policy_update_period

        # Affine reward/terminal transforms; identity (m=1, b=0) defaults.
        self.reward_transform_class = reward_transform_class or LinearTransform
        self.reward_transform_kwargs = reward_transform_kwargs or dict(m=1, b=0)
        self.terminal_transform_class = terminal_transform_class or LinearTransform
        self.terminal_transform_kwargs = terminal_transform_kwargs or dict(m=1, b=0)
        self.reward_transform = self.reward_transform_class(
            **self.reward_transform_kwargs
        )
        self.terminal_transform = self.terminal_transform_class(
            **self.terminal_transform_kwargs
        )

        self.clip_score = clip_score
        self.beta = beta
        self.quantile = quantile
+
+    def train_from_torch(
+        self,
+        batch,
+        # train=True,
+        # pretrain=False,
+    ):
+        """
+        Run one ensemble-IQL update on a batch of offline transitions.
+
+        Each sub-update is gated by its own update period:
+        - Q ensemble: MSE regression toward r + discount * (1 - done) * V(s').
+        - V ensemble: expectile regression (controlled by self.quantile)
+          toward the min over paired target Q heads.
+        - Policy: advantage-weighted log-likelihood, with weights
+          exp(adv / beta) optionally clipped at self.clip_score.
+        Also soft-updates the target Q ensemble and, once per epoch, records
+        eval statistics.
+        """
+        rewards = batch["rewards"]
+        terminals = batch["terminals"]
+        obs = batch["observations"]
+        actions = batch["actions"]
+        next_obs = batch["next_observations"]
+        batch_size = rewards.shape[0]
+        # Optional user-supplied transforms (e.g. LinearTransform) on targets.
+        if self.reward_transform:
+            rewards = self.reward_transform(rewards)
+
+        if self.terminal_transform:
+            terminals = self.terminal_transform(terminals)
+        """
+        Policy and Alpha Loss
+        """
+        dist = self.policy(obs)
+
+        """
+        QF Loss
+        """
+        # [batch_size, 1, num_heads]
+        q_preds = self.qfs(obs, actions)
+        # [batch_size, 1, num_heads // 2]
+        # Duplicate each V head so it lines up with its pair of Q heads.
+        target_vf_preds = self.vfs(next_obs).detach().repeat_interleave(2, dim=-1)
+        # Sanity check: the interleaved copies must be identical.
+        assert torch.equal(target_vf_preds[..., 0], target_vf_preds[..., 1])
+
+        with torch.no_grad():
+            terminals = terminals.unsqueeze(-1).expand(-1, -1, self.qfs.num_heads)
+            rewards = rewards.unsqueeze(-1).expand(-1, -1, self.qfs.num_heads)
+            q_target = (
+                self.reward_scale * rewards
+                + (1.0 - terminals) * self.discount * target_vf_preds
+            )
+
+        # Mean over the batch dim; summed across ensemble heads.
+        qfs_loss = torch.sum(
+            torch.mean(
+                (q_preds - q_target.detach()) ** 2,
+                dim=0,
+            )
+        )
+
+        """
+        VF Loss
+        """
+        # [batch_size, 1, num_heads]
+        target_q_preds = self.target_qfs(obs, actions)
+        # [batch_size, 1, num_heads // 2, 2]
+        target_q_preds_reshape = target_q_preds.reshape(batch_size, 1, -1, 2)
+        assert torch.equal(target_q_preds_reshape[:, :, 0], target_q_preds[..., :2])
+        # [batch_size, 1, num_heads // 2]
+        # Pessimistic target: min over each pair of target Q heads.
+        q_preds = target_q_preds_reshape.min(dim=-1)[0].detach()
+        # # In-distribution minimization trick from the RedQ
+        # sample_idxs = np.random.choice(10, 2, replace=False)
+        # q_pred = torch.min(target_q_preds[:, :, sample_idxs], dim=-1, keepdim=True)[
+        #     0
+        # ].detach()
+
+        # [batch_size, 1, num_heads // 2]
+        vf_preds = self.vfs(obs)
+        # [batch_size, 1, num_heads // 2]
+        vf_err = vf_preds - q_preds
+        # [batch_size, 1, num_heads // 2]
+        vf_sign = (vf_err > 0).float()
+        # [batch_size, 1, num_heads // 2]
+        # Expectile weights: overestimates weighted (1 - quantile), underestimates quantile.
+        vf_weight = (1 - vf_sign) * self.quantile + vf_sign * (1 - self.quantile)
+        # Take the mean over the batch_size dim; Sum over the ensemble dim
+        vfs_loss = (vf_weight * (vf_err**2)).mean(dim=0).sum()
+
+        """
+        Policy Loss
+        """
+        policy_logpp = dist.log_prob(actions)
+        # [batch_size, 1]
+        adv = (q_preds - vf_preds).mean(dim=-1)
+        exp_adv = torch.exp(adv / self.beta)
+        if self.clip_score is not None:
+            exp_adv = torch.clamp(exp_adv, max=self.clip_score)
+
+        # Drop the singleton dim; gradients must not flow through the weights.
+        weights = exp_adv[:, 0].detach()
+        policy_loss = (-policy_logpp * weights).mean()
+
+        """
+        Update networks
+        """
+        if self._n_train_steps_total % self.q_update_period == 0:
+            self.qfs_optimizer.zero_grad()
+            qfs_loss.backward()
+            self.qfs_optimizer.step()
+
+            self.vfs_optimizer.zero_grad()
+            vfs_loss.backward()
+            self.vfs_optimizer.step()
+
+        if self._n_train_steps_total % self.policy_update_period == 0:
+            self.policy_optimizer.zero_grad()
+            policy_loss.backward()
+            self.policy_optimizer.step()
+
+        """
+        Soft Updates
+        """
+        if self._n_train_steps_total % self.target_update_period == 0:
+            ptu.soft_update_from_to(self.qfs, self.target_qfs, self.soft_target_tau)
+
+        """
+        Save some statistics for eval
+        """
+        if self._need_to_update_eval_statistics:
+            self._need_to_update_eval_statistics = False
+            """
+            Eval should set this to None.
+            This way, these statistics are only computed for one batch.
+            """
+            self.eval_statistics["QF Loss"] = ptu.to_np(qfs_loss)
+            self.eval_statistics["Policy Loss"] = np.mean(ptu.to_np(policy_loss))
+            self.eval_statistics.update(
+                create_stats_ordered_dict(
+                    "Mean Q Predictions",
+                    ptu.to_np(q_preds.mean(dim=-1)),
+                )
+            )
+            self.eval_statistics.update(
+                create_stats_ordered_dict(
+                    "Mean Target Q Predictions",
+                    ptu.to_np(target_vf_preds.mean(dim=-1)),
+                )
+            )
+            self.eval_statistics.update(
+                create_stats_ordered_dict(
+                    "rewards",
+                    ptu.to_np(rewards[..., -1]),
+                )
+            )
+            self.eval_statistics.update(
+                create_stats_ordered_dict(
+                    "terminals",
+                    ptu.to_np(terminals[..., -1]),
+                )
+            )
+            policy_statistics = add_prefix(dist.get_diagnostics(), "policy/")
+            self.eval_statistics.update(policy_statistics)
+            self.eval_statistics.update(
+                create_stats_ordered_dict(
+                    "Advantage Weights",
+                    ptu.to_np(weights),
+                )
+            )
+            self.eval_statistics.update(
+                create_stats_ordered_dict(
+                    "Mean Advantage Score",
+                    ptu.to_np(adv),
+                )
+            )
+
+            self.eval_statistics.update(
+                create_stats_ordered_dict(
+                    "Mean V Predictions",
+                    ptu.to_np(vf_preds.mean(dim=-1)),
+                )
+            )
+            self.eval_statistics["VF Loss"] = np.mean(ptu.to_np(vfs_loss))
+
+        self._n_train_steps_total += 1
+
+ def get_diagnostics(self):
+ stats = super().get_diagnostics()
+ stats.update(self.eval_statistics)
+ return stats
+
+    def end_epoch(self, epoch):
+        # Re-arm eval-statistics collection so the next train step records stats.
+        self._need_to_update_eval_statistics = True
+
+ @property
+ def networks(self):
+ nets = [
+ self.policy,
+ self.qfs,
+ self.target_qfs,
+ self.vfs,
+ ]
+ return nets
+
+ def get_snapshot(self):
+ return dict(
+ policy=self.policy,
+ qfs=self.qfs,
+ target_qfs=self.target_qfs,
+ vfs=self.vfs,
+ )
+
+
+"""
+Pipeline code
+"""
+
+
+def ensemble_iql_sanity(ctx):
+    """Fail fast if the variant is not configured for ensemble IQL."""
+    assert ctx.variant["d4rl"]
+    assert ctx.variant["normalize_env"] is False
+    assert ctx.variant["qf_class"] == ParallelMlp
+    assert ctx.variant["policy_class"] == GaussianPolicy
+    # Q heads come in pairs (min over each pair), so there must be exactly
+    # twice as many Q heads as V heads.
+    assert (
+        ctx.variant["vf_kwargs"]["num_heads"] * 2
+        == ctx.variant["qf_kwargs"]["num_heads"]
+    )
+
+
+def create_qv(ctx: PipelineCtx):
+ obs_dim = ctx.eval_env.observation_space.low.size
+ action_dim = ctx.eval_env.action_space.low.size
+
+ qfs = ctx.variant["qf_class"](
+ input_size=obs_dim + action_dim, output_size=1, **ctx.variant["qf_kwargs"]
+ )
+ vfs = ctx.variant["vf_class"](
+ input_size=obs_dim, output_size=1, **ctx.variant["vf_kwargs"]
+ )
+ target_qfs = ctx.variant["qf_class"](
+ input_size=obs_dim + action_dim, output_size=1, **ctx.variant["qf_kwargs"]
+ )
+
+ ctx.qfs = qfs
+ ctx.vfs = vfs
+ ctx.target_qfs = target_qfs
+
+
+# Offline ensemble-IQL training pipeline; stages execute in the listed order.
+EnsembleIQLPipeline = Pipeline(
+    "EnsembleIQLPipeline",
+    [
+        ensemble_iql_sanity,
+        offline_init,
+        create_eval_env,
+        create_dataset_next_actions,
+        optionally_normalize_dataset,
+        create_qv,
+        create_policy,
+        create_trainer,
+        create_eval_policy,
+        create_eval_path_collector,
+        create_replay_buffer,
+        create_algorithm,
+        load_demos,
+        train,
+    ],
+)
diff --git a/cfpi/pytorch/algorithms/q_training/sarsa_iqn.py b/cfpi/pytorch/algorithms/q_training/sarsa_iqn.py
new file mode 100644
index 0000000..9faa163
--- /dev/null
+++ b/cfpi/pytorch/algorithms/q_training/sarsa_iqn.py
@@ -0,0 +1,325 @@
+"""
+Learn an ensemble of Q-functions with SARSA, and the behavior policy (pi_beta) with NLL
+"""
+from typing import Tuple
+
+from collections import OrderedDict, namedtuple
+
+import eztils.torch as ptu
+import numpy as np
+import torch
+import torch.optim as optim
+from eztils import create_stats_ordered_dict
+from torch import nn as nn
+from torch.nn.functional import smooth_l1_loss
+
+from cfpi.launchers.pipeline import Pipeline, PipelineCtx, Pipelines
+from cfpi.launchers.pipeline_pieces import create_dataset_next_actions
+from cfpi.pytorch.networks.mlp import QuantileMlp
+from cfpi.pytorch.torch_rl_algorithm import TorchTrainer
+
+# Container for the losses produced by SarsaIQNTrainer.compute_loss.
+SarsaLosses = namedtuple(
+    "SarsaLosses",
+    "qfs_loss",
+)
+
+
+def quantile_regression_loss(pred, target, tau, weight):
+    """
+    Asymmetric (quantile-weighted) Huber regression loss for IQN training.
+
+    pred/target: (N, T) quantile values; tau: (N, T) quantile fractions of
+    `pred`; weight: (N, T) per-target-quantile weights (e.g. presum_tau).
+    Returns a scalar loss.
+    """
+    pred = pred.unsqueeze(-1)
+    target = target.detach().unsqueeze(-2)
+    tau = tau.detach().unsqueeze(-1)
+
+    weight = weight.detach().unsqueeze(-2)
+    expanded_pred, expanded_target = torch.broadcast_tensors(pred, target)
+    L = smooth_l1_loss(expanded_pred, expanded_target, reduction="none")  # (N, T, T)
+
+    # |tau - 1{pred > target}| tilts the Huber loss toward the tau-quantile.
+    sign = torch.sign(expanded_pred - expanded_target) / 2.0 + 0.5
+    rho = torch.abs(tau - sign) * L * weight
+    return rho.sum(dim=-1).mean()
+
+
+def get_tau(batch_size, num_quantiles=8):
+ with torch.no_grad():
+ presum_tau = ptu.rand(batch_size, num_quantiles) + 0.1
+ presum_tau /= presum_tau.sum(dim=-1, keepdims=True)
+
+ tau = torch.cumsum(
+ presum_tau, dim=1
+ ) # (N, T), note that they are tau1...tauN in the paper
+ tau_hat = ptu.zeros_like(tau)
+ tau_hat[:, 0:1] = tau[:, 0:1] / 2.0
+ tau_hat[:, 1:] = (tau[:, 1:] + tau[:, :-1]) / 2.0
+ return tau_hat, presum_tau
+
+
+def get_target_quantile(quantiles, tau_hat, target_percentile):
+    """
+    For each row, pick the quantile value whose fraction in tau_hat is
+    closest to target_percentile.
+
+    quantiles/tau_hat: (N, T); returns the (N,) selected quantile values.
+    """
+    x_idx = torch.arange(len(quantiles))
+    # Index of the closest sampled fraction per row.
+    y_idx = torch.min((tau_hat - target_percentile).abs(), dim=1)[1]
+    target_percentiles = quantiles[x_idx, y_idx]
+    return target_percentiles
+
+
+class SarsaIQNTrainer(TorchTrainer):
+    """
+    SARSA-style trainer for two quantile (IQN) Q-networks.
+
+    Each z(s, a) regresses toward r + discount * (1 - done) * z(s', a') using
+    the dataset's next action a' (no policy improvement / max operator).
+    """
+
+    def __init__(
+        self,
+        eval_env,
+        qfs,
+        target_qfs,
+        discount=0.99,
+        reward_scale=1.0,
+        qf_lr=1e-3,
+        optimizer_class=optim.Adam,
+        soft_target_tau=1e-2,
+        target_update_period=1,
+        num_quantiles=8,
+        plotter=None,
+        render_eval_paths=False,
+        **kwargs,
+    ):
+        super().__init__()
+        self.env = eval_env
+        # Exactly two Q-networks (double-Q style).
+        assert len(qfs) == 2
+        self.qf1 = qfs[0]
+        self.qf2 = qfs[1]
+        self.target_qf1 = target_qfs[0]
+        self.target_qf2 = target_qfs[1]
+        self.soft_target_tau = soft_target_tau
+        self.target_update_period = target_update_period
+
+        self.plotter = plotter
+        self.render_eval_paths = render_eval_paths
+
+        self.qf_criterion = quantile_regression_loss
+
+        self.qf1_optimizer = optimizer_class(
+            self.qf1.parameters(),
+            lr=qf_lr,
+        )
+        self.qf2_optimizer = optimizer_class(
+            self.qf2.parameters(),
+            lr=qf_lr,
+        )
+
+        self.discount = discount
+        self.reward_scale = reward_scale
+        self._n_train_steps_total = 0
+        self._need_to_update_eval_statistics = True
+        self.eval_statistics = OrderedDict()
+
+        self.num_quantiles = num_quantiles
+
+    def train_from_torch(self, batch):
+        """One gradient step on both Q-networks from a batch of transitions."""
+        losses, stats = self.compute_loss(
+            batch,
+            skip_statistics=not self._need_to_update_eval_statistics,
+        )
+        """
+        Update networks
+        """
+        self.qf1_optimizer.zero_grad()
+        self.qf2_optimizer.zero_grad()
+
+        # Single backward pass: qfs_loss is the sum of both networks' losses.
+        losses.qfs_loss.backward()
+
+        self.qf1_optimizer.step()
+        self.qf2_optimizer.step()
+
+        self._n_train_steps_total += 1
+
+        self.try_update_target_networks()
+        if self._need_to_update_eval_statistics:
+            self.eval_statistics = stats
+            # Compute statistics using only one batch per epoch
+            self._need_to_update_eval_statistics = False
+
+    def try_update_target_networks(self):
+        # Polyak-update targets every `target_update_period` steps.
+        if self._n_train_steps_total % self.target_update_period == 0:
+            self.update_target_networks()
+
+    def update_target_networks(self):
+        ptu.soft_update_from_to(self.qf1, self.target_qf1, self.soft_target_tau)
+        ptu.soft_update_from_to(self.qf2, self.target_qf2, self.soft_target_tau)
+
+    def compute_loss(
+        self,
+        batch,
+        skip_statistics=False,
+    ) -> Tuple[SarsaLosses, OrderedDict]:
+        """
+        Compute the summed quantile-regression loss for both Q-networks.
+
+        Returns (SarsaLosses, eval statistics); statistics are empty when
+        skip_statistics is True.
+        """
+        rewards = batch["rewards"]
+        terminals = batch["terminals"]
+        obs = batch["observations"]
+        actions = batch["actions"]
+        next_obs = batch["next_observations"]
+        next_actions = batch["next_actions"]
+        """
+        QF Loss
+        """
+
+        assert isinstance(self.qf1, QuantileMlp)
+        assert isinstance(self.qf2, QuantileMlp)
+
+        batch_size = obs.shape[0]
+        # Draw one set of fractions and split it between current and next step.
+        tau_hat_samples, presum_tau_samples = get_tau(
+            batch_size * 2, self.num_quantiles
+        )
+        tau_hat, next_tau_hat = tau_hat_samples.reshape(2, batch_size, -1)
+        presum_tau, next_presum_tau = presum_tau_samples.reshape(2, batch_size, -1)
+
+        z1_pred = self.qf1(obs, actions, tau_hat)
+        z2_pred = self.qf2(obs, actions, tau_hat)
+
+        with torch.no_grad():
+            # SARSA target: bootstrap on the dataset's next action.
+            target_z1_value = self.target_qf1(next_obs, next_actions, next_tau_hat)
+            target_z2_value = self.target_qf2(next_obs, next_actions, next_tau_hat)
+
+            z1_target = (
+                self.reward_scale * rewards
+                + (1.0 - terminals) * self.discount * target_z1_value
+            )
+            z2_target = (
+                self.reward_scale * rewards
+                + (1.0 - terminals) * self.discount * target_z2_value
+            )
+
+        qf1_loss = self.qf_criterion(z1_pred, z1_target, tau_hat, next_presum_tau)
+        qf2_loss = self.qf_criterion(z2_pred, z2_target, tau_hat, next_presum_tau)
+        qfs_loss = qf1_loss + qf2_loss
+
+        """
+        Save some statistics for eval
+        """
+        eval_statistics = OrderedDict()
+        if not skip_statistics:
+            with torch.no_grad():
+                eval_statistics["QF1 Loss"] = ptu.to_np(qf1_loss)
+                eval_statistics["QF2 Loss"] = ptu.to_np(qf2_loss)
+
+                # Scalar Q = expectation of quantile values under presum_tau.
+                q1_pred = (z1_pred * presum_tau).sum(-1, keepdim=True)
+                q2_pred = (z2_pred * presum_tau).sum(-1, keepdim=True)
+                q_preds = torch.cat([q1_pred, q2_pred], dim=-1)
+
+                z_pred = (z1_pred + z2_pred) / 2
+                quantile_40 = get_target_quantile(z_pred, tau_hat, 0.4)
+                quantile_60 = get_target_quantile(z_pred, tau_hat, 0.6)
+                quantile_80 = get_target_quantile(z_pred, tau_hat, 0.8)
+
+                target_q1_value = (target_z1_value * presum_tau).sum(-1, keepdim=True)
+                target_q2_value = (target_z2_value * presum_tau).sum(-1, keepdim=True)
+                target_q_values = torch.cat([target_q1_value, target_q2_value], dim=-1)
+
+                eval_statistics.update(
+                    create_stats_ordered_dict(
+                        "Mean Q Predictions",
+                        ptu.to_np(q_preds.mean(-1)),
+                    )
+                )
+                eval_statistics.update(
+                    create_stats_ordered_dict(
+                        "Mean Target Q Predictions",
+                        ptu.to_np(target_q_values.mean(dim=-1)),
+                    )
+                )
+                eval_statistics.update(
+                    create_stats_ordered_dict(
+                        "Q std",
+                        np.mean(
+                            ptu.to_np(torch.std(q_preds, dim=-1)),
+                        ),
+                    ),
+                )
+                eval_statistics.update(
+                    create_stats_ordered_dict(
+                        "40 Quantile",
+                        ptu.to_np(quantile_40),
+                    )
+                )
+                eval_statistics.update(
+                    create_stats_ordered_dict(
+                        "60 Quantile",
+                        ptu.to_np(quantile_60),
+                    )
+                )
+                eval_statistics.update(
+                    create_stats_ordered_dict(
+                        "80 Quantile",
+                        ptu.to_np(quantile_80),
+                    )
+                )
+
+        loss = SarsaLosses(
+            qfs_loss=qfs_loss,
+        )
+        return loss, eval_statistics
+
+    def get_diagnostics(self):
+        stats = super().get_diagnostics()
+        stats.update(self.eval_statistics)
+        return stats
+
+    def end_epoch(self, epoch):
+        # Re-arm statistics collection for the next epoch.
+        self._need_to_update_eval_statistics = True
+
+    @property
+    def networks(self):
+        """Modules moved to device / toggled between train and eval mode."""
+        return [
+            self.qf1,
+            self.qf2,
+            self.target_qf1,
+            self.target_qf2,
+        ]
+
+    @property
+    def optimizers(self):
+        return [
+            self.qf1_optimizer,
+            self.qf2_optimizer,
+        ]
+
+    def get_snapshot(self):
+        return dict(
+            qf1=self.qf1,
+            qf2=self.qf2,
+            target_qf1=self.target_qf1,
+            target_qf2=self.target_qf2,
+        )
+
+
+"""
+Pipeline code
+"""
+
+
+def create_q_iqn(ctx: PipelineCtx):
+ obs_dim = ctx.eval_env.observation_space.low.size
+ action_dim = ctx.eval_env.action_space.low.size
+
+ qf1 = ctx.variant["qf_class"](
+ input_size=obs_dim + action_dim,
+ **ctx.variant["qf_kwargs"],
+ )
+ qf2 = ctx.variant["qf_class"](
+ input_size=obs_dim + action_dim,
+ **ctx.variant["qf_kwargs"],
+ )
+
+ target_qf1 = ctx.variant["qf_class"](
+ input_size=obs_dim + action_dim,
+ **ctx.variant["qf_kwargs"],
+ )
+ target_qf2 = ctx.variant["qf_class"](
+ input_size=obs_dim + action_dim,
+ **ctx.variant["qf_kwargs"],
+ )
+
+ ctx.qfs = [qf1, qf2]
+ ctx.target_qfs = [target_qf1, target_qf2]
+
+
+SarsaIQNPipeline = Pipeline.from_(
+ Pipelines.offline_zerostep_pac_pipeline, "SarsaIQNPipeline"
+)
+SarsaIQNPipeline.delete('pac_sanity_check')
+SarsaIQNPipeline.delete('load_checkpoint_policy')
+SarsaIQNPipeline.delete("create_eval_policy")
+SarsaIQNPipeline.replace("load_checkpoint_iqn_q", create_q_iqn)
+SarsaIQNPipeline.replace("create_dataset", create_dataset_next_actions)
diff --git a/cfpi/pytorch/networks/__init__.py b/cfpi/pytorch/networks/__init__.py
new file mode 100644
index 0000000..953b314
--- /dev/null
+++ b/cfpi/pytorch/networks/__init__.py
@@ -0,0 +1,26 @@
+"""
+General networks for pytorch.
+
+Algorithm-specific networks should go else-where.
+"""
+import torch
+
+from .mlp import ConcatMlp, ConcatMultiHeadedMlp, Mlp, ParallelMlp
+
+
+class LinearTransform(torch.nn.Module):
+ def __init__(self, m, b):
+ super().__init__()
+ self.m = m
+ self.b = b
+
+ def __call__(self, t):
+ return self.m * t + self.b
+
+
+__all__ = [
+ "Mlp",
+ "ConcatMlp",
+ "ConcatMultiHeadedMlp",
+ "ParallelMlp",
+]
diff --git a/cfpi/pytorch/networks/distribution_generator.py b/cfpi/pytorch/networks/distribution_generator.py
new file mode 100644
index 0000000..e1fdcbf
--- /dev/null
+++ b/cfpi/pytorch/networks/distribution_generator.py
@@ -0,0 +1,39 @@
+from eztils.torch import ( # GaussianMixture as GaussianMixtureDistribution,
+ Distribution,
+ MultivariateDiagonalNormal,
+)
+from torch import nn
+
+
+class DistributionGenerator(nn.Sequential, nn.Module):
+    # NOTE(review): nn.Sequential already subclasses nn.Module, so the second
+    # base is redundant (but harmless).
+    def forward(self, *input, **kwarg) -> Distribution:
+        """
+        Pipe the input through each submodule in order, unpacking tuple
+        outputs as positional args for the next stage; the final module is
+        expected to produce a Distribution.
+        """
+        for module in self._modules.values():
+            if isinstance(input, tuple):
+                input = module(*input)
+            else:
+                input = module(input)
+        return input
+
+
+class Gaussian(DistributionGenerator):
+    """Wrap a module so its output parameterizes a diagonal Gaussian."""
+
+    def __init__(self, module, std=None, reinterpreted_batch_ndims=1):
+        super().__init__(module)
+        # Fixed std if given; otherwise the module must output (mean, log_std).
+        self.std = std
+        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
+
+    def forward(self, *input):
+        # NOTE(review): truthiness check means std=0 would fall through to the
+        # learned-log-std branch — presumably std is always None or > 0; verify.
+        if self.std:
+            mean = super().forward(*input)
+            std = self.std
+        else:
+            mean, log_std = super().forward(*input)
+            std = log_std.exp()
+        return MultivariateDiagonalNormal(
+            mean, std, reinterpreted_batch_ndims=self.reinterpreted_batch_ndims
+        )
+
+
+# class GaussianMixture(ModuleToDistributionGenerator):
+# def forward(self, *input):
+# mixture_means, mixture_stds, weights = super().forward(*input)
+# return GaussianMixtureDistribution(mixture_means, mixture_stds, weights)
diff --git a/cfpi/pytorch/networks/mlp.py b/cfpi/pytorch/networks/mlp.py
new file mode 100644
index 0000000..0e681ae
--- /dev/null
+++ b/cfpi/pytorch/networks/mlp.py
@@ -0,0 +1,442 @@
+from collections import OrderedDict
+
+import eztils.torch as ptu
+import numpy as np
+import torch
+from eztils.torch import LayerNorm, ParallelLayerNorm, activation_from_string
+from torch import nn
+from torch.nn import functional as F
+
+
+def fanin_init(tensor):
+ size = tensor.size()
+ if len(size) == 2:
+ fan_in = size[0]
+ elif len(size) > 2:
+ fan_in = np.prod(size[1:])
+ else:
+ raise Exception("Shape must be have dimension at least 2.")
+ bound = 1.0 / np.sqrt(fan_in)
+ return tensor.data.uniform_(-bound, bound)
+
+
+def ident(x):
+    # Identity function: the default (no-op) output activation for Mlp.
+    return x
+
+
+class Mlp(nn.Module):
+ def __init__(
+ self,
+ hidden_sizes,
+ output_size,
+ input_size,
+ init_w=3e-3,
+ hidden_activation=F.relu,
+ output_activation=ident,
+ hidden_init=fanin_init,
+ b_init_value=0.0,
+ layer_norm=False,
+ dropout=False,
+ dropout_kwargs=None,
+ layer_norm_kwargs=None,
+ ):
+ super().__init__()
+
+ if layer_norm_kwargs is None:
+ layer_norm_kwargs = dict()
+
+ self.input_size = input_size
+ self.output_size = output_size
+ self.hidden_activation = hidden_activation
+ self.output_activation = output_activation
+ self.layer_norm = layer_norm
+ self.fcs = []
+ self.layer_norms = []
+ self.layer_norm_kwargs = layer_norm_kwargs
+ in_size = input_size
+
+ for i, next_size in enumerate(hidden_sizes):
+ fc = nn.Linear(in_size, next_size)
+ in_size = next_size
+ hidden_init(fc.weight)
+ fc.bias.data.fill_(b_init_value)
+ self.__setattr__(f"fc{i}", fc)
+ self.fcs.append(fc)
+
+ if self.layer_norm:
+ ln = LayerNorm(next_size, **layer_norm_kwargs)
+ self.__setattr__(f"layer_norm{i}", ln)
+ self.layer_norms.append(ln)
+
+ self.dropout_kwargs = dropout_kwargs
+ self.dropout = dropout
+ self.last_fc = nn.Linear(in_size, output_size)
+ self.last_fc.weight.data.uniform_(-init_w, init_w)
+ self.last_fc.bias.data.fill_(0)
+
+ def forward(self, input, return_preactivations=False):
+ h = input
+ for i, fc in enumerate(self.fcs):
+ h = fc(h)
+ if self.layer_norm and i < len(self.fcs) - 1:
+ h = self.layer_norms[i](h)
+ if self.dropout:
+ F.dropout(h, **self.dropout_kwargs)
+ h = self.hidden_activation(h)
+ preactivation = self.last_fc(h)
+ output = self.output_activation(preactivation)
+ if return_preactivations:
+ return output, preactivation
+ else:
+ return output
+
+
+class MultiHeadedMlp(Mlp):
+    """
+    An Mlp whose flat output is split into several per-head outputs:
+
+               .-> linear head 0
+              /
+    input --> MLP ---> linear head 1
+              \
+               '-> linear head 2
+    """
+
+    def __init__(
+        self,
+        hidden_sizes,
+        output_sizes,
+        input_size,
+        init_w=3e-3,
+        hidden_activation=F.relu,
+        output_activations=None,
+        hidden_init=fanin_init,
+        b_init_value=0.0,
+        layer_norm=False,
+        layer_norm_kwargs=None,
+    ):
+        # The base Mlp emits sum(output_sizes) units; the splitter carves
+        # them into per-head slices with per-head activations.
+        super().__init__(
+            hidden_sizes=hidden_sizes,
+            output_size=sum(output_sizes),
+            input_size=input_size,
+            init_w=init_w,
+            hidden_activation=hidden_activation,
+            hidden_init=hidden_init,
+            b_init_value=b_init_value,
+            layer_norm=layer_norm,
+            layer_norm_kwargs=layer_norm_kwargs,
+        )
+        self._splitter = SplitIntoManyHeads(
+            output_sizes,
+            output_activations,
+        )
+
+    def forward(self, input):
+        flat_outputs = super().forward(input)
+        # Returns a tuple of per-head tensors.
+        return self._splitter(flat_outputs)
+
+
+class ConcatMultiHeadedMlp(MultiHeadedMlp):
+ """
+ Concatenate inputs along dimension and then pass through MultiHeadedMlp.
+ """
+
+ def __init__(self, *args, dim=1, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.dim = dim
+
+ def forward(self, *inputs, **kwargs):
+ flat_inputs = torch.cat(inputs, dim=self.dim)
+ return super().forward(flat_inputs, **kwargs)
+
+
+class ConcatMlp(Mlp):
+ """
+ Concatenate inputs along dimension and then pass through MLP.
+ """
+
+ def __init__(self, *args, dim=1, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.dim = dim
+
+ def forward(self, *inputs, **kwargs):
+ flat_inputs = torch.cat(inputs, dim=self.dim)
+ return super().forward(flat_inputs, **kwargs)
+
+
+class SplitIntoManyHeads(nn.Module):
+    """
+    Split a flat feature tensor into several heads along dim 1, applying a
+    per-head output activation:
+
+             .-> head 0
+            /
+    input ---> head 1
+            \
+             '-> head 2
+    """
+
+    def __init__(
+        self,
+        output_sizes,
+        output_activations=None,
+    ):
+        super().__init__()
+        if output_activations is None:
+            output_activations = ["identity" for _ in output_sizes]
+        else:
+            if len(output_activations) != len(output_sizes):
+                raise ValueError(
+                    "output_activation and output_sizes must have " "the same length"
+                )
+
+        self._output_narrow_params = []
+        self._output_activations = []
+        for output_activation in output_activations:
+            if isinstance(output_activation, str):
+                output_activation = activation_from_string(output_activation)
+            self._output_activations.append(output_activation)
+        # Precompute (start, length) slices of the flat output for each head.
+        start_idx = 0
+        for output_size in output_sizes:
+            self._output_narrow_params.append((start_idx, output_size))
+            start_idx = start_idx + output_size
+
+    def forward(self, flat_outputs):
+        """Return a tuple of per-head activated slices of flat_outputs."""
+        pre_activation_outputs = tuple(
+            flat_outputs.narrow(1, start, length)
+            for start, length in self._output_narrow_params
+        )
+        outputs = tuple(
+            activation(x)
+            for activation, x in zip(self._output_activations, pre_activation_outputs)
+        )
+        return outputs
+
+
+class ParallelMlp(nn.Module):
+    """
+    Efficient implementation of multiple MLPs with identical architectures.
+
+           .-> mlp 0
+          /
+    input ---> mlp 1
+          \
+           '-> mlp 2
+
+    See https://discuss.pytorch.org/t/parallel-execution-of-modules-in-nn-modulelist/43940/7
+    for details
+
+    The last dimension of the output corresponds to the MLP index.
+    """
+
+    def __init__(
+        self,
+        num_heads,
+        input_size,
+        output_size,  # per mlp
+        hidden_sizes,
+        hidden_activation="ReLU",
+        output_activation="identity",
+        dim=1,
+        layer_norm=False,
+        dropout=False,
+        input_is_already_expanded=False,
+    ):
+        super().__init__()
+
+        def create_layers():
+            # A grouped 1x1 Conv1d is equivalent to num_heads independent
+            # Linear layers applied in parallel.
+            layers = []
+            input_dim = input_size
+            for i, hidden_size in enumerate(hidden_sizes):
+                fc = nn.Conv1d(
+                    in_channels=input_dim * num_heads,
+                    out_channels=hidden_size * num_heads,
+                    kernel_size=1,
+                    groups=num_heads,
+                )
+                # fc.register_forward_hook(self.forward_hook(i))
+                layers.append(fc)
+                if isinstance(hidden_activation, str):
+                    activation = activation_from_string(hidden_activation)
+                else:
+                    activation = hidden_activation
+                layers.append(activation)
+
+                if layer_norm:
+                    ln = ParallelLayerNorm(num_heads, hidden_size)
+                    layers.append(ln)
+                    # ln.register_forward_hook(self.forward_hook(f"{i} ln"))
+
+                if dropout:
+                    drop = nn.Dropout(p=0.4)
+                    layers.append(drop)
+
+                input_dim = hidden_size
+
+            last_fc = nn.Conv1d(
+                in_channels=input_dim * num_heads,
+                out_channels=output_size * num_heads,
+                kernel_size=1,
+                groups=num_heads,
+            )
+            layers.append(last_fc)
+
+            if output_activation != "identity":
+                if isinstance(output_activation, str):
+                    activation = activation_from_string(output_activation)
+                else:
+                    activation = output_activation
+                layers.append(activation)
+            return layers
+
+        self.network = nn.Sequential(*create_layers())
+        self.num_heads = num_heads
+        self.input_is_already_expanded = input_is_already_expanded
+        self.dim = dim
+        self.layer_norm = layer_norm
+        # self.selected_out = OrderedDict()
+
+    # def forward_hook(self, layer_name):
+    #     def hook(module, input, output):
+    #         self.selected_out[layer_name] = output
+
+    #     return hook
+
+    def forward(self, *inputs):
+        """Concatenate inputs, fan them out to every head, and run the stack."""
+        x = torch.cat(inputs, dim=self.dim)
+
+        if not self.input_is_already_expanded:
+            # Tile the input once per head and add the length-1 conv dim.
+            x = x.repeat(1, self.num_heads).unsqueeze(-1)
+        flat = self.network(x)
+        batch_size = x.shape[0]
+        # [batch, output_size, num_heads]
+        return flat.view(batch_size, -1, self.num_heads)
+
+    @staticmethod
+    def ensemble_to_individual(ens):  # ens: ParallelMlp
+        """Unpack a ParallelMlp into a list of equivalent standalone ConcatMlps."""
+        ret = []
+        layer_sizes = []
+        for layer in ens.network:
+            if isinstance(layer, nn.Conv1d):
+                layer_sizes.append(
+                    (
+                        int(layer.in_channels / ens.num_heads),
+                        int(layer.out_channels / ens.num_heads),
+                    )
+                )
+
+        for i in range(ens.num_heads):
+            mlp = ConcatMlp(
+                hidden_sizes=[sz[1] for sz in layer_sizes[:-1]],
+                output_size=layer_sizes[-1][-1],
+                input_size=layer_sizes[0][0],
+            )
+            with torch.no_grad():
+                constructed_state_dict = OrderedDict()
+
+                ens_state_dict = ens.state_dict()
+                # Slice head i's rows out of each grouped conv weight/bias.
+                for mlp_key, ens_key in zip(mlp.state_dict(), ens_state_dict):
+                    tensor = ens_state_dict[ens_key].squeeze()
+                    single_sz = int(tensor.shape[0] / ens.num_heads)
+                    constructed_state_dict[mlp_key] = tensor[
+                        single_sz * i : single_sz * (i + 1)
+                    ]
+
+                mlp.load_state_dict(constructed_state_dict)
+
+            ret.append(mlp)
+        return ret
+
+
+class QuantileMlp(nn.Module):
+    """
+    IQN-style quantile value network: Z(s, a, tau) for sampled fractions tau.
+
+    A cosine embedding of tau is merged multiplicatively with the
+    state-action features, following the IQN architecture.
+    """
+
+    def __init__(
+        self,
+        hidden_sizes,
+        input_size,
+        embedding_size=64,
+        num_quantiles=8,
+        layer_norm=True,
+    ):
+        super().__init__()
+        self.layer_norm = layer_norm
+        # hidden_sizes[:-2] MLP base
+        # hidden_sizes[-2] before merge
+        # hidden_sizes[-1] before output
+
+        self.base_fc = []
+        last_size = input_size
+        for next_size in hidden_sizes[:-1]:
+            self.base_fc += [
+                nn.Linear(last_size, next_size),
+                nn.LayerNorm(next_size) if layer_norm else nn.Identity(),
+                nn.ReLU(inplace=True),
+            ]
+            last_size = next_size
+        self.base_fc = nn.Sequential(*self.base_fc)
+        self.num_quantiles = num_quantiles
+        self.embedding_size = embedding_size
+        self.tau_fc = nn.Sequential(
+            nn.Linear(embedding_size, last_size),
+            nn.LayerNorm(last_size) if layer_norm else nn.Identity(),
+            nn.Sigmoid(),
+        )
+        self.merge_fc = nn.Sequential(
+            nn.Linear(last_size, hidden_sizes[-1]),
+            nn.LayerNorm(hidden_sizes[-1]) if layer_norm else nn.Identity(),
+            nn.ReLU(inplace=True),
+        )
+        self.last_fc = nn.Linear(hidden_sizes[-1], 1)
+        # Cosine embedding frequencies 1..embedding_size.
+        self.const_vec = ptu.from_numpy(np.arange(1, 1 + self.embedding_size))
+
+    def forward(self, state, action, tau):
+        """
+        Calculate Quantile Value in Batch
+        tau: quantile fractions, (N, T)
+        """
+        h = torch.cat([state, action], dim=1)
+        h = self.base_fc(h)  # (N, C)
+
+        x = torch.cos(tau.unsqueeze(-1) * self.const_vec * np.pi)  # (N, T, E)
+        x = self.tau_fc(x)  # (N, T, C)
+
+        h = torch.mul(x, h.unsqueeze(-2))  # (N, T, C)
+        h = self.merge_fc(h)  # (N, T, C)
+        output = self.last_fc(h).squeeze(-1)  # (N, T)
+        return output
+
+    def get_tau_quantile(self, state, action, tau):
+        """
+        Calculate the quantile value for a single scalar fraction `tau`,
+        one value per batch row.
+        """
+        h = torch.cat([state, action], dim=1)
+        h = self.base_fc(h)  # (N, C)
+        with torch.no_grad():
+            tau_pt = ptu.ones([state.shape[0], 1, 1]) * tau
+            x = torch.cos(tau_pt * self.const_vec * np.pi)  # (N, 1, E)
+            x = self.tau_fc(x)  # (N, 1, C)
+
+        h = torch.mul(x, h.unsqueeze(-2))  # (N, 1, C)
+        h = self.merge_fc(h)  # (N, 1, C)
+        output = self.last_fc(h).squeeze()  # (N, 1)
+        return output
+
+    def get_mean(self, state, action):
+        """
+        Calculate Quantile Mean in Batch (E(Z) = Q)
+        tau: quantile fractions, (N, T)
+        N = batch
+        C = hidden sz (256)
+        E = embedding sz (64)
+        """
+        h = torch.cat([state, action], dim=1)
+        h = self.base_fc(h)  # (N, C)
+
+        with torch.no_grad():
+            # 32 evenly spaced fractions; tau_hat are the bin midpoints.
+            presum_tau = ptu.zeros(state.shape[0], 32) + 1.0 / 32  # (N, 32)
+            tau = torch.cumsum(presum_tau, dim=1)
+            tau_hat = ptu.zeros_like(tau)
+            tau_hat[:, 0:1] = tau[:, 0:1] / 2.0
+            tau_hat[:, 1:] = (tau[:, 1:] + tau[:, :-1]) / 2.0  # (N, 32)
+
+            x = torch.cos(tau_hat.unsqueeze(-1) * self.const_vec * np.pi)  # (N, 32, E)
+            x = self.tau_fc(x)  # (N, 32, C)
+
+        h = torch.mul(x, h.unsqueeze(-2))  # (N, 32, C)
+        h = self.merge_fc(h)  # (N, 32, C)
+        output = self.last_fc(h).squeeze()  # (N, 32) #! gets rid of C
+        return output.mean(-1)  # (N,)
diff --git a/cfpi/pytorch/normalizer.py b/cfpi/pytorch/normalizer.py
new file mode 100644
index 0000000..7888947
--- /dev/null
+++ b/cfpi/pytorch/normalizer.py
@@ -0,0 +1,65 @@
+import eztils.torch as ptu
+import torch
+
+"""
+Based on code from Marcin Andrychowicz
+"""
+import numpy as np
+
+
+class FixedNormalizer:
+    """Normalizer with externally fixed mean/std (no online statistics)."""
+
+    def __init__(
+        self,
+        size,
+        default_clip_range: float = np.inf,
+        mean=0,
+        std=1,
+        eps=1e-8,
+    ):
+        assert std > 0
+        # eps keeps the division well-defined even for tiny std.
+        std = std + eps
+        self.size = size
+        self.default_clip_range = default_clip_range
+        # Broadcast scalar stats to per-dimension arrays of shape (size,).
+        self.mean = mean + np.zeros(self.size, np.float32)
+        self.std = std + np.zeros(self.size, np.float32)
+        self.eps = eps
+
+    def set_mean(self, mean):
+        self.mean = mean + np.zeros(self.size, np.float32)
+
+    def set_std(self, std):
+        std = std + self.eps
+        self.std = std + np.zeros(self.size, np.float32)
+
+    def normalize(self, v, clip_range=None):
+        """Return (v - mean) / std, clipped to +-clip_range."""
+        if clip_range is None:
+            clip_range = self.default_clip_range
+        mean, std = self.mean, self.std
+        if v.ndim == 2:
+            mean = mean.reshape(1, -1)
+            std = std.reshape(1, -1)
+        return np.clip((v - mean) / std, -clip_range, clip_range)
+
+    def denormalize(self, v):
+        """Inverse of normalize (no clipping)."""
+        mean, std = self.mean, self.std
+        if v.ndim == 2:
+            mean = mean.reshape(1, -1)
+            std = std.reshape(1, -1)
+        return mean + v * std
+
+    def copy_stats(self, other):
+        # Adopt another normalizer's mean/std (re-applies eps via set_std).
+        self.set_mean(other.mean)
+        self.set_std(other.std)
+
+
+class TorchFixedNormalizer(FixedNormalizer):
+    # NOTE(review): only normalize() is overridden for torch tensors; the
+    # inherited denormalize() mixes numpy stats with its input — confirm it
+    # is never called with torch tensors.
+    def normalize(self, v, clip_range=None):
+        """Torch version of normalize; v is a torch.Tensor."""
+        if clip_range is None:
+            clip_range = self.default_clip_range
+        mean = ptu.from_numpy(self.mean)
+        std = ptu.from_numpy(self.std)
+        if v.dim() == 2:
+            # Unsqueeze along the batch use automatic broadcasting
+            mean = mean.unsqueeze(0)
+            std = std.unsqueeze(0)
+        return torch.clamp((v - mean) / std, -clip_range, clip_range)
diff --git a/cfpi/pytorch/torch_rl_algorithm.py b/cfpi/pytorch/torch_rl_algorithm.py
new file mode 100644
index 0000000..082796d
--- /dev/null
+++ b/cfpi/pytorch/torch_rl_algorithm.py
@@ -0,0 +1,224 @@
+from typing import Iterable
+
+import abc
+from collections import OrderedDict
+
+from torch import nn as nn
+from torch.cuda import empty_cache
+
+import cfpi.conf as conf
+import cfpi.core.gtimer as gt
+import wandb
+from cfpi.core.logging import logger
+from cfpi.core.logging.eval_util import get_average_returns
+from cfpi.core.rl_algorithm import BatchRLAlgorithm, Trainer
+from cfpi.data_management.replay_buffer import ReplayBuffer
+from cfpi.samplers.path_collector import MdpPathCollector
+
+
+class TorchBatchRLAlgorithm(BatchRLAlgorithm):
+    """BatchRLAlgorithm with torch-specific device and train-mode handling."""
+
+    def __init__(
+        self,
+        trainer,
+        exploration_env,
+        evaluation_env,
+        exploration_data_collector: MdpPathCollector,
+        evaluation_data_collector: MdpPathCollector,
+        replay_buffer: ReplayBuffer,
+        batch_size,
+        max_path_length,
+        num_epochs,
+        num_eval_steps_per_epoch,
+        num_expl_steps_per_train_loop,
+        num_trains_per_train_loop,
+        num_train_loops_per_epoch=1,
+        min_num_steps_before_training=0,
+        start_epoch=0,
+    ):
+        # Pure pass-through to the base class (positional order matters).
+        super().__init__(
+            trainer,
+            exploration_env,
+            evaluation_env,
+            exploration_data_collector,
+            evaluation_data_collector,
+            replay_buffer,
+            batch_size,
+            max_path_length,
+            num_epochs,
+            num_eval_steps_per_epoch,
+            num_expl_steps_per_train_loop,
+            num_trains_per_train_loop,
+            num_train_loops_per_epoch,
+            min_num_steps_before_training,
+            start_epoch,
+        )
+
+    def to(self, device):
+        """Move every trainer network to `device`."""
+        for net in self.trainer.networks:
+            net.to(device)
+
+    def training_mode(self, mode):
+        """Toggle train/eval mode on every trainer network."""
+        for net in self.trainer.networks:
+            net.train(mode)
+
+
+class OfflineTorchBatchRLAlgorithm(TorchBatchRLAlgorithm):
+ def __init__(
+ self,
+ trainer,
+ evaluation_env,
+ evaluation_data_collector: MdpPathCollector,
+ replay_buffer: ReplayBuffer,
+ batch_size,
+ max_path_length,
+ num_epochs,
+ num_eval_steps_per_epoch,
+ num_trains_per_train_loop,
+ num_train_loops_per_epoch=1,
+ start_epoch=0,
+ zero_step=False,
+ pre_calculate_new_next_actions=False,
+ ):
+ super().__init__(
+ trainer,
+ None, # set exploration_env to None
+ evaluation_env,
+ None, # set expl data collector to None
+ evaluation_data_collector,
+ replay_buffer,
+ batch_size,
+ max_path_length,
+ num_epochs,
+ num_eval_steps_per_epoch,
+ None, # set expl steps per train loop to None
+ num_trains_per_train_loop,
+ num_train_loops_per_epoch,
+ None, # set min_num_steps_before_training to None
+ start_epoch,
+ )
+ self.normalized_scores = []
+ self.zero_step = zero_step
+ self.pre_calculate_new_next_actions = pre_calculate_new_next_actions
+ assert self.expl_env is None
+
+ def record_exploration(self): # don't record exploration
+ pass
+
+ def log_additional(self, epoch):
+ eval_paths = self.eval_data_collector.get_epoch_paths()
+ if eval_paths == []:
+ return
+
+ normalized_score = (
+ self.eval_env.get_normalized_score(get_average_returns(eval_paths)) * 100
+ )
+ self.normalized_scores.append(normalized_score)
+ logger.record_dict(
+ OrderedDict(normalized_score=normalized_score),
+ prefix="eval/",
+ )
+
+ def _get_snapshot(self):
+ snapshot = {}
+ for k, v in self.trainer.get_snapshot().items():
+ snapshot["trainer/" + k] = v
+ for k, v in self.eval_data_collector.get_snapshot().items():
+ snapshot["evaluation/" + k] = v
+ for k, v in self.replay_buffer.get_snapshot().items():
+ snapshot["replay_buffer/" + k] = v
+ return snapshot
+
+ def _end_epoch(self, epoch, save_params=True):
+ if self.pre_calculate_new_next_actions:
+ for i in range(self.replay_buffer._size // self.batch_size + 1):
+ indices = range(
+ i * self.batch_size,
+ min((i + 1) * self.batch_size, self.replay_buffer._size),
+ )
+ next_obs = self.replay_buffer._next_obs[indices]
+ new_next_actions = self.trainer.get_cfpi_action(next_obs).mean
+ self.replay_buffer._new_next_actions[
+ indices
+ ] = new_next_actions.detach()
+ empty_cache()
+ gt.stamp("generate_new_next_actions")
+ snapshot = self._get_snapshot()
+ if save_params:
+ logger.save_itr_params(epoch - self._start_epoch, snapshot)
+ gt.stamp("saving")
+ self._log_stats(epoch)
+
+ self.eval_data_collector.end_epoch(epoch)
+ self.replay_buffer.end_epoch(epoch)
+ self.trainer.end_epoch(epoch)
+
+ for post_epoch_func in self.post_epoch_funcs:
+ post_epoch_func(self, epoch)
+
+ def train(self):
+ if not self.zero_step:
+ return super().train()
+ else:
+ self.offline_rl = True
+ for i in range(self.num_epochs):
+ self._begin_epoch(i)
+ self.eval_data_collector.collect_new_paths(
+ self.max_path_length,
+ self.num_eval_steps_per_epoch,
+ discard_incomplete_paths=True,
+ )
+ self._end_epoch(i, save_params=False)
+ if conf.Wandb.is_on:
+ table = wandb.Table(
+ data=list(enumerate(self.normalized_scores)),
+ columns=["step", "normalized score"],
+ )
+ histogram = wandb.plot.histogram(
+ table,
+ value="normalized score",
+ title="Normalized Score Distribution",
+ )
+ wandb.log({"Normalized Score Distribution": histogram})
+ wandb.finish()
+
+ def _train(self):
+ self.training_mode(True)
+ for _ in range(self.num_train_loops_per_epoch):
+ for _ in range(self.num_trains_per_train_loop):
+ train_data = self.replay_buffer.random_batch(self.batch_size)
+ gt.stamp("sampling batch")
+ self.trainer.train(train_data)
+ gt.stamp("training")
+ self.training_mode(False)
+ # First train, then evaluate
+ self.eval_data_collector.collect_new_paths(
+ self.max_path_length,
+ self.num_eval_steps_per_epoch,
+ discard_incomplete_paths=True,
+ )
+ gt.stamp("evaluation sampling")
+
+
+class TorchTrainer(Trainer, metaclass=abc.ABCMeta):
+    """Base class for torch trainers: counts train calls and defines the API."""
+
+    def __init__(self):
+        self._num_train_steps = 0
+
+    def train(self, batch):
+        self._num_train_steps += 1
+        self.train_from_torch(batch)
+
+    def get_diagnostics(self):
+        return OrderedDict(
+            [
+                ("num train calls", self._num_train_steps),
+            ]
+        )
+
+    @abc.abstractmethod
+    def train_from_torch(self, batch):
+        """Run one update from a batch of torch tensors (subclass hook)."""
+        pass
+
+    @property
+    @abc.abstractmethod
+    def networks(self) -> Iterable[nn.Module]:
+        """All nn.Modules this trainer owns (for device moves / train mode)."""
+        pass
diff --git a/cfpi/samplers/__init__.py b/cfpi/samplers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cfpi/samplers/path_collector.py b/cfpi/samplers/path_collector.py
new file mode 100644
index 0000000..642f9b8
--- /dev/null
+++ b/cfpi/samplers/path_collector.py
@@ -0,0 +1,97 @@
+from collections import OrderedDict, deque
+
+from eztils import create_stats_ordered_dict
+
+from cfpi.samplers.rollout_functions import rollout
+
+
+class MdpPathCollector:
+ def __init__(
+ self,
+ env,
+ policy,
+ max_num_epoch_paths_saved=None,
+ render=False,
+ render_kwargs=None,
+ rollout_fn=rollout,
+ save_env_in_snapshot=True,
+ ):
+ if render_kwargs is None:
+ render_kwargs = {}
+ self._env = env
+ self._policy = policy
+ self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
+ self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
+ self._render = render
+ self._render_kwargs = render_kwargs
+ self._rollout_fn = rollout_fn
+
+ self._num_steps_total = 0
+ self._num_paths_total = 0
+
+ self._save_env_in_snapshot = save_env_in_snapshot
+
+ def collect_new_paths(
+ self,
+ max_path_length,
+ num_steps,
+ discard_incomplete_paths,
+ ):
+ paths = []
+ num_steps_collected = 0
+ while num_steps_collected < num_steps:
+ max_path_length_this_loop = min( # Do not go over num_steps
+ max_path_length,
+ num_steps - num_steps_collected,
+ )
+ path = self._rollout_fn(
+ self._env,
+ self._policy,
+ max_path_length=max_path_length_this_loop,
+ render=self._render,
+ render_kwargs=self._render_kwargs,
+ )
+ path_len = len(path["actions"])
+ if (
+ path_len != max_path_length
+ and not path["dones"][-1]
+ and discard_incomplete_paths
+ ):
+ break
+ num_steps_collected += path_len
+ paths.append(path)
+ self._num_paths_total += len(paths)
+ self._num_steps_total += num_steps_collected
+ self._epoch_paths.extend(paths)
+ return paths
+
+ def get_epoch_paths(self):
+ return self._epoch_paths
+
+ def end_epoch(self, epoch):
+ self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
+
+ def get_diagnostics(self):
+ path_lens = [len(path["actions"]) for path in self._epoch_paths]
+ stats = OrderedDict(
+ [
+ ("num steps total", self._num_steps_total),
+ ("num paths total", self._num_paths_total),
+ ]
+ )
+ stats.update(
+ create_stats_ordered_dict(
+ "path length",
+ path_lens,
+ always_show_all_stats=True,
+ )
+ )
+ return stats
+
+ def get_snapshot(self):
+ snapshot_dict = dict(
+ policy=self._policy,
+ )
+ if self._save_env_in_snapshot:
+ snapshot_dict["env"] = self._env
+ return snapshot_dict
diff --git a/cfpi/samplers/rollout_functions.py b/cfpi/samplers/rollout_functions.py
new file mode 100644
index 0000000..5eac030
--- /dev/null
+++ b/cfpi/samplers/rollout_functions.py
@@ -0,0 +1,95 @@
+import copy
+
+import numpy as np
+
+
+def rollout(
+ env,
+ agent,
+ max_path_length=np.inf,
+ render=False,
+ render_kwargs=None,
+ preprocess_obs_for_policy_fn=None,
+ get_action_kwargs=None,
+ return_dict_obs=False,
+ full_o_postprocess_func=None,
+ reset_callback=None,
+):
+ if render_kwargs is None:
+ render_kwargs = {}
+ if get_action_kwargs is None:
+ get_action_kwargs = {}
+ if preprocess_obs_for_policy_fn is None:
+
+ def preprocess_obs_for_policy_fn(x):
+ return x
+
+ raw_obs = []
+ raw_next_obs = []
+ observations = []
+ actions = []
+ rewards = []
+ terminals = []
+ dones = []
+ agent_infos = []
+ env_infos = []
+ next_observations = []
+ path_length = 0
+ agent.reset()
+ o = env.reset()
+ if reset_callback:
+ reset_callback(env, agent, o)
+ if render:
+ env.render(**render_kwargs)
+ while path_length < max_path_length:
+ raw_obs.append(o)
+ o_for_agent = preprocess_obs_for_policy_fn(o)
+ a, agent_info = agent.get_action(o_for_agent, **get_action_kwargs)
+
+ if full_o_postprocess_func:
+ full_o_postprocess_func(env, agent, o)
+
+ next_o, r, done, env_info = env.step(copy.deepcopy(a))
+ if render:
+ env.render(**render_kwargs)
+ observations.append(o)
+ rewards.append(r)
+ terminal = False
+ if done:
+ # terminal=False if TimeLimit caused termination
+ if not env_info.pop("TimeLimit.truncated", False):
+ terminal = True
+ terminals.append(terminal)
+ dones.append(done)
+ actions.append(a)
+ next_observations.append(next_o)
+ raw_next_obs.append(next_o)
+ agent_infos.append(agent_info)
+ env_infos.append(env_info)
+ path_length += 1
+ if done:
+ break
+ o = next_o
+ actions = np.array(actions)
+ if len(actions.shape) == 1:
+ actions = np.expand_dims(actions, 1)
+ observations = np.array(observations)
+ next_observations = np.array(next_observations)
+ if return_dict_obs:
+ observations = raw_obs
+ next_observations = raw_next_obs
+ rewards = np.array(rewards)
+ if len(rewards.shape) == 1:
+ rewards = rewards.reshape(-1, 1)
+ return dict(
+ observations=observations,
+ actions=actions,
+ rewards=rewards,
+ next_observations=next_observations,
+ terminals=np.array(terminals).reshape(-1, 1),
+ dones=np.array(dones).reshape(-1, 1),
+ agent_infos=agent_infos,
+ env_infos=env_infos,
+ full_observations=raw_obs,
+        full_next_observations=raw_next_obs,
+ )
diff --git a/cfpi/variants/SUPPORTED_ALGORITHMS.py b/cfpi/variants/SUPPORTED_ALGORITHMS.py
new file mode 100644
index 0000000..c447c83
--- /dev/null
+++ b/cfpi/variants/SUPPORTED_ALGORITHMS.py
@@ -0,0 +1,20 @@
+class Base:
+ file_path: str
+
+
+# SPECIFIED RELATIVE TO ./torch/algorithms
+class sg(Base):
+ file_path = "cfpi.single_gaussian"
+
+
+class mg(Base):
+ file_path = "cfpi.mixture_gaussian"
+
+
+class bc(Base):
+ file_path = "bc"
+
+
+class sarsa_iqn(Base):
+ file_path = "sarsa_iqn"
+
diff --git a/cfpi/variants/__init__.py b/cfpi/variants/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cfpi/variants/base/__init__.py b/cfpi/variants/base/__init__.py
new file mode 100644
index 0000000..ce97a80
--- /dev/null
+++ b/cfpi/variants/base/__init__.py
@@ -0,0 +1,21 @@
+from pydantic import BaseModel as PydanticBaseModel
+
+
+class BaseModel(PydanticBaseModel):
+ class Config:
+ arbitrary_types_allowed = True
+ allow_mutation = False
+ validate_all = True
+
+
+class FuncWrapper: # Wrap a function or class definition as an object since we can't directly include functions
+ def __init__(self, f) -> None:
+ self.f = f
+
+
+w = FuncWrapper # alias
+
+
+#! VAE Kwargs
+class VaeKwargs(BaseModel):
+ hidden_dim = 750
diff --git a/cfpi/variants/base/algorithm.py b/cfpi/variants/base/algorithm.py
new file mode 100644
index 0000000..94d9e2a
--- /dev/null
+++ b/cfpi/variants/base/algorithm.py
@@ -0,0 +1,25 @@
+#! Algo Kwargs
+from cfpi.variants.base import BaseModel
+
+
+class AlgoKwargs(BaseModel):
+ start_epoch = -1000 # offline epochs
+ num_epochs = 0
+ batch_size = 256
+ max_path_length = 1000
+ num_trains_per_train_loop = 1000
+
+
+class OnlineAlgoKwargs(AlgoKwargs):
+ num_expl_steps_per_train_loop = 0
+ min_num_steps_before_training = 1000
+
+
+class OfflineAlgoKwargs(AlgoKwargs):
+ num_eval_steps_per_epoch = 5000
+
+
+class PacAlgoKwargs(OfflineAlgoKwargs):
+ zero_step = True
+ num_eval_steps_per_epoch = 1000
+ num_epochs = 100
diff --git a/cfpi/variants/base/policy.py b/cfpi/variants/base/policy.py
new file mode 100644
index 0000000..5b5f733
--- /dev/null
+++ b/cfpi/variants/base/policy.py
@@ -0,0 +1,18 @@
+#! Policy Kwargs
+from cfpi.variants.base import BaseModel
+
+
+class PolicyKwargs(BaseModel):
+ hidden_sizes = [1024, 1024]
+
+
+class Two256(PolicyKwargs):
+ hidden_sizes = [256, 256]
+
+
+class Three256(PolicyKwargs):
+ hidden_sizes = [256, 256, 256]
+
+
+class Four256(PolicyKwargs):
+ hidden_sizes = [256, 256, 256, 256]
diff --git a/cfpi/variants/base/q.py b/cfpi/variants/base/q.py
new file mode 100644
index 0000000..1b29260
--- /dev/null
+++ b/cfpi/variants/base/q.py
@@ -0,0 +1,15 @@
+from cfpi.variants.base import BaseModel
+
+
+class QFKwargs(BaseModel):
+ hidden_sizes = [1024, 1024]
+
+
+class EnsembleQFKwargs(QFKwargs):
+ num_heads = 10
+
+
+class QuantileMLPKwargs(QFKwargs):
+ hidden_sizes = [256, 256, 256]
+ num_quantiles = 8
+ embedding_size = 64
diff --git a/cfpi/variants/base/trainer.py b/cfpi/variants/base/trainer.py
new file mode 100644
index 0000000..b49729d
--- /dev/null
+++ b/cfpi/variants/base/trainer.py
@@ -0,0 +1,21 @@
+from cfpi.variants.base import BaseModel
+
+
+class TrainerKwargs(BaseModel):
+ discount = 0.99
+ policy_lr = 3e-4
+ qf_lr = 1e-4
+ reward_scale = 1
+ soft_target_tau = 0.005
+ target_update_period = 1
+
+
+class SacTrainerKwargs(TrainerKwargs):
+ use_automatic_entropy_tuning = False
+
+
+class PacTrainerKwargs(TrainerKwargs):
+ beta_LB = 1.0
+ # delta: list = [0.0]
+ delta_range = [0.2, 2.0]
+ num_delta = 10
diff --git a/cfpi/variants/base/variant.py b/cfpi/variants/base/variant.py
new file mode 100644
index 0000000..7c29b7a
--- /dev/null
+++ b/cfpi/variants/base/variant.py
@@ -0,0 +1,64 @@
+from cfpi.data_management.env_replay_buffer import (
+ EnvReplayBuffer,
+)
+from cfpi.launchers.pipeline import Pipelines
+from cfpi.policies.gaussian_policy import TanhGaussianPolicy
+from cfpi.pytorch.networks.mlp import ConcatMlp
+from cfpi.pytorch.torch_rl_algorithm import (
+ OfflineTorchBatchRLAlgorithm,
+ TorchTrainer,
+ Trainer,
+)
+from cfpi.samplers.rollout_functions import rollout
+from cfpi.variants.base import BaseModel, w
+from cfpi.variants.base.algorithm import OfflineAlgoKwargs, PacAlgoKwargs
+from cfpi.variants.base.policy import PolicyKwargs, Three256
+from cfpi.variants.base.q import QFKwargs, QuantileMLPKwargs
+from cfpi.variants.base.trainer import PacTrainerKwargs, TrainerKwargs
+
+
+class PathLoaderKwargs(BaseModel):
+ pass
+
+
+class OfflineVariant(BaseModel):
+ # require children to specify
+ algorithm = ""
+ version = ""
+ env_id = ""
+ seed = -1
+
+ algorithm_kwargs = OfflineAlgoKwargs()
+ policy_kwargs = PolicyKwargs()
+ qf_kwargs = QFKwargs()
+ trainer_kwargs = TrainerKwargs()
+ path_loader_kwargs = PathLoaderKwargs()
+
+ policy_class = w(TanhGaussianPolicy)
+ qf_class = w(ConcatMlp)
+ trainer_cls = w(TorchTrainer)
+ alg_class = w(OfflineTorchBatchRLAlgorithm)
+ replay_buffer_class = w(EnvReplayBuffer)
+ rollout_fn = w(rollout)
+
+ replay_buffer_size = int(2e6)
+
+ snapshot_mode = "gap_and_last"
+ snapshot_gap = 100
+
+
+class PacVariant(OfflineVariant):
+ trainer_cls = w(Trainer) # require child to specify
+ checkpoint_params = "SPECIFY"
+
+ policy_kwargs = Three256()
+ qf_kwargs = QuantileMLPKwargs()
+ trainer_kwargs = PacTrainerKwargs()
+ algorithm_kwargs = PacAlgoKwargs()
+ IQN = True
+ d4rl = True
+ normalize_env = True
+
+ pipeline = w(Pipelines.offline_zerostep_pac_pipeline)
+
+
diff --git a/cfpi/variants/bc/algorithm.py b/cfpi/variants/bc/algorithm.py
new file mode 100644
index 0000000..c95c58a
--- /dev/null
+++ b/cfpi/variants/bc/algorithm.py
@@ -0,0 +1,10 @@
+# * Setup --------------------------------------------------
+
+
+from cfpi.variants.base.algorithm import OfflineAlgoKwargs
+
+
+class BCAlgoKwargs(OfflineAlgoKwargs):
+ start_epoch = -int(500)
+ num_eval_steps_per_epoch = 1000
+ num_epochs = 0
diff --git a/cfpi/variants/bc/policy.py b/cfpi/variants/bc/policy.py
new file mode 100644
index 0000000..00b962e
--- /dev/null
+++ b/cfpi/variants/bc/policy.py
@@ -0,0 +1,41 @@
+from torch.nn import functional as F
+
+from cfpi.variants.base import w
+from cfpi.variants.base.policy import PolicyKwargs, Three256
+
+
+class TunedSingleGaussianPolicyKwargs(PolicyKwargs):
+ hidden_activation = w(F.leaky_relu)
+ # layer_norm = True
+ dropout = True
+ dropout_kwargs = {"p": 0.1}
+ hidden_sizes = [256, 256, 256, 256]
+
+
+class ThreeLayerFourGaussianPolicyKwargs(Three256):
+ num_gaussians = 4
+
+
+class TunedPolicyKwargs(ThreeLayerFourGaussianPolicyKwargs):
+ hidden_activation = w(F.leaky_relu)
+ # layer_norm = True
+ num_gaussians = 10
+ dropout = True
+ dropout_kwargs = {"p": 0.1}
+ hidden_sizes = [256, 256, 256, 256]
+
+
+class ThreeLayerTwelveGaussianPolicyKwargs(Three256):
+ num_gaussians = 12
+
+
+class ThreeLayerEightGaussianPolicyKwargs(Three256):
+ num_gaussians = 8
+
+
+class ThreeLayerTwoGaussianPolicyKwargs(Three256):
+ num_gaussians = 2
+
+
+class TwoLayerFourGaussianPolicyKwargs(PolicyKwargs):
+ num_gaussians = 4
diff --git a/cfpi/variants/bc/trainer.py b/cfpi/variants/bc/trainer.py
new file mode 100644
index 0000000..a45ce8e
--- /dev/null
+++ b/cfpi/variants/bc/trainer.py
@@ -0,0 +1,12 @@
+from torch import optim
+
+from cfpi.variants.base import w
+from cfpi.variants.base.trainer import TrainerKwargs
+
+
+class BCTrainerKwargs(TrainerKwargs):
+ policy_lr = 1e-4
+
+
+class TunedBCTrainerKwargs(TrainerKwargs):
+ optimizer_class = w(optim.AdamW)
diff --git a/cfpi/variants/bc/variant.py b/cfpi/variants/bc/variant.py
new file mode 100644
index 0000000..a04b072
--- /dev/null
+++ b/cfpi/variants/bc/variant.py
@@ -0,0 +1,142 @@
+from cfpi.policies.gaussian_policy import (
+ GaussianMixturePolicy,
+ GaussianPolicy,
+ TanhGaussianMixturePolicy,
+)
+from cfpi.pytorch.algorithms.policy_training.bc import (
+ BCPipeline,
+ BCTrainer,
+ BCWithValPipeline,
+)
+from cfpi.variants.base import w
+from cfpi.variants.base.policy import Three256
+from cfpi.variants.base.variant import OfflineVariant
+from cfpi.variants.bc.algorithm import BCAlgoKwargs
+from cfpi.variants.bc.policy import (
+ ThreeLayerEightGaussianPolicyKwargs,
+ ThreeLayerFourGaussianPolicyKwargs,
+ ThreeLayerTwelveGaussianPolicyKwargs,
+ ThreeLayerTwoGaussianPolicyKwargs,
+ TunedPolicyKwargs,
+ TunedSingleGaussianPolicyKwargs,
+ TwoLayerFourGaussianPolicyKwargs,
+)
+from cfpi.variants.bc.trainer import BCTrainerKwargs, TunedBCTrainerKwargs
+
+
+class BaseBCVariant(OfflineVariant):
+ trainer_cls = w(BCTrainer)
+
+ policy_kwargs = Three256()
+ trainer_kwargs = BCTrainerKwargs()
+ algorithm_kwargs = BCAlgoKwargs()
+ IQN = True
+ d4rl = True
+ normalize_env = True
+
+ pipeline = w(BCPipeline)
+
+
+# * Runnable ------------------------------------------------
+
+
+class VanillaVariant(BaseBCVariant):
+ algorithm = "behavior-cloning"
+ version = "normalized-256-256-256"
+ seed = 0
+ normalize_env = False
+ env_id = "hopper-medium-v2"
+
+
+class TunedBCVariant(BaseBCVariant):
+ seed = 0
+ algorithm = "mg-behavior-cloning"
+ version = "gaussian-tanh-before"
+ trainer_kwargs = TunedBCTrainerKwargs()
+ policy_kwargs = TunedSingleGaussianPolicyKwargs()
+ policy_class = w(GaussianPolicy)
+
+
+class MGBCVariant(VanillaVariant):
+ seed = 0
+ algorithm = "mg-behavior-cloning"
+ version = "4-gaussian-fixed-atanh"
+
+ policy_kwargs = TwoLayerFourGaussianPolicyKwargs()
+ policy_class = w(TanhGaussianMixturePolicy)
+
+
+class MGBCNormalizeWithValVariant(MGBCVariant):
+ normalize_env = True
+ version = "2-gaussian-3-layers-normalize-env-with-val"
+ env_id = "hopper-medium-v2"
+ seed = 0
+ train_ratio = 0.95
+ fold_idx = 2
+
+ policy_kwargs = ThreeLayerTwoGaussianPolicyKwargs()
+ algorithm_kwargs = BCAlgoKwargs()
+ pipeline = w(BCWithValPipeline)
+
+ snapshot_gap = 50
+
+
+class TunedMGBCVariant(BaseBCVariant):
+ seed = 0
+ algorithm = "mg-behavior-cloning"
+ version = "10-gaussian-correct-GM"
+ env_id = "antmaze-umaze-diverse-v0"
+ trainer_kwargs = TunedBCTrainerKwargs()
+ normalize_env = False
+
+ policy_kwargs = TunedPolicyKwargs()
+ policy_class = w(GaussianMixturePolicy)
+
+
+class TwelveGaussianMGBCAntMazeVariant(MGBCVariant):
+ normalize_env = False
+ version = "12-gaussian-3-layers-all-data-rebuttal"
+ env_id = "antmaze-medium-diverse-v0"
+ seed = 0
+
+ policy_kwargs = ThreeLayerTwelveGaussianPolicyKwargs()
+
+
+class EightGaussianMGBCAntMazeVariant(MGBCVariant):
+ normalize_env = False
+ version = "8-gaussian-3-layers-all-data-rebuttal"
+ env_id = "antmaze-medium-diverse-v0"
+ seed = 0
+
+ policy_kwargs = ThreeLayerEightGaussianPolicyKwargs()
+ pipeline = w(BCPipeline)
+
+
+class FourGaussianMGBCAntMazeVariant(MGBCVariant):
+ normalize_env = False
+ version = "4-gaussian-3-layers-all-data-rebuttal"
+ env_id = "antmaze-medium-diverse-v0"
+ seed = 0
+
+ policy_kwargs = ThreeLayerFourGaussianPolicyKwargs()
+ pipeline = w(BCPipeline)
+
+
+class FourGaussianMGBCNormalizeVariant(MGBCVariant):
+ normalize_env = True
+ version = "4-gaussian-3-layers-normalize-env-all-data"
+ env_id = "hopper-medium-v2"
+ seed = 0
+
+ policy_kwargs = ThreeLayerFourGaussianPolicyKwargs()
+ pipeline = w(BCPipeline)
+
+
+class TwoGaussianMGBCNormalizeVariant(MGBCVariant):
+ normalize_env = True
+ version = "2g-normalize-env-all-data"
+ env_id = "hopper-medium-v2"
+ seed = 0
+
+ policy_kwargs = ThreeLayerTwoGaussianPolicyKwargs()
+ pipeline = w(BCPipeline)
diff --git a/cfpi/variants/mg/algorithm.py b/cfpi/variants/mg/algorithm.py
new file mode 100644
index 0000000..2906f16
--- /dev/null
+++ b/cfpi/variants/mg/algorithm.py
@@ -0,0 +1,19 @@
+from cfpi.variants.base.algorithm import OfflineAlgoKwargs
+
+
+class MultistepAlgoKwargs(OfflineAlgoKwargs):
+ num_eval_steps_per_epoch = int(1e5)
+ num_trains_per_train_loop = int(2e5)
+
+ pre_calculate_new_next_actions = True
+ num_epochs = 0
+ start_epoch = -int(5)
+
+
+class IterativeAlgoKwargs(OfflineAlgoKwargs):
+ num_eval_steps_per_epoch = int(1e4)
+ num_trains_per_train_loop = int(2e4)
+
+ pre_calculate_new_next_actions = False
+ num_epochs = 0
+ start_epoch = -int(50)
diff --git a/cfpi/variants/mg/trainer.py b/cfpi/variants/mg/trainer.py
new file mode 100644
index 0000000..2279703
--- /dev/null
+++ b/cfpi/variants/mg/trainer.py
@@ -0,0 +1,62 @@
+# * Setup --------------------------------------------------
+from cfpi.variants.base.trainer import PacTrainerKwargs
+
+
+class BaseMGTrainerKwargs(PacTrainerKwargs):
+ use_max_lambda = True
+ # action_selection_mode = "max"
+ delta_range = [0.5, 0.5] #! lower, upper
+ # delta = [0.2, 0.5, 1.0, 2.0]
+ # delta = [sqrt(log(x) * -2) for x in [0.99, 0.9, 0.6, 0.3, 0.15]]
+ beta_LB = 1.0
+ action_selection_mode = "max_from_both"
+ num_delta = 1
+
+ num_quantiles = 0
+ target_quantile = 0.0
+ policy_lr = 0.0
+ qf_lr = 0.0
+ soft_target_tau = 0.0
+ target_update_period = 0
+
+
+class MultistepTrainerKwargs(BaseMGTrainerKwargs):
+ action_selection_mode = "max"
+ delta_range = [0.5, 0.5]
+ num_delta = 1
+ beta_LB = 1.0
+ IQN = True
+ target_update_period = 1
+
+ num_quantiles = 8
+ target_quantile = 0.0
+ policy_lr = 3e-4
+ qf_lr = 3e-4
+ soft_target_tau = 0.005
+
+
+class MG8TrainerKwargs(BaseMGTrainerKwargs):
+ action_selection_mode = "max_from_both"
+ delta_range = [0.5, 0.5]
+ num_delta = 1
+ beta_LB = 1.0
+
+
+class MoreDeltasMGTrainerKwargs(BaseMGTrainerKwargs):
+ action_selection_mode = "max_from_both"
+ delta_range = [0.5, 1.0]
+ num_delta = 1000
+ beta_LB = 1.0
+
+
+class EasyBCQMGTrainerKwargs(BaseMGTrainerKwargs):
+ action_selection_mode = "easy_bcq"
+ num_candidate_actions = 10
+
+
+class IQLTrainerKwargs(BaseMGTrainerKwargs):
+ IQN = False
+ delta_range = [0.0, 0.0]
+ num_delta = 1
+ trivial_threshold = 0.03
+ action_selection_mode = "max_from_both"
diff --git a/cfpi/variants/mg/variant.py b/cfpi/variants/mg/variant.py
new file mode 100644
index 0000000..ab83cb4
--- /dev/null
+++ b/cfpi/variants/mg/variant.py
@@ -0,0 +1,153 @@
+from cfpi.data_management.env_replay_buffer import (
+ EnvReplayBufferNextAction,
+ EnvReplayBufferNextActionNewAction,
+)
+from cfpi.pytorch.algorithms.cfpi import mixture_gaussian as mg
+from cfpi.pytorch.networks.mlp import QuantileMlp
+from cfpi.variants.base import w
+from cfpi.variants.base.q import QFKwargs, QuantileMLPKwargs
+from cfpi.variants.base.variant import PacVariant
+from cfpi.variants.mg.algorithm import IterativeAlgoKwargs, MultistepAlgoKwargs
+from cfpi.variants.mg.trainer import (
+ BaseMGTrainerKwargs,
+ EasyBCQMGTrainerKwargs,
+ IQLTrainerKwargs,
+ MG8TrainerKwargs,
+ MoreDeltasMGTrainerKwargs,
+ MultistepTrainerKwargs,
+)
+
+
+class IQLQFKwargs(QFKwargs):
+ hidden_sizes = [256, 256]
+
+
+class BaseMGPacVariant(PacVariant):
+ checkpoint_params = "MG4"
+
+ trainer_kwargs = BaseMGTrainerKwargs()
+ trainer_cls = w(mg.MG_CFPI_Trainer)
+ pipeline = w(mg.MGBasePipeline)
+
+
+class VanillaVariant(BaseMGPacVariant):
+ checkpoint_params = "MG8"
+ trainer_kwargs = MG8TrainerKwargs()
+
+ algorithm = "PAC-MG8"
+ version = "use-mean"
+ env_id = "hopper-medium-replay-v2"
+ seed = 1
+
+
+class IQLVariant(BaseMGPacVariant):
+ algorithm = "PAC-MG"
+ version = "iql-eval-1000-policy"
+ env_id = "hopper-medium-v2"
+ seed = 2
+
+ qf_kwargs = IQLQFKwargs()
+ trainer_kwargs = IQLTrainerKwargs()
+ normalize_env = True
+ IQN = False
+ pipeline = w(mg.MGIQLPipeline)
+
+
+class VanillaMGAntMazePacVariant(BaseMGPacVariant):
+ normalize_env = False
+ IQN = False
+ checkpoint_params = "MG12_WITHOUT_NORMALIZE"
+ qf_kwargs = IQLQFKwargs()
+ pipeline = w(mg.MGIQLAntMazePipeline)
+ trainer_kwargs = IQLTrainerKwargs()
+
+ algorithm = "PAC-MG12"
+ version = "vanilla"
+ env_id = "antmaze-umaze-diverse-v0"
+ seed = 1
+
+
+class MultistepMGAntMazePacVariant(VanillaMGAntMazePacVariant):
+ algorithm = "MultiStep-PAC-MG12"
+ version = "vanilla"
+ env_id = "antmaze-umaze-v0"
+ seed = 1
+
+ replay_buffer_class = w(EnvReplayBufferNextActionNewAction)
+ checkpoint_params = "MG12_WITHOUT_NORMALIZE"
+ qf_class = w(QuantileMlp)
+ qf_kwargs = QuantileMLPKwargs()
+ trainer_kwargs = MultistepTrainerKwargs()
+ algorithm_kwargs = MultistepAlgoKwargs()
+ pipeline = w(mg.MultiStepMGAntMazePipeline)
+
+ snapshot_gap = 1
+
+
+class IterativeMGAntMazePacVariant(MultistepMGAntMazePacVariant):
+ algorithm = "Iterative-PAC-MG8"
+ version = "vanilla"
+ env_id = "antmaze-umaze-v0"
+ seed = 1
+
+ replay_buffer_class = w(EnvReplayBufferNextAction)
+ checkpoint_params = "MG8_WITHOUT_NORMALIZE"
+ algorithm_kwargs = IterativeAlgoKwargs()
+ pipeline = w(mg.IterativepMGAntMazePipeline)
+
+ snapshot_gap = 5
+
+
+class MoreDeltasMGPacVariant(BaseMGPacVariant):
+ trainer_kwargs = MoreDeltasMGTrainerKwargs()
+
+ algorithm = "PAC-MG4"
+ version = "rebuttal-more-deltas"
+ env_id = "hopper-medium-expert-v2"
+ seed = 1
+
+
+class EasyBCQMGPacVariant(BaseMGPacVariant):
+ trainer_kwargs = EasyBCQMGTrainerKwargs()
+
+ algorithm = "PAC-MG4"
+ version = "easy-bcq"
+ env_id = "hopper-medium-expert-v2"
+ seed = 1
+
+
+class BCVariant(BaseMGPacVariant):
+ algorithm = "PAC-MG"
+ version = "behavior-cloning-evaluation"
+ env_id = "walker2d-medium-v2"
+ seed = 2
+
+ pipeline = w(mg.MGEvalBCPipeline)
+
+
+class MGEnsembleAntMazePacVariant(VanillaMGAntMazePacVariant):
+ pipeline = w(mg.MGEnsembleIQLAntMazePipeline)
+
+ version = "iql-ensemblerebuttal"
+ env_id = "antmaze-umaze-diverse-v0"
+ seed = 2
+
+
+class AntMazeIQLBCVariant(VanillaMGAntMazePacVariant):
+ algorithm = "PAC-MG4"
+ version = "behavior-cloning-evaluation-vanilla"
+ env_id = "antmaze-medium-diverse-v0"
+ seed = 2
+
+ pipeline = w(mg.MGIQLAntMazeEvalBCPipeline)
+
+
+class VanillaMGPacVariant(BaseMGPacVariant):
+ algorithm = "PAC-MG4"
+ version = "ablation-log-tau"
+ env_id = "hopper-medium-expert-v2"
+ seed = 1
+
+
+class TwoGaussianBaseMGPacVariant(BaseMGPacVariant):
+ checkpoint_params = "MG2"
diff --git a/cfpi/variants/sarsa_iqn/variant.py b/cfpi/variants/sarsa_iqn/variant.py
new file mode 100644
index 0000000..3bd341c
--- /dev/null
+++ b/cfpi/variants/sarsa_iqn/variant.py
@@ -0,0 +1,44 @@
+from cfpi.pytorch.algorithms.q_training.sarsa_iqn import (
+ SarsaIQNPipeline,
+ SarsaIQNTrainer,
+)
+from cfpi.pytorch.networks.mlp import QuantileMlp
+from cfpi.variants.base import w
+from cfpi.variants.base.algorithm import OfflineAlgoKwargs
+from cfpi.variants.base.q import QuantileMLPKwargs
+from cfpi.variants.base.trainer import TrainerKwargs
+from cfpi.variants.base.variant import OfflineVariant
+from cfpi.data_management.env_replay_buffer import EnvReplayBufferNextAction
+
+class SarsaIQNAlgoKwargs(OfflineAlgoKwargs):
+ num_eval_steps_per_epoch = 0
+ max_path_length = 0
+ num_epochs = 0
+ batch_size = 256
+ start_epoch = -int(500)
+
+
+class SarsaIQNTrainerKwargs(TrainerKwargs):
+ num_quantiles = 8
+ qf_lr = 3e-4
+ target_update_period = 1
+
+
+class VanillaVariant(OfflineVariant):
+ d4rl = True
+ algorithm = "sarsa-iqn"
+ version = "normalize-env-neg-one-reward"
+ env_id = "antmaze-umaze-v0"
+ normalize_env = True
+ seed = 2
+
+ qf_class = w(QuantileMlp)
+ qf_kwargs = QuantileMLPKwargs()
+ trainer_cls = w(SarsaIQNTrainer)
+ trainer_kwargs = SarsaIQNTrainerKwargs()
+ algorithm_kwargs = SarsaIQNAlgoKwargs()
+
+ replay_buffer_class = w(EnvReplayBufferNextAction)
+ pipeline = w(SarsaIQNPipeline)
+
+ snapshot_gap = 100
diff --git a/cfpi/variants/sg/trainer.py b/cfpi/variants/sg/trainer.py
new file mode 100644
index 0000000..3f07757
--- /dev/null
+++ b/cfpi/variants/sg/trainer.py
@@ -0,0 +1,27 @@
+from cfpi.variants.base.trainer import PacTrainerKwargs
+from cfpi.variants.mg.trainer import BaseMGTrainerKwargs
+
+
+class IQLTrainerKwargs(BaseMGTrainerKwargs):
+ IQN = False
+ delta_range = [0.5, 0.5]
+ num_delta = 1
+
+
+class BCTrainerKwargs(PacTrainerKwargs):
+ delta_range = [0.0, 0.0]
+
+
+class EasyBCQTrainerKwargs(PacTrainerKwargs):
+ easy_bcq = True
+ num_candidate_actions = 10
+
+
+class ImproveReverseKLTrainerKwargs(BCTrainerKwargs):
+ alpha = 3.0
+ num_delta = 1
+ delta_range = [0.5, 0.5]
+
+
+class EvalReverseKLTrainerKwargs(BCTrainerKwargs):
+ alpha = 1.0
diff --git a/cfpi/variants/sg/variant.py b/cfpi/variants/sg/variant.py
new file mode 100644
index 0000000..77d2b6e
--- /dev/null
+++ b/cfpi/variants/sg/variant.py
@@ -0,0 +1,76 @@
+# * Setup ---------------------------------------------------
+
+
+from cfpi.pytorch.algorithms.cfpi import single_gaussian
+from cfpi.variants.base import w
+from cfpi.variants.base.trainer import PacTrainerKwargs
+from cfpi.variants.base.variant import PacVariant
+from cfpi.variants.sg.trainer import (
+ EasyBCQTrainerKwargs,
+ EvalReverseKLTrainerKwargs,
+ ImproveReverseKLTrainerKwargs,
+ IQLTrainerKwargs,
+)
+
+
+class BaseSGPacVariant(PacVariant):
+ checkpoint_params = "SG"
+
+ trainer_cls = w(single_gaussian.SG_CFPI_Trainer)
+ pipeline = w(single_gaussian.SGBasePipeline)
+
+
+# * Runnable ------------------------------------------------
+class VanillaVariant(BaseSGPacVariant):
+ algorithm = "PAC-SG"
+ version = "ablation-log-tau"
+ env_id = "hopper-medium-v2"
+ seed = 1
+
+
+class GroundTruthVariant(BaseSGPacVariant):
+ algorithm = "PAC"
+ version = "ground_truth"
+ env_id = "hopper-medium-v2"
+ seed = 1
+
+ pipeline = w(single_gaussian.GTExperiment)
+
+
+class EpochBCVariant(BaseSGPacVariant):
+ algorithm = "PAC"
+ version = "variable_ensemble_size"
+ env_id = "hopper-medium-replay-v2"
+ seed = 1
+ epoch_no = -250
+
+ pipeline = w(single_gaussian.EpochBCExperiment)
+
+
+
+class CQLVariant(BaseSGPacVariant):
+ algorithm = "PAC-SG"
+ version = "cql-baseline-3"
+ pipeline = w(single_gaussian.CQLExperiment)
+ trainer_kwargs = IQLTrainerKwargs()
+ env_id = "hopper-medium-v2"
+ normalize_env = False
+ seed = 1
+
+
+class BCVariant(BaseSGPacVariant):
+ algorithm = "PAC-MG"
+ version = "behavior-cloning-evaluation"
+ env_id = "walker2d-medium-v2"
+ seed = 2
+
+ trainer_kwargs = PacTrainerKwargs()
+
+
+class EasyBCQPacVariant(BaseSGPacVariant):
+ trainer_kwargs = EasyBCQTrainerKwargs()
+
+ algorithm = "PAC-SG"
+ version = "easy-bcq"
+ env_id = "hopper-medium-expert-v2"
+ seed = 1
diff --git a/checkpoints b/checkpoints
new file mode 160000
index 0000000..aa0e361
--- /dev/null
+++ b/checkpoints
@@ -0,0 +1 @@
+Subproject commit aa0e3614ecef0eaa16cd220df9459d5344bb3064
diff --git a/cookiecutter-config-file.yml b/cookiecutter-config-file.yml
new file mode 100644
index 0000000..e167d53
--- /dev/null
+++ b/cookiecutter-config-file.yml
@@ -0,0 +1,13 @@
+# This file contains values from Cookiecutter
+
+default_context:
+ project_name: "cfpi"
+ project_description: "Offline Reinforcement Learning with Closed-Form Policy Improvement Operators"
+ organization: "ezhang7423"
+ license: "MIT"
+ minimal_python_version: 3.8
+ github_name: "ezhang7423"
+ email: "ete@ucsb.edu"
+ version: "0.1.0"
+ line_length: "88"
+ create_example_template: "cli"
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 0000000..11baab7
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,25 @@
+FROM python:3.8-slim-buster
+
+ENV LANG=C.UTF-8 \
+ LC_ALL=C.UTF-8 \
+ PATH="${PATH}:/root/.poetry/bin"
+
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends \
+ curl \
+ && rm -rf /var/lib/apt/lists/*
+
+COPY pyproject.toml ./
+
+# Install Poetry
+RUN curl -sSL https://install.python-poetry.org | POETRY_HOME=/opt/poetry python && \
+ cd /usr/local/bin && \
+ ln -s /opt/poetry/bin/poetry && \
+ poetry config virtualenvs.create false
+
+# Allow installing dev dependencies to run tests
+ARG INSTALL_DEV=false
+RUN bash -c "if [ '$INSTALL_DEV' = 'true' ] ; then poetry install --no-root ; else poetry install --no-root --no-dev ; fi"
+
+CMD mkdir -p /workspace
+WORKDIR /workspace
diff --git a/docker/README.md b/docker/README.md
new file mode 100644
index 0000000..491d718
--- /dev/null
+++ b/docker/README.md
@@ -0,0 +1,47 @@
+# Docker for cfpi
+
+## Installation
+
+To build the Docker image, run:
+
+```bash
+make docker-build
+```
+
+which is equivalent to:
+
+```bash
+make docker-build VERSION=latest
+```
+
+You may provide name and version for the image.
+Default name is `IMAGE := cfpi`.
+Default version is `VERSION := latest`.
+
+```bash
+make docker-build IMAGE=some_name VERSION=0.1.0
+```
+
+## Usage
+
+```bash
+docker run -it --rm \
+ -v $(pwd):/workspace \
+ cfpi bash
+```
+
+## How to clean up
+
+To remove the Docker image, run `make docker-remove` with `VERSION`:
+
+```bash
+make docker-remove VERSION=0.1.0
+```
+
+you may also choose the image name
+
+```bash
+make docker-remove IMAGE=some_name VERSION=latest
+```
+
+If you want to clean all, including `build` and `pycache` run `make cleanup`
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 0000000..31201a1
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,3936 @@
+# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
+
+[[package]]
+name = "absl-py"
+version = "1.4.0"
+description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "absl-py-1.4.0.tar.gz", hash = "sha256:d2c244d01048ba476e7c080bd2c6df5e141d211de80223460d5b3b8a2a58433d"},
+ {file = "absl_py-1.4.0-py3-none-any.whl", hash = "sha256:0d3fe606adfa4f7db64792dd4c7aee4ee0c38ab75dfd353b7a83ed3e957fcb47"},
+]
+
+[[package]]
+name = "ale-py"
+version = "0.7.5"
+description = "The Arcade Learning Environment (ALE) - a platform for AI research."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "ale_py-0.7.5-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:c68d3960f0e14738362069798ff5baa85ecb337038168cb61bd4855125deb894"},
+ {file = "ale_py-0.7.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:003725a8b6f02353066564ab417f4228adc886434aa4c8d314489a7f79fe9dfd"},
+ {file = "ale_py-0.7.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a30270d4aec820527be7a643aad381d0d1c932dc476740f63cc649dd200e065"},
+ {file = "ale_py-0.7.5-cp310-cp310-win_amd64.whl", hash = "sha256:1a72f8a311887fe3800964976c7ce3567dd701e7d9b77209d3a05c5cdf9c6af0"},
+ {file = "ale_py-0.7.5-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:910c5443a836bf0bc6b03c45fefeeb9096254f807d24e247753e214062c12cae"},
+ {file = "ale_py-0.7.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f97301b89a8b49b9d2cc034f8a7df55eb6b1930126fb6cb6c6d6239bab8a89d"},
+ {file = "ale_py-0.7.5-cp37-cp37m-win_amd64.whl", hash = "sha256:38e9f6c2b1cca246253f41028b8ad92d725beec8d4672c0fe247a217a4c7dc4f"},
+ {file = "ale_py-0.7.5-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:48c5abf2c59bed0b46f17ac60ec6c940e6e7d4abc6ff7e5296246286f80e4a23"},
+ {file = "ale_py-0.7.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b2680b9a3c577865572dd9b8f24303189bb22c67b1b9668dcad3c20f59285e78"},
+ {file = "ale_py-0.7.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dec87873c6fe65c71129e20c52ce35dd4507ca409fa1233f4ae9c511144a9c99"},
+ {file = "ale_py-0.7.5-cp38-cp38-win_amd64.whl", hash = "sha256:1f8694a091e13be45c8704a089462b1519f7112a85e1c1b61c842c69a2608a98"},
+ {file = "ale_py-0.7.5-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:4c7e7fed3ce2b305d2db9ca2f87106e3df6da2c4fc2363103782ffd5975b4549"},
+ {file = "ale_py-0.7.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da5be311285a00e587b886805ea56762d44bf4f1da672229c1fbaff209bfcdc2"},
+ {file = "ale_py-0.7.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e513799fbb5128fa045cfb9a31cffa7f7c188dbe885665188b6c4cb50e9d983c"},
+ {file = "ale_py-0.7.5-cp39-cp39-win_amd64.whl", hash = "sha256:9976ceea0251ccad7e562e2021c337d5e9a9b24b5f9eecfe82573c2279337016"},
+]
+
+[package.dependencies]
+importlib-metadata = {version = ">=4.10.0", markers = "python_version < \"3.10\""}
+importlib-resources = "*"
+numpy = "*"
+
+[package.extras]
+test = ["gym (>=0.23,<1.0)", "pytest (>=7.0)"]
+
+[[package]]
+name = "appdirs"
+version = "1.4.4"
+description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+optional = false
+python-versions = "*"
+files = [
+ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
+ {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
+]
+
+[[package]]
+name = "appnope"
+version = "0.1.3"
+description = "Disable App Nap on macOS >= 10.9"
+optional = false
+python-versions = "*"
+files = [
+ {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"},
+ {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"},
+]
+
+[[package]]
+name = "astroid"
+version = "2.15.6"
+description = "An abstract syntax tree for Python with inference support."
+optional = false
+python-versions = ">=3.7.2"
+files = [
+ {file = "astroid-2.15.6-py3-none-any.whl", hash = "sha256:389656ca57b6108f939cf5d2f9a2a825a3be50ba9d589670f393236e0a03b91c"},
+ {file = "astroid-2.15.6.tar.gz", hash = "sha256:903f024859b7c7687d7a7f3a3f73b17301f8e42dfd9cc9df9d4418172d3e2dbd"},
+]
+
+[package.dependencies]
+lazy-object-proxy = ">=1.4.0"
+typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""}
+wrapt = {version = ">=1.11,<2", markers = "python_version < \"3.11\""}
+
+[[package]]
+name = "atomicwrites"
+version = "1.4.1"
+description = "Atomic file writes."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "atomicwrites-1.4.1.tar.gz", hash = "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"},
+]
+
+[[package]]
+name = "attrs"
+version = "23.1.0"
+description = "Classes Without Boilerplate"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
+ {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
+]
+
+[package.extras]
+cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
+dev = ["attrs[docs,tests]", "pre-commit"]
+docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
+tests = ["attrs[tests-no-zope]", "zope-interface"]
+tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+
+[[package]]
+name = "backcall"
+version = "0.2.0"
+description = "Specifications for callback functions passed in to an API"
+optional = false
+python-versions = "*"
+files = [
+ {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"},
+ {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"},
+]
+
+[[package]]
+name = "bandit"
+version = "1.7.5"
+description = "Security oriented static analyser for python code."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "bandit-1.7.5-py3-none-any.whl", hash = "sha256:75665181dc1e0096369112541a056c59d1c5f66f9bb74a8d686c3c362b83f549"},
+ {file = "bandit-1.7.5.tar.gz", hash = "sha256:bdfc739baa03b880c2d15d0431b31c658ffc348e907fe197e54e0389dd59e11e"},
+]
+
+[package.dependencies]
+colorama = {version = ">=0.3.9", markers = "platform_system == \"Windows\""}
+GitPython = ">=1.0.1"
+PyYAML = ">=5.3.1"
+rich = "*"
+stevedore = ">=1.20.0"
+
+[package.extras]
+test = ["beautifulsoup4 (>=4.8.0)", "coverage (>=4.5.4)", "fixtures (>=3.0.0)", "flake8 (>=4.0.0)", "pylint (==1.9.4)", "stestr (>=2.5.0)", "testscenarios (>=0.5.0)", "testtools (>=2.3.0)", "tomli (>=1.1.0)"]
+toml = ["tomli (>=1.1.0)"]
+yaml = ["PyYAML"]
+
+[[package]]
+name = "beartype"
+version = "0.14.1"
+description = "Unbearably fast runtime type checking in pure Python."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "beartype-0.14.1-py3-none-any.whl", hash = "sha256:0f70fccdb8eb6d7ddfaa3ffe3a0b66cf2edeb13452bd71ad46615775c2fa34f6"},
+ {file = "beartype-0.14.1.tar.gz", hash = "sha256:23df4715d19cebb2ce60e53c3cf44cd925843f00c71938222d777ea6332de3cb"},
+]
+
+[package.extras]
+all = ["typing-extensions (>=3.10.0.0)"]
+dev = ["autoapi (>=0.9.0)", "coverage (>=5.5)", "mypy (>=0.800)", "numpy", "pandera", "pydata-sphinx-theme (<=0.7.2)", "pytest (>=4.0.0)", "sphinx", "sphinx (>=4.2.0,<6.0.0)", "sphinxext-opengraph (>=0.7.5)", "tox (>=3.20.1)", "typing-extensions (>=3.10.0.0)"]
+doc-rtd = ["autoapi (>=0.9.0)", "pydata-sphinx-theme (<=0.7.2)", "sphinx (>=4.2.0,<6.0.0)", "sphinxext-opengraph (>=0.7.5)"]
+test-tox = ["mypy (>=0.800)", "numpy", "pandera", "pytest (>=4.0.0)", "sphinx", "typing-extensions (>=3.10.0.0)"]
+test-tox-coverage = ["coverage (>=5.5)"]
+
+[[package]]
+name = "black"
+version = "22.3.0"
+description = "The uncompromising code formatter."
+optional = false
+python-versions = ">=3.6.2"
+files = [
+ {file = "black-22.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2497f9c2386572e28921fa8bec7be3e51de6801f7459dffd6e62492531c47e09"},
+ {file = "black-22.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5795a0375eb87bfe902e80e0c8cfaedf8af4d49694d69161e5bd3206c18618bb"},
+ {file = "black-22.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3556168e2e5c49629f7b0f377070240bd5511e45e25a4497bb0073d9dda776a"},
+ {file = "black-22.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67c8301ec94e3bcc8906740fe071391bce40a862b7be0b86fb5382beefecd968"},
+ {file = "black-22.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:fd57160949179ec517d32ac2ac898b5f20d68ed1a9c977346efbac9c2f1e779d"},
+ {file = "black-22.3.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cc1e1de68c8e5444e8f94c3670bb48a2beef0e91dddfd4fcc29595ebd90bb9ce"},
+ {file = "black-22.3.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2fc92002d44746d3e7db7cf9313cf4452f43e9ea77a2c939defce3b10b5c82"},
+ {file = "black-22.3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:a6342964b43a99dbc72f72812bf88cad8f0217ae9acb47c0d4f141a6416d2d7b"},
+ {file = "black-22.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:328efc0cc70ccb23429d6be184a15ce613f676bdfc85e5fe8ea2a9354b4e9015"},
+ {file = "black-22.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06f9d8846f2340dfac80ceb20200ea5d1b3f181dd0556b47af4e8e0b24fa0a6b"},
+ {file = "black-22.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4efa5fad66b903b4a5f96d91461d90b9507a812b3c5de657d544215bb7877a"},
+ {file = "black-22.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e8477ec6bbfe0312c128e74644ac8a02ca06bcdb8982d4ee06f209be28cdf163"},
+ {file = "black-22.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:637a4014c63fbf42a692d22b55d8ad6968a946b4a6ebc385c5505d9625b6a464"},
+ {file = "black-22.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:863714200ada56cbc366dc9ae5291ceb936573155f8bf8e9de92aef51f3ad0f0"},
+ {file = "black-22.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10dbe6e6d2988049b4655b2b739f98785a884d4d6b85bc35133a8fb9a2233176"},
+ {file = "black-22.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:cee3e11161dde1b2a33a904b850b0899e0424cc331b7295f2a9698e79f9a69a0"},
+ {file = "black-22.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5891ef8abc06576985de8fa88e95ab70641de6c1fca97e2a15820a9b69e51b20"},
+ {file = "black-22.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:30d78ba6bf080eeaf0b7b875d924b15cd46fec5fd044ddfbad38c8ea9171043a"},
+ {file = "black-22.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ee8f1f7228cce7dffc2b464f07ce769f478968bfb3dd1254a4c2eeed84928aad"},
+ {file = "black-22.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee227b696ca60dd1c507be80a6bc849a5a6ab57ac7352aad1ffec9e8b805f21"},
+ {file = "black-22.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:9b542ced1ec0ceeff5b37d69838106a6348e60db7b8fdd245294dc1d26136265"},
+ {file = "black-22.3.0-py3-none-any.whl", hash = "sha256:bc58025940a896d7e5356952228b68f793cf5fcb342be703c3a2669a1488cb72"},
+ {file = "black-22.3.0.tar.gz", hash = "sha256:35020b8886c022ced9282b51b5a875b6d1ab0c387b31a065b84db7c33085ca79"},
+]
+
+[package.dependencies]
+click = ">=8.0.0"
+mypy-extensions = ">=0.4.3"
+pathspec = ">=0.9.0"
+platformdirs = ">=2"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+colorama = ["colorama (>=0.4.3)"]
+d = ["aiohttp (>=3.7.4)"]
+jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
+uvloop = ["uvloop (>=0.15.2)"]
+
+[[package]]
+name = "box2d-py"
+version = "2.3.5"
+description = "Python Box2D"
+optional = false
+python-versions = "*"
+files = [
+ {file = "box2d-py-2.3.5.tar.gz", hash = "sha256:b37dc38844bcd7def48a97111d2b082e4f81cca3cece7460feb3eacda0da2207"},
+ {file = "box2d_py-2.3.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:287aa54005c0644b47bf7ad72966e4068d66e56bcf8458f5b4a653ffe42a2618"},
+ {file = "box2d_py-2.3.5-cp36-cp36m-macosx_10_13_x86_64.whl", hash = "sha256:483b3f9acd5d156b72bf2013f93cf7f8ca0ee1562e43d2353ab4c0cbec4ee49a"},
+ {file = "box2d_py-2.3.5-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:a294c2d7cc73cc05dd491287079e15419eb98caa3158df94f40faf85eeb4b6e9"},
+ {file = "box2d_py-2.3.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:0d46068eb8d29e366ed698ab2a4833d4d2d34ed035ebd6a685888007dda05f64"},
+]
+
+[[package]]
+name = "certifi"
+version = "2023.7.22"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
+ {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
+]
+
+[[package]]
+name = "cffi"
+version = "1.15.1"
+description = "Foreign Function Interface for Python calling C code."
+optional = false
+python-versions = "*"
+files = [
+ {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"},
+ {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"},
+ {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"},
+ {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"},
+ {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"},
+ {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"},
+ {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"},
+ {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"},
+ {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"},
+ {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"},
+ {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"},
+ {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"},
+ {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"},
+ {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"},
+ {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"},
+ {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"},
+ {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"},
+ {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"},
+ {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"},
+ {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"},
+ {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"},
+ {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"},
+ {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"},
+ {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"},
+ {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"},
+ {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"},
+ {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"},
+ {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"},
+ {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"},
+ {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"},
+ {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"},
+ {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"},
+ {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"},
+ {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"},
+ {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"},
+]
+
+[package.dependencies]
+pycparser = "*"
+
+[[package]]
+name = "cfgv"
+version = "3.3.1"
+description = "Validate configuration and produce human readable error messages."
+optional = false
+python-versions = ">=3.6.1"
+files = [
+ {file = "cfgv-3.3.1-py2.py3-none-any.whl", hash = "sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426"},
+ {file = "cfgv-3.3.1.tar.gz", hash = "sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736"},
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.2.0"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"},
+ {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"},
+]
+
+[[package]]
+name = "click"
+version = "8.1.6"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "click-8.1.6-py3-none-any.whl", hash = "sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5"},
+ {file = "click-8.1.6.tar.gz", hash = "sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[[package]]
+name = "cloudpickle"
+version = "2.2.1"
+description = "Extended pickling support for Python objects"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "cloudpickle-2.2.1-py3-none-any.whl", hash = "sha256:61f594d1f4c295fa5cd9014ceb3a1fc4a70b0de1164b94fbc2d854ccba056f9f"},
+ {file = "cloudpickle-2.2.1.tar.gz", hash = "sha256:d89684b8de9e34a2a43b3460fbca07d09d6e25ce858df4d5a44240403b6178f5"},
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "contourpy"
+version = "1.1.0"
+description = "Python library for calculating contours of 2D quadrilateral grids"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "contourpy-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:89f06eff3ce2f4b3eb24c1055a26981bffe4e7264acd86f15b97e40530b794bc"},
+ {file = "contourpy-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dffcc2ddec1782dd2f2ce1ef16f070861af4fb78c69862ce0aab801495dda6a3"},
+ {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25ae46595e22f93592d39a7eac3d638cda552c3e1160255258b695f7b58e5655"},
+ {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:17cfaf5ec9862bc93af1ec1f302457371c34e688fbd381f4035a06cd47324f48"},
+ {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18a64814ae7bce73925131381603fff0116e2df25230dfc80d6d690aa6e20b37"},
+ {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90c81f22b4f572f8a2110b0b741bb64e5a6427e0a198b2cdc1fbaf85f352a3aa"},
+ {file = "contourpy-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53cc3a40635abedbec7f1bde60f8c189c49e84ac180c665f2cd7c162cc454baa"},
+ {file = "contourpy-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f795597073b09d631782e7245016a4323cf1cf0b4e06eef7ea6627e06a37ff2"},
+ {file = "contourpy-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0b7b04ed0961647691cfe5d82115dd072af7ce8846d31a5fac6c142dcce8b882"},
+ {file = "contourpy-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27bc79200c742f9746d7dd51a734ee326a292d77e7d94c8af6e08d1e6c15d545"},
+ {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:052cc634bf903c604ef1a00a5aa093c54f81a2612faedaa43295809ffdde885e"},
+ {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9382a1c0bc46230fb881c36229bfa23d8c303b889b788b939365578d762b5c18"},
+ {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5cec36c5090e75a9ac9dbd0ff4a8cf7cecd60f1b6dc23a374c7d980a1cd710e"},
+ {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f0cbd657e9bde94cd0e33aa7df94fb73c1ab7799378d3b3f902eb8eb2e04a3a"},
+ {file = "contourpy-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:181cbace49874f4358e2929aaf7ba84006acb76694102e88dd15af861996c16e"},
+ {file = "contourpy-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fb3b7d9e6243bfa1efb93ccfe64ec610d85cfe5aec2c25f97fbbd2e58b531256"},
+ {file = "contourpy-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bcb41692aa09aeb19c7c213411854402f29f6613845ad2453d30bf421fe68fed"},
+ {file = "contourpy-1.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5d123a5bc63cd34c27ff9c7ac1cd978909e9c71da12e05be0231c608048bb2ae"},
+ {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62013a2cf68abc80dadfd2307299bfa8f5aa0dcaec5b2954caeb5fa094171103"},
+ {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0b6616375d7de55797d7a66ee7d087efe27f03d336c27cf1f32c02b8c1a5ac70"},
+ {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:317267d915490d1e84577924bd61ba71bf8681a30e0d6c545f577363157e5e94"},
+ {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d551f3a442655f3dcc1285723f9acd646ca5858834efeab4598d706206b09c9f"},
+ {file = "contourpy-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e7a117ce7df5a938fe035cad481b0189049e8d92433b4b33aa7fc609344aafa1"},
+ {file = "contourpy-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:d4f26b25b4f86087e7d75e63212756c38546e70f2a92d2be44f80114826e1cd4"},
+ {file = "contourpy-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc00bb4225d57bff7ebb634646c0ee2a1298402ec10a5fe7af79df9a51c1bfd9"},
+ {file = "contourpy-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:189ceb1525eb0655ab8487a9a9c41f42a73ba52d6789754788d1883fb06b2d8a"},
+ {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f2931ed4741f98f74b410b16e5213f71dcccee67518970c42f64153ea9313b9"},
+ {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30f511c05fab7f12e0b1b7730ebdc2ec8deedcfb505bc27eb570ff47c51a8f15"},
+ {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143dde50520a9f90e4a2703f367cf8ec96a73042b72e68fcd184e1279962eb6f"},
+ {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e94bef2580e25b5fdb183bf98a2faa2adc5b638736b2c0a4da98691da641316a"},
+ {file = "contourpy-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ed614aea8462735e7d70141374bd7650afd1c3f3cb0c2dbbcbe44e14331bf002"},
+ {file = "contourpy-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:438ba416d02f82b692e371858143970ed2eb6337d9cdbbede0d8ad9f3d7dd17d"},
+ {file = "contourpy-1.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a698c6a7a432789e587168573a864a7ea374c6be8d4f31f9d87c001d5a843493"},
+ {file = "contourpy-1.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:397b0ac8a12880412da3551a8cb5a187d3298a72802b45a3bd1805e204ad8439"},
+ {file = "contourpy-1.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a67259c2b493b00e5a4d0f7bfae51fb4b3371395e47d079a4446e9b0f4d70e76"},
+ {file = "contourpy-1.1.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2b836d22bd2c7bb2700348e4521b25e077255ebb6ab68e351ab5aa91ca27e027"},
+ {file = "contourpy-1.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084eaa568400cfaf7179b847ac871582199b1b44d5699198e9602ecbbb5f6104"},
+ {file = "contourpy-1.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:911ff4fd53e26b019f898f32db0d4956c9d227d51338fb3b03ec72ff0084ee5f"},
+ {file = "contourpy-1.1.0.tar.gz", hash = "sha256:e53046c3863828d21d531cc3b53786e6580eb1ba02477e8681009b6aa0870b21"},
+]
+
+[package.dependencies]
+numpy = ">=1.16"
+
+[package.extras]
+bokeh = ["bokeh", "selenium"]
+docs = ["furo", "sphinx-copybutton"]
+mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.2.0)", "types-Pillow"]
+test = ["Pillow", "contourpy[test-no-images]", "matplotlib"]
+test-no-images = ["pytest", "pytest-cov", "wurlitzer"]
+
+[[package]]
+name = "coverage"
+version = "6.5.0"
+description = "Code coverage measurement for Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "coverage-6.5.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ef8674b0ee8cc11e2d574e3e2998aea5df5ab242e012286824ea3c6970580e53"},
+ {file = "coverage-6.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:784f53ebc9f3fd0e2a3f6a78b2be1bd1f5575d7863e10c6e12504f240fd06660"},
+ {file = "coverage-6.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4a5be1748d538a710f87542f22c2cad22f80545a847ad91ce45e77417293eb4"},
+ {file = "coverage-6.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:83516205e254a0cb77d2d7bb3632ee019d93d9f4005de31dca0a8c3667d5bc04"},
+ {file = "coverage-6.5.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af4fffaffc4067232253715065e30c5a7ec6faac36f8fc8d6f64263b15f74db0"},
+ {file = "coverage-6.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:97117225cdd992a9c2a5515db1f66b59db634f59d0679ca1fa3fe8da32749cae"},
+ {file = "coverage-6.5.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a1170fa54185845505fbfa672f1c1ab175446c887cce8212c44149581cf2d466"},
+ {file = "coverage-6.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:11b990d520ea75e7ee8dcab5bc908072aaada194a794db9f6d7d5cfd19661e5a"},
+ {file = "coverage-6.5.0-cp310-cp310-win32.whl", hash = "sha256:5dbec3b9095749390c09ab7c89d314727f18800060d8d24e87f01fb9cfb40b32"},
+ {file = "coverage-6.5.0-cp310-cp310-win_amd64.whl", hash = "sha256:59f53f1dc5b656cafb1badd0feb428c1e7bc19b867479ff72f7a9dd9b479f10e"},
+ {file = "coverage-6.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4a5375e28c5191ac38cca59b38edd33ef4cc914732c916f2929029b4bfb50795"},
+ {file = "coverage-6.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4ed2820d919351f4167e52425e096af41bfabacb1857186c1ea32ff9983ed75"},
+ {file = "coverage-6.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:33a7da4376d5977fbf0a8ed91c4dffaaa8dbf0ddbf4c8eea500a2486d8bc4d7b"},
+ {file = "coverage-6.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8fb6cf131ac4070c9c5a3e21de0f7dc5a0fbe8bc77c9456ced896c12fcdad91"},
+ {file = "coverage-6.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a6b7d95969b8845250586f269e81e5dfdd8ff828ddeb8567a4a2eaa7313460c4"},
+ {file = "coverage-6.5.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1ef221513e6f68b69ee9e159506d583d31aa3567e0ae84eaad9d6ec1107dddaa"},
+ {file = "coverage-6.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cca4435eebea7962a52bdb216dec27215d0df64cf27fc1dd538415f5d2b9da6b"},
+ {file = "coverage-6.5.0-cp311-cp311-win32.whl", hash = "sha256:98e8a10b7a314f454d9eff4216a9a94d143a7ee65018dd12442e898ee2310578"},
+ {file = "coverage-6.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:bc8ef5e043a2af066fa8cbfc6e708d58017024dc4345a1f9757b329a249f041b"},
+ {file = "coverage-6.5.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4433b90fae13f86fafff0b326453dd42fc9a639a0d9e4eec4d366436d1a41b6d"},
+ {file = "coverage-6.5.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4f05d88d9a80ad3cac6244d36dd89a3c00abc16371769f1340101d3cb899fc3"},
+ {file = "coverage-6.5.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:94e2565443291bd778421856bc975d351738963071e9b8839ca1fc08b42d4bef"},
+ {file = "coverage-6.5.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:027018943386e7b942fa832372ebc120155fd970837489896099f5cfa2890f79"},
+ {file = "coverage-6.5.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:255758a1e3b61db372ec2736c8e2a1fdfaf563977eedbdf131de003ca5779b7d"},
+ {file = "coverage-6.5.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:851cf4ff24062c6aec510a454b2584f6e998cada52d4cb58c5e233d07172e50c"},
+ {file = "coverage-6.5.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:12adf310e4aafddc58afdb04d686795f33f4d7a6fa67a7a9d4ce7d6ae24d949f"},
+ {file = "coverage-6.5.0-cp37-cp37m-win32.whl", hash = "sha256:b5604380f3415ba69de87a289a2b56687faa4fe04dbee0754bfcae433489316b"},
+ {file = "coverage-6.5.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4a8dbc1f0fbb2ae3de73eb0bdbb914180c7abfbf258e90b311dcd4f585d44bd2"},
+ {file = "coverage-6.5.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d900bb429fdfd7f511f868cedd03a6bbb142f3f9118c09b99ef8dc9bf9643c3c"},
+ {file = "coverage-6.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2198ea6fc548de52adc826f62cb18554caedfb1d26548c1b7c88d8f7faa8f6ba"},
+ {file = "coverage-6.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c4459b3de97b75e3bd6b7d4b7f0db13f17f504f3d13e2a7c623786289dd670e"},
+ {file = "coverage-6.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20c8ac5386253717e5ccc827caad43ed66fea0efe255727b1053a8154d952398"},
+ {file = "coverage-6.5.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b07130585d54fe8dff3d97b93b0e20290de974dc8177c320aeaf23459219c0b"},
+ {file = "coverage-6.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dbdb91cd8c048c2b09eb17713b0c12a54fbd587d79adcebad543bc0cd9a3410b"},
+ {file = "coverage-6.5.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:de3001a203182842a4630e7b8d1a2c7c07ec1b45d3084a83d5d227a3806f530f"},
+ {file = "coverage-6.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e07f4a4a9b41583d6eabec04f8b68076ab3cd44c20bd29332c6572dda36f372e"},
+ {file = "coverage-6.5.0-cp38-cp38-win32.whl", hash = "sha256:6d4817234349a80dbf03640cec6109cd90cba068330703fa65ddf56b60223a6d"},
+ {file = "coverage-6.5.0-cp38-cp38-win_amd64.whl", hash = "sha256:7ccf362abd726b0410bf8911c31fbf97f09f8f1061f8c1cf03dfc4b6372848f6"},
+ {file = "coverage-6.5.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:633713d70ad6bfc49b34ead4060531658dc6dfc9b3eb7d8a716d5873377ab745"},
+ {file = "coverage-6.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:95203854f974e07af96358c0b261f1048d8e1083f2de9b1c565e1be4a3a48cfc"},
+ {file = "coverage-6.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9023e237f4c02ff739581ef35969c3739445fb059b060ca51771e69101efffe"},
+ {file = "coverage-6.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:265de0fa6778d07de30bcf4d9dc471c3dc4314a23a3c6603d356a3c9abc2dfcf"},
+ {file = "coverage-6.5.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f830ed581b45b82451a40faabb89c84e1a998124ee4212d440e9c6cf70083e5"},
+ {file = "coverage-6.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7b6be138d61e458e18d8e6ddcddd36dd96215edfe5f1168de0b1b32635839b62"},
+ {file = "coverage-6.5.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:42eafe6778551cf006a7c43153af1211c3aaab658d4d66fa5fcc021613d02518"},
+ {file = "coverage-6.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:723e8130d4ecc8f56e9a611e73b31219595baa3bb252d539206f7bbbab6ffc1f"},
+ {file = "coverage-6.5.0-cp39-cp39-win32.whl", hash = "sha256:d9ecf0829c6a62b9b573c7bb6d4dcd6ba8b6f80be9ba4fc7ed50bf4ac9aecd72"},
+ {file = "coverage-6.5.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc2af30ed0d5ae0b1abdb4ebdce598eafd5b35397d4d75deb341a614d333d987"},
+ {file = "coverage-6.5.0-pp36.pp37.pp38-none-any.whl", hash = "sha256:1431986dac3923c5945271f169f59c45b8802a114c8f548d611f2015133df77a"},
+ {file = "coverage-6.5.0.tar.gz", hash = "sha256:f642e90754ee3e06b0e7e51bce3379590e76b7f76b708e1a71ff043f87025c84"},
+]
+
+[package.dependencies]
+tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""}
+
+[package.extras]
+toml = ["tomli"]
+
+[[package]]
+name = "coverage-badge"
+version = "1.1.0"
+description = "Generate coverage badges for Coverage.py."
+optional = false
+python-versions = "*"
+files = [
+ {file = "coverage-badge-1.1.0.tar.gz", hash = "sha256:c824a106503e981c02821e7d32f008fb3984b2338aa8c3800ec9357e33345b78"},
+ {file = "coverage_badge-1.1.0-py2.py3-none-any.whl", hash = "sha256:e365d56e5202e923d1b237f82defd628a02d1d645a147f867ac85c58c81d7997"},
+]
+
+[package.dependencies]
+coverage = "*"
+
+[[package]]
+name = "cvxpy"
+version = "1.3.2"
+description = "A domain-specific language for modeling convex optimization problems in Python."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "cvxpy-1.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bf60240f53441abe0f41f147383831345562c0f458e96e112ba55cb4d9030f27"},
+ {file = "cvxpy-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d771ceba9195ebe3164c0b635fd038840577f009ffc12e910db5d1df44b30792"},
+ {file = "cvxpy-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ff9c078919ff19e8297ff75be80262fd208d1ad014a3292baa3311c450f1de0"},
+ {file = "cvxpy-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f372d8702b143f79bb097adcb102a0d5888cbb3e4e7e232fb4a101a04d97cc9b"},
+ {file = "cvxpy-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:ba37dd5499014fde1da022bdea6efc0ef5e998b3f62c052751812c235a1a58cf"},
+ {file = "cvxpy-1.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4bde3515840eb56c8448751d04847abcba074ddc1ebb2f47dbc2c85ad9b78b81"},
+ {file = "cvxpy-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5c9b2b7474647483531ccd469bbfbb4589410d65ac4c28112751c8c56e14d2c3"},
+ {file = "cvxpy-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3bb3db0f7ac49b8acb9a92d3bd5cf814920a6115a9275124bef1299770208c3"},
+ {file = "cvxpy-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b39683655fa099e9b6e51b38bdad5353eaae86344c58dfc7616b33db661bcb0"},
+ {file = "cvxpy-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:647ba8ced43c55241ca684fd9f5be58258e793e92e611abc802c1b540582cb8c"},
+ {file = "cvxpy-1.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a70540d1ed93491b285f3f05451bd76df5f39c16e82c350c79dae31a0f8776c6"},
+ {file = "cvxpy-1.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0046798fd869853dd9d8d111530da08621b35f16c7346ff6a4775071fa2b273"},
+ {file = "cvxpy-1.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b6dde84f42c7d8fd1bf6867a77667ff73529ee5e1ea27892e45716077119e86"},
+ {file = "cvxpy-1.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:843f6cd637c31f70ffcf817df86731b99bb15c847d4554a47cf3c0479571a26d"},
+ {file = "cvxpy-1.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76c4e7b5a3bc540c09c8388302f5ed5033738454033a9d56a06eb78cfb303e11"},
+ {file = "cvxpy-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:05430b56b28bc11edf8be287ce773c34b942335fac154a18ebc298ad6245383b"},
+ {file = "cvxpy-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b423313e614a73bc55d7b8474039160ed5ae74e133a15cef51f58639ab210050"},
+ {file = "cvxpy-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fc6d54393eb926693634134c5109ddd7a17cdb642e8c5a0ff613d8873c0cba3"},
+ {file = "cvxpy-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:fadc5c9761ad3b2e3a28bfffb0635bdf6689d2c26721c6f3feeec08b638dd940"},
+ {file = "cvxpy-1.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:646908a9afbd20deb2f7848c48521f6677916007170db4b31cf8b679c1e193e0"},
+ {file = "cvxpy-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b1e8b7b609630788aabd7ddbfc7dd3d0436a30b814e1896cb45d20155cae8809"},
+ {file = "cvxpy-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4765a1e2529bd4f095720143ddd7e55f1063b5a2793daf9d03ebf7f584f87553"},
+ {file = "cvxpy-1.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:817d82d1e8b6dc5da244061ce6a63549eb7ef2e3a083c7d170ad578ce09f3071"},
+ {file = "cvxpy-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:11e3d09592427eb62dd3268e0e186ef2a1a2587418589e65c264cfe82331de2e"},
+ {file = "cvxpy-1.3.2.tar.gz", hash = "sha256:0b685e5040f199f3d703f30f5d22d1f865597623455153d1ddd770245aef0975"},
+]
+
+[package.dependencies]
+ecos = ">=2"
+numpy = ">=1.15"
+osqp = ">=0.4.1"
+scipy = ">=1.1.0"
+scs = ">=1.1.6"
+setuptools = ">65.5.1"
+
+[package.extras]
+cbc = ["cylp (>=0.91.5)"]
+clarabel = ["clarabel"]
+cvxopt = ["cvxopt"]
+diffcp = ["diffcp"]
+glop = ["ortools (>=9.3,<9.5)"]
+glpk = ["cvxopt"]
+glpk-mi = ["cvxopt"]
+gurobi = ["gurobipy"]
+highs = ["scipy (>=1.6.1)"]
+mosek = ["Mosek"]
+pdlp = ["ortools (>=9.3,<9.5)"]
+proxqp = ["proxsuite"]
+scip = ["PySCIPOpt"]
+scipy = ["scipy"]
+scs = ["setuptools (>65.5.1)"]
+xpress = ["xpress"]
+
+[[package]]
+name = "cycler"
+version = "0.11.0"
+description = "Composable style cycles"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"},
+ {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"},
+]
+
+[[package]]
+name = "cython"
+version = "3.0.0"
+description = "The Cython compiler for writing C extensions in the Python language."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "Cython-3.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c7d728e1a49ad01d41181e3a9ea80b8d14e825f4679e4dd837cbf7bca7998a5"},
+ {file = "Cython-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:626a4a6ef4b7ced87c348ea805488e4bd39dad9d0b39659aa9e1040b62bbfedf"},
+ {file = "Cython-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33c900d1ca9f622b969ac7d8fc44bdae140a4a6c7d8819413b51f3ccd0586a09"},
+ {file = "Cython-3.0.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a65bc50dc1bc2faeafd9425defbdef6a468974f5c4192497ff7f14adccfdcd32"},
+ {file = "Cython-3.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3b71b399b10b038b056ad12dce1e317a8aa7a96e99de7e4fa2fa5d1c9415cfb9"},
+ {file = "Cython-3.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f42f304c097cc53e9eb5f1a1d150380353d5018a3191f1b77f0de353c762181e"},
+ {file = "Cython-3.0.0-cp310-cp310-win32.whl", hash = "sha256:3e234e2549e808d9259fdb23ebcfd145be30c638c65118326ec33a8d29248dc2"},
+ {file = "Cython-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:829c8333195100448a23863cf64a07e1334fae6a275aefe871458937911531b6"},
+ {file = "Cython-3.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:06db81b1a01858fcc406616f8528e686ffb6cf7c3d78fb83767832bfecea8ad8"},
+ {file = "Cython-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c93634845238645ce7abf63a56b1c5b6248189005c7caff898fd4a0dac1c5e1e"},
+ {file = "Cython-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa606675c6bd23478b1d174e2a84e3c5a2c660968f97dc455afe0fae198f9d3d"},
+ {file = "Cython-3.0.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3355e6f690184f984eeb108b0f5bbc4bcf8b9444f8168933acf79603abf7baf"},
+ {file = "Cython-3.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:93a34e1ca8afa4b7075b02ed14a7e4969256297029fb1bfd4cbe48f7290dbcff"},
+ {file = "Cython-3.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bb1165ca9e78823f9ad1efa5b3d83156f868eabd679a615d140a3021bb92cd65"},
+ {file = "Cython-3.0.0-cp311-cp311-win32.whl", hash = "sha256:2fadde1da055944f5e1e17625055f54ddd11f451889110278ef30e07bd5e1695"},
+ {file = "Cython-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:254ed1f03a6c237fa64f0c6e44862058de65bfa2e6a3b48ca3c205492e0653aa"},
+ {file = "Cython-3.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4e212237b7531759befb92699c452cd65074a78051ae4ee36ff8b237395ecf3d"},
+ {file = "Cython-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f29307463eba53747b31f71394ed087e3e3e264dcc433e62de1d51f5c0c966c"},
+ {file = "Cython-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53328a8af0806bebbdb48a4191883b11ee9d9dfb084d84f58fa5a8ab58baefc9"},
+ {file = "Cython-3.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5962e70b15e863e72bed6910e8c6ffef77d36cc98e2b31c474378f3b9e49b0e3"},
+ {file = "Cython-3.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9e69139f4e60ab14c50767a568612ea64d6907e9c8e0289590a170eb495e005f"},
+ {file = "Cython-3.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c40bdbcb2286f0aeeb5df9ce53d45da2d2a9b36a16b331cd0809d212d22a8fc7"},
+ {file = "Cython-3.0.0-cp312-cp312-win32.whl", hash = "sha256:8abb8915eb2e57fa53d918afe641c05d1bcc6ed1913682ec1f28de71f4e3f398"},
+ {file = "Cython-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:30a4bd2481e59bd7ab2539f835b78edc19fc455811e476916f56026b93afd28b"},
+ {file = "Cython-3.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0e1e4b7e4bfbf22fecfa5b852f0e499c442d4853b7ebd33ae37cdec9826ed5d8"},
+ {file = "Cython-3.0.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b00df42cdd1a285a64491ba23de08ab14169d3257c840428d40eb7e8e9979af"},
+ {file = "Cython-3.0.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:650d03ddddc08b051b4659778733f0f173ca7d327415755c05d265a6c1ba02fb"},
+ {file = "Cython-3.0.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4965f2ebade17166f21a508d66dd60d2a0b3a3b90abe3f72003baa17ae020dd6"},
+ {file = "Cython-3.0.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4123c8d03167803df31da6b39de167cb9c04ac0aa4e35d4e5aa9d08ad511b84d"},
+ {file = "Cython-3.0.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:296c53b6c0030cf82987eef163444e8d7631cc139d995f9d58679d9fd1ddbf31"},
+ {file = "Cython-3.0.0-cp36-cp36m-win32.whl", hash = "sha256:0d2c1e172f1c81bafcca703093608e10dc16e3e2d24c5644c17606c7fdb1792c"},
+ {file = "Cython-3.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:bc816d8eb3686d6f8d165f4156bac18c1147e1035dc28a76742d0b7fb5b7c032"},
+ {file = "Cython-3.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8d86651347bbdbac1aca1824696c5e4c0a3b162946c422edcca2be12a03744d1"},
+ {file = "Cython-3.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84176bd04ce9f3cc8799b47ec6d1959fa1ea5e71424507df7bbf0b0915bbedef"},
+ {file = "Cython-3.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35abcf07b8277ec95bbe49a07b5c8760a2d941942ccfe759a94c8d2fe5602e9f"},
+ {file = "Cython-3.0.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a44d6b9a29b2bff38bb648577b2fcf6a68cf8b1783eee89c2eb749f69494b98d"},
+ {file = "Cython-3.0.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4dc6bbe7cf079db37f1ebb9b0f10d0d7f29e293bb8688e92d50b5ea7a91d82f3"},
+ {file = "Cython-3.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e28763e75e380b8be62b02266a7995a781997c97c119efbdccb8fb954bcd7574"},
+ {file = "Cython-3.0.0-cp37-cp37m-win32.whl", hash = "sha256:edae615cb4af51d5173e76ba9aea212424d025c57012e9cdf2f131f774c5ba71"},
+ {file = "Cython-3.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:20c604e974832aaf8b7a1f5455ee7274b34df62a35ee095cd7d2ed7e818e6c53"},
+ {file = "Cython-3.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c85fd2b1cbd9400d60ebe074795bb9a9188752f1612be3b35b0831a24879b91f"},
+ {file = "Cython-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:090256c687106932339f87f888b95f0d69c617bc9b18801555545b695d29d8ab"},
+ {file = "Cython-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cec2a67a0a7d9d4399758c0657ca03e5912e37218859cfbf046242cc532bfb3b"},
+ {file = "Cython-3.0.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a1cdd01ce45333bc264a218c6e183700d6b998f029233f586a53c9b13455c2d2"},
+ {file = "Cython-3.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ecee663d2d50ca939fc5db81f2f8a219c2417b4651ad84254c50a03a9cb1aadd"},
+ {file = "Cython-3.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:30f10e79393b411af7677c270ea69807acb9fc30205c8ff25561f4deef780ec1"},
+ {file = "Cython-3.0.0-cp38-cp38-win32.whl", hash = "sha256:609777d3a7a0a23b225e84d967af4ad2485c8bdfcacef8037cf197e87d431ca0"},
+ {file = "Cython-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:7f4a6dfd42ae0a45797f50fc4f6add702abf46ab3e7cd61811a6c6a97a40e1a2"},
+ {file = "Cython-3.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2d8158277c8942c0b20ff4c074fe6a51c5b89e6ac60cef606818de8c92773596"},
+ {file = "Cython-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54e34f99b2a8c1e11478541b2822e6408c132b98b6b8f5ed89411e5e906631ea"},
+ {file = "Cython-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:877d1c8745df59dd2061a0636c602729e9533ba13f13aa73a498f68662e1cbde"},
+ {file = "Cython-3.0.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204690be60f0ff32eb70b04f28ef0d1e50ffd7b3f77ba06a7dc2389ee3b848e0"},
+ {file = "Cython-3.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:06fcb4628ccce2ba5abc8630adbeaf4016f63a359b4c6c3827b2d80e0673981c"},
+ {file = "Cython-3.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:090e24cfa31c926d0b13d8bb2ef48175acdd061ae1413343c94a2b12a4a4fa6f"},
+ {file = "Cython-3.0.0-cp39-cp39-win32.whl", hash = "sha256:4cd00f2158dc00f7f93a92444d0f663eda124c9c29bbbd658964f4e89c357fe8"},
+ {file = "Cython-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:5b4cc896d49ce2bae8d6a030f9a4c64965b59c38acfbf4617685e17f7fcf1731"},
+ {file = "Cython-3.0.0-py2.py3-none-any.whl", hash = "sha256:ff1aef1a03cfe293237c7a86ae9625b0411b2df30c53d1a7f29a8d381f38a1df"},
+ {file = "Cython-3.0.0.tar.gz", hash = "sha256:350b18f9673e63101dbbfcf774ee2f57c20ac4636d255741d76ca79016b1bd82"},
+]
+
+[[package]]
+name = "d4rl"
+version = "1.1"
+description = ""
+optional = false
+python-versions = "*"
+files = []
+develop = false
+
+[package.dependencies]
+click = "*"
+dm_control = "*"
+gym = "*"
+h5py = "*"
+mjrl = {git = "https://github.com/aravindr93/mjrl", rev = "master"}
+mujoco_py = "*"
+numpy = "*"
+pybullet = "*"
+termcolor = "*"
+
+[package.source]
+type = "git"
+url = "https://github.com/ezhang7423/d4rl-installable.git"
+reference = "HEAD"
+resolved_reference = "74c41f695385835d7dea3d87a14b299a90bbeffe"
+
+[[package]]
+name = "darglint"
+version = "1.8.1"
+description = "A utility for ensuring Google-style docstrings stay up to date with the source code."
+optional = false
+python-versions = ">=3.6,<4.0"
+files = [
+ {file = "darglint-1.8.1-py3-none-any.whl", hash = "sha256:5ae11c259c17b0701618a20c3da343a3eb98b3bc4b5a83d31cdd94f5ebdced8d"},
+ {file = "darglint-1.8.1.tar.gz", hash = "sha256:080d5106df149b199822e7ee7deb9c012b49891538f14a11be681044f0bb20da"},
+]
+
+[[package]]
+name = "decorator"
+version = "5.1.1"
+description = "Decorators for Humans"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"},
+ {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"},
+]
+
+[[package]]
+name = "dill"
+version = "0.3.7"
+description = "serialize all of Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "dill-0.3.7-py3-none-any.whl", hash = "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e"},
+ {file = "dill-0.3.7.tar.gz", hash = "sha256:cc1c8b182eb3013e24bd475ff2e9295af86c1a38eb1aff128dac8962a9ce3c03"},
+]
+
+[package.extras]
+graph = ["objgraph (>=1.7.2)"]
+
+[[package]]
+name = "distlib"
+version = "0.3.7"
+description = "Distribution utilities"
+optional = false
+python-versions = "*"
+files = [
+ {file = "distlib-0.3.7-py2.py3-none-any.whl", hash = "sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057"},
+ {file = "distlib-0.3.7.tar.gz", hash = "sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8"},
+]
+
+[[package]]
+name = "dm-control"
+version = "1.0.14"
+description = "Continuous control environments and MuJoCo Python bindings."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "dm_control-1.0.14-py3-none-any.whl", hash = "sha256:883c63244a7ebf598700a97564ed19fffd3479ca79efd090aed881609cdb9fc6"},
+ {file = "dm_control-1.0.14.tar.gz", hash = "sha256:def1ece747b6f175c581150826b50f1a6134086dab34f8f3fd2d088ea035cf3d"},
+]
+
+[package.dependencies]
+absl-py = ">=0.7.0"
+dm-env = "*"
+dm-tree = "!=0.1.2"
+glfw = "*"
+labmaze = "*"
+lxml = "*"
+mujoco = ">=2.3.7"
+numpy = ">=1.9.0"
+protobuf = ">=3.19.4"
+pyopengl = ">=3.1.4"
+pyparsing = ">=3.0.0"
+requests = "*"
+scipy = "*"
+setuptools = "!=50.0.0"
+tqdm = "*"
+
+[package.extras]
+hdf5 = ["h5py"]
+
+[[package]]
+name = "dm-env"
+version = "1.6"
+description = "A Python interface for Reinforcement Learning environments."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "dm-env-1.6.tar.gz", hash = "sha256:a436eb1c654c39e0c986a516cee218bea7140b510fceff63f97eb4fcff3d93de"},
+ {file = "dm_env-1.6-py3-none-any.whl", hash = "sha256:0eabb6759dd453b625e041032f7ae0c1e87d4eb61b6a96b9ca586483837abf29"},
+]
+
+[package.dependencies]
+absl-py = "*"
+dm-tree = "*"
+numpy = "*"
+
+[[package]]
+name = "dm-tree"
+version = "0.1.8"
+description = "Tree is a library for working with nested data structures."
+optional = false
+python-versions = "*"
+files = [
+ {file = "dm-tree-0.1.8.tar.gz", hash = "sha256:0fcaabbb14e7980377439e7140bd05552739ca5e515ecb3119f234acee4b9430"},
+ {file = "dm_tree-0.1.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:35cc164a79336bfcfafb47e5f297898359123bbd3330c1967f0c4994f9cf9f60"},
+ {file = "dm_tree-0.1.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39070ba268c0491af9fe7a58644d99e8b4f2cde6e5884ba3380bddc84ed43d5f"},
+ {file = "dm_tree-0.1.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2869228d9c619074de501a3c10dc7f07c75422f8fab36ecdcb859b6f1b1ec3ef"},
+ {file = "dm_tree-0.1.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d20f2faa3672b52e5013f4077117bfb99c4cfc0b445d3bde1584c34032b57436"},
+ {file = "dm_tree-0.1.8-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5483dca4d7eb1a0d65fe86d3b6a53ae717face83c1f17e0887b1a4a64ae5c410"},
+ {file = "dm_tree-0.1.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d7c26e431fc93cc7e0cba867eb000db6a05f6f2b25af11ac4e9dada88fc5bca"},
+ {file = "dm_tree-0.1.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d714371bb08839e4e5e29024fc95832d9affe129825ef38836b143028bd144"},
+ {file = "dm_tree-0.1.8-cp310-cp310-win_amd64.whl", hash = "sha256:d40fa4106ca6edc66760246a08f500ec0c85ef55c762fb4a363f6ee739ba02ee"},
+ {file = "dm_tree-0.1.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad16ceba90a56ec47cf45b21856d14962ac314787975ef786efb5e6e9ca75ec7"},
+ {file = "dm_tree-0.1.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:803bfc53b4659f447ac694dbd04235f94a73ef7c1fd1e0df7c84ac41e0bc963b"},
+ {file = "dm_tree-0.1.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:378cc8ad93c5fe3590f405a309980721f021c790ca1bdf9b15bb1d59daec57f5"},
+ {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1607ce49aa42f010d1e5e616d92ce899d66835d4d8bea49679582435285515de"},
+ {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:343a4a4ebaa127451ff971254a4be4084eb4bdc0b2513c32b46f6f728fd03f9e"},
+ {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa42a605d099ee7d41ba2b5fb75e21423951fd26e5d50583a00471238fb3021d"},
+ {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b7764de0d855338abefc6e3ee9fe40d301668310aa3baea3f778ff051f4393"},
+ {file = "dm_tree-0.1.8-cp311-cp311-win_amd64.whl", hash = "sha256:a5d819c38c03f0bb5b3b3703c60e4b170355a0fc6b5819325bf3d4ceb3ae7e80"},
+ {file = "dm_tree-0.1.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8c60a7eadab64c2278861f56bca320b2720f163dca9d7558103c3b77f2416571"},
+ {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af4b3d372f2477dcd89a6e717e4a575ca35ccc20cc4454a8a4b6f8838a00672d"},
+ {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de287fabc464b8734be251e46e06aa9aa1001f34198da2b6ce07bd197172b9cb"},
+ {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:054b461f8176f4bce7a21f7b1870f873a1ced3bdbe1282c816c550bb43c71fa6"},
+ {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f7915660f59c09068e428613c480150180df1060561fd0d1470684ae7007bd1"},
+ {file = "dm_tree-0.1.8-cp37-cp37m-win_amd64.whl", hash = "sha256:b9f89a454e98806b44fe9d40ec9eee61f848388f7e79ac2371a55679bd5a3ac6"},
+ {file = "dm_tree-0.1.8-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0e9620ccf06393eb6b613b5e366469304622d4ea96ae6540b28a33840e6c89cf"},
+ {file = "dm_tree-0.1.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b095ba4f8ca1ba19350fd53cf1f8f3eb0bd406aa28af64a6dfc86707b32a810a"},
+ {file = "dm_tree-0.1.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b9bd9b9ccb59409d33d51d84b7668010c04c2af7d4a371632874c1ca356cff3d"},
+ {file = "dm_tree-0.1.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d3172394079a86c3a759179c65f64c48d1a42b89495fcf38976d11cc3bb952c"},
+ {file = "dm_tree-0.1.8-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1612fcaecd79023dbc6a6ae48d51a80beb5c385d6f3f6d71688e57bc8d07de8"},
+ {file = "dm_tree-0.1.8-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c5c8c12e3fda754ef6af94161bacdaeda816d941995fac415d6855c6c386af68"},
+ {file = "dm_tree-0.1.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:694c3654cfd2a81552c08ec66bb5c4a3d48fa292b9a181880fb081c36c5b9134"},
+ {file = "dm_tree-0.1.8-cp38-cp38-win_amd64.whl", hash = "sha256:bb2d109f42190225112da899b9f3d46d0d5f26aef501c61e43529fe9322530b5"},
+ {file = "dm_tree-0.1.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d16e1f2a073604cfcc09f7131ae8d534674f43c3aef4c25742eae295bc60d04f"},
+ {file = "dm_tree-0.1.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:250b692fb75f45f02e2f58fbef9ab338904ef334b90557565621fa251df267cf"},
+ {file = "dm_tree-0.1.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81fce77f22a302d7a5968aebdf4efafef4def7ce96528719a354e6990dcd49c7"},
+ {file = "dm_tree-0.1.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7ac31b9aecccb2c6e1ab29706f6ded3eba0c2c69c770322c9c685929c3d6afb"},
+ {file = "dm_tree-0.1.8-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fe962015b2fe1282892b28ebe962faed53c7f98d942da9a4625cbf27baef913"},
+ {file = "dm_tree-0.1.8-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c52cbf4f8b3dbd0beaedf44f69fa85eec5e9dede612e08035e06ada6ec9426"},
+ {file = "dm_tree-0.1.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:181c35521d480d0365f39300542cb6cd7fd2b77351bb43d7acfda15aef63b317"},
+ {file = "dm_tree-0.1.8-cp39-cp39-win_amd64.whl", hash = "sha256:8ed3564abed97c806db122c2d3e1a2b64c74a63debe9903aad795167cc301368"},
+]
+
+[[package]]
+name = "docker-pycreds"
+version = "0.4.0"
+description = "Python bindings for the docker credentials store API"
+optional = false
+python-versions = "*"
+files = [
+ {file = "docker-pycreds-0.4.0.tar.gz", hash = "sha256:6ce3270bcaf404cc4c3e27e4b6c70d3521deae82fb508767870fdbf772d584d4"},
+ {file = "docker_pycreds-0.4.0-py2.py3-none-any.whl", hash = "sha256:7266112468627868005106ec19cd0d722702d2b7d5912a28e19b826c3d37af49"},
+]
+
+[package.dependencies]
+six = ">=1.4.0"
+
+[[package]]
+name = "dparse"
+version = "0.6.3"
+description = "A parser for Python dependency files"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "dparse-0.6.3-py3-none-any.whl", hash = "sha256:0d8fe18714056ca632d98b24fbfc4e9791d4e47065285ab486182288813a5318"},
+ {file = "dparse-0.6.3.tar.gz", hash = "sha256:27bb8b4bcaefec3997697ba3f6e06b2447200ba273c0b085c3d012a04571b528"},
+]
+
+[package.dependencies]
+packaging = "*"
+tomli = {version = "*", markers = "python_version < \"3.11\""}
+
+[package.extras]
+conda = ["pyyaml"]
+pipenv = ["pipenv (<=2022.12.19)"]
+
+[[package]]
+name = "ecos"
+version = "2.0.12"
+description = "This is the Python package for ECOS: Embedded Cone Solver. See Github page for more information."
+optional = false
+python-versions = "*"
+files = [
+ {file = "ecos-2.0.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:835298a299c88c207b3402fba60ad9b5688b59bbbf2ac34a46de5b37165d773a"},
+ {file = "ecos-2.0.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:608bc822ee8e070927ab3519169b13a1a0fe88f3d562212d6b5dbb1039776360"},
+ {file = "ecos-2.0.12-cp310-cp310-win_amd64.whl", hash = "sha256:5184a9d8521ad1af90ffcd9902a6fa75c7bc473f37d30d86f97beda1033dfca2"},
+ {file = "ecos-2.0.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:eba07599084724eedc20b2862d5580eebebb09609f4740baadc78401cb99827c"},
+ {file = "ecos-2.0.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4979dc2d1cb6667e371a45a61887068505c1305437eef104ed6ef16f4b6aa0e3"},
+ {file = "ecos-2.0.12-cp311-cp311-win_amd64.whl", hash = "sha256:da8fbbca3feb83a9e27075d29b3765417d0c80af8ea83cbdc4a558cae7b564af"},
+ {file = "ecos-2.0.12-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f70e4547966f530fd7715756f7a65d5b9b90b312b9d37f243ef9356c05e7d74c"},
+ {file = "ecos-2.0.12-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:617be25d74222849622b0f82b94a11abcf1fae78ccaf69977b328321ee6ffa0b"},
+ {file = "ecos-2.0.12-cp37-cp37m-win_amd64.whl", hash = "sha256:29d00164eaea66ed54697a3b361c575284a8bca54f2623381a0635806c7303a7"},
+ {file = "ecos-2.0.12-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4e86671397d1d2cd7cccff8a9c45be0541b0c60af8b92a0ff3581c9ed869db67"},
+ {file = "ecos-2.0.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:858a4dd3177bdc8cc6e362031732f5177b62138a1e4ef91c0dc3c6bd7d2d1248"},
+ {file = "ecos-2.0.12-cp38-cp38-win_amd64.whl", hash = "sha256:528b02f53835bd1baeb2e23f8153b8d6cc2b3704e1768be6a1a972f542241670"},
+ {file = "ecos-2.0.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e42bd4c19af6e04f76ccc85d941b1f1adc7faeee4d06d482395a6beb7bec895"},
+ {file = "ecos-2.0.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6def54336a15b5a49bc3bfcaa36035e8557cae8a4853b17ca84f5a29c93bcaea"},
+ {file = "ecos-2.0.12-cp39-cp39-win_amd64.whl", hash = "sha256:7af08941552fce108bd80145cdb6be7fa74477a20bacdac170800442cc7027d4"},
+ {file = "ecos-2.0.12.tar.gz", hash = "sha256:f48816d73b87ae325556ea537b7c8743187311403c80e3832035224156337c4e"},
+]
+
+[package.dependencies]
+numpy = ">=1.6"
+scipy = ">=0.9"
+
+[[package]]
+name = "einops"
+version = "0.6.1"
+description = "A new flavour of deep learning operations"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "einops-0.6.1-py3-none-any.whl", hash = "sha256:99149e46cc808956b174932fe563d920db4d6e5dadb8c6ecdaa7483b7ef7cfc3"},
+ {file = "einops-0.6.1.tar.gz", hash = "sha256:f95f8d00f4ded90dbc4b19b6f98b177332614b0357dde66997f3ae5d474dc8c8"},
+]
+
+[[package]]
+name = "executing"
+version = "1.2.0"
+description = "Get the currently executing AST node of a frame, and other information"
+optional = false
+python-versions = "*"
+files = [
+ {file = "executing-1.2.0-py2.py3-none-any.whl", hash = "sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc"},
+ {file = "executing-1.2.0.tar.gz", hash = "sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107"},
+]
+
+[package.extras]
+tests = ["asttokens", "littleutils", "pytest", "rich"]
+
+[[package]]
+name = "ezjaxtyping"
+version = "0.2.20"
+description = "Type annotations and runtime checking for shape and dtype of JAX arrays, and PyTrees."
+optional = false
+python-versions = "~=3.8"
+files = [
+ {file = "ezjaxtyping-0.2.20-py3-none-any.whl", hash = "sha256:466f483aec7265fe60cf622592861d64fb73b2f0d1bbc7cdbcef63e22e422116"},
+ {file = "ezjaxtyping-0.2.20.tar.gz", hash = "sha256:aaa2396bdb515516903fb1aa73a22d53e8d79e5bdd68ac16b47805930692e291"},
+]
+
+[package.dependencies]
+numpy = ">=1.20.0"
+typeguard = ">=2.13.3"
+typing-extensions = ">=3.7.4.1"
+
+[[package]]
+name = "eztils"
+version = "0.4.73"
+description = "eds utilities"
+optional = false
+python-versions = ">=3.8,<4.0"
+files = [
+ {file = "eztils-0.4.73-py3-none-any.whl", hash = "sha256:a6fcd3f880cc2bce964622549689b6308d2fbf5ce0cc04ef913388f82d65d0ba"},
+ {file = "eztils-0.4.73.tar.gz", hash = "sha256:00e4812521f421ed3b4ee9871613e7a7af5002bd102128be01be7dfa2ba76ca9"},
+]
+
+[package.dependencies]
+beartype = ">=0.14.1,<0.15.0"
+einops = ">=0.6.1,<0.7.0"
+ezjaxtyping = {version = ">=0.2.20,<0.3.0", optional = true, markers = "extra == \"torch\""}
+loguru = ">=0.7.0,<0.8.0"
+numpy = ">=1.24.3,<2.0.0"
+rich = ">=13.3.3,<14.0.0"
+tqdm = ">=4.65.0,<5.0.0"
+varname = ">=0.11.2,<0.12.0"
+
+[package.extras]
+torch = ["ezjaxtyping (>=0.2.20,<0.3.0)"]
+
+[[package]]
+name = "filelock"
+version = "3.12.2"
+description = "A platform independent file lock."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "filelock-3.12.2-py3-none-any.whl", hash = "sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec"},
+ {file = "filelock-3.12.2.tar.gz", hash = "sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81"},
+]
+
+[package.extras]
+docs = ["furo (>=2023.5.20)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "diff-cover (>=7.5)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)", "pytest-timeout (>=2.1)"]
+
+[[package]]
+name = "fonttools"
+version = "4.41.1"
+description = "Tools to manipulate font files"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "fonttools-4.41.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a7bbb290d13c6dd718ec2c3db46fe6c5f6811e7ea1e07f145fd8468176398224"},
+ {file = "fonttools-4.41.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ec453a45778524f925a8f20fd26a3326f398bfc55d534e37bab470c5e415caa1"},
+ {file = "fonttools-4.41.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2071267deaa6d93cb16288613419679c77220543551cbe61da02c93d92df72f"},
+ {file = "fonttools-4.41.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e3334d51f0e37e2c6056e67141b2adabc92613a968797e2571ca8a03bd64773"},
+ {file = "fonttools-4.41.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cac73bbef7734e78c60949da11c4903ee5837168e58772371bd42a75872f4f82"},
+ {file = "fonttools-4.41.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:edee0900cf0eedb29d17c7876102d6e5a91ee333882b1f5abc83e85b934cadb5"},
+ {file = "fonttools-4.41.1-cp310-cp310-win32.whl", hash = "sha256:2a22b2c425c698dcd5d6b0ff0b566e8e9663172118db6fd5f1941f9b8063da9b"},
+ {file = "fonttools-4.41.1-cp310-cp310-win_amd64.whl", hash = "sha256:547ab36a799dded58a46fa647266c24d0ed43a66028cd1cd4370b246ad426cac"},
+ {file = "fonttools-4.41.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:849ec722bbf7d3501a0e879e57dec1fc54919d31bff3f690af30bb87970f9784"},
+ {file = "fonttools-4.41.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:38cdecd8f1fd4bf4daae7fed1b3170dfc1b523388d6664b2204b351820aa78a7"},
+ {file = "fonttools-4.41.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ae64303ba670f8959fdaaa30ba0c2dabe75364fdec1caeee596c45d51ca3425"},
+ {file = "fonttools-4.41.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f14f3ccea4cc7dd1b277385adf3c3bf18f9860f87eab9c2fb650b0af16800f55"},
+ {file = "fonttools-4.41.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:33191f062549e6bb1a4782c22a04ebd37009c09360e2d6686ac5083774d06d95"},
+ {file = "fonttools-4.41.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:704bccd69b0abb6fab9f5e4d2b75896afa48b427caa2c7988792a2ffce35b441"},
+ {file = "fonttools-4.41.1-cp311-cp311-win32.whl", hash = "sha256:4edc795533421e98f60acee7d28fc8d941ff5ac10f44668c9c3635ad72ae9045"},
+ {file = "fonttools-4.41.1-cp311-cp311-win_amd64.whl", hash = "sha256:aaaef294d8e411f0ecb778a0aefd11bb5884c9b8333cc1011bdaf3b58ca4bd75"},
+ {file = "fonttools-4.41.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3d1f9471134affc1e3b1b806db6e3e2ad3fa99439e332f1881a474c825101096"},
+ {file = "fonttools-4.41.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:59eba8b2e749a1de85760da22333f3d17c42b66e03758855a12a2a542723c6e7"},
+ {file = "fonttools-4.41.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9b3cc10dc9e0834b6665fd63ae0c6964c6bc3d7166e9bc84772e0edd09f9fa2"},
+ {file = "fonttools-4.41.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da2c2964bdc827ba6b8a91dc6de792620be4da3922c4cf0599f36a488c07e2b2"},
+ {file = "fonttools-4.41.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7763316111df7b5165529f4183a334aa24c13cdb5375ffa1dc8ce309c8bf4e5c"},
+ {file = "fonttools-4.41.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b2d1ee95be42b80d1f002d1ee0a51d7a435ea90d36f1a5ae331be9962ee5a3f1"},
+ {file = "fonttools-4.41.1-cp38-cp38-win32.whl", hash = "sha256:f48602c0b3fd79cd83a34c40af565fe6db7ac9085c8823b552e6e751e3a5b8be"},
+ {file = "fonttools-4.41.1-cp38-cp38-win_amd64.whl", hash = "sha256:b0938ebbeccf7c80bb9a15e31645cf831572c3a33d5cc69abe436e7000c61b14"},
+ {file = "fonttools-4.41.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e5c2b0a95a221838991e2f0e455dec1ca3a8cc9cd54febd68cc64d40fdb83669"},
+ {file = "fonttools-4.41.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:891cfc5a83b0307688f78b9bb446f03a7a1ad981690ac8362f50518bc6153975"},
+ {file = "fonttools-4.41.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73ef0bb5d60eb02ba4d3a7d23ada32184bd86007cb2de3657cfcb1175325fc83"},
+ {file = "fonttools-4.41.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f240d9adf0583ac8fc1646afe7f4ac039022b6f8fa4f1575a2cfa53675360b69"},
+ {file = "fonttools-4.41.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bdd729744ae7ecd7f7311ad25d99da4999003dcfe43b436cf3c333d4e68de73d"},
+ {file = "fonttools-4.41.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b927e5f466d99c03e6e20961946314b81d6e3490d95865ef88061144d9f62e38"},
+ {file = "fonttools-4.41.1-cp39-cp39-win32.whl", hash = "sha256:afce2aeb80be72b4da7dd114f10f04873ff512793d13ce0b19d12b2a4c44c0f0"},
+ {file = "fonttools-4.41.1-cp39-cp39-win_amd64.whl", hash = "sha256:1df1b6f4c7c4bc8201eb47f3b268adbf2539943aa43c400f84556557e3e109c0"},
+ {file = "fonttools-4.41.1-py3-none-any.whl", hash = "sha256:952cb405f78734cf6466252fec42e206450d1a6715746013f64df9cbd4f896fa"},
+ {file = "fonttools-4.41.1.tar.gz", hash = "sha256:e16a9449f21a93909c5be2f5ed5246420f2316e94195dbfccb5238aaa38f9751"},
+]
+
+[package.extras]
+all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.0.0)", "xattr", "zopfli (>=0.1.4)"]
+graphite = ["lz4 (>=1.7.4.2)"]
+interpolatable = ["munkres", "scipy"]
+lxml = ["lxml (>=4.0,<5)"]
+pathops = ["skia-pathops (>=0.5.0)"]
+plot = ["matplotlib"]
+repacker = ["uharfbuzz (>=0.23.0)"]
+symfont = ["sympy"]
+type1 = ["xattr"]
+ufo = ["fs (>=2.2.0,<3)"]
+unicode = ["unicodedata2 (>=15.0.0)"]
+woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"]
+
+[[package]]
+name = "gitdb"
+version = "4.0.10"
+description = "Git Object Database"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"},
+ {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"},
+]
+
+[package.dependencies]
+smmap = ">=3.0.1,<6"
+
+[[package]]
+name = "gitpython"
+version = "3.1.32"
+description = "GitPython is a Python library used to interact with Git repositories"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "GitPython-3.1.32-py3-none-any.whl", hash = "sha256:e3d59b1c2c6ebb9dfa7a184daf3b6dd4914237e7488a1730a6d8f6f5d0b4187f"},
+ {file = "GitPython-3.1.32.tar.gz", hash = "sha256:8d9b8cb1e80b9735e8717c9362079d3ce4c6e5ddeebedd0361b228c3a67a62f6"},
+]
+
+[package.dependencies]
+gitdb = ">=4.0.1,<5"
+
+[[package]]
+name = "glfw"
+version = "2.6.2"
+description = "A ctypes-based wrapper for GLFW3."
+optional = false
+python-versions = "*"
+files = [
+ {file = "glfw-2.6.2-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-macosx_10_6_intel.whl", hash = "sha256:d8e4f087eba45f7f4815e3e912867ed5ca16d1047b0958c52047f5b53be67059"},
+ {file = "glfw-2.6.2-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-macosx_11_0_arm64.whl", hash = "sha256:faa5596aad5490cdd8657931a66636508c1015a8b7b47018318bb72fcd2b9014"},
+ {file = "glfw-2.6.2-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-manylinux2010_i686.whl", hash = "sha256:cb04ace3b2162dcfcf642e1011babfc0fd099cfedba41936b8057d76a7e3f413"},
+ {file = "glfw-2.6.2-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-manylinux2010_x86_64.whl", hash = "sha256:a862a45cc503d604abfabc6ca6ed3c8e78e1941cbbb1161796db525032268ae0"},
+ {file = "glfw-2.6.2-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-manylinux2014_aarch64.whl", hash = "sha256:fb56cd24f9e173cdddc8f6ebaac4f52304a1dfcae1dadd583038355b73c1ae06"},
+ {file = "glfw-2.6.2-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-manylinux2014_x86_64.whl", hash = "sha256:c385c9976133aed57ff4f0ff5210276844cefb4d8a7bf61bbcc1caf10385744b"},
+ {file = "glfw-2.6.2-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-win32.whl", hash = "sha256:53f586291223abef652def28b82c309b0bd9d111fa409d05359aff6f35b0f3ab"},
+ {file = "glfw-2.6.2-py2.py27.py3.py30.py31.py32.py33.py34.py35.py36.py37.py38-none-win_amd64.whl", hash = "sha256:c2dcf2395d99ff2506428213bee305bb9ba024043d1f574216e61e6f5df808e9"},
+ {file = "glfw-2.6.2.tar.gz", hash = "sha256:4e0a6eac41301640ce9fe7493d08a83c72eebb7e32f78d2efb094514e770d3c4"},
+]
+
+[package.extras]
+preview = ["glfw-preview"]
+
+[[package]]
+name = "gtimer"
+version = "1.0.0b5"
+description = "A global Python timer"
+optional = false
+python-versions = "*"
+files = [
+ {file = "gtimer-1.0.0b5.tar.gz", hash = "sha256:a90d215bd53e748394cc10d6258fe2ee0cea9bf6acaa8bd9488aeb53c622af6a"},
+]
+
+[[package]]
+name = "gym"
+version = "0.23.1"
+description = "Gym: A universal API for reinforcement learning environments"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "gym-0.23.1.tar.gz", hash = "sha256:d0f9b9da34edbdace421c9442fc9205d03b8d15d0fb451053c766cde706d40e0"},
+]
+
+[package.dependencies]
+ale-py = {version = ">=0.7.4,<0.8.0", optional = true, markers = "extra == \"all\""}
+box2d-py = {version = "2.3.5", optional = true, markers = "extra == \"all\""}
+cloudpickle = ">=1.2.0"
+gym_notices = ">=0.0.4"
+importlib_metadata = {version = ">=4.10.0", markers = "python_version < \"3.10\""}
+lz4 = {version = ">=3.1.0", optional = true, markers = "extra == \"all\""}
+mujoco_py = {version = ">=1.50,<2.0", optional = true, markers = "extra == \"all\""}
+numpy = ">=1.18.0"
+opencv-python = {version = ">=3.0", optional = true, markers = "extra == \"all\""}
+pygame = {version = "2.1.0", optional = true, markers = "extra == \"all\""}
+scipy = {version = ">=1.4.1", optional = true, markers = "extra == \"all\""}
+
+[package.extras]
+accept-rom-license = ["autorom[accept-rom-license] (>=0.4.2,<0.5.0)"]
+all = ["ale-py (>=0.7.4,<0.8.0)", "box2d-py (==2.3.5)", "box2d-py (==2.3.5)", "lz4 (>=3.1.0)", "lz4 (>=3.1.0)", "mujoco_py (>=1.50,<2.0)", "opencv-python (>=3.0)", "opencv-python (>=3.0)", "pygame (==2.1.0)", "pygame (==2.1.0)", "pygame (==2.1.0)", "pygame (==2.1.0)", "pygame (==2.1.0)", "pygame (==2.1.0)", "scipy (>=1.4.1)", "scipy (>=1.4.1)"]
+atari = ["ale-py (>=0.7.4,<0.8.0)"]
+box2d = ["box2d-py (==2.3.5)", "pygame (==2.1.0)"]
+classic-control = ["pygame (==2.1.0)"]
+mujoco = ["mujoco_py (>=1.50,<2.0)"]
+nomujoco = ["box2d-py (==2.3.5)", "lz4 (>=3.1.0)", "opencv-python (>=3.0)", "pygame (==2.1.0)", "pygame (==2.1.0)", "pygame (==2.1.0)", "scipy (>=1.4.1)"]
+other = ["lz4 (>=3.1.0)", "opencv-python (>=3.0)"]
+toy-text = ["pygame (==2.1.0)", "scipy (>=1.4.1)"]
+
+[[package]]
+name = "gym-notices"
+version = "0.0.8"
+description = "Notices for gym"
+optional = false
+python-versions = "*"
+files = [
+ {file = "gym-notices-0.0.8.tar.gz", hash = "sha256:ad25e200487cafa369728625fe064e88ada1346618526102659b4640f2b4b911"},
+ {file = "gym_notices-0.0.8-py3-none-any.whl", hash = "sha256:e5f82e00823a166747b4c2a07de63b6560b1acb880638547e0cabf825a01e463"},
+]
+
+[[package]]
+name = "h5py"
+version = "3.9.0"
+description = "Read and write HDF5 files from Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "h5py-3.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eb7bdd5e601dd1739698af383be03f3dad0465fe67184ebd5afca770f50df9d6"},
+ {file = "h5py-3.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:78e44686334cbbf2dd21d9df15823bc38663f27a3061f6a032c68a3e30c47bf7"},
+ {file = "h5py-3.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f68b41efd110ce9af1cbe6fa8af9f4dcbadace6db972d30828b911949e28fadd"},
+ {file = "h5py-3.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12aa556d540f11a2cae53ea7cfb94017353bd271fb3962e1296b342f6550d1b8"},
+ {file = "h5py-3.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:d97409e17915798029e297a84124705c8080da901307ea58f29234e09b073ddc"},
+ {file = "h5py-3.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:551e358db05a874a0f827b22e95b30092f2303edc4b91bb62ad2f10e0236e1a0"},
+ {file = "h5py-3.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6822a814b9d8b8363ff102f76ea8d026f0ca25850bb579d85376029ee3e73b93"},
+ {file = "h5py-3.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54f01202cdea754ab4227dd27014bdbd561a4bbe4b631424fd812f7c2ce9c6ac"},
+ {file = "h5py-3.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64acceaf6aff92af091a4b83f6dee3cf8d3061f924a6bb3a33eb6c4658a8348b"},
+ {file = "h5py-3.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:804c7fb42a34c8ab3a3001901c977a5c24d2e9c586a0f3e7c0a389130b4276fc"},
+ {file = "h5py-3.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8d9492391ff5c3c80ec30ae2fe82a3f0efd1e750833739c25b0d090e3be1b095"},
+ {file = "h5py-3.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9da9e7e63376c32704e37ad4cea2dceae6964cee0d8515185b3ab9cbd6b947bc"},
+ {file = "h5py-3.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e20897c88759cbcbd38fb45b507adc91af3e0f67722aa302d71f02dd44d286"},
+ {file = "h5py-3.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbf5225543ca35ce9f61c950b73899a82be7ba60d58340e76d0bd42bf659235a"},
+ {file = "h5py-3.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:36408f8c62f50007d14e000f9f3acf77e103b9e932c114cbe52a3089e50ebf94"},
+ {file = "h5py-3.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:23e74b878bbe1653ab34ca49b83cac85529cd0b36b9d625516c5830cc5ca2eac"},
+ {file = "h5py-3.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3f457089c5d524b7998e3649bc63240679b8fb0a3859ea53bbb06841f3d755f1"},
+ {file = "h5py-3.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6284061f3214335e1eec883a6ee497dbe7a79f19e6a57fed2dd1f03acd5a8cb"},
+ {file = "h5py-3.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7a745efd0d56076999b52e8da5fad5d30823bac98b59c68ae75588d09991a"},
+ {file = "h5py-3.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:79bbca34696c6f9eeeb36a91776070c49a060b2879828e2c8fa6c58b8ed10dd1"},
+ {file = "h5py-3.9.0.tar.gz", hash = "sha256:e604db6521c1e367c6bd7fad239c847f53cc46646f2d2651372d05ae5e95f817"},
+]
+
+[package.dependencies]
+numpy = ">=1.17.3"
+
+[[package]]
+name = "identify"
+version = "2.5.26"
+description = "File identification library for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "identify-2.5.26-py2.py3-none-any.whl", hash = "sha256:c22a8ead0d4ca11f1edd6c9418c3220669b3b7533ada0a0ffa6cc0ef85cf9b54"},
+ {file = "identify-2.5.26.tar.gz", hash = "sha256:7243800bce2f58404ed41b7c002e53d4d22bcf3ae1b7900c2d7aefd95394bf7f"},
+]
+
+[package.extras]
+license = ["ukkonen"]
+
+[[package]]
+name = "idna"
+version = "3.4"
+description = "Internationalized Domain Names in Applications (IDNA)"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
+ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
+]
+
+[[package]]
+name = "imageio"
+version = "2.31.1"
+description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "imageio-2.31.1-py3-none-any.whl", hash = "sha256:4106fb395ef7f8dc0262d6aa1bb03daba818445c381ca8b7d5dfc7a2089b04df"},
+ {file = "imageio-2.31.1.tar.gz", hash = "sha256:f8436a02af02fd63f272dab50f7d623547a38f0e04a4a73e2b02ae1b8b180f27"},
+]
+
+[package.dependencies]
+numpy = "*"
+pillow = ">=8.3.2"
+
+[package.extras]
+all-plugins = ["astropy", "av", "imageio-ffmpeg", "psutil", "tifffile"]
+all-plugins-pypy = ["av", "imageio-ffmpeg", "psutil", "tifffile"]
+build = ["wheel"]
+dev = ["black", "flake8", "fsspec[github]", "pytest", "pytest-cov"]
+docs = ["numpydoc", "pydata-sphinx-theme", "sphinx (<6)"]
+ffmpeg = ["imageio-ffmpeg", "psutil"]
+fits = ["astropy"]
+full = ["astropy", "av", "black", "flake8", "fsspec[github]", "gdal", "imageio-ffmpeg", "itk", "numpydoc", "psutil", "pydata-sphinx-theme", "pytest", "pytest-cov", "sphinx (<6)", "tifffile", "wheel"]
+gdal = ["gdal"]
+itk = ["itk"]
+linting = ["black", "flake8"]
+pyav = ["av"]
+test = ["fsspec[github]", "pytest", "pytest-cov"]
+tifffile = ["tifffile"]
+
+[[package]]
+name = "importlib-metadata"
+version = "6.8.0"
+description = "Read metadata from Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "importlib_metadata-6.8.0-py3-none-any.whl", hash = "sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb"},
+ {file = "importlib_metadata-6.8.0.tar.gz", hash = "sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743"},
+]
+
+[package.dependencies]
+zipp = ">=0.5"
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+perf = ["ipython"]
+testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]
+
+[[package]]
+name = "importlib-resources"
+version = "6.0.0"
+description = "Read resources from Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "importlib_resources-6.0.0-py3-none-any.whl", hash = "sha256:d952faee11004c045f785bb5636e8f885bed30dc3c940d5d42798a2a4541c185"},
+ {file = "importlib_resources-6.0.0.tar.gz", hash = "sha256:4cf94875a8368bd89531a756df9a9ebe1f150e0f885030b461237bc7f2d905f2"},
+]
+
+[package.dependencies]
+zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
+
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
+[[package]]
+name = "ipdb"
+version = "0.13.13"
+description = "IPython-enabled pdb"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "ipdb-0.13.13-py3-none-any.whl", hash = "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4"},
+ {file = "ipdb-0.13.13.tar.gz", hash = "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726"},
+]
+
+[package.dependencies]
+decorator = {version = "*", markers = "python_version > \"3.6\" and python_version < \"3.11\""}
+ipython = {version = ">=7.31.1", markers = "python_version > \"3.6\" and python_version < \"3.11\""}
+tomli = {version = "*", markers = "python_version > \"3.6\" and python_version < \"3.11\""}
+
+[[package]]
+name = "ipython"
+version = "7.34.0"
+description = "IPython: Productive Interactive Computing"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "ipython-7.34.0-py3-none-any.whl", hash = "sha256:c175d2440a1caff76116eb719d40538fbb316e214eda85c5515c303aacbfb23e"},
+ {file = "ipython-7.34.0.tar.gz", hash = "sha256:af3bdb46aa292bce5615b1b2ebc76c2080c5f77f54bda2ec72461317273e7cd6"},
+]
+
+[package.dependencies]
+appnope = {version = "*", markers = "sys_platform == \"darwin\""}
+backcall = "*"
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+decorator = "*"
+jedi = ">=0.16"
+matplotlib-inline = "*"
+pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""}
+pickleshare = "*"
+prompt-toolkit = ">=2.0.0,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.1.0"
+pygments = "*"
+setuptools = ">=18.5"
+traitlets = ">=4.2"
+
+[package.extras]
+all = ["Sphinx (>=1.3)", "ipykernel", "ipyparallel", "ipywidgets", "nbconvert", "nbformat", "nose (>=0.10.1)", "notebook", "numpy (>=1.17)", "pygments", "qtconsole", "requests", "testpath"]
+doc = ["Sphinx (>=1.3)"]
+kernel = ["ipykernel"]
+nbconvert = ["nbconvert"]
+nbformat = ["nbformat"]
+notebook = ["ipywidgets", "notebook"]
+parallel = ["ipyparallel"]
+qtconsole = ["qtconsole"]
+test = ["ipykernel", "nbformat", "nose (>=0.10.1)", "numpy (>=1.17)", "pygments", "requests", "testpath"]
+
+[[package]]
+name = "isort"
+version = "5.12.0"
+description = "A Python utility / library to sort Python imports."
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"},
+ {file = "isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504"},
+]
+
+[package.dependencies]
+colorama = {version = ">=0.4.3", optional = true, markers = "extra == \"colors\""}
+
+[package.extras]
+colors = ["colorama (>=0.4.3)"]
+pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"]
+plugins = ["setuptools"]
+requirements-deprecated-finder = ["pip-api", "pipreqs"]
+
+[[package]]
+name = "jedi"
+version = "0.18.2"
+description = "An autocompletion tool for Python that can be used for text editors."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "jedi-0.18.2-py2.py3-none-any.whl", hash = "sha256:203c1fd9d969ab8f2119ec0a3342e0b49910045abe6af0a3ae83a5764d54639e"},
+ {file = "jedi-0.18.2.tar.gz", hash = "sha256:bae794c30d07f6d910d32a7048af09b5a39ed740918da923c6b780790ebac612"},
+]
+
+[package.dependencies]
+parso = ">=0.8.0,<0.9.0"
+
+[package.extras]
+docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"]
+qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
+testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
+
+[[package]]
+name = "jinja2"
+version = "3.1.2"
+description = "A very fast and expressive template engine."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
+ {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=2.0"
+
+[package.extras]
+i18n = ["Babel (>=2.7)"]
+
+[[package]]
+name = "kiwisolver"
+version = "1.4.4"
+description = "A fast implementation of the Cassowary constraint solver"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2f5e60fabb7343a836360c4f0919b8cd0d6dbf08ad2ca6b9cf90bf0c76a3c4f6"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:10ee06759482c78bdb864f4109886dff7b8a56529bc1609d4f1112b93fe6423c"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c79ebe8f3676a4c6630fd3f777f3cfecf9289666c84e775a67d1d358578dc2e3"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:abbe9fa13da955feb8202e215c4018f4bb57469b1b78c7a4c5c7b93001699938"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7577c1987baa3adc4b3c62c33bd1118c3ef5c8ddef36f0f2c950ae0b199e100d"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ad8285b01b0d4695102546b342b493b3ccc6781fc28c8c6a1bb63e95d22f09"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ed58b8acf29798b036d347791141767ccf65eee7f26bde03a71c944449e53de"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a68b62a02953b9841730db7797422f983935aeefceb1679f0fc85cbfbd311c32"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-win32.whl", hash = "sha256:e92a513161077b53447160b9bd8f522edfbed4bd9759e4c18ab05d7ef7e49408"},
+ {file = "kiwisolver-1.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:3fe20f63c9ecee44560d0e7f116b3a747a5d7203376abeea292ab3152334d004"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ea21f66820452a3f5d1655f8704a60d66ba1191359b96541eaf457710a5fc6"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bc9db8a3efb3e403e4ecc6cd9489ea2bac94244f80c78e27c31dcc00d2790ac2"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d5b61785a9ce44e5a4b880272baa7cf6c8f48a5180c3e81c59553ba0cb0821ca"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c2dbb44c3f7e6c4d3487b31037b1bdbf424d97687c1747ce4ff2895795c9bf69"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6295ecd49304dcf3bfbfa45d9a081c96509e95f4b9d0eb7ee4ec0530c4a96514"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bd472dbe5e136f96a4b18f295d159d7f26fd399136f5b17b08c4e5f498cd494"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf7d9fce9bcc4752ca4a1b80aabd38f6d19009ea5cbda0e0856983cf6d0023f5"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d6601aed50c74e0ef02f4204da1816147a6d3fbdc8b3872d263338a9052c51"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:877272cf6b4b7e94c9614f9b10140e198d2186363728ed0f701c6eee1baec1da"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:db608a6757adabb32f1cfe6066e39b3706d8c3aa69bbc353a5b61edad36a5cb4"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5853eb494c71e267912275e5586fe281444eb5e722de4e131cddf9d442615626"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:f0a1dbdb5ecbef0d34eb77e56fcb3e95bbd7e50835d9782a45df81cc46949750"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:283dffbf061a4ec60391d51e6155e372a1f7a4f5b15d59c8505339454f8989e4"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-win32.whl", hash = "sha256:d06adcfa62a4431d404c31216f0f8ac97397d799cd53800e9d3efc2fbb3cf14e"},
+ {file = "kiwisolver-1.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:e7da3fec7408813a7cebc9e4ec55afed2d0fd65c4754bc376bf03498d4e92686"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:62ac9cc684da4cf1778d07a89bf5f81b35834cb96ca523d3a7fb32509380cbf6"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41dae968a94b1ef1897cb322b39360a0812661dba7c682aa45098eb8e193dbdf"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0611a0a2a518464c05ddd5a3a1a0e856ccc10e67079bb17f265ad19ab3c7597"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:db5283d90da4174865d520e7366801a93777201e91e79bacbac6e6927cbceede"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1041feb4cda8708ce73bb4dcb9ce1ccf49d553bf87c3954bdfa46f0c3f77252c"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-win32.whl", hash = "sha256:a553dadda40fef6bfa1456dc4be49b113aa92c2a9a9e8711e955618cd69622e3"},
+ {file = "kiwisolver-1.4.4-cp37-cp37m-win_amd64.whl", hash = "sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:841293b17ad704d70c578f1f0013c890e219952169ce8a24ebc063eecf775454"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f4f270de01dd3e129a72efad823da90cc4d6aafb64c410c9033aba70db9f1ff0"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f9f39e2f049db33a908319cf46624a569b36983c7c78318e9726a4cb8923b26c"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97528e64cb9ebeff9701e7938653a9951922f2a38bd847787d4a8e498cc83ae"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d1573129aa0fd901076e2bfb4275a35f5b7aa60fbfb984499d661ec950320b0"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ad881edc7ccb9d65b0224f4e4d05a1e85cf62d73aab798943df6d48ab0cd79a1"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b428ef021242344340460fa4c9185d0b1f66fbdbfecc6c63eff4b7c29fad429d"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2e407cb4bd5a13984a6c2c0fe1845e4e41e96f183e5e5cd4d77a857d9693494c"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-win32.whl", hash = "sha256:75facbe9606748f43428fc91a43edb46c7ff68889b91fa31f53b58894503a191"},
+ {file = "kiwisolver-1.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:5bce61af018b0cb2055e0e72e7d65290d822d3feee430b7b8203d8a855e78766"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8c808594c88a025d4e322d5bb549282c93c8e1ba71b790f539567932722d7bd8"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0a71d85ecdd570ded8ac3d1c0f480842f49a40beb423bb8014539a9f32a5897"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b533558eae785e33e8c148a8d9921692a9fe5aa516efbdff8606e7d87b9d5824"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:efda5fc8cc1c61e4f639b8067d118e742b812c930f708e6667a5ce0d13499e29"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7c43e1e1206cd421cd92e6b3280d4385d41d7166b3ed577ac20444b6995a445f"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc8d3bd6c72b2dd9decf16ce70e20abcb3274ba01b4e1c96031e0c4067d1e7cd"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ea39b0ccc4f5d803e3337dd46bcce60b702be4d86fd0b3d7531ef10fd99a1ac"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968f44fdbf6dd757d12920d63b566eeb4d5b395fd2d00d29d7ef00a00582aac9"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-win32.whl", hash = "sha256:da7e547706e69e45d95e116e6939488d62174e033b763ab1496b4c29b76fabea"},
+ {file = "kiwisolver-1.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:ba59c92039ec0a66103b1d5fe588fa546373587a7d68f5c96f743c3396afc04b"},
+ {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:91672bacaa030f92fc2f43b620d7b337fd9a5af28b0d6ed3f77afc43c4a64b5a"},
+ {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:787518a6789009c159453da4d6b683f468ef7a65bbde796bcea803ccf191058d"},
+ {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da152d8cdcab0e56e4f45eb08b9aea6455845ec83172092f09b0e077ece2cf7a"},
+ {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ecb1fa0db7bf4cff9dac752abb19505a233c7f16684c5826d1f11ebd9472b871"},
+ {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:28bc5b299f48150b5f822ce68624e445040595a4ac3d59251703779836eceff9"},
+ {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:81e38381b782cc7e1e46c4e14cd997ee6040768101aefc8fa3c24a4cc58e98f8"},
+ {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2a66fdfb34e05b705620dd567f5a03f239a088d5a3f321e7b6ac3239d22aa286"},
+ {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:872b8ca05c40d309ed13eb2e582cab0c5a05e81e987ab9c521bf05ad1d5cf5cb"},
+ {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:70e7c2e7b750585569564e2e5ca9845acfaa5da56ac46df68414f29fea97be9f"},
+ {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9f85003f5dfa867e86d53fac6f7e6f30c045673fa27b603c397753bebadc3008"},
+ {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e307eb9bd99801f82789b44bb45e9f541961831c7311521b13a6c85afc09767"},
+ {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1792d939ec70abe76f5054d3f36ed5656021dcad1322d1cc996d4e54165cef9"},
+ {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6cb459eea32a4e2cf18ba5fcece2dbdf496384413bc1bae15583f19e567f3b2"},
+ {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:36dafec3d6d6088d34e2de6b85f9d8e2324eb734162fba59d2ba9ed7a2043d5b"},
+ {file = "kiwisolver-1.4.4.tar.gz", hash = "sha256:d41997519fcba4a1e46eb4a2fe31bc12f0ff957b2b81bac28db24744f333e955"},
+]
+
+[[package]]
+name = "labmaze"
+version = "1.0.6"
+description = "LabMaze: DeepMind Lab's text maze generator."
+optional = false
+python-versions = "*"
+files = [
+ {file = "labmaze-1.0.6-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b2ddef976dfd8d992b19cfa6c633f2eba7576d759c2082da534e3f727479a84a"},
+ {file = "labmaze-1.0.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:157efaa93228c8ccce5cae337902dd652093e0fba9d3a0f6506e4bee272bb66f"},
+ {file = "labmaze-1.0.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3ce98b9541c5fe6a306e411e7d018121dd646f2c9978d763fad86f9f30c5f57"},
+ {file = "labmaze-1.0.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e6433bd49bc541791de8191040526fddfebb77151620eb04203453f43ee486a"},
+ {file = "labmaze-1.0.6-cp310-cp310-win_amd64.whl", hash = "sha256:6a507fc35961f1b1479708e2716f65e0d0611cefb55f31a77be29ce2339b6fef"},
+ {file = "labmaze-1.0.6-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a0c2cb9dec971814ea9c5d7150af15fa3964482131fa969e0afb94bd224348af"},
+ {file = "labmaze-1.0.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2c6ba9538d819543f4be448d36b4926a3881e53646a2b331ebb5a1f353047d05"},
+ {file = "labmaze-1.0.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70635d1cdb0147a02efb6b3f607a52cdc51723bc3dcc42717a0d4ef55fa0a987"},
+ {file = "labmaze-1.0.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff472793238bd9b6dabea8094594d6074ad3c111455de3afcae72f6c40c6817e"},
+ {file = "labmaze-1.0.6-cp311-cp311-win_amd64.whl", hash = "sha256:2317e65e12fa3d1abecda7e0488dab15456cee8a2e717a586bfc8f02a91579e7"},
+ {file = "labmaze-1.0.6-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:a4c5bc6e56baa55ce63b97569afec2f80cab0f6b952752a131e1f83eed190a53"},
+ {file = "labmaze-1.0.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3955f24fe5f708e1e97495b4cfe284b70ae4fd51be5e17b75a6fc04ffbd67bca"},
+ {file = "labmaze-1.0.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed96ddc0bb8d66df36428c94db83949fd84a15867e8250763a4c5e3d82104c54"},
+ {file = "labmaze-1.0.6-cp37-cp37m-win_amd64.whl", hash = "sha256:3bd0458a29e55aa09f146e28a168d2e00b8ccf19e2259a3f71154cfff3536b1d"},
+ {file = "labmaze-1.0.6-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:33f5154edc83dff55a150e54b60c8582fdafc7ec45195049809cbcc01f5e8f34"},
+ {file = "labmaze-1.0.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0971055ef2a5f7d8517fdc42b67c057093698f1eb911f46faa7018867b73fcc9"},
+ {file = "labmaze-1.0.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de18d09680007302abf49111f3fe822d8435e4fbc4468b9ec07d50a78e267865"},
+ {file = "labmaze-1.0.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f18126066db2218a52853c7dd490b4c3d8129fc22eb3a47eb23007524b911d53"},
+ {file = "labmaze-1.0.6-cp38-cp38-win_amd64.whl", hash = "sha256:f9aef09a76877342bb4d634b7e05f43b038a49c4f34adfb8f1b8ac57c29472f2"},
+ {file = "labmaze-1.0.6-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5dd28899418f1b8b1c7d1e1b40a4593150a7cfa95ca91e23860b9785b82cc0ee"},
+ {file = "labmaze-1.0.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:965569f37ee33090b4d4b3aa5aa7c9dcc4f62e2ae5d761e7f73ec76fc9d8aa96"},
+ {file = "labmaze-1.0.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05eccfa98c0e781bc9f939076ae600b2e25ca736e123f2a530606aedec3b531c"},
+ {file = "labmaze-1.0.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bee8c94e0fb3fc2d8180214947245c1d74a3489349a9da90b868296e77a521e9"},
+ {file = "labmaze-1.0.6-cp39-cp39-win_amd64.whl", hash = "sha256:d486e9ca3a335ad628e3bd48a09c42f1aa5f51040952ef0fe32507afedcd694b"},
+ {file = "labmaze-1.0.6.tar.gz", hash = "sha256:2e8de7094042a77d6972f1965cf5c9e8f971f1b34d225752f343190a825ebe73"},
+]
+
+[package.dependencies]
+absl-py = "*"
+numpy = ">=1.8.0"
+setuptools = "!=50.0.0"
+
+[[package]]
+name = "lazy-object-proxy"
+version = "1.9.0"
+description = "A fast and thorough lazy object proxy."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "lazy-object-proxy-1.9.0.tar.gz", hash = "sha256:659fb5809fa4629b8a1ac5106f669cfc7bef26fbb389dda53b3e010d1ac4ebae"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b40387277b0ed2d0602b8293b94d7257e17d1479e257b4de114ea11a8cb7f2d7"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8c6cfb338b133fbdbc5cfaa10fe3c6aeea827db80c978dbd13bc9dd8526b7d4"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:721532711daa7db0d8b779b0bb0318fa87af1c10d7fe5e52ef30f8eff254d0cd"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66a3de4a3ec06cd8af3f61b8e1ec67614fbb7c995d02fa224813cb7afefee701"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1aa3de4088c89a1b69f8ec0dcc169aa725b0ff017899ac568fe44ddc1396df46"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-win32.whl", hash = "sha256:f0705c376533ed2a9e5e97aacdbfe04cecd71e0aa84c7c0595d02ef93b6e4455"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:ea806fd4c37bf7e7ad82537b0757999264d5f70c45468447bb2b91afdbe73a6e"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:946d27deaff6cf8452ed0dba83ba38839a87f4f7a9732e8f9fd4107b21e6ff07"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79a31b086e7e68b24b99b23d57723ef7e2c6d81ed21007b6281ebcd1688acb0a"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f699ac1c768270c9e384e4cbd268d6e67aebcfae6cd623b4d7c3bfde5a35db59"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bfb38f9ffb53b942f2b5954e0f610f1e721ccebe9cce9025a38c8ccf4a5183a4"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:189bbd5d41ae7a498397287c408617fe5c48633e7755287b21d741f7db2706a9"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-win32.whl", hash = "sha256:81fc4d08b062b535d95c9ea70dbe8a335c45c04029878e62d744bdced5141586"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:f2457189d8257dd41ae9b434ba33298aec198e30adf2dcdaaa3a28b9994f6adb"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d9e25ef10a39e8afe59a5c348a4dbf29b4868ab76269f81ce1674494e2565a6e"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cbf9b082426036e19c6924a9ce90c740a9861e2bdc27a4834fd0a910742ac1e8"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f5fa4a61ce2438267163891961cfd5e32ec97a2c444e5b842d574251ade27d2"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8fa02eaab317b1e9e03f69aab1f91e120e7899b392c4fc19807a8278a07a97e8"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e7c21c95cae3c05c14aafffe2865bbd5e377cfc1348c4f7751d9dc9a48ca4bda"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-win32.whl", hash = "sha256:f12ad7126ae0c98d601a7ee504c1122bcef553d1d5e0c3bfa77b16b3968d2734"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-win_amd64.whl", hash = "sha256:edd20c5a55acb67c7ed471fa2b5fb66cb17f61430b7a6b9c3b4a1e40293b1671"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0daa332786cf3bb49e10dc6a17a52f6a8f9601b4cf5c295a4f85854d61de63"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cd077f3d04a58e83d04b20e334f678c2b0ff9879b9375ed107d5d07ff160171"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:660c94ea760b3ce47d1855a30984c78327500493d396eac4dfd8bd82041b22be"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:212774e4dfa851e74d393a2370871e174d7ff0ebc980907723bb67d25c8a7c30"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f0117049dd1d5635bbff65444496c90e0baa48ea405125c088e93d9cf4525b11"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-win32.whl", hash = "sha256:0a891e4e41b54fd5b8313b96399f8b0e173bbbfc03c7631f01efbe29bb0bcf82"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:9990d8e71b9f6488e91ad25f322898c136b008d87bf852ff65391b004da5e17b"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e7551208b2aded9c1447453ee366f1c4070602b3d932ace044715d89666899b"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f83ac4d83ef0ab017683d715ed356e30dd48a93746309c8f3517e1287523ef4"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7322c3d6f1766d4ef1e51a465f47955f1e8123caee67dd641e67d539a534d006"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:18b78ec83edbbeb69efdc0e9c1cb41a3b1b1ed11ddd8ded602464c3fc6020494"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:09763491ce220c0299688940f8dc2c5d05fd1f45af1e42e636b2e8b2303e4382"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-win32.whl", hash = "sha256:9090d8e53235aa280fc9239a86ae3ea8ac58eff66a705fa6aa2ec4968b95c821"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:db1c1722726f47e10e0b5fdbf15ac3b8adb58c091d12b3ab713965795036985f"},
+]
+
+[[package]]
+name = "lockfile"
+version = "0.12.2"
+description = "Platform-independent file locking module"
+optional = false
+python-versions = "*"
+files = [
+ {file = "lockfile-0.12.2-py2.py3-none-any.whl", hash = "sha256:6c3cb24f344923d30b2785d5ad75182c8ea7ac1b6171b08657258ec7429d50fa"},
+ {file = "lockfile-0.12.2.tar.gz", hash = "sha256:6aed02de03cba24efabcd600b30540140634fc06cfa603822d508d5361e9f799"},
+]
+
+[[package]]
+name = "loguru"
+version = "0.7.0"
+description = "Python logging made (stupidly) simple"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "loguru-0.7.0-py3-none-any.whl", hash = "sha256:b93aa30099fa6860d4727f1b81f8718e965bb96253fa190fab2077aaad6d15d3"},
+ {file = "loguru-0.7.0.tar.gz", hash = "sha256:1612053ced6ae84d7959dd7d5e431a0532642237ec21f7fd83ac73fe539e03e1"},
+]
+
+[package.dependencies]
+colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""}
+win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""}
+
+[package.extras]
+dev = ["Sphinx (==5.3.0)", "colorama (==0.4.5)", "colorama (==0.4.6)", "freezegun (==1.1.0)", "freezegun (==1.2.2)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v0.990)", "pre-commit (==3.2.1)", "pytest (==6.1.2)", "pytest (==7.2.1)", "pytest-cov (==2.12.1)", "pytest-cov (==4.0.0)", "pytest-mypy-plugins (==1.10.1)", "pytest-mypy-plugins (==1.9.3)", "sphinx-autobuild (==2021.3.14)", "sphinx-rtd-theme (==1.2.0)", "tox (==3.27.1)", "tox (==4.4.6)"]
+
+[[package]]
+name = "lxml"
+version = "4.9.3"
+description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*"
+files = [
+ {file = "lxml-4.9.3-cp27-cp27m-macosx_11_0_x86_64.whl", hash = "sha256:b0a545b46b526d418eb91754565ba5b63b1c0b12f9bd2f808c852d9b4b2f9b5c"},
+ {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:075b731ddd9e7f68ad24c635374211376aa05a281673ede86cbe1d1b3455279d"},
+ {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1e224d5755dba2f4a9498e150c43792392ac9b5380aa1b845f98a1618c94eeef"},
+ {file = "lxml-4.9.3-cp27-cp27m-win32.whl", hash = "sha256:2c74524e179f2ad6d2a4f7caf70e2d96639c0954c943ad601a9e146c76408ed7"},
+ {file = "lxml-4.9.3-cp27-cp27m-win_amd64.whl", hash = "sha256:4f1026bc732b6a7f96369f7bfe1a4f2290fb34dce00d8644bc3036fb351a4ca1"},
+ {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0781a98ff5e6586926293e59480b64ddd46282953203c76ae15dbbbf302e8bb"},
+ {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cef2502e7e8a96fe5ad686d60b49e1ab03e438bd9123987994528febd569868e"},
+ {file = "lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"},
+ {file = "lxml-4.9.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:42871176e7896d5d45138f6d28751053c711ed4d48d8e30b498da155af39aebd"},
+ {file = "lxml-4.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae8b9c6deb1e634ba4f1930eb67ef6e6bf6a44b6eb5ad605642b2d6d5ed9ce3c"},
+ {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:411007c0d88188d9f621b11d252cce90c4a2d1a49db6c068e3c16422f306eab8"},
+ {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cd47b4a0d41d2afa3e58e5bf1f62069255aa2fd6ff5ee41604418ca925911d76"},
+ {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e2cb47860da1f7e9a5256254b74ae331687b9672dfa780eed355c4c9c3dbd23"},
+ {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1247694b26342a7bf47c02e513d32225ededd18045264d40758abeb3c838a51f"},
+ {file = "lxml-4.9.3-cp310-cp310-win32.whl", hash = "sha256:cdb650fc86227eba20de1a29d4b2c1bfe139dc75a0669270033cb2ea3d391b85"},
+ {file = "lxml-4.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:97047f0d25cd4bcae81f9ec9dc290ca3e15927c192df17331b53bebe0e3ff96d"},
+ {file = "lxml-4.9.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:1f447ea5429b54f9582d4b955f5f1985f278ce5cf169f72eea8afd9502973dd5"},
+ {file = "lxml-4.9.3-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:57d6ba0ca2b0c462f339640d22882acc711de224d769edf29962b09f77129cbf"},
+ {file = "lxml-4.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9767e79108424fb6c3edf8f81e6730666a50feb01a328f4a016464a5893f835a"},
+ {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:71c52db65e4b56b8ddc5bb89fb2e66c558ed9d1a74a45ceb7dcb20c191c3df2f"},
+ {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d73d8ecf8ecf10a3bd007f2192725a34bd62898e8da27eb9d32a58084f93962b"},
+ {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0a3d3487f07c1d7f150894c238299934a2a074ef590b583103a45002035be120"},
+ {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e28c51fa0ce5674be9f560c6761c1b441631901993f76700b1b30ca6c8378d6"},
+ {file = "lxml-4.9.3-cp311-cp311-win32.whl", hash = "sha256:0bfd0767c5c1de2551a120673b72e5d4b628737cb05414f03c3277bf9bed3305"},
+ {file = "lxml-4.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:25f32acefac14ef7bd53e4218fe93b804ef6f6b92ffdb4322bb6d49d94cad2bc"},
+ {file = "lxml-4.9.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d3ff32724f98fbbbfa9f49d82852b159e9784d6094983d9a8b7f2ddaebb063d4"},
+ {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48d6ed886b343d11493129e019da91d4039826794a3e3027321c56d9e71505be"},
+ {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9a92d3faef50658dd2c5470af249985782bf754c4e18e15afb67d3ab06233f13"},
+ {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b4e4bc18382088514ebde9328da057775055940a1f2e18f6ad2d78aa0f3ec5b9"},
+ {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc9b106a1bf918db68619fdcd6d5ad4f972fdd19c01d19bdb6bf63f3589a9ec5"},
+ {file = "lxml-4.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:d37017287a7adb6ab77e1c5bee9bcf9660f90ff445042b790402a654d2ad81d8"},
+ {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56dc1f1ebccc656d1b3ed288f11e27172a01503fc016bcabdcbc0978b19352b7"},
+ {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:578695735c5a3f51569810dfebd05dd6f888147a34f0f98d4bb27e92b76e05c2"},
+ {file = "lxml-4.9.3-cp35-cp35m-win32.whl", hash = "sha256:704f61ba8c1283c71b16135caf697557f5ecf3e74d9e453233e4771d68a1f42d"},
+ {file = "lxml-4.9.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c41bfca0bd3532d53d16fd34d20806d5c2b1ace22a2f2e4c0008570bf2c58833"},
+ {file = "lxml-4.9.3-cp36-cp36m-macosx_11_0_x86_64.whl", hash = "sha256:64f479d719dc9f4c813ad9bb6b28f8390360660b73b2e4beb4cb0ae7104f1c12"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:dd708cf4ee4408cf46a48b108fb9427bfa00b9b85812a9262b5c668af2533ea5"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c31c7462abdf8f2ac0577d9f05279727e698f97ecbb02f17939ea99ae8daa98"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e3cd95e10c2610c360154afdc2f1480aea394f4a4f1ea0a5eacce49640c9b190"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:4930be26af26ac545c3dffb662521d4e6268352866956672231887d18f0eaab2"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4aec80cde9197340bc353d2768e2a75f5f60bacda2bab72ab1dc499589b3878c"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:14e019fd83b831b2e61baed40cab76222139926b1fb5ed0e79225bc0cae14584"},
+ {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0c0850c8b02c298d3c7006b23e98249515ac57430e16a166873fc47a5d549287"},
+ {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:aca086dc5f9ef98c512bac8efea4483eb84abbf926eaeedf7b91479feb092458"},
+ {file = "lxml-4.9.3-cp36-cp36m-win32.whl", hash = "sha256:50baa9c1c47efcaef189f31e3d00d697c6d4afda5c3cde0302d063492ff9b477"},
+ {file = "lxml-4.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bef4e656f7d98aaa3486d2627e7d2df1157d7e88e7efd43a65aa5dd4714916cf"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:46f409a2d60f634fe550f7133ed30ad5321ae2e6630f13657fb9479506b00601"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:4c28a9144688aef80d6ea666c809b4b0e50010a2aca784c97f5e6bf143d9f129"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:141f1d1a9b663c679dc524af3ea1773e618907e96075262726c7612c02b149a4"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:53ace1c1fd5a74ef662f844a0413446c0629d151055340e9893da958a374f70d"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17a753023436a18e27dd7769e798ce302963c236bc4114ceee5b25c18c52c693"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7d298a1bd60c067ea75d9f684f5f3992c9d6766fadbc0bcedd39750bf344c2f4"},
+ {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:081d32421db5df44c41b7f08a334a090a545c54ba977e47fd7cc2deece78809a"},
+ {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:23eed6d7b1a3336ad92d8e39d4bfe09073c31bfe502f20ca5116b2a334f8ec02"},
+ {file = "lxml-4.9.3-cp37-cp37m-win32.whl", hash = "sha256:1509dd12b773c02acd154582088820893109f6ca27ef7291b003d0e81666109f"},
+ {file = "lxml-4.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:120fa9349a24c7043854c53cae8cec227e1f79195a7493e09e0c12e29f918e52"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4d2d1edbca80b510443f51afd8496be95529db04a509bc8faee49c7b0fb6d2cc"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d7e43bd40f65f7d97ad8ef5c9b1778943d02f04febef12def25f7583d19baac"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:71d66ee82e7417828af6ecd7db817913cb0cf9d4e61aa0ac1fde0583d84358db"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:6fc3c450eaa0b56f815c7b62f2b7fba7266c4779adcf1cece9e6deb1de7305ce"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65299ea57d82fb91c7f019300d24050c4ddeb7c5a190e076b5f48a2b43d19c42"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:eadfbbbfb41b44034a4c757fd5d70baccd43296fb894dba0295606a7cf3124aa"},
+ {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3e9bdd30efde2b9ccfa9cb5768ba04fe71b018a25ea093379c857c9dad262c40"},
+ {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fcdd00edfd0a3001e0181eab3e63bd5c74ad3e67152c84f93f13769a40e073a7"},
+ {file = "lxml-4.9.3-cp38-cp38-win32.whl", hash = "sha256:57aba1bbdf450b726d58b2aea5fe47c7875f5afb2c4a23784ed78f19a0462574"},
+ {file = "lxml-4.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:92af161ecbdb2883c4593d5ed4815ea71b31fafd7fd05789b23100d081ecac96"},
+ {file = "lxml-4.9.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:9bb6ad405121241e99a86efff22d3ef469024ce22875a7ae045896ad23ba2340"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8ed74706b26ad100433da4b9d807eae371efaa266ffc3e9191ea436087a9d6a7"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fbf521479bcac1e25a663df882c46a641a9bff6b56dc8b0fafaebd2f66fb231b"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:303bf1edce6ced16bf67a18a1cf8339d0db79577eec5d9a6d4a80f0fb10aa2da"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:5515edd2a6d1a5a70bfcdee23b42ec33425e405c5b351478ab7dc9347228f96e"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:690dafd0b187ed38583a648076865d8c229661ed20e48f2335d68e2cf7dc829d"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6420a005548ad52154c8ceab4a1290ff78d757f9e5cbc68f8c77089acd3c432"},
+ {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bb3bb49c7a6ad9d981d734ef7c7193bc349ac338776a0360cc671eaee89bcf69"},
+ {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d27be7405547d1f958b60837dc4c1007da90b8b23f54ba1f8b728c78fdb19d50"},
+ {file = "lxml-4.9.3-cp39-cp39-win32.whl", hash = "sha256:8df133a2ea5e74eef5e8fc6f19b9e085f758768a16e9877a60aec455ed2609b2"},
+ {file = "lxml-4.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:4dd9a263e845a72eacb60d12401e37c616438ea2e5442885f65082c276dfb2b2"},
+ {file = "lxml-4.9.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6689a3d7fd13dc687e9102a27e98ef33730ac4fe37795d5036d18b4d527abd35"},
+ {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f6bdac493b949141b733c5345b6ba8f87a226029cbabc7e9e121a413e49441e0"},
+ {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:05186a0f1346ae12553d66df1cfce6f251589fea3ad3da4f3ef4e34b2d58c6a3"},
+ {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c2006f5c8d28dee289f7020f721354362fa304acbaaf9745751ac4006650254b"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-macosx_11_0_x86_64.whl", hash = "sha256:5c245b783db29c4e4fbbbfc9c5a78be496c9fea25517f90606aa1f6b2b3d5f7b"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4fb960a632a49f2f089d522f70496640fdf1218f1243889da3822e0a9f5f3ba7"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:50670615eaf97227d5dc60de2dc99fb134a7130d310d783314e7724bf163f75d"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9719fe17307a9e814580af1f5c6e05ca593b12fb7e44fe62450a5384dbf61b4b"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3331bece23c9ee066e0fb3f96c61322b9e0f54d775fccefff4c38ca488de283a"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-macosx_11_0_x86_64.whl", hash = "sha256:ed667f49b11360951e201453fc3967344d0d0263aa415e1619e85ae7fd17b4e0"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8b77946fd508cbf0fccd8e400a7f71d4ac0e1595812e66025bac475a8e811694"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4da8ca0c0c0aea88fd46be8e44bd49716772358d648cce45fe387f7b92374a7"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fe4bda6bd4340caa6e5cf95e73f8fea5c4bfc55763dd42f1b50a94c1b4a2fbd4"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f3df3db1d336b9356dd3112eae5f5c2b8b377f3bc826848567f10bfddfee77e9"},
+ {file = "lxml-4.9.3.tar.gz", hash = "sha256:48628bd53a426c9eb9bc066a923acaa0878d1e86129fd5359aee99285f4eed9c"},
+]
+
+[package.extras]
+cssselect = ["cssselect (>=0.7)"]
+html5 = ["html5lib"]
+htmlsoup = ["BeautifulSoup4"]
+source = ["Cython (>=0.29.35)"]
+
+[[package]]
+name = "lz4"
+version = "4.3.2"
+description = "LZ4 Bindings for Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "lz4-4.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1c4c100d99eed7c08d4e8852dd11e7d1ec47a3340f49e3a96f8dfbba17ffb300"},
+ {file = "lz4-4.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:edd8987d8415b5dad25e797043936d91535017237f72fa456601be1479386c92"},
+ {file = "lz4-4.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7c50542b4ddceb74ab4f8b3435327a0861f06257ca501d59067a6a482535a77"},
+ {file = "lz4-4.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f5614d8229b33d4a97cb527db2a1ac81308c6e796e7bdb5d1309127289f69d5"},
+ {file = "lz4-4.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f00a9ba98f6364cadda366ae6469b7b3568c0cced27e16a47ddf6b774169270"},
+ {file = "lz4-4.3.2-cp310-cp310-win32.whl", hash = "sha256:b10b77dc2e6b1daa2f11e241141ab8285c42b4ed13a8642495620416279cc5b2"},
+ {file = "lz4-4.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:86480f14a188c37cb1416cdabacfb4e42f7a5eab20a737dac9c4b1c227f3b822"},
+ {file = "lz4-4.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7c2df117def1589fba1327dceee51c5c2176a2b5a7040b45e84185ce0c08b6a3"},
+ {file = "lz4-4.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1f25eb322eeb24068bb7647cae2b0732b71e5c639e4e4026db57618dcd8279f0"},
+ {file = "lz4-4.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8df16c9a2377bdc01e01e6de5a6e4bbc66ddf007a6b045688e285d7d9d61d1c9"},
+ {file = "lz4-4.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f571eab7fec554d3b1db0d666bdc2ad85c81f4b8cb08906c4c59a8cad75e6e22"},
+ {file = "lz4-4.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7211dc8f636ca625abc3d4fb9ab74e5444b92df4f8d58ec83c8868a2b0ff643d"},
+ {file = "lz4-4.3.2-cp311-cp311-win32.whl", hash = "sha256:867664d9ca9bdfce840ac96d46cd8838c9ae891e859eb98ce82fcdf0e103a947"},
+ {file = "lz4-4.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:a6a46889325fd60b8a6b62ffc61588ec500a1883db32cddee9903edfba0b7584"},
+ {file = "lz4-4.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a85b430138882f82f354135b98c320dafb96fc8fe4656573d95ab05de9eb092"},
+ {file = "lz4-4.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65d5c93f8badacfa0456b660285e394e65023ef8071142e0dcbd4762166e1be0"},
+ {file = "lz4-4.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b50f096a6a25f3b2edca05aa626ce39979d63c3b160687c8c6d50ac3943d0ba"},
+ {file = "lz4-4.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:200d05777d61ba1ff8d29cb51c534a162ea0b4fe6d3c28be3571a0a48ff36080"},
+ {file = "lz4-4.3.2-cp37-cp37m-win32.whl", hash = "sha256:edc2fb3463d5d9338ccf13eb512aab61937be50aa70734bcf873f2f493801d3b"},
+ {file = "lz4-4.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:83acfacab3a1a7ab9694333bcb7950fbeb0be21660d236fd09c8337a50817897"},
+ {file = "lz4-4.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7a9eec24ec7d8c99aab54de91b4a5a149559ed5b3097cf30249b665689b3d402"},
+ {file = "lz4-4.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:31d72731c4ac6ebdce57cd9a5cabe0aecba229c4f31ba3e2c64ae52eee3fdb1c"},
+ {file = "lz4-4.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83903fe6db92db0be101acedc677aa41a490b561567fe1b3fe68695b2110326c"},
+ {file = "lz4-4.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:926b26db87ec8822cf1870efc3d04d06062730ec3279bbbd33ba47a6c0a5c673"},
+ {file = "lz4-4.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e05afefc4529e97c08e65ef92432e5f5225c0bb21ad89dee1e06a882f91d7f5e"},
+ {file = "lz4-4.3.2-cp38-cp38-win32.whl", hash = "sha256:ad38dc6a7eea6f6b8b642aaa0683253288b0460b70cab3216838747163fb774d"},
+ {file = "lz4-4.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:7e2dc1bd88b60fa09b9b37f08553f45dc2b770c52a5996ea52b2b40f25445676"},
+ {file = "lz4-4.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:edda4fb109439b7f3f58ed6bede59694bc631c4b69c041112b1b7dc727fffb23"},
+ {file = "lz4-4.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ca83a623c449295bafad745dcd399cea4c55b16b13ed8cfea30963b004016c9"},
+ {file = "lz4-4.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5ea0e788dc7e2311989b78cae7accf75a580827b4d96bbaf06c7e5a03989bd5"},
+ {file = "lz4-4.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a98b61e504fb69f99117b188e60b71e3c94469295571492a6468c1acd63c37ba"},
+ {file = "lz4-4.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4931ab28a0d1c133104613e74eec1b8bb1f52403faabe4f47f93008785c0b929"},
+ {file = "lz4-4.3.2-cp39-cp39-win32.whl", hash = "sha256:ec6755cacf83f0c5588d28abb40a1ac1643f2ff2115481089264c7630236618a"},
+ {file = "lz4-4.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:4caedeb19e3ede6c7a178968b800f910db6503cb4cb1e9cc9221157572139b49"},
+ {file = "lz4-4.3.2.tar.gz", hash = "sha256:e1431d84a9cfb23e6773e72078ce8e65cad6745816d4cbf9ae67da5ea419acda"},
+]
+
+[package.extras]
+docs = ["sphinx (>=1.6.0)", "sphinx-bootstrap-theme"]
+flake8 = ["flake8"]
+tests = ["psutil", "pytest (!=3.3.0)", "pytest-cov"]
+
+[[package]]
+name = "markdown-it-py"
+version = "3.0.0"
+description = "Python port of markdown-it. Markdown parsing, done right!"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"},
+ {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"},
+]
+
+[package.dependencies]
+mdurl = ">=0.1,<1.0"
+
+[package.extras]
+benchmarking = ["psutil", "pytest", "pytest-benchmark"]
+code-style = ["pre-commit (>=3.0,<4.0)"]
+compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"]
+linkify = ["linkify-it-py (>=1,<3)"]
+plugins = ["mdit-py-plugins"]
+profiling = ["gprof2dot"]
+rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
+testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
+
+[[package]]
+name = "markupsafe"
+version = "2.1.3"
+description = "Safely add untrusted strings to HTML/XML markup."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"},
+ {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"},
+]
+
+[[package]]
+name = "matplotlib"
+version = "3.7.2"
+description = "Python plotting package"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "matplotlib-3.7.2-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:2699f7e73a76d4c110f4f25be9d2496d6ab4f17345307738557d345f099e07de"},
+ {file = "matplotlib-3.7.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a8035ba590658bae7562786c9cc6ea1a84aa49d3afab157e414c9e2ea74f496d"},
+ {file = "matplotlib-3.7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2f8e4a49493add46ad4a8c92f63e19d548b2b6ebbed75c6b4c7f46f57d36cdd1"},
+ {file = "matplotlib-3.7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71667eb2ccca4c3537d9414b1bc00554cb7f91527c17ee4ec38027201f8f1603"},
+ {file = "matplotlib-3.7.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:152ee0b569a37630d8628534c628456b28686e085d51394da6b71ef84c4da201"},
+ {file = "matplotlib-3.7.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:070f8dddd1f5939e60aacb8fa08f19551f4b0140fab16a3669d5cd6e9cb28fc8"},
+ {file = "matplotlib-3.7.2-cp310-cp310-win32.whl", hash = "sha256:fdbb46fad4fb47443b5b8ac76904b2e7a66556844f33370861b4788db0f8816a"},
+ {file = "matplotlib-3.7.2-cp310-cp310-win_amd64.whl", hash = "sha256:23fb1750934e5f0128f9423db27c474aa32534cec21f7b2153262b066a581fd1"},
+ {file = "matplotlib-3.7.2-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:30e1409b857aa8a747c5d4f85f63a79e479835f8dffc52992ac1f3f25837b544"},
+ {file = "matplotlib-3.7.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:50e0a55ec74bf2d7a0ebf50ac580a209582c2dd0f7ab51bc270f1b4a0027454e"},
+ {file = "matplotlib-3.7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ac60daa1dc83e8821eed155796b0f7888b6b916cf61d620a4ddd8200ac70cd64"},
+ {file = "matplotlib-3.7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:305e3da477dc8607336ba10bac96986d6308d614706cae2efe7d3ffa60465b24"},
+ {file = "matplotlib-3.7.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c308b255efb9b06b23874236ec0f10f026673ad6515f602027cc8ac7805352d"},
+ {file = "matplotlib-3.7.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60c521e21031632aa0d87ca5ba0c1c05f3daacadb34c093585a0be6780f698e4"},
+ {file = "matplotlib-3.7.2-cp311-cp311-win32.whl", hash = "sha256:26bede320d77e469fdf1bde212de0ec889169b04f7f1179b8930d66f82b30cbc"},
+ {file = "matplotlib-3.7.2-cp311-cp311-win_amd64.whl", hash = "sha256:af4860132c8c05261a5f5f8467f1b269bf1c7c23902d75f2be57c4a7f2394b3e"},
+ {file = "matplotlib-3.7.2-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:a1733b8e84e7e40a9853e505fe68cc54339f97273bdfe6f3ed980095f769ddc7"},
+ {file = "matplotlib-3.7.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d9881356dc48e58910c53af82b57183879129fa30492be69058c5b0d9fddf391"},
+ {file = "matplotlib-3.7.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f081c03f413f59390a80b3e351cc2b2ea0205839714dbc364519bcf51f4b56ca"},
+ {file = "matplotlib-3.7.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cd120fca3407a225168238b790bd5c528f0fafde6172b140a2f3ab7a4ea63e9"},
+ {file = "matplotlib-3.7.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a2c1590b90aa7bd741b54c62b78de05d4186271e34e2377e0289d943b3522273"},
+ {file = "matplotlib-3.7.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d2ff3c984b8a569bc1383cd468fc06b70d7b59d5c2854ca39f1436ae8394117"},
+ {file = "matplotlib-3.7.2-cp38-cp38-win32.whl", hash = "sha256:5dea00b62d28654b71ca92463656d80646675628d0828e08a5f3b57e12869e13"},
+ {file = "matplotlib-3.7.2-cp38-cp38-win_amd64.whl", hash = "sha256:0f506a1776ee94f9e131af1ac6efa6e5bc7cb606a3e389b0ccb6e657f60bb676"},
+ {file = "matplotlib-3.7.2-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:6515e878f91894c2e4340d81f0911857998ccaf04dbc1bba781e3d89cbf70608"},
+ {file = "matplotlib-3.7.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:71f7a8c6b124e904db550f5b9fe483d28b896d4135e45c4ea381ad3b8a0e3256"},
+ {file = "matplotlib-3.7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:12f01b92ecd518e0697da4d97d163b2b3aa55eb3eb4e2c98235b3396d7dad55f"},
+ {file = "matplotlib-3.7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7e28d6396563955f7af437894a36bf2b279462239a41028323e04b85179058b"},
+ {file = "matplotlib-3.7.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbcf59334ff645e6a67cd5f78b4b2cdb76384cdf587fa0d2dc85f634a72e1a3e"},
+ {file = "matplotlib-3.7.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:318c89edde72ff95d8df67d82aca03861240512994a597a435a1011ba18dbc7f"},
+ {file = "matplotlib-3.7.2-cp39-cp39-win32.whl", hash = "sha256:ce55289d5659b5b12b3db4dc9b7075b70cef5631e56530f14b2945e8836f2d20"},
+ {file = "matplotlib-3.7.2-cp39-cp39-win_amd64.whl", hash = "sha256:2ecb5be2b2815431c81dc115667e33da0f5a1bcf6143980d180d09a717c4a12e"},
+ {file = "matplotlib-3.7.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fdcd28360dbb6203fb5219b1a5658df226ac9bebc2542a9e8f457de959d713d0"},
+ {file = "matplotlib-3.7.2-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c3cca3e842b11b55b52c6fb8bd6a4088693829acbfcdb3e815fa9b7d5c92c1b"},
+ {file = "matplotlib-3.7.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebf577c7a6744e9e1bd3fee45fc74a02710b214f94e2bde344912d85e0c9af7c"},
+ {file = "matplotlib-3.7.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:936bba394682049919dda062d33435b3be211dc3dcaa011e09634f060ec878b2"},
+ {file = "matplotlib-3.7.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bc221ffbc2150458b1cd71cdd9ddd5bb37962b036e41b8be258280b5b01da1dd"},
+ {file = "matplotlib-3.7.2-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35d74ebdb3f71f112b36c2629cf32323adfbf42679e2751252acd468f5001c07"},
+ {file = "matplotlib-3.7.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:717157e61b3a71d3d26ad4e1770dc85156c9af435659a25ee6407dc866cb258d"},
+ {file = "matplotlib-3.7.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:20f844d6be031948148ba49605c8b96dfe7d3711d1b63592830d650622458c11"},
+ {file = "matplotlib-3.7.2.tar.gz", hash = "sha256:a8cdb91dddb04436bd2f098b8fdf4b81352e68cf4d2c6756fcc414791076569b"},
+]
+
+[package.dependencies]
+contourpy = ">=1.0.1"
+cycler = ">=0.10"
+fonttools = ">=4.22.0"
+importlib-resources = {version = ">=3.2.0", markers = "python_version < \"3.10\""}
+kiwisolver = ">=1.0.1"
+numpy = ">=1.20"
+packaging = ">=20.0"
+pillow = ">=6.2.0"
+pyparsing = ">=2.3.1,<3.1"
+python-dateutil = ">=2.7"
+
+[[package]]
+name = "matplotlib-inline"
+version = "0.1.6"
+description = "Inline Matplotlib backend for Jupyter"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"},
+ {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"},
+]
+
+[package.dependencies]
+traitlets = "*"
+
+[[package]]
+name = "mccabe"
+version = "0.7.0"
+description = "McCabe checker, plugin for flake8"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
+ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
+]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+description = "Markdown URL utilities"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
+ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
+]
+
+[[package]]
+name = "mjrl"
+version = "1.0.0"
+description = "RL algorithms for environments in MuJoCo"
+optional = false
+python-versions = "*"
+files = []
+develop = false
+
+[package.source]
+type = "git"
+url = "https://github.com/aravindr93/mjrl"
+reference = "master"
+resolved_reference = "3871d93763d3b49c4741e6daeaebbc605fe140dc"
+
+[[package]]
+name = "mpmath"
+version = "1.3.0"
+description = "Python library for arbitrary-precision floating-point arithmetic"
+optional = false
+python-versions = "*"
+files = [
+ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"},
+ {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"},
+]
+
+[package.extras]
+develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"]
+docs = ["sphinx"]
+gmpy = ["gmpy2 (>=2.1.0a4)"]
+tests = ["pytest (>=4.6)"]
+
+[[package]]
+name = "mujoco"
+version = "2.3.7"
+description = "MuJoCo Physics Simulator"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "mujoco-2.3.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a934315f858a4e0c4b90a682fde519471cfdd7baa64435179da8cd20d4ae3f99"},
+ {file = "mujoco-2.3.7-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:36513024330f88b5f9a43558efef5692b33599bffd5141029b690a27918ffcbe"},
+ {file = "mujoco-2.3.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d4eede8ba8210fbd3d3cd1dbf69e24dd1541aa74c5af5b8adbbbf65504b6dba"},
+ {file = "mujoco-2.3.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab85fafc9d5a091c712947573b7e694512d283876bf7f33ae3f8daad3a20c0db"},
+ {file = "mujoco-2.3.7-cp310-cp310-win_amd64.whl", hash = "sha256:f8b7e13fef8c813d91b78f975ed0815157692777907ffa4b4be53a4edb75019b"},
+ {file = "mujoco-2.3.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9d4018053879016282d27ab7a91e292c72d44efb5a88553feacfe5b843dde103"},
+ {file = "mujoco-2.3.7-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:3149b16b8122ee62642474bfd2871064e8edc40235471cf5d84be3569afc0312"},
+ {file = "mujoco-2.3.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c08660a8d52ef3efde76095f0991e807703a950c1e882d2bcd984b9a846626f7"},
+ {file = "mujoco-2.3.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:426af8965f8636d94a0f75740c3024a62b3e585020ee817ef5208ec844a1ad94"},
+ {file = "mujoco-2.3.7-cp311-cp311-win_amd64.whl", hash = "sha256:215415a8e98a4b50625beae859079d5e0810b2039e50420f0ba81763c34abb59"},
+ {file = "mujoco-2.3.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5c6f5a51d6f537a4bf294cf73816f3a6384573f8f10a5452b044df2771412a96"},
+ {file = "mujoco-2.3.7-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:ea8911e6047f92d7d775701f37e4c093971b6def3160f01d0b6926e29a7e962e"},
+ {file = "mujoco-2.3.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7473a3de4dd1a8762d569ffb139196b4c5e7eca27d256df97b6cd4c66d2a09b2"},
+ {file = "mujoco-2.3.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7e2d8f93d2495ec74efec84e5118ecc6e1d85157a844789c73c9ac9a4e28e"},
+ {file = "mujoco-2.3.7-cp38-cp38-win_amd64.whl", hash = "sha256:720bc228a2023b3b0ed6af78f5b0f8ea36867be321d473321555c57dbf6e4e5b"},
+ {file = "mujoco-2.3.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:98947f4a742d34d36f3c3f83e9167025bb0414bbaa4bd859b0673bdab9959963"},
+ {file = "mujoco-2.3.7-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:d42818f2ee5d1632dbce31d136ed5ff868db54b04e4e9aca0c5a3ac329f8a90f"},
+ {file = "mujoco-2.3.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9237e1ba14bced9449c31199e6d5be49547f3a4c99bc83b196af7ca45fd73b83"},
+ {file = "mujoco-2.3.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39b728ea638245b150e2650c5433e6952e0ed3798c63e47e264574270caea2a3"},
+ {file = "mujoco-2.3.7-cp39-cp39-win_amd64.whl", hash = "sha256:9c721a5042b99d948d5f0296a534bcce3f142c777c4d7642f503a539513f3912"},
+ {file = "mujoco-2.3.7.tar.gz", hash = "sha256:422041f1ce37c6d151fbced1048df626837e94fe3cd9f813585907046336a7d0"},
+]
+
+[package.dependencies]
+absl-py = "*"
+glfw = "*"
+numpy = "*"
+pyopengl = "*"
+
+[[package]]
+name = "mujoco-py"
+version = "1.50.1.68"
+description = ""
+optional = false
+python-versions = "*"
+files = [
+ {file = "mujoco-py-1.50.1.68.tar.gz", hash = "sha256:62fd164401e576a9e7026d81a9ff23316c32b68cea4c5f671423aa073b3da2be"},
+]
+
+[package.dependencies]
+cffi = ">=1.10"
+Cython = ">=0.27.2"
+glfw = ">=1.4.0"
+imageio = ">=2.1.2"
+lockfile = ">=0.12.2"
+numpy = ">=1.11"
+
+[[package]]
+name = "mypy"
+version = "0.910"
+description = "Optional static typing for Python"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "mypy-0.910-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:a155d80ea6cee511a3694b108c4494a39f42de11ee4e61e72bc424c490e46457"},
+ {file = "mypy-0.910-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:b94e4b785e304a04ea0828759172a15add27088520dc7e49ceade7834275bedb"},
+ {file = "mypy-0.910-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:088cd9c7904b4ad80bec811053272986611b84221835e079be5bcad029e79dd9"},
+ {file = "mypy-0.910-cp35-cp35m-win_amd64.whl", hash = "sha256:adaeee09bfde366d2c13fe6093a7df5df83c9a2ba98638c7d76b010694db760e"},
+ {file = "mypy-0.910-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ecd2c3fe726758037234c93df7e98deb257fd15c24c9180dacf1ef829da5f921"},
+ {file = "mypy-0.910-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:d9dd839eb0dc1bbe866a288ba3c1afc33a202015d2ad83b31e875b5905a079b6"},
+ {file = "mypy-0.910-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:3e382b29f8e0ccf19a2df2b29a167591245df90c0b5a2542249873b5c1d78212"},
+ {file = "mypy-0.910-cp36-cp36m-win_amd64.whl", hash = "sha256:53fd2eb27a8ee2892614370896956af2ff61254c275aaee4c230ae771cadd885"},
+ {file = "mypy-0.910-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b6fb13123aeef4a3abbcfd7e71773ff3ff1526a7d3dc538f3929a49b42be03f0"},
+ {file = "mypy-0.910-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e4dab234478e3bd3ce83bac4193b2ecd9cf94e720ddd95ce69840273bf44f6de"},
+ {file = "mypy-0.910-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:7df1ead20c81371ccd6091fa3e2878559b5c4d4caadaf1a484cf88d93ca06703"},
+ {file = "mypy-0.910-cp37-cp37m-win_amd64.whl", hash = "sha256:0aadfb2d3935988ec3815952e44058a3100499f5be5b28c34ac9d79f002a4a9a"},
+ {file = "mypy-0.910-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec4e0cd079db280b6bdabdc807047ff3e199f334050db5cbb91ba3e959a67504"},
+ {file = "mypy-0.910-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:119bed3832d961f3a880787bf621634ba042cb8dc850a7429f643508eeac97b9"},
+ {file = "mypy-0.910-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:866c41f28cee548475f146aa4d39a51cf3b6a84246969f3759cb3e9c742fc072"},
+ {file = "mypy-0.910-cp38-cp38-win_amd64.whl", hash = "sha256:ceb6e0a6e27fb364fb3853389607cf7eb3a126ad335790fa1e14ed02fba50811"},
+ {file = "mypy-0.910-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1a85e280d4d217150ce8cb1a6dddffd14e753a4e0c3cf90baabb32cefa41b59e"},
+ {file = "mypy-0.910-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:42c266ced41b65ed40a282c575705325fa7991af370036d3f134518336636f5b"},
+ {file = "mypy-0.910-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:3c4b8ca36877fc75339253721f69603a9c7fdb5d4d5a95a1a1b899d8b86a4de2"},
+ {file = "mypy-0.910-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:c0df2d30ed496a08de5daed2a9ea807d07c21ae0ab23acf541ab88c24b26ab97"},
+ {file = "mypy-0.910-cp39-cp39-win_amd64.whl", hash = "sha256:c6c2602dffb74867498f86e6129fd52a2770c48b7cd3ece77ada4fa38f94eba8"},
+ {file = "mypy-0.910-py3-none-any.whl", hash = "sha256:ef565033fa5a958e62796867b1df10c40263ea9ded87164d67572834e57a174d"},
+ {file = "mypy-0.910.tar.gz", hash = "sha256:704098302473cb31a218f1775a873b376b30b4c18229421e9e9dc8916fd16150"},
+]
+
+[package.dependencies]
+mypy-extensions = ">=0.4.3,<0.5.0"
+toml = "*"
+typing-extensions = ">=3.7.4"
+
+[package.extras]
+dmypy = ["psutil (>=4.0)"]
+python2 = ["typed-ast (>=1.4.0,<1.5.0)"]
+
+[[package]]
+name = "mypy-extensions"
+version = "0.4.4"
+description = "Experimental type system extensions for programs checked with the mypy typechecker."
+optional = false
+python-versions = ">=2.7"
+files = [
+ {file = "mypy_extensions-0.4.4.tar.gz", hash = "sha256:c8b707883a96efe9b4bb3aaf0dcc07e7e217d7d8368eec4db4049ee9e142f4fd"},
+]
+
+[[package]]
+name = "networkx"
+version = "3.1"
+description = "Python package for creating and manipulating graphs and networks"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"},
+ {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"},
+]
+
+[package.extras]
+default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"]
+developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"]
+doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"]
+extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"]
+test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"]
+
+[[package]]
+name = "nodeenv"
+version = "1.8.0"
+description = "Node.js virtual environment builder"
+optional = false
+python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*"
+files = [
+ {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"},
+ {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"},
+]
+
+[package.dependencies]
+setuptools = "*"
+
+[[package]]
+name = "numpy"
+version = "1.24.4"
+description = "Fundamental package for array computing in Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"},
+ {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"},
+ {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"},
+ {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"},
+ {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"},
+ {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"},
+ {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"},
+ {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"},
+ {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"},
+ {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"},
+ {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"},
+ {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"},
+ {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"},
+ {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"},
+ {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"},
+ {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"},
+ {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"},
+ {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"},
+ {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"},
+ {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"},
+ {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"},
+ {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"},
+ {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"},
+ {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"},
+ {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"},
+ {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"},
+ {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"},
+ {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"},
+]
+
+[[package]]
+name = "opencv-python"
+version = "4.8.0.74"
+description = "Wrapper package for OpenCV python bindings."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "opencv-python-4.8.0.74.tar.gz", hash = "sha256:009e3ce356a0cd2d7423723e00a32fd3d3cc5bb5970ed27a9a1f8a8f221d1db5"},
+ {file = "opencv_python-4.8.0.74-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:31d0d59fc8fdf703de4cec46c79b9f8d026fdde9d23d6e2e6a66809feeebbda9"},
+ {file = "opencv_python-4.8.0.74-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:66eadb5882ee56848b67f9fb57aadcaca2f4c9d9d00a0ef11043041925b51291"},
+ {file = "opencv_python-4.8.0.74-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:038ba7075e55cb8e2846663ae970f0fb776a45b48ee69a887bf4ee15e2570083"},
+ {file = "opencv_python-4.8.0.74-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43dd0dfe331fb95767af581bf3b2781d7a72cf6560ddf2f55949fe547f3e5c9f"},
+ {file = "opencv_python-4.8.0.74-cp37-abi3-win32.whl", hash = "sha256:458e5dc377f15fcf769d80314f3d885bd95457b1a2891bee67df2eb24a1d3a52"},
+ {file = "opencv_python-4.8.0.74-cp37-abi3-win_amd64.whl", hash = "sha256:8fe0018d0056a5187c57120b6b3f6c3e706c13b45c48e54e86d245a9a16fac84"},
+]
+
+[package.dependencies]
+numpy = [
+ {version = ">=1.21.0", markers = "python_version <= \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""},
+ {version = ">=1.19.3", markers = "python_version >= \"3.6\" and platform_system == \"Linux\" and platform_machine == \"aarch64\""},
+ {version = ">=1.17.0", markers = "python_version >= \"3.7\""},
+ {version = ">=1.17.3", markers = "python_version >= \"3.8\""},
+]
+
+[[package]]
+name = "osqp"
+version = "0.6.3"
+description = "OSQP: The Operator Splitting QP Solver"
+optional = false
+python-versions = "*"
+files = [
+ {file = "osqp-0.6.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6b7d923c836f1d07115057e595245ccc1694ecae730a1affda78fc6f3c8d239"},
+ {file = "osqp-0.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1dfda08c38c3521012740a73ef782f97dfc54a41deae4b0bc4afd18d0e74da0"},
+ {file = "osqp-0.6.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7eafa3f3e82dd36c52f3f4ef19a95142405c807c272c4b53c5971c53535d7804"},
+ {file = "osqp-0.6.3-cp310-cp310-win_amd64.whl", hash = "sha256:3cbb6efdaffb7387dc0037dfe3259d4803e5ad7217e6f20fb605c92953214b9d"},
+ {file = "osqp-0.6.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1b2049b2c42565dcaa63ddca1c4028b1fb20aab141453f5d77e8ff5b1a99a2cf"},
+ {file = "osqp-0.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:146b89f2cfbf59eaeb2c47e3a312f2034138df78d80ce052364810dc0ef70fc4"},
+ {file = "osqp-0.6.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0084e3d733c75687d68bc133bc380ce471dfe6f7724af2718a43491782eec8d6"},
+ {file = "osqp-0.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:1b573fe1cd0e82239a279c58817c1d365187ef862e928b2b9c828c3c516ad3c2"},
+ {file = "osqp-0.6.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:6c3951ef505177b858c6cd34de980346014cae3d2234c93db960b12c5885f9a2"},
+ {file = "osqp-0.6.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc18f87c9549032c163ce590a5e32079df94ee656c8fb357ba607aa9d78fab81"},
+ {file = "osqp-0.6.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c07b1a4b538aab629b0fae69f644b7e76f81f94d65230014d482e296dacd046b"},
+ {file = "osqp-0.6.3-cp36-cp36m-win_amd64.whl", hash = "sha256:60abec3593870990b16f00bd5017096a7091fb00b68d0db3383fc048ca8e55c9"},
+ {file = "osqp-0.6.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b73bdd9589901841af83c5ed6a4092b4fac5a0beff9e32682d8526d1f16a728c"},
+ {file = "osqp-0.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71d9f611823af4a8b241c86805920e5382cd65c7f94fd3615b4eef999ed94c7c"},
+ {file = "osqp-0.6.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30fbc3b3c028c06a6c5f1e66be7b7106ad48a29e0dc5bd82393f82dd68235ef8"},
+ {file = "osqp-0.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fe57e4bde071b388518ecb068f26319506dd9cb107363d3d80c12d2e59fc1e81"},
+ {file = "osqp-0.6.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:41f304d1d7f91af07d8f0b01e5af29ec3bb8824f0102c7fd8b13b497be120da4"},
+ {file = "osqp-0.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea7d8c92bcdf4fef98d777f13d39060d425ef2e8778ed487c96a6fa10848cdea"},
+ {file = "osqp-0.6.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f3a3c6d2708868e5e3fe2da300d6523cbf68a3d8734ce9c5043db37391969f5"},
+ {file = "osqp-0.6.3-cp38-cp38-win_amd64.whl", hash = "sha256:1c548a0b3691850e7e22f3624a128d8af33416d70a9b5976a47d4d832028dcd8"},
+ {file = "osqp-0.6.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:387e7abd737dfe32c9ec00ad74af25328cdd0d0f634d79530655c040a5cb9590"},
+ {file = "osqp-0.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1445e10a94e01698e13c87a7debf6ac1a15f3acd1f8f6340cb1ad945db4732b"},
+ {file = "osqp-0.6.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0441c10f7fe5f46692a9b44a57138977bb112ae3f8127151671968c5d9ec5dbb"},
+ {file = "osqp-0.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:b15e65a307fbbabf60248bb9bc204e61d5d4ae64e00427a69e2dad9622f4c29d"},
+ {file = "osqp-0.6.3.tar.gz", hash = "sha256:03e460e683ec2ce0f839353ddfa3c4c8ffa509ab8cf6a2b2afbb586fa453e180"},
+]
+
+[package.dependencies]
+numpy = ">=1.7"
+qdldl = "*"
+scipy = ">=0.13.2"
+
+[[package]]
+name = "packaging"
+version = "21.3"
+description = "Core utilities for Python packages"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"},
+ {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"},
+]
+
+[package.dependencies]
+pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
+
+[[package]]
+name = "parso"
+version = "0.8.3"
+description = "A Python Parser"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"},
+ {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"},
+]
+
+[package.extras]
+qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
+testing = ["docopt", "pytest (<6.0.0)"]
+
+[[package]]
+name = "patchelf"
+version = "0.17.2.1"
+description = "A small utility to modify the dynamic linker and RPATH of ELF executables."
+optional = false
+python-versions = "*"
+files = [
+ {file = "patchelf-0.17.2.1-py2.py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:fc329da0e8f628bd836dfb8eaf523547e342351fa8f739bf2b3fe4a6db5a297c"},
+ {file = "patchelf-0.17.2.1-py2.py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.musllinux_1_1_armv7l.whl", hash = "sha256:ccb266a94edf016efe80151172c26cff8c2ec120a57a1665d257b0442784195d"},
+ {file = "patchelf-0.17.2.1-py2.py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.musllinux_1_1_ppc64le.whl", hash = "sha256:f47b5bdd6885cfb20abdd14c707d26eb6f499a7f52e911865548d4aa43385502"},
+ {file = "patchelf-0.17.2.1-py2.py3-none-manylinux_2_17_s390x.manylinux2014_s390x.musllinux_1_1_s390x.whl", hash = "sha256:a9e6ebb0874a11f7ed56d2380bfaa95f00612b23b15f896583da30c2059fcfa8"},
+ {file = "patchelf-0.17.2.1-py2.py3-none-manylinux_2_5_i686.manylinux1_i686.musllinux_1_1_i686.whl", hash = "sha256:3c8d58f0e4c1929b1c7c45ba8da5a84a8f1aa6a82a46e1cfb2e44a4d40f350e5"},
+ {file = "patchelf-0.17.2.1-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.musllinux_1_1_x86_64.whl", hash = "sha256:d1a9bc0d4fd80c038523ebdc451a1cce75237cfcc52dbd1aca224578001d5927"},
+ {file = "patchelf-0.17.2.1.tar.gz", hash = "sha256:a6eb0dd452ce4127d0d5e1eb26515e39186fa609364274bc1b0b77539cfa7031"},
+]
+
+[package.extras]
+test = ["importlib-metadata", "pytest"]
+
+[[package]]
+name = "pathspec"
+version = "0.11.1"
+description = "Utility library for gitignore style pattern matching of file paths."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"},
+ {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"},
+]
+
+[[package]]
+name = "pathtools"
+version = "0.1.2"
+description = "File system general utilities"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pathtools-0.1.2.tar.gz", hash = "sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0"},
+]
+
+[[package]]
+name = "pbr"
+version = "5.11.1"
+description = "Python Build Reasonableness"
+optional = false
+python-versions = ">=2.6"
+files = [
+ {file = "pbr-5.11.1-py2.py3-none-any.whl", hash = "sha256:567f09558bae2b3ab53cb3c1e2e33e726ff3338e7bae3db5dc954b3a44eef12b"},
+ {file = "pbr-5.11.1.tar.gz", hash = "sha256:aefc51675b0b533d56bb5fd1c8c6c0522fe31896679882e1c4c63d5e4a0fccb3"},
+]
+
+[[package]]
+name = "pexpect"
+version = "4.8.0"
+description = "Pexpect allows easy control of interactive console applications."
+optional = false
+python-versions = "*"
+files = [
+ {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"},
+ {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"},
+]
+
+[package.dependencies]
+ptyprocess = ">=0.5"
+
+[[package]]
+name = "pickleshare"
+version = "0.7.5"
+description = "Tiny 'shelve'-like database with concurrency support"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"},
+ {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"},
+]
+
+[[package]]
+name = "pillow"
+version = "10.0.0"
+description = "Python Imaging Library (Fork)"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"},
+ {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"},
+ {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"},
+ {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"},
+ {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"},
+ {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"},
+ {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"},
+ {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"},
+ {file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"},
+ {file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"},
+ {file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"},
+ {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"},
+ {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"},
+ {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"},
+ {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"},
+ {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"},
+ {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"},
+ {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"},
+ {file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"},
+ {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"},
+ {file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"},
+ {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"},
+ {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"},
+ {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"},
+ {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"},
+ {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"},
+ {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"},
+ {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"},
+ {file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"},
+ {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"},
+ {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"},
+ {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"},
+ {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"},
+ {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"},
+ {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"},
+ {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"},
+ {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"},
+ {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"},
+ {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"},
+ {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"},
+ {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"},
+ {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"},
+ {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"},
+ {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"},
+ {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"},
+ {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"},
+ {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"},
+ {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"},
+ {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"},
+ {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"},
+ {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"},
+ {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"},
+ {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"},
+ {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"},
+ {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"},
+ {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"},
+]
+
+[package.extras]
+docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
+tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+
+[[package]]
+name = "platformdirs"
+version = "3.9.1"
+description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "platformdirs-3.9.1-py3-none-any.whl", hash = "sha256:ad8291ae0ae5072f66c16945166cb11c63394c7a3ad1b1bc9828ca3162da8c2f"},
+ {file = "platformdirs-3.9.1.tar.gz", hash = "sha256:1b42b450ad933e981d56e59f1b97495428c9bd60698baab9f3eb3d00d5822421"},
+]
+
+[package.extras]
+docs = ["furo (>=2023.5.20)", "proselint (>=0.13)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)"]
+
+[[package]]
+name = "pluggy"
+version = "1.2.0"
+description = "plugin and hook calling mechanisms for python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pluggy-1.2.0-py3-none-any.whl", hash = "sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849"},
+ {file = "pluggy-1.2.0.tar.gz", hash = "sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3"},
+]
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
+[[package]]
+name = "portalocker"
+version = "2.7.0"
+description = "Wraps the portalocker recipe for easy usage"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "portalocker-2.7.0-py2.py3-none-any.whl", hash = "sha256:a07c5b4f3985c3cf4798369631fb7011adb498e2a46d8440efc75a8f29a0f983"},
+ {file = "portalocker-2.7.0.tar.gz", hash = "sha256:032e81d534a88ec1736d03f780ba073f047a06c478b06e2937486f334e955c51"},
+]
+
+[package.dependencies]
+pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+docs = ["sphinx (>=1.7.1)"]
+redis = ["redis"]
+tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)"]
+
+[[package]]
+name = "pre-commit"
+version = "2.21.0"
+description = "A framework for managing and maintaining multi-language pre-commit hooks."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pre_commit-2.21.0-py2.py3-none-any.whl", hash = "sha256:e2f91727039fc39a92f58a588a25b87f936de6567eed4f0e673e0507edc75bad"},
+ {file = "pre_commit-2.21.0.tar.gz", hash = "sha256:31ef31af7e474a8d8995027fefdfcf509b5c913ff31f2015b4ec4beb26a6f658"},
+]
+
+[package.dependencies]
+cfgv = ">=2.0.0"
+identify = ">=1.0.0"
+nodeenv = ">=0.11.1"
+pyyaml = ">=5.1"
+virtualenv = ">=20.10.0"
+
+[[package]]
+name = "prompt-toolkit"
+version = "3.0.39"
+description = "Library for building powerful interactive command lines in Python"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "prompt_toolkit-3.0.39-py3-none-any.whl", hash = "sha256:9dffbe1d8acf91e3de75f3b544e4842382fc06c6babe903ac9acb74dc6e08d88"},
+ {file = "prompt_toolkit-3.0.39.tar.gz", hash = "sha256:04505ade687dc26dc4284b1ad19a83be2f2afe83e7a828ace0c72f3a1df72aac"},
+]
+
+[package.dependencies]
+wcwidth = "*"
+
+[[package]]
+name = "protobuf"
+version = "4.23.4"
+description = ""
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "protobuf-4.23.4-cp310-abi3-win32.whl", hash = "sha256:5fea3c64d41ea5ecf5697b83e41d09b9589e6f20b677ab3c48e5f242d9b7897b"},
+ {file = "protobuf-4.23.4-cp310-abi3-win_amd64.whl", hash = "sha256:7b19b6266d92ca6a2a87effa88ecc4af73ebc5cfde194dc737cf8ef23a9a3b12"},
+ {file = "protobuf-4.23.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8547bf44fe8cec3c69e3042f5c4fb3e36eb2a7a013bb0a44c018fc1e427aafbd"},
+ {file = "protobuf-4.23.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:fee88269a090ada09ca63551bf2f573eb2424035bcf2cb1b121895b01a46594a"},
+ {file = "protobuf-4.23.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:effeac51ab79332d44fba74660d40ae79985901ac21bca408f8dc335a81aa597"},
+ {file = "protobuf-4.23.4-cp37-cp37m-win32.whl", hash = "sha256:c3e0939433c40796ca4cfc0fac08af50b00eb66a40bbbc5dee711998fb0bbc1e"},
+ {file = "protobuf-4.23.4-cp37-cp37m-win_amd64.whl", hash = "sha256:9053df6df8e5a76c84339ee4a9f5a2661ceee4a0dab019e8663c50ba324208b0"},
+ {file = "protobuf-4.23.4-cp38-cp38-win32.whl", hash = "sha256:e1c915778d8ced71e26fcf43c0866d7499891bca14c4368448a82edc61fdbc70"},
+ {file = "protobuf-4.23.4-cp38-cp38-win_amd64.whl", hash = "sha256:351cc90f7d10839c480aeb9b870a211e322bf05f6ab3f55fcb2f51331f80a7d2"},
+ {file = "protobuf-4.23.4-cp39-cp39-win32.whl", hash = "sha256:6dd9b9940e3f17077e820b75851126615ee38643c2c5332aa7a359988820c720"},
+ {file = "protobuf-4.23.4-cp39-cp39-win_amd64.whl", hash = "sha256:0a5759f5696895de8cc913f084e27fd4125e8fb0914bb729a17816a33819f474"},
+ {file = "protobuf-4.23.4-py3-none-any.whl", hash = "sha256:e9d0be5bf34b275b9f87ba7407796556abeeba635455d036c7351f7c183ef8ff"},
+ {file = "protobuf-4.23.4.tar.gz", hash = "sha256:ccd9430c0719dce806b93f89c91de7977304729e55377f872a92465d548329a9"},
+]
+
+[[package]]
+name = "psutil"
+version = "5.9.5"
+description = "Cross-platform lib for process and system monitoring in Python."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"},
+ {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"},
+ {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"},
+ {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"},
+ {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"},
+ {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"},
+ {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"},
+ {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"},
+ {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"},
+ {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"},
+ {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = "sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"},
+ {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"},
+ {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"},
+ {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"},
+]
+
+[package.extras]
+test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
+
+[[package]]
+name = "ptyprocess"
+version = "0.7.0"
+description = "Run a subprocess in a pseudo terminal"
+optional = false
+python-versions = "*"
+files = [
+ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"},
+ {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"},
+]
+
+[[package]]
+name = "py"
+version = "1.11.0"
+description = "library with cross-python path, ini-parsing, io, code, log facilities"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+files = [
+ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"},
+ {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"},
+]
+
+[[package]]
+name = "pybullet"
+version = "3.2.5"
+description = "Official Python Interface for the Bullet Physics SDK specialized for Robotics Simulation and Reinforcement Learning"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pybullet-3.2.5-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:4970aec0dd968924f6b1820655a20f80650da2f85ba38b641937c9701a8a2b14"},
+ {file = "pybullet-3.2.5-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b64e4523a11d03729035e0a5baa0ce4d2ca58de8d0a242c0b91e8253781b24c4"},
+ {file = "pybullet-3.2.5-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:49e80fd708a3ffd1d0dac3149e13852bd59cca056bb328bf35b25ea26a8bf504"},
+ {file = "pybullet-3.2.5-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:56456b7b53ab00f33d52a3eb96fb0d7b4b8e16f21987d727b34baecc2019702f"},
+ {file = "pybullet-3.2.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3e22fdb949d0a67e18cc3e248d6199ff788704c68c3edbfc3b5c02fc58f52f9a"},
+ {file = "pybullet-3.2.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3f9c4289f1773b55915f4efb7514b088539d59b4a082465d68ee7caac11355d1"},
+ {file = "pybullet-3.2.5-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9adcaa00de674a02549949f6f8d51b485bd7a23fbc87a1defb2067e1364f8202"},
+ {file = "pybullet-3.2.5.tar.gz", hash = "sha256:1bcb9afb87a086be1b2de18f084d1fdab8194da1bf71f264743ca26baa39c351"},
+]
+
+[[package]]
+name = "pycparser"
+version = "2.21"
+description = "C parser in Python"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
+ {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
+]
+
+[[package]]
+name = "pydantic"
+version = "1.10.12"
+description = "Data validation and settings management using python type hints"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pydantic-1.10.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a1fcb59f2f355ec350073af41d927bf83a63b50e640f4dbaa01053a28b7a7718"},
+ {file = "pydantic-1.10.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b7ccf02d7eb340b216ec33e53a3a629856afe1c6e0ef91d84a4e6f2fb2ca70fe"},
+ {file = "pydantic-1.10.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fb2aa3ab3728d950bcc885a2e9eff6c8fc40bc0b7bb434e555c215491bcf48b"},
+ {file = "pydantic-1.10.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:771735dc43cf8383959dc9b90aa281f0b6092321ca98677c5fb6125a6f56d58d"},
+ {file = "pydantic-1.10.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ca48477862372ac3770969b9d75f1bf66131d386dba79506c46d75e6b48c1e09"},
+ {file = "pydantic-1.10.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a5e7add47a5b5a40c49b3036d464e3c7802f8ae0d1e66035ea16aa5b7a3923ed"},
+ {file = "pydantic-1.10.12-cp310-cp310-win_amd64.whl", hash = "sha256:e4129b528c6baa99a429f97ce733fff478ec955513630e61b49804b6cf9b224a"},
+ {file = "pydantic-1.10.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b0d191db0f92dfcb1dec210ca244fdae5cbe918c6050b342d619c09d31eea0cc"},
+ {file = "pydantic-1.10.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:795e34e6cc065f8f498c89b894a3c6da294a936ee71e644e4bd44de048af1405"},
+ {file = "pydantic-1.10.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69328e15cfda2c392da4e713443c7dbffa1505bc9d566e71e55abe14c97ddc62"},
+ {file = "pydantic-1.10.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2031de0967c279df0d8a1c72b4ffc411ecd06bac607a212892757db7462fc494"},
+ {file = "pydantic-1.10.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ba5b2e6fe6ca2b7e013398bc7d7b170e21cce322d266ffcd57cca313e54fb246"},
+ {file = "pydantic-1.10.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2a7bac939fa326db1ab741c9d7f44c565a1d1e80908b3797f7f81a4f86bc8d33"},
+ {file = "pydantic-1.10.12-cp311-cp311-win_amd64.whl", hash = "sha256:87afda5539d5140cb8ba9e8b8c8865cb5b1463924d38490d73d3ccfd80896b3f"},
+ {file = "pydantic-1.10.12-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:549a8e3d81df0a85226963611950b12d2d334f214436a19537b2efed61b7639a"},
+ {file = "pydantic-1.10.12-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:598da88dfa127b666852bef6d0d796573a8cf5009ffd62104094a4fe39599565"},
+ {file = "pydantic-1.10.12-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba5c4a8552bff16c61882db58544116d021d0b31ee7c66958d14cf386a5b5350"},
+ {file = "pydantic-1.10.12-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c79e6a11a07da7374f46970410b41d5e266f7f38f6a17a9c4823db80dadf4303"},
+ {file = "pydantic-1.10.12-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab26038b8375581dc832a63c948f261ae0aa21f1d34c1293469f135fa92972a5"},
+ {file = "pydantic-1.10.12-cp37-cp37m-win_amd64.whl", hash = "sha256:e0a16d274b588767602b7646fa05af2782576a6cf1022f4ba74cbb4db66f6ca8"},
+ {file = "pydantic-1.10.12-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a9dfa722316f4acf4460afdf5d41d5246a80e249c7ff475c43a3a1e9d75cf62"},
+ {file = "pydantic-1.10.12-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a73f489aebd0c2121ed974054cb2759af8a9f747de120acd2c3394cf84176ccb"},
+ {file = "pydantic-1.10.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b30bcb8cbfccfcf02acb8f1a261143fab622831d9c0989707e0e659f77a18e0"},
+ {file = "pydantic-1.10.12-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fcfb5296d7877af406ba1547dfde9943b1256d8928732267e2653c26938cd9c"},
+ {file = "pydantic-1.10.12-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2f9a6fab5f82ada41d56b0602606a5506aab165ca54e52bc4545028382ef1c5d"},
+ {file = "pydantic-1.10.12-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dea7adcc33d5d105896401a1f37d56b47d443a2b2605ff8a969a0ed5543f7e33"},
+ {file = "pydantic-1.10.12-cp38-cp38-win_amd64.whl", hash = "sha256:1eb2085c13bce1612da8537b2d90f549c8cbb05c67e8f22854e201bde5d98a47"},
+ {file = "pydantic-1.10.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef6c96b2baa2100ec91a4b428f80d8f28a3c9e53568219b6c298c1125572ebc6"},
+ {file = "pydantic-1.10.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c076be61cd0177a8433c0adcb03475baf4ee91edf5a4e550161ad57fc90f523"},
+ {file = "pydantic-1.10.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d5a58feb9a39f481eda4d5ca220aa8b9d4f21a41274760b9bc66bfd72595b86"},
+ {file = "pydantic-1.10.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5f805d2d5d0a41633651a73fa4ecdd0b3d7a49de4ec3fadf062fe16501ddbf1"},
+ {file = "pydantic-1.10.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1289c180abd4bd4555bb927c42ee42abc3aee02b0fb2d1223fb7c6e5bef87dbe"},
+ {file = "pydantic-1.10.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5d1197e462e0364906cbc19681605cb7c036f2475c899b6f296104ad42b9f5fb"},
+ {file = "pydantic-1.10.12-cp39-cp39-win_amd64.whl", hash = "sha256:fdbdd1d630195689f325c9ef1a12900524dceb503b00a987663ff4f58669b93d"},
+ {file = "pydantic-1.10.12-py3-none-any.whl", hash = "sha256:b749a43aa51e32839c9d71dc67eb1e4221bb04af1033a32e3923d46f9effa942"},
+ {file = "pydantic-1.10.12.tar.gz", hash = "sha256:0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303"},
+]
+
+[package.dependencies]
+typing-extensions = ">=4.2.0"
+
+[package.extras]
+dotenv = ["python-dotenv (>=0.10.4)"]
+email = ["email-validator (>=1.0.3)"]
+
+[[package]]
+name = "pydocstyle"
+version = "6.3.0"
+description = "Python docstring style checker"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pydocstyle-6.3.0-py3-none-any.whl", hash = "sha256:118762d452a49d6b05e194ef344a55822987a462831ade91ec5c06fd2169d019"},
+ {file = "pydocstyle-6.3.0.tar.gz", hash = "sha256:7ce43f0c0ac87b07494eb9c0b462c0b73e6ff276807f204d6b53edc72b7e44e1"},
+]
+
+[package.dependencies]
+snowballstemmer = ">=2.2.0"
+
+[package.extras]
+toml = ["tomli (>=1.2.3)"]
+
+[[package]]
+name = "pyflyby"
+version = "1.8.5"
+description = "pyflyby - Python development productivity tools, in particular automatic import management"
+optional = false
+python-versions = ">3.0, !=3.0.*, !=3.1.*, !=3.2.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, <4"
+files = [
+ {file = "pyflyby-1.8.5.tar.gz", hash = "sha256:1efdb53a46e0385d64f4d2475aff93b7d5fc814933aa07b67451909e26cc186b"},
+]
+
+[package.dependencies]
+six = "*"
+toml = "*"
+
+[[package]]
+name = "pygame"
+version = "2.1.0"
+description = "Python Game Development"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pygame-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c84a93e6d33dafce9e25080ac557342333e15ef7e378ba84cb6181c52a8fd663"},
+ {file = "pygame-2.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0842458b49257ab539b7b6622a242cabcddcb61178b8ae074aaceb890be75b6"},
+ {file = "pygame-2.1.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6efa3fa472acb97c784224b59a89e80da6231f0dbf54df8442ffa3352c0534d6"},
+ {file = "pygame-2.1.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:02a26b3be6cc478f18f4efa506ee5a585f68350857ac5e68e187301e943e3d6d"},
+ {file = "pygame-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5c62fbdb30082f7e1dcfa253da48e7b4be7342d275b34b2efa51f6cffc5942b"},
+ {file = "pygame-2.1.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a305dcf44f03a8dd7baefb97dc24949d7e719fd686cd3211121639aec4ce464"},
+ {file = "pygame-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:847b4bc22edb1d77c992b5d56b19e1ab52e14687adb8bc3ed12a8a98fbd7e1ff"},
+ {file = "pygame-2.1.0-cp310-cp310-win32.whl", hash = "sha256:e9368c105a8bccc8adfe7fd7fa5220d2b6c03979a3a57a8178c42f6fa9914ebc"},
+ {file = "pygame-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9a81d057a7dea95850e44118f141a892fde93c938ccb08fbc5dd7f1a26c2f1fe"},
+ {file = "pygame-2.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ada3d33e7e6907d5c3bf771dc58c47ee6994a1e28fed55e4f8f8b817367beb8f"},
+ {file = "pygame-2.1.0-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5a3edc8211d0cf39d1e4d7ded1a0727c53aeb21205963f184199521708bbb05c"},
+ {file = "pygame-2.1.0-cp36-cp36m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:53c6fa767e3eef52d403eda5d032e48b6040ccce03fbd64af2f71843168118da"},
+ {file = "pygame-2.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c28c6f764aa03a0245db12346f1da327c6f49bcc20e53aefec6eed57e4fbe1ce"},
+ {file = "pygame-2.1.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5d36d530a8994c5bb8889816981f82b7942d8ec7651ca1d922d01302c1feecd2"},
+ {file = "pygame-2.1.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdd488daa4ad33748d5ea806e311bfe01b9cc506def5288400072fcd66d226cf"},
+ {file = "pygame-2.1.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9284e76923777c21b8bea19d8528be9cd62d0915139ed3c3cde6c43f849466f5"},
+ {file = "pygame-2.1.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:49e5fb589a86169aa95b83d3429ee034799792374e13dbc0da83091d86365a4b"},
+ {file = "pygame-2.1.0-cp36-cp36m-win32.whl", hash = "sha256:c6ee571995527e779b46cafee7ebef2dceb1a9c375143828e019293ff0efa167"},
+ {file = "pygame-2.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:b400edd7391972e75b4243113089d6ea10b032e1306e8721efabb36d33c2d0f2"},
+ {file = "pygame-2.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0d2f80b501aacd74a660d4422793ea1cd4e209bee385aac18d0a07bd671511ee"},
+ {file = "pygame-2.1.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:32cb64627c2eb5c4c067ffe614e08ccb8987d096100d225e070dddce05725b63"},
+ {file = "pygame-2.1.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:38b5a43ab02c162501e62b857ff2cb128076b0786dd4e1d8bea63db8326f9da1"},
+ {file = "pygame-2.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba5bf655c892bbf4a9bafb4fcbc4c71023cc9a65f0cae0f3eba09a11018a858e"},
+ {file = "pygame-2.1.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:add546fcbf8954f00647f5e7d595ab9389f6a7542a99fc5dca514e14fd799773"},
+ {file = "pygame-2.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:987c0d5fcd7737c31b35df06f78932c48eeff2c97473001e224fdebd3292b2db"},
+ {file = "pygame-2.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:594234050b50b57c538842155dc3095c9d4f994266325adb4dd008aee526157f"},
+ {file = "pygame-2.1.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:59a5461ef317e4d233d1bb5ce63311ccad3e911a652bda159d3922351050158c"},
+ {file = "pygame-2.1.0-cp37-cp37m-win32.whl", hash = "sha256:9b2ad10ffaa226ca40ae229143b0a118426aff42e2459b626d355846c59a765d"},
+ {file = "pygame-2.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4f73058569573af12c8181e032745f11d85f0799510965d938b1f16c7f13afcb"},
+ {file = "pygame-2.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:85844714f82a5379100825473b1a7b24192b4a944aed3128da9386e26adc3bed"},
+ {file = "pygame-2.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b0e96c0f68f6bb88da216765920c6dbc55ae83e70435d8ebac87d271fc058646"},
+ {file = "pygame-2.1.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:3d5a76fa826202182d989e8399fca0c3c163fbb4f8ece773e77955a7a62cbed3"},
+ {file = "pygame-2.1.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2bfefabe78bda7a1bfba253cbe2131038402ce2b32e4218feeba6431fe429abb"},
+ {file = "pygame-2.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3804476fab6ec7230aa817ee5c3b378ba956321fdd5f91f51c97452c588869d2"},
+ {file = "pygame-2.1.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70a11eec9bae6e8970c5bc4b3d0908eb2c42d4bd4ed488e41e49774b7cb41f57"},
+ {file = "pygame-2.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4eff1db92d53dc2e49ed832dd6c76530e1e2b5954eef091f6af41b41d2d5c3ac"},
+ {file = "pygame-2.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1eb91198fc47c2e4fdc19c544b5d94534a70fd877f5c342228feb05e9fc4bef"},
+ {file = "pygame-2.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:15d4e42214f93d8c60120e16b690ad03da7f0b3b66f75db8966bccf8c66c4690"},
+ {file = "pygame-2.1.0-cp38-cp38-win32.whl", hash = "sha256:e533f4bf9dc1a91cfd608b9bfb028c6a92383e731c502660933f0f9b812045a6"},
+ {file = "pygame-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:692fe4498c353d663d45d05354fb47c9f6bf324d10b53844b9ed7f60e6c8cefa"},
+ {file = "pygame-2.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:472b81ba6b61ffe5879ac3d0da2e5cb235e0e4da471ad4038f013a7710ab53ab"},
+ {file = "pygame-2.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bb55368d455ab9518b97febd33a8d417988397b019c9408993be034e0b5a7db6"},
+ {file = "pygame-2.1.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f8379052cfbc278b11e31bc97f2e7f5998959c50837c4d54f4e424a541e0c5d9"},
+ {file = "pygame-2.1.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b545634f96132af1d31dcb873cf03a9c4a5654ae39d9ee126db0b2eba2806788"},
+ {file = "pygame-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5eb3dede55d005adea8504f8c9230b9dc2c84c1c728efe93a9718fa1af824dc8"},
+ {file = "pygame-2.1.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f628f9f26c8dadf72fabc9ae0ce5fe7f60d76be71a3407abc756b4d1fd030fa0"},
+ {file = "pygame-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4061ac4e81bb36ec8f0a7027582c1c4dd32a939882e008165627103cb0b3985"},
+ {file = "pygame-2.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fad7b5351931cb68d19d7ecc0b21021fe23237d8fba8c455b5af4a79e1c7c536"},
+ {file = "pygame-2.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a0ab3e4763e0cebf08c55154f4167cdae3683674604a71e1437123225f2a9b36"},
+ {file = "pygame-2.1.0-cp39-cp39-win32.whl", hash = "sha256:64ec45215c2cfc4051bb0f58d26aee3b50a39b1b0a2e6fe8417bb352a6443aad"},
+ {file = "pygame-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:86c66b917afc6330a91ac8c7169c36c77ec536578d1d7724644d41f904e2d146"},
+ {file = "pygame-2.1.0-pp36-pypy36_pp73-win32.whl", hash = "sha256:b0e405fdde643f14d60c2dd140f110a5a38f588396a8b61a1a86374f25cba589"},
+ {file = "pygame-2.1.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:646e871ff5ab7f933cde5ea2bff7b6cd74d7369f43e84a291baebe00bb9a8f6f"},
+ {file = "pygame-2.1.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:88a2dabe617e6173003b65762c636947719da3e2d881a4ea47298e8d70886386"},
+ {file = "pygame-2.1.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7281366b4ebd7f16eac8ec6a6e2adb4c729beda178ea82637d9981e93dd40c9b"},
+ {file = "pygame-2.1.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0227728f2ef751fac43b89f4bcc5c65ce39c855b2a3391ddf2e6024dd667e6bd"},
+ {file = "pygame-2.1.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ab5aba8677d135b94c4714e8256efdfffefc164f354a4d05b846588caf43b99"},
+ {file = "pygame-2.1.0.tar.gz", hash = "sha256:232e51104db0e573221660d172af8e6fc2c0fda183c5dbf2aa52170f29aa9ec9"},
+]
+
+[[package]]
+name = "pygments"
+version = "2.15.1"
+description = "Pygments is a syntax highlighting package written in Python."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"},
+ {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"},
+]
+
+[package.extras]
+plugins = ["importlib-metadata"]
+
+[[package]]
+name = "pylint"
+version = "2.17.4"
+description = "python code static checker"
+optional = false
+python-versions = ">=3.7.2"
+files = [
+ {file = "pylint-2.17.4-py3-none-any.whl", hash = "sha256:7a1145fb08c251bdb5cca11739722ce64a63db479283d10ce718b2460e54123c"},
+ {file = "pylint-2.17.4.tar.gz", hash = "sha256:5dcf1d9e19f41f38e4e85d10f511e5b9c35e1aa74251bf95cdd8cb23584e2db1"},
+]
+
+[package.dependencies]
+astroid = ">=2.15.4,<=2.17.0-dev0"
+colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""}
+dill = {version = ">=0.2", markers = "python_version < \"3.11\""}
+isort = ">=4.2.5,<6"
+mccabe = ">=0.6,<0.8"
+platformdirs = ">=2.2.0"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+tomlkit = ">=0.10.1"
+typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+spelling = ["pyenchant (>=3.2,<4.0)"]
+testutils = ["gitpython (>3)"]
+
+[[package]]
+name = "pyopengl"
+version = "3.1.7"
+description = "Standard OpenGL bindings for Python"
+optional = false
+python-versions = "*"
+files = [
+ {file = "PyOpenGL-3.1.7-py3-none-any.whl", hash = "sha256:a6ab19cf290df6101aaf7470843a9c46207789855746399d0af92521a0a92b7a"},
+ {file = "PyOpenGL-3.1.7.tar.gz", hash = "sha256:eef31a3888e6984fd4d8e6c9961b184c9813ca82604d37fe3da80eb000a76c86"},
+]
+
+[[package]]
+name = "pyparsing"
+version = "3.0.9"
+description = "pyparsing module - Classes and methods to define and execute parsing grammars"
+optional = false
+python-versions = ">=3.6.8"
+files = [
+ {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"},
+ {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"},
+]
+
+[package.extras]
+diagrams = ["jinja2", "railroad-diagrams"]
+
+[[package]]
+name = "pytest"
+version = "7.0.1"
+description = "pytest: simple powerful testing with Python"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pytest-7.0.1-py3-none-any.whl", hash = "sha256:9ce3ff477af913ecf6321fe337b93a2c0dcf2a0a1439c43f5452112c1e4280db"},
+ {file = "pytest-7.0.1.tar.gz", hash = "sha256:e30905a0c131d3d94b89624a1cc5afec3e0ba2fbdb151867d8e0ebd49850f171"},
+]
+
+[package.dependencies]
+atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""}
+attrs = ">=19.2.0"
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=0.12,<2.0"
+py = ">=1.8.2"
+tomli = ">=1.0.0"
+
+[package.extras]
+testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"]
+
+[[package]]
+name = "pytest-cov"
+version = "3.0.0"
+description = "Pytest plugin for measuring coverage."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pytest-cov-3.0.0.tar.gz", hash = "sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470"},
+ {file = "pytest_cov-3.0.0-py3-none-any.whl", hash = "sha256:578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6"},
+]
+
+[package.dependencies]
+coverage = {version = ">=5.2.1", extras = ["toml"]}
+pytest = ">=4.6"
+
+[package.extras]
+testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"]
+
+[[package]]
+name = "pytest-html"
+version = "3.2.0"
+description = "pytest plugin for generating HTML reports"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pytest-html-3.2.0.tar.gz", hash = "sha256:c4e2f4bb0bffc437f51ad2174a8a3e71df81bbc2f6894604e604af18fbe687c3"},
+ {file = "pytest_html-3.2.0-py3-none-any.whl", hash = "sha256:868c08564a68d8b2c26866f1e33178419bb35b1e127c33784a28622eb827f3f3"},
+]
+
+[package.dependencies]
+py = ">=1.8.2"
+pytest = ">=5.0,<6.0.0 || >6.0.0"
+pytest-metadata = "*"
+
+[[package]]
+name = "pytest-metadata"
+version = "3.0.0"
+description = "pytest plugin for test session metadata"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest_metadata-3.0.0-py3-none-any.whl", hash = "sha256:a17b1e40080401dc23177599208c52228df463db191c1a573ccdffacd885e190"},
+ {file = "pytest_metadata-3.0.0.tar.gz", hash = "sha256:769a9c65d2884bd583bc626b0ace77ad15dbe02dd91a9106d47fd46d9c2569ca"},
+]
+
+[package.dependencies]
+pytest = ">=7.0.0"
+
+[package.extras]
+test = ["black (>=22.1.0)", "flake8 (>=4.0.1)", "pre-commit (>=2.17.0)", "tox (>=3.24.5)"]
+
+[[package]]
+name = "python-dateutil"
+version = "2.8.2"
+description = "Extensions to the standard Python datetime module"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+files = [
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+]
+
+[package.dependencies]
+six = ">=1.5"
+
+[[package]]
+name = "pyupgrade"
+version = "2.38.4"
+description = "A tool to automatically upgrade syntax for newer versions."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pyupgrade-2.38.4-py2.py3-none-any.whl", hash = "sha256:944ff993c396ddc2b9012eb3de4cda138eb4c149b22c6c560d4c8bfd0e180982"},
+ {file = "pyupgrade-2.38.4.tar.gz", hash = "sha256:1eb43a49f416752929741ba4d706bf3f33593d3cac9bdc217fc1ef55c047c1f4"},
+]
+
+[package.dependencies]
+tokenize-rt = "<5"
+
+[[package]]
+name = "pywin32"
+version = "306"
+description = "Python for Window Extensions"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"},
+ {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"},
+ {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"},
+ {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"},
+ {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"},
+ {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"},
+ {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"},
+ {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"},
+ {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"},
+ {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"},
+ {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"},
+ {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"},
+ {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"},
+ {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"},
+]
+
+[[package]]
+name = "pyyaml"
+version = "6.0.1"
+description = "YAML parser and emitter for Python"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
+ {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+ {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
+ {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
+ {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
+ {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+ {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
+ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
+ {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+ {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
+ {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
+ {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
+ {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
+ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
+]
+
+[[package]]
+name = "qdldl"
+version = "0.1.7.post0"
+description = "QDLDL, a free LDL factorization routine."
+optional = false
+python-versions = "*"
+files = [
+ {file = "qdldl-0.1.7.post0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8ab02e8b9ff86bd644a1935718387c82fbe04c31e3309cf9f7a121d02b1deda8"},
+ {file = "qdldl-0.1.7.post0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e5d6753310377451ed4dc09b1ef28faf40108b713e7f55c8a8ae94d679a672"},
+ {file = "qdldl-0.1.7.post0-cp310-cp310-win_amd64.whl", hash = "sha256:718d8e141832e96ba71ca1807a74813836c6403110faaa3d33a67de1af3b29c4"},
+ {file = "qdldl-0.1.7.post0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0e3f06e8a49ddd834b24fc3d7afbba4fec0923101045aa2666e18d2a9980e329"},
+ {file = "qdldl-0.1.7.post0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a81c46522dd6b3042e2348fa98128bb5c0e466f42bce214e80cfb766ff40930"},
+ {file = "qdldl-0.1.7.post0-cp311-cp311-win_amd64.whl", hash = "sha256:4a86155f3de66c5db0e21544b7a2421c671028fa20da407686d2a8d0e9b57e51"},
+ {file = "qdldl-0.1.7.post0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:717cb1892b033c01a0aae84ededcfa1f05bcb97013095d779c497e6c32f90dac"},
+ {file = "qdldl-0.1.7.post0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fc35432913085d94b2327242cf51388467ef7a37ac0d71eb31b594b575dd498"},
+ {file = "qdldl-0.1.7.post0-cp36-cp36m-win_amd64.whl", hash = "sha256:fd5cfd8c50f33ddacb830594a63b8c1093a24aea45312b9d2ed826cea5ece08a"},
+ {file = "qdldl-0.1.7.post0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:981ca8672e9506976c663552c1eb6f6daf9726d62650b3bf5900260946156166"},
+ {file = "qdldl-0.1.7.post0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8ec670d97cf756f9159dc0a11de5cf054e88aefe84bea1c7282f00334642843"},
+ {file = "qdldl-0.1.7.post0-cp37-cp37m-win_amd64.whl", hash = "sha256:aa208703b44337a7e77f6f2663f7a452144becb4421970d534ff8297b92e1e10"},
+ {file = "qdldl-0.1.7.post0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b42649484f7c0d8ee659224ecaac0a3e97f12531018207f4d7323e4071320eb1"},
+ {file = "qdldl-0.1.7.post0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26aa3d6f0da7779265d72e8f418094003e75fa53c515a53bc03fd8b9bcfbf7de"},
+ {file = "qdldl-0.1.7.post0-cp38-cp38-win_amd64.whl", hash = "sha256:e55bcd6962178029faf543addd49db145302dd51e19855fefa71b5fd55840eea"},
+ {file = "qdldl-0.1.7.post0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c1dd0e570e65aaf35e10b7fb345f7ac763fd05a2227b9c06ce65e07993fc4984"},
+ {file = "qdldl-0.1.7.post0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae161342529852b6248ace4642bc4ee371a7c1e0707b7bc43a43ef7e73c06ca3"},
+ {file = "qdldl-0.1.7.post0-cp39-cp39-win_amd64.whl", hash = "sha256:092f6606690a2b9bd3c939f3147887e02de13bb068fbed5ffdc7459034def623"},
+ {file = "qdldl-0.1.7.post0.tar.gz", hash = "sha256:f346a114c8342ee6d4dbd6471eef314199fb268d3bf7b95885ca351fde2b023f"},
+]
+
+[package.dependencies]
+numpy = ">=1.7"
+scipy = ">=0.13.2"
+
+[[package]]
+name = "requests"
+version = "2.31.0"
+description = "Python HTTP for Humans."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
+]
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+
+[[package]]
+name = "rich"
+version = "13.4.2"
+description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "rich-13.4.2-py3-none-any.whl", hash = "sha256:8f87bc7ee54675732fa66a05ebfe489e27264caeeff3728c945d25971b6485ec"},
+ {file = "rich-13.4.2.tar.gz", hash = "sha256:d653d6bccede5844304c605d5aac802c7cf9621efd700b46c7ec2b51ea914898"},
+]
+
+[package.dependencies]
+markdown-it-py = ">=2.2.0"
+pygments = ">=2.13.0,<3.0.0"
+typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
+
+[package.extras]
+jupyter = ["ipywidgets (>=7.5.1,<9)"]
+
+[[package]]
+name = "ruamel-yaml"
+version = "0.17.32"
+description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "ruamel.yaml-0.17.32-py3-none-any.whl", hash = "sha256:23cd2ed620231677564646b0c6a89d138b6822a0d78656df7abda5879ec4f447"},
+ {file = "ruamel.yaml-0.17.32.tar.gz", hash = "sha256:ec939063761914e14542972a5cba6d33c23b0859ab6342f61cf070cfc600efc2"},
+]
+
+[package.dependencies]
+"ruamel.yaml.clib" = {version = ">=0.2.7", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.12\""}
+
+[package.extras]
+docs = ["ryd"]
+jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"]
+
+[[package]]
+name = "ruamel-yaml-clib"
+version = "0.2.7"
+description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d5859983f26d8cd7bb5c287ef452e8aacc86501487634573d260968f753e1d71"},
+ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:debc87a9516b237d0466a711b18b6ebeb17ba9f391eb7f91c649c5c4ec5006c7"},
+ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:df5828871e6648db72d1c19b4bd24819b80a755c4541d3409f0f7acd0f335c80"},
+ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:efa08d63ef03d079dcae1dfe334f6c8847ba8b645d08df286358b1f5293d24ab"},
+ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-win32.whl", hash = "sha256:763d65baa3b952479c4e972669f679fe490eee058d5aa85da483ebae2009d231"},
+ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-win_amd64.whl", hash = "sha256:d000f258cf42fec2b1bbf2863c61d7b8918d31ffee905da62dede869254d3b8a"},
+ {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:045e0626baf1c52e5527bd5db361bc83180faaba2ff586e763d3d5982a876a9e"},
+ {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:1a6391a7cabb7641c32517539ca42cf84b87b667bad38b78d4d42dd23e957c81"},
+ {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:9c7617df90c1365638916b98cdd9be833d31d337dbcd722485597b43c4a215bf"},
+ {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:41d0f1fa4c6830176eef5b276af04c89320ea616655d01327d5ce65e50575c94"},
+ {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-win32.whl", hash = "sha256:f6d3d39611ac2e4f62c3128a9eed45f19a6608670c5a2f4f07f24e8de3441d38"},
+ {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-win_amd64.whl", hash = "sha256:da538167284de58a52109a9b89b8f6a53ff8437dd6dc26d33b57bf6699153122"},
+ {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:4b3a93bb9bc662fc1f99c5c3ea8e623d8b23ad22f861eb6fce9377ac07ad6072"},
+ {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-macosx_12_0_arm64.whl", hash = "sha256:a234a20ae07e8469da311e182e70ef6b199d0fbeb6c6cc2901204dd87fb867e8"},
+ {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:15910ef4f3e537eea7fe45f8a5d19997479940d9196f357152a09031c5be59f3"},
+ {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:370445fd795706fd291ab00c9df38a0caed0f17a6fb46b0f607668ecb16ce763"},
+ {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-win32.whl", hash = "sha256:ecdf1a604009bd35c674b9225a8fa609e0282d9b896c03dd441a91e5f53b534e"},
+ {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-win_amd64.whl", hash = "sha256:f34019dced51047d6f70cb9383b2ae2853b7fc4dce65129a5acd49f4f9256646"},
+ {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2aa261c29a5545adfef9296b7e33941f46aa5bbd21164228e833412af4c9c75f"},
+ {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f01da5790e95815eb5a8a138508c01c758e5f5bc0ce4286c4f7028b8dd7ac3d0"},
+ {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:40d030e2329ce5286d6b231b8726959ebbe0404c92f0a578c0e2482182e38282"},
+ {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c3ca1fbba4ae962521e5eb66d72998b51f0f4d0f608d3c0347a48e1af262efa7"},
+ {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-win32.whl", hash = "sha256:7bdb4c06b063f6fd55e472e201317a3bb6cdeeee5d5a38512ea5c01e1acbdd93"},
+ {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-win_amd64.whl", hash = "sha256:be2a7ad8fd8f7442b24323d24ba0b56c51219513cfa45b9ada3b87b76c374d4b"},
+ {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:91a789b4aa0097b78c93e3dc4b40040ba55bef518f84a40d4442f713b4094acb"},
+ {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:99e77daab5d13a48a4054803d052ff40780278240a902b880dd37a51ba01a307"},
+ {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:3243f48ecd450eddadc2d11b5feb08aca941b5cd98c9b1db14b2fd128be8c697"},
+ {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8831a2cedcd0f0927f788c5bdf6567d9dc9cc235646a434986a852af1cb54b4b"},
+ {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-win32.whl", hash = "sha256:3110a99e0f94a4a3470ff67fc20d3f96c25b13d24c6980ff841e82bafe827cac"},
+ {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-win_amd64.whl", hash = "sha256:92460ce908546ab69770b2e576e4f99fbb4ce6ab4b245345a3869a0a0410488f"},
+ {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5bc0667c1eb8f83a3752b71b9c4ba55ef7c7058ae57022dd9b29065186a113d9"},
+ {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:4a4d8d417868d68b979076a9be6a38c676eca060785abaa6709c7b31593c35d1"},
+ {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bf9a6bc4a0221538b1a7de3ed7bca4c93c02346853f44e1cd764be0023cd3640"},
+ {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a7b301ff08055d73223058b5c46c55638917f04d21577c95e00e0c4d79201a6b"},
+ {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-win32.whl", hash = "sha256:d5e51e2901ec2366b79f16c2299a03e74ba4531ddcfacc1416639c557aef0ad8"},
+ {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-win_amd64.whl", hash = "sha256:184faeaec61dbaa3cace407cffc5819f7b977e75360e8d5ca19461cd851a5fc5"},
+ {file = "ruamel.yaml.clib-0.2.7.tar.gz", hash = "sha256:1f08fd5a2bea9c4180db71678e850b995d2a5f4537be0e94557668cf0f5f9497"},
+]
+
+[[package]]
+name = "safety"
+version = "2.3.5"
+description = "Checks installed dependencies for known vulnerabilities and licenses."
+optional = false
+python-versions = "*"
+files = [
+ {file = "safety-2.3.5-py3-none-any.whl", hash = "sha256:2227fcac1b22b53c1615af78872b48348661691450aa25d6704a5504dbd1f7e2"},
+ {file = "safety-2.3.5.tar.gz", hash = "sha256:a60c11f8952f412cbb165d70cb1f673a3b43a2ba9a93ce11f97e6a4de834aa3a"},
+]
+
+[package.dependencies]
+Click = ">=8.0.2"
+dparse = ">=0.6.2"
+packaging = ">=21.0,<22.0"
+requests = "*"
+"ruamel.yaml" = ">=0.17.21"
+setuptools = ">=19.3"
+
+[package.extras]
+github = ["jinja2 (>=3.1.0)", "pygithub (>=1.43.3)"]
+gitlab = ["python-gitlab (>=1.3.0)"]
+
+[[package]]
+name = "scipy"
+version = "1.10.1"
+description = "Fundamental algorithms for scientific computing in Python"
+optional = false
+python-versions = "<3.12,>=3.8"
+files = [
+ {file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"},
+ {file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"},
+ {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f"},
+ {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2"},
+ {file = "scipy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1"},
+ {file = "scipy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f1564ea217e82c1bbe75ddf7285ba0709ecd503f048cb1236ae9995f64217bd"},
+ {file = "scipy-1.10.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d925fa1c81b772882aa55bcc10bf88324dadb66ff85d548c71515f6689c6dac5"},
+ {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaea0a6be54462ec027de54fca511540980d1e9eea68b2d5c1dbfe084797be35"},
+ {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15a35c4242ec5f292c3dd364a7c71a61be87a3d4ddcc693372813c0b73c9af1d"},
+ {file = "scipy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:43b8e0bcb877faf0abfb613d51026cd5cc78918e9530e375727bf0625c82788f"},
+ {file = "scipy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35"},
+ {file = "scipy-1.10.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88"},
+ {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1"},
+ {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f"},
+ {file = "scipy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415"},
+ {file = "scipy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9"},
+ {file = "scipy-1.10.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6"},
+ {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353"},
+ {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601"},
+ {file = "scipy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea"},
+ {file = "scipy-1.10.1.tar.gz", hash = "sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5"},
+]
+
+[package.dependencies]
+numpy = ">=1.19.5,<1.27.0"
+
+[package.extras]
+dev = ["click", "doit (>=0.36.0)", "flake8", "mypy", "pycodestyle", "pydevtool", "rich-click", "typing_extensions"]
+doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"]
+test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
+
+[[package]]
+name = "scs"
+version = "3.2.3"
+description = "scs: splitting conic solver"
+optional = false
+python-versions = "*"
+files = [
+ {file = "scs-3.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9d7f7fd2d2cd88938c159b15e8915d9536610e50a9c34ecf36ce0290807afe55"},
+ {file = "scs-3.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:368194620918301bf5309a35a7cd0444f1b1992b182c0a29033c26eb97b3dcb2"},
+ {file = "scs-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:2d835a74c283be73bff6e1978d3ae77a60d9e87db1fdd12916464fa2a1dda517"},
+ {file = "scs-3.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:81511fda3254c0d29089443dcd2305e81d203509e4d77afd160e9174b15ad75a"},
+ {file = "scs-3.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:715ca4532de39b462bd393f9e8b4bf57be4122e20f0780d00db3cab1450a585d"},
+ {file = "scs-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:fcf4b985a787135b3e83682a4c5b9bce9c6290cfead1d7225c38f34f5ead7187"},
+ {file = "scs-3.2.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:91f5194cfabe354c9b1f0ea1de82114028d81c5a4a633177b8da2fe36f301758"},
+ {file = "scs-3.2.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0d15f21e9053c5df37dab0d700da55fcc71f2f454748f364b9de594988b2ab3"},
+ {file = "scs-3.2.3-cp37-cp37m-win_amd64.whl", hash = "sha256:6a80727167ad73151ced202a1ac6c0c7644b00b2e2607edec8a8807fc0443ac8"},
+ {file = "scs-3.2.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:79d7d6c42ee636821460d317b8250945ce04363a47a63aef6b1eae0bd7a418fc"},
+ {file = "scs-3.2.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6f64d23247797cfa289095fb5ddea6eeff5adf98961e953da90233278827e0c"},
+ {file = "scs-3.2.3-cp38-cp38-win_amd64.whl", hash = "sha256:9a14a7c80efb34b469eb4dbaf26a9104dd2ca93e477985f948d8f28cd4b1a2ba"},
+ {file = "scs-3.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3eb601738b260e3dcad117f3e02aceaca5d1e8eac2be225be1c0f9cbf83e75cb"},
+ {file = "scs-3.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1b24176de97ecedf698596086f85da6dad472fe38a4b21cf4b460f87cae2c37"},
+ {file = "scs-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:ddaa5af34a0e1f636d312eb1901bd407383f0b04dda50fba7242d56e618c0966"},
+ {file = "scs-3.2.3.tar.gz", hash = "sha256:e3bd779e7e977e3ae5a2f2035aa4c2a309e29082d59a722d5d6592edc4bdb4b3"},
+]
+
+[package.dependencies]
+numpy = ">=1.7"
+scipy = ">=0.13.2"
+
+[[package]]
+name = "sentry-sdk"
+version = "1.28.1"
+description = "Python client for Sentry (https://sentry.io)"
+optional = false
+python-versions = "*"
+files = [
+ {file = "sentry-sdk-1.28.1.tar.gz", hash = "sha256:dcd88c68aa64dae715311b5ede6502fd684f70d00a7cd4858118f0ba3153a3ae"},
+ {file = "sentry_sdk-1.28.1-py2.py3-none-any.whl", hash = "sha256:6bdb25bd9092478d3a817cb0d01fa99e296aea34d404eac3ca0037faa5c2aa0a"},
+]
+
+[package.dependencies]
+certifi = "*"
+urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""}
+
+[package.extras]
+aiohttp = ["aiohttp (>=3.5)"]
+arq = ["arq (>=0.23)"]
+beam = ["apache-beam (>=2.12)"]
+bottle = ["bottle (>=0.12.13)"]
+celery = ["celery (>=3)"]
+chalice = ["chalice (>=1.16.0)"]
+django = ["django (>=1.8)"]
+falcon = ["falcon (>=1.4)"]
+fastapi = ["fastapi (>=0.79.0)"]
+flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
+grpcio = ["grpcio (>=1.21.1)"]
+httpx = ["httpx (>=0.16.0)"]
+huey = ["huey (>=2)"]
+loguru = ["loguru (>=0.5)"]
+opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
+pure-eval = ["asttokens", "executing", "pure-eval"]
+pymongo = ["pymongo (>=3.1)"]
+pyspark = ["pyspark (>=2.4.4)"]
+quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
+rq = ["rq (>=0.6)"]
+sanic = ["sanic (>=0.8)"]
+sqlalchemy = ["sqlalchemy (>=1.2)"]
+starlette = ["starlette (>=0.19.1)"]
+starlite = ["starlite (>=1.48)"]
+tornado = ["tornado (>=5)"]
+
+[[package]]
+name = "setproctitle"
+version = "1.3.2"
+description = "A Python module to customize the process title"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "setproctitle-1.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:288943dec88e178bb2fd868adf491197cc0fc8b6810416b1c6775e686bab87fe"},
+ {file = "setproctitle-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:630f6fe5e24a619ccf970c78e084319ee8be5be253ecc9b5b216b0f474f5ef18"},
+ {file = "setproctitle-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c877691b90026670e5a70adfbcc735460a9f4c274d35ec5e8a43ce3f8443005"},
+ {file = "setproctitle-1.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7a55fe05f15c10e8c705038777656fe45e3bd676d49ad9ac8370b75c66dd7cd7"},
+ {file = "setproctitle-1.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab45146c71ca6592c9cc8b354a2cc9cc4843c33efcbe1d245d7d37ce9696552d"},
+ {file = "setproctitle-1.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e00c9d5c541a2713ba0e657e0303bf96ddddc412ef4761676adc35df35d7c246"},
+ {file = "setproctitle-1.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:265ecbe2c6eafe82e104f994ddd7c811520acdd0647b73f65c24f51374cf9494"},
+ {file = "setproctitle-1.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c2c46200656280a064073447ebd363937562debef329482fd7e570c8d498f806"},
+ {file = "setproctitle-1.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:fa2f50678f04fda7a75d0fe5dd02bbdd3b13cbe6ed4cf626e4472a7ccf47ae94"},
+ {file = "setproctitle-1.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7f2719a398e1a2c01c2a63bf30377a34d0b6ef61946ab9cf4d550733af8f1ef1"},
+ {file = "setproctitle-1.3.2-cp310-cp310-win32.whl", hash = "sha256:e425be62524dc0c593985da794ee73eb8a17abb10fe692ee43bb39e201d7a099"},
+ {file = "setproctitle-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:e85e50b9c67854f89635a86247412f3ad66b132a4d8534ac017547197c88f27d"},
+ {file = "setproctitle-1.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2a97d51c17d438cf5be284775a322d57b7ca9505bb7e118c28b1824ecaf8aeaa"},
+ {file = "setproctitle-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:587c7d6780109fbd8a627758063d08ab0421377c0853780e5c356873cdf0f077"},
+ {file = "setproctitle-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d17c8bd073cbf8d141993db45145a70b307385b69171d6b54bcf23e5d644de"},
+ {file = "setproctitle-1.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e932089c35a396dc31a5a1fc49889dd559548d14cb2237adae260382a090382e"},
+ {file = "setproctitle-1.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e4f8f12258a8739c565292a551c3db62cca4ed4f6b6126664e2381acb4931bf"},
+ {file = "setproctitle-1.3.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:570d255fd99c7f14d8f91363c3ea96bd54f8742275796bca67e1414aeca7d8c3"},
+ {file = "setproctitle-1.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a8e0881568c5e6beff91ef73c0ec8ac2a9d3ecc9edd6bd83c31ca34f770910c4"},
+ {file = "setproctitle-1.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4bba3be4c1fabf170595b71f3af46c6d482fbe7d9e0563999b49999a31876f77"},
+ {file = "setproctitle-1.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:37ece938110cab2bb3957e3910af8152ca15f2b6efdf4f2612e3f6b7e5459b80"},
+ {file = "setproctitle-1.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db684d6bbb735a80bcbc3737856385b55d53f8a44ce9b46e9a5682c5133a9bf7"},
+ {file = "setproctitle-1.3.2-cp311-cp311-win32.whl", hash = "sha256:ca58cd260ea02759238d994cfae844fc8b1e206c684beb8f38877dcab8451dfc"},
+ {file = "setproctitle-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:88486e6cce2a18a033013d17b30a594f1c5cb42520c49c19e6ade40b864bb7ff"},
+ {file = "setproctitle-1.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:92c626edc66169a1b09e9541b9c0c9f10488447d8a2b1d87c8f0672e771bc927"},
+ {file = "setproctitle-1.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:710e16fa3bade3b026907e4a5e841124983620046166f355bbb84be364bf2a02"},
+ {file = "setproctitle-1.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f29b75e86260b0ab59adb12661ef9f113d2f93a59951373eb6d68a852b13e83"},
+ {file = "setproctitle-1.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c8d9650154afaa86a44ff195b7b10d683c73509d085339d174e394a22cccbb9"},
+ {file = "setproctitle-1.3.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0452282258dfcc01697026a8841258dd2057c4438b43914b611bccbcd048f10"},
+ {file = "setproctitle-1.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:e49ae693306d7624015f31cb3e82708916759d592c2e5f72a35c8f4cc8aef258"},
+ {file = "setproctitle-1.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1ff863a20d1ff6ba2c24e22436a3daa3cd80be1dfb26891aae73f61b54b04aca"},
+ {file = "setproctitle-1.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:55ce1e9925ce1765865442ede9dca0ba9bde10593fcd570b1f0fa25d3ec6b31c"},
+ {file = "setproctitle-1.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7fe9df7aeb8c64db6c34fc3b13271a363475d77bc157d3f00275a53910cb1989"},
+ {file = "setproctitle-1.3.2-cp37-cp37m-win32.whl", hash = "sha256:e5c50e164cd2459bc5137c15288a9ef57160fd5cbf293265ea3c45efe7870865"},
+ {file = "setproctitle-1.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a499fff50387c1520c085a07578a000123f519e5f3eee61dd68e1d301659651f"},
+ {file = "setproctitle-1.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5b932c3041aa924163f4aab970c2f0e6b4d9d773f4d50326e0ea1cd69240e5c5"},
+ {file = "setproctitle-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f4bfc89bd33ebb8e4c0e9846a09b1f5a4a86f5cb7a317e75cc42fee1131b4f4f"},
+ {file = "setproctitle-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcd3cf4286a60fdc95451d8d14e0389a6b4f5cebe02c7f2609325eb016535963"},
+ {file = "setproctitle-1.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5fb4f769c02f63fac90989711a3fee83919f47ae9afd4758ced5d86596318c65"},
+ {file = "setproctitle-1.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5194b4969f82ea842a4f6af2f82cd16ebdc3f1771fb2771796e6add9835c1973"},
+ {file = "setproctitle-1.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f0cde41857a644b7353a0060b5f94f7ba7cf593ebde5a1094da1be581ac9a31"},
+ {file = "setproctitle-1.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9124bedd8006b0e04d4e8a71a0945da9b67e7a4ab88fdad7b1440dc5b6122c42"},
+ {file = "setproctitle-1.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c8a09d570b39517de10ee5b718730e171251ce63bbb890c430c725c8c53d4484"},
+ {file = "setproctitle-1.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:8ff3c8cb26afaed25e8bca7b9dd0c1e36de71f35a3a0706b5c0d5172587a3827"},
+ {file = "setproctitle-1.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:589be87172b238f839e19f146b9ea47c71e413e951ef0dc6db4218ddacf3c202"},
+ {file = "setproctitle-1.3.2-cp38-cp38-win32.whl", hash = "sha256:4749a2b0c9ac52f864d13cee94546606f92b981b50e46226f7f830a56a9dc8e1"},
+ {file = "setproctitle-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:e43f315c68aa61cbdef522a2272c5a5b9b8fd03c301d3167b5e1343ef50c676c"},
+ {file = "setproctitle-1.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:de3a540cd1817ede31f530d20e6a4935bbc1b145fd8f8cf393903b1e02f1ae76"},
+ {file = "setproctitle-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4058564195b975ddc3f0462375c533cce310ccdd41b80ac9aed641c296c3eff4"},
+ {file = "setproctitle-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c5d5dad7c28bdd1ec4187d818e43796f58a845aa892bb4481587010dc4d362b"},
+ {file = "setproctitle-1.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ffc61a388a5834a97953d6444a2888c24a05f2e333f9ed49f977a87bb1ad4761"},
+ {file = "setproctitle-1.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fa1a0fbee72b47dc339c87c890d3c03a72ea65c061ade3204f285582f2da30f"},
+ {file = "setproctitle-1.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe8a988c7220c002c45347430993830666e55bc350179d91fcee0feafe64e1d4"},
+ {file = "setproctitle-1.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bae283e85fc084b18ffeb92e061ff7ac5af9e183c9d1345c93e178c3e5069cbe"},
+ {file = "setproctitle-1.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:fed18e44711c5af4b681c2b3b18f85e6f0f1b2370a28854c645d636d5305ccd8"},
+ {file = "setproctitle-1.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:b34baef93bfb20a8ecb930e395ccd2ae3268050d8cf4fe187de5e2bd806fd796"},
+ {file = "setproctitle-1.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7f0bed90a216ef28b9d227d8d73e28a8c9b88c0f48a082d13ab3fa83c581488f"},
+ {file = "setproctitle-1.3.2-cp39-cp39-win32.whl", hash = "sha256:4d8938249a7cea45ab7e1e48b77685d0f2bab1ebfa9dde23e94ab97968996a7c"},
+ {file = "setproctitle-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:a47d97a75fd2d10c37410b180f67a5835cb1d8fdea2648fd7f359d4277f180b9"},
+ {file = "setproctitle-1.3.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:dad42e676c5261eb50fdb16bdf3e2771cf8f99a79ef69ba88729aeb3472d8575"},
+ {file = "setproctitle-1.3.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c91b9bc8985d00239f7dc08a49927a7ca1ca8a6af2c3890feec3ed9665b6f91e"},
+ {file = "setproctitle-1.3.2-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8579a43eafd246e285eb3a5b939e7158073d5087aacdd2308f23200eac2458b"},
+ {file = "setproctitle-1.3.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:2fbd8187948284293f43533c150cd69a0e4192c83c377da837dbcd29f6b83084"},
+ {file = "setproctitle-1.3.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:faec934cfe5fd6ac1151c02e67156c3f526e82f96b24d550b5d51efa4a5527c6"},
+ {file = "setproctitle-1.3.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1aafc91cbdacc9e5fe712c52077369168e6b6c346f3a9d51bf600b53eae56bb"},
+ {file = "setproctitle-1.3.2-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b617f12c9be61e8f4b2857be4a4319754756845dbbbd9c3718f468bbb1e17bcb"},
+ {file = "setproctitle-1.3.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b2c9cb2705fc84cb8798f1ba74194f4c080aaef19d9dae843591c09b97678e98"},
+ {file = "setproctitle-1.3.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a149a5f7f2c5a065d4e63cb0d7a4b6d3b66e6e80f12e3f8827c4f63974cbf122"},
+ {file = "setproctitle-1.3.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e3ac25bfc4a0f29d2409650c7532d5ddfdbf29f16f8a256fc31c47d0dc05172"},
+ {file = "setproctitle-1.3.2-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65d884e22037b23fa25b2baf1a3316602ed5c5971eb3e9d771a38c3a69ce6e13"},
+ {file = "setproctitle-1.3.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7aa0aac1711fadffc1d51e9d00a3bea61f68443d6ac0241a224e4d622489d665"},
+ {file = "setproctitle-1.3.2.tar.gz", hash = "sha256:b9fb97907c830d260fa0658ed58afd48a86b2b88aac521135c352ff7fd3477fd"},
+]
+
+[package.extras]
+test = ["pytest"]
+
+[[package]]
+name = "setuptools"
+version = "68.0.0"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "setuptools-68.0.0-py3-none-any.whl", hash = "sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f"},
+ {file = "setuptools-68.0.0.tar.gz", hash = "sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
+
+[[package]]
+name = "shellingham"
+version = "1.5.0.post1"
+description = "Tool to Detect Surrounding Shell"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "shellingham-1.5.0.post1-py2.py3-none-any.whl", hash = "sha256:368bf8c00754fd4f55afb7bbb86e272df77e4dc76ac29dbcbb81a59e9fc15744"},
+ {file = "shellingham-1.5.0.post1.tar.gz", hash = "sha256:823bc5fb5c34d60f285b624e7264f4dda254bc803a3774a147bf99c0e3004a28"},
+]
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+
+[[package]]
+name = "smmap"
+version = "5.0.0"
+description = "A pure Python implementation of a sliding window memory map manager"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"},
+ {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"},
+]
+
+[[package]]
+name = "snowballstemmer"
+version = "2.2.0"
+description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms."
+optional = false
+python-versions = "*"
+files = [
+ {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"},
+ {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"},
+]
+
+[[package]]
+name = "stevedore"
+version = "5.1.0"
+description = "Manage dynamic plugins for Python applications"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "stevedore-5.1.0-py3-none-any.whl", hash = "sha256:8cc040628f3cea5d7128f2e76cf486b2251a4e543c7b938f58d9a377f6694a2d"},
+ {file = "stevedore-5.1.0.tar.gz", hash = "sha256:a54534acf9b89bc7ed264807013b505bf07f74dbe4bcfa37d32bd063870b087c"},
+]
+
+[package.dependencies]
+pbr = ">=2.0.0,<2.1.0 || >2.1.0"
+
+[[package]]
+name = "swig"
+version = "4.1.1"
+description = "SWIG is a software development tool that connects programs written in C and C++ with a variety of high-level programming languages."
+optional = false
+python-versions = "*"
+files = [
+ {file = "swig-4.1.1-py2.py3-none-macosx_10_9_universal2.whl", hash = "sha256:42c6c203dba1c1afa60c8e3c6a4da7f39ce95a44bf0bf5b0f5cb16aa6caa51fe"},
+ {file = "swig-4.1.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:fe9b0a49190c484816a5ad020a88fa35b42628fb6c5f3d4a6f3da5f3bb70b31a"},
+ {file = "swig-4.1.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:cee47d9f21bc34fcc0bcb529795313041e589bf4b69fc7bffa1ef325dae0972f"},
+ {file = "swig-4.1.1-py2.py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6eb403545dbe2e16a612abc7cfce5e29da2ef9b2e81944fb7669fdd3a22810f4"},
+ {file = "swig-4.1.1-py2.py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d0bb43aecb5043be8cabd94e391b7c4eed2b900e2183b86be4f9d37da13dc43e"},
+ {file = "swig-4.1.1-py2.py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9ff3aa551c42aee4170d42667b11f55bdd2ec43532717b03e6a10b97604b438c"},
+ {file = "swig-4.1.1-py2.py3-none-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e669f7d82e34a866aef1c7d7c2e9d05c5dad033fd7094c02898a85d1bc5905d7"},
+ {file = "swig-4.1.1-py2.py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3f9da5564718bb81234dd497dc265025b0456c6c9378dfa8206cda56a7fa65ba"},
+ {file = "swig-4.1.1-py2.py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:326a1e68dc5531f1ccb16d314d33afb8c964294d14111912ef069e90573c0b2a"},
+ {file = "swig-4.1.1-py2.py3-none-musllinux_1_1_i686.whl", hash = "sha256:1f8d43e6b29d6024374c4bcafa88cb5149f3e335d13db2556829d94dad1178eb"},
+ {file = "swig-4.1.1-py2.py3-none-musllinux_1_1_ppc64le.whl", hash = "sha256:c51a5d9d6791151a42bcf5524ab33ace6c206d8fa874f75961e91649ba5adb16"},
+ {file = "swig-4.1.1-py2.py3-none-musllinux_1_1_s390x.whl", hash = "sha256:32f91c83ed5cb09b80ef35c38aedbb80bd495d18716e6ca7ff2f9ec1f39ad8bc"},
+ {file = "swig-4.1.1-py2.py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:5df85ef91652d928fa2c45c4956def2ae3beae0957013c559152bb0da6643f4b"},
+ {file = "swig-4.1.1-py2.py3-none-win32.whl", hash = "sha256:bc753a7417a0a232fc34add810d7e46e1c78f49342f704a1a38a5ac920de6ca0"},
+ {file = "swig-4.1.1-py2.py3-none-win_amd64.whl", hash = "sha256:20a00158ddea0c11ce3535f5b9ddd808acdeb4918c89884fba6a697a0e21c33f"},
+ {file = "swig-4.1.1.tar.gz", hash = "sha256:7507e9ccd394a84dc080896277f72fda8393fa122e2e194f47f32fbf3a4cd564"},
+]
+
+[[package]]
+name = "sympy"
+version = "1.12"
+description = "Computer algebra system (CAS) in Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"},
+ {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"},
+]
+
+[package.dependencies]
+mpmath = ">=0.19"
+
+[[package]]
+name = "tensorboardx"
+version = "2.6.1"
+description = "TensorBoardX lets you watch Tensors Flow without Tensorflow"
+optional = false
+python-versions = "*"
+files = [
+ {file = "tensorboardX-2.6.1-py2.py3-none-any.whl", hash = "sha256:4960feb79b1b84fd2b020885b09fd70962caec277d4bc194f338a6c203cd78ca"},
+ {file = "tensorboardX-2.6.1.tar.gz", hash = "sha256:02e2b84d7dc102edb7a052c77041db30fd6ba9b990635178919b8e9cfa157e96"},
+]
+
+[package.dependencies]
+numpy = "*"
+packaging = "*"
+protobuf = ">=4.22.3"
+
+[[package]]
+name = "termcolor"
+version = "2.3.0"
+description = "ANSI color formatting for output in terminal"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "termcolor-2.3.0-py3-none-any.whl", hash = "sha256:3afb05607b89aed0ffe25202399ee0867ad4d3cb4180d98aaf8eefa6a5f7d475"},
+ {file = "termcolor-2.3.0.tar.gz", hash = "sha256:b5b08f68937f138fe92f6c089b99f1e2da0ae56c52b78bf7075fd95420fd9a5a"},
+]
+
+[package.extras]
+tests = ["pytest", "pytest-cov"]
+
+[[package]]
+name = "tokenize-rt"
+version = "4.2.1"
+description = "A wrapper around the stdlib `tokenize` which roundtrips."
+optional = false
+python-versions = ">=3.6.1"
+files = [
+ {file = "tokenize_rt-4.2.1-py2.py3-none-any.whl", hash = "sha256:08a27fa032a81cf45e8858d0ac706004fcd523e8463415ddf1442be38e204ea8"},
+ {file = "tokenize_rt-4.2.1.tar.gz", hash = "sha256:0d4f69026fed520f8a1e0103aa36c406ef4661417f20ca643f913e33531b3b94"},
+]
+
+[[package]]
+name = "toml"
+version = "0.10.2"
+description = "Python Library for Tom's Obvious, Minimal Language"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
+ {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
+]
+
+[[package]]
+name = "tomli"
+version = "2.0.1"
+description = "A lil' TOML parser"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
+ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+]
+
+[[package]]
+name = "tomlkit"
+version = "0.11.8"
+description = "Style preserving TOML library"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tomlkit-0.11.8-py3-none-any.whl", hash = "sha256:8c726c4c202bdb148667835f68d68780b9a003a9ec34167b6c673b38eff2a171"},
+ {file = "tomlkit-0.11.8.tar.gz", hash = "sha256:9330fc7faa1db67b541b28e62018c17d20be733177d290a13b24c62d1614e0c3"},
+]
+
+[[package]]
+name = "torch"
+version = "2.0.1"
+description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "torch-2.0.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:8ced00b3ba471856b993822508f77c98f48a458623596a4c43136158781e306a"},
+ {file = "torch-2.0.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:359bfaad94d1cda02ab775dc1cc386d585712329bb47b8741607ef6ef4950747"},
+ {file = "torch-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:7c84e44d9002182edd859f3400deaa7410f5ec948a519cc7ef512c2f9b34d2c4"},
+ {file = "torch-2.0.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:567f84d657edc5582d716900543e6e62353dbe275e61cdc36eda4929e46df9e7"},
+ {file = "torch-2.0.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:787b5a78aa7917465e9b96399b883920c88a08f4eb63b5a5d2d1a16e27d2f89b"},
+ {file = "torch-2.0.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:e617b1d0abaf6ced02dbb9486803abfef0d581609b09641b34fa315c9c40766d"},
+ {file = "torch-2.0.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b6019b1de4978e96daa21d6a3ebb41e88a0b474898fe251fd96189587408873e"},
+ {file = "torch-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:dbd68cbd1cd9da32fe5d294dd3411509b3d841baecb780b38b3b7b06c7754434"},
+ {file = "torch-2.0.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:ef654427d91600129864644e35deea761fb1fe131710180b952a6f2e2207075e"},
+ {file = "torch-2.0.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:25aa43ca80dcdf32f13da04c503ec7afdf8e77e3a0183dd85cd3e53b2842e527"},
+ {file = "torch-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5ef3ea3d25441d3957348f7e99c7824d33798258a2bf5f0f0277cbcadad2e20d"},
+ {file = "torch-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:0882243755ff28895e8e6dc6bc26ebcf5aa0911ed81b2a12f241fc4b09075b13"},
+ {file = "torch-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:f66aa6b9580a22b04d0af54fcd042f52406a8479e2b6a550e3d9f95963e168c8"},
+ {file = "torch-2.0.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:1adb60d369f2650cac8e9a95b1d5758e25d526a34808f7448d0bd599e4ae9072"},
+ {file = "torch-2.0.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:1bcffc16b89e296826b33b98db5166f990e3b72654a2b90673e817b16c50e32b"},
+ {file = "torch-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:e10e1597f2175365285db1b24019eb6f04d53dcd626c735fc502f1e8b6be9875"},
+ {file = "torch-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:423e0ae257b756bb45a4b49072046772d1ad0c592265c5080070e0767da4e490"},
+ {file = "torch-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8742bdc62946c93f75ff92da00e3803216c6cce9b132fbca69664ca38cfb3e18"},
+ {file = "torch-2.0.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:c62df99352bd6ee5a5a8d1832452110435d178b5164de450831a3a8cc14dc680"},
+ {file = "torch-2.0.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:671a2565e3f63b8fe8e42ae3e36ad249fe5e567435ea27b94edaa672a7d0c416"},
+]
+
+[package.dependencies]
+filelock = "*"
+jinja2 = "*"
+networkx = "*"
+sympy = "*"
+typing-extensions = "*"
+
+[package.extras]
+opt-einsum = ["opt-einsum (>=3.3)"]
+
+[[package]]
+name = "torchtyping"
+version = "0.1.4"
+description = "Runtime type annotations for the shape, dtype etc. of PyTorch Tensors."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "torchtyping-0.1.4-py3-none-any.whl", hash = "sha256:485fb6ef3965c39b0de15f00d6f49373e0a3a6993e9733942a63c5e207d35390"},
+ {file = "torchtyping-0.1.4.tar.gz", hash = "sha256:4763375d17752641bd1bff0faaddade29be3c125fca6355e3cee7700e975fdb5"},
+]
+
+[package.dependencies]
+torch = ">=1.7.0"
+typeguard = ">=2.11.1"
+
+[[package]]
+name = "torchvision"
+version = "0.15.2"
+description = "image and video datasets and models for torch deep learning"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "torchvision-0.15.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7754088774e810c5672b142a45dcf20b1bd986a5a7da90f8660c43dc43fb850c"},
+ {file = "torchvision-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37eb138e13f6212537a3009ac218695483a635c404b6cc1d8e0d0d978026a86d"},
+ {file = "torchvision-0.15.2-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:54143f7cc0797d199b98a53b7d21c3f97615762d4dd17ad45a41c7e80d880e73"},
+ {file = "torchvision-0.15.2-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:1eefebf5fbd01a95fe8f003d623d941601c94b5cec547b420da89cb369d9cf96"},
+ {file = "torchvision-0.15.2-cp310-cp310-win_amd64.whl", hash = "sha256:96fae30c5ca8423f4b9790df0f0d929748e32718d88709b7b567d2f630c042e3"},
+ {file = "torchvision-0.15.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5f35f6bd5bcc4568e6522e4137fa60fcc72f4fa3e615321c26cd87e855acd398"},
+ {file = "torchvision-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:757505a0ab2be7096cb9d2bf4723202c971cceddb72c7952a7e877f773de0f8a"},
+ {file = "torchvision-0.15.2-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:012ad25cfd9019ff9b0714a168727e3845029be1af82296ff1e1482931fa4b80"},
+ {file = "torchvision-0.15.2-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b02a7ffeaa61448737f39a4210b8ee60234bda0515a0c0d8562f884454105b0f"},
+ {file = "torchvision-0.15.2-cp311-cp311-win_amd64.whl", hash = "sha256:10be76ceded48329d0a0355ac33da131ee3993ff6c125e4a02ab34b5baa2472c"},
+ {file = "torchvision-0.15.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8f12415b686dba884fb086f53ac803f692be5a5cdd8a758f50812b30fffea2e4"},
+ {file = "torchvision-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:31211c01f8b8ec33b8a638327b5463212e79a03e43c895f88049f97af1bd12fd"},
+ {file = "torchvision-0.15.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c55f9889e436f14b4f84a9c00ebad0d31f5b4626f10cf8018e6c676f92a6d199"},
+ {file = "torchvision-0.15.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:9a192f2aa979438f23c20e883980b23d13268ab9f819498774a6d2eb021802c2"},
+ {file = "torchvision-0.15.2-cp38-cp38-win_amd64.whl", hash = "sha256:c07071bc8d02aa8fcdfe139ab6a1ef57d3b64c9e30e84d12d45c9f4d89fb6536"},
+ {file = "torchvision-0.15.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4790260fcf478a41c7ecc60a6d5200a88159fdd8d756e9f29f0f8c59c4a67a68"},
+ {file = "torchvision-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:987ab62225b4151a11e53fd06150c5258ced24ac9d7c547e0e4ab6fbca92a5ce"},
+ {file = "torchvision-0.15.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:63df26673e66cba3f17e07c327a8cafa3cce98265dbc3da329f1951d45966838"},
+ {file = "torchvision-0.15.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b85f98d4cc2f72452f6792ab4463a3541bc5678a8cdd3da0e139ba2fe8b56d42"},
+ {file = "torchvision-0.15.2-cp39-cp39-win_amd64.whl", hash = "sha256:07c462524cc1bba5190c16a9d47eac1fca024d60595a310f23c00b4ffff18b30"},
+]
+
+[package.dependencies]
+numpy = "*"
+pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0"
+requests = "*"
+torch = "2.0.1"
+
+[package.extras]
+scipy = ["scipy"]
+
+[[package]]
+name = "tqdm"
+version = "4.65.0"
+description = "Fast, Extensible Progress Meter"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tqdm-4.65.0-py3-none-any.whl", hash = "sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671"},
+ {file = "tqdm-4.65.0.tar.gz", hash = "sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+dev = ["py-make (>=0.1.0)", "twine", "wheel"]
+notebook = ["ipywidgets (>=6)"]
+slack = ["slack-sdk"]
+telegram = ["requests"]
+
+[[package]]
+name = "traitlets"
+version = "5.9.0"
+description = "Traitlets Python configuration system"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "traitlets-5.9.0-py3-none-any.whl", hash = "sha256:9e6ec080259b9a5940c797d58b613b5e31441c2257b87c2e795c5228ae80d2d8"},
+ {file = "traitlets-5.9.0.tar.gz", hash = "sha256:f6cde21a9c68cf756af02035f72d5a723bf607e862e7be33ece505abf4a3bad9"},
+]
+
+[package.extras]
+docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
+test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"]
+
+[[package]]
+name = "typeguard"
+version = "4.0.0"
+description = "Run-time type checker for Python"
+optional = false
+python-versions = ">=3.7.4"
+files = [
+ {file = "typeguard-4.0.0-py3-none-any.whl", hash = "sha256:c4a40af0ba8a41077221271b46d0a6d8d46045443e4d887887c69254ca861952"},
+ {file = "typeguard-4.0.0.tar.gz", hash = "sha256:194fb3dbcb06ea9caf7088f3befee014de57961689f9c859ac5239b1ef61d987"},
+]
+
+[package.dependencies]
+importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""}
+typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+doc = ["packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["mypy (>=1.2.0)", "pytest (>=7)"]
+
+[[package]]
+name = "typer"
+version = "0.9.0"
+description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"},
+ {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"},
+]
+
+[package.dependencies]
+click = ">=7.1.1,<9.0.0"
+colorama = {version = ">=0.4.3,<0.5.0", optional = true, markers = "extra == \"all\""}
+rich = {version = ">=10.11.0,<14.0.0", optional = true, markers = "extra == \"all\""}
+shellingham = {version = ">=1.3.0,<2.0.0", optional = true, markers = "extra == \"all\""}
+typing-extensions = ">=3.7.4.3"
+
+[package.extras]
+all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"]
+dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"]
+doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"]
+test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"]
+
+[[package]]
+name = "types-python-dateutil"
+version = "2.8.19.14"
+description = "Typing stubs for python-dateutil"
+optional = false
+python-versions = "*"
+files = [
+ {file = "types-python-dateutil-2.8.19.14.tar.gz", hash = "sha256:1f4f10ac98bb8b16ade9dbee3518d9ace017821d94b057a425b069f834737f4b"},
+ {file = "types_python_dateutil-2.8.19.14-py3-none-any.whl", hash = "sha256:f977b8de27787639986b4e28963263fd0e5158942b3ecef91b9335c130cb1ce9"},
+]
+
+[[package]]
+name = "types-setuptools"
+version = "68.0.0.3"
+description = "Typing stubs for setuptools"
+optional = false
+python-versions = "*"
+files = [
+ {file = "types-setuptools-68.0.0.3.tar.gz", hash = "sha256:d57ae6076100b5704b3cc869fdefc671e1baf4c2cd6643f84265dfc0b955bf05"},
+ {file = "types_setuptools-68.0.0.3-py3-none-any.whl", hash = "sha256:fec09e5c18264c5c09351c00be01a34456fb7a88e457abe97401325f84ad9d36"},
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.7.1"
+description = "Backported and Experimental Type Hints for Python 3.7+"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"},
+ {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"},
+]
+
+[[package]]
+name = "urllib3"
+version = "2.0.4"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "urllib3-2.0.4-py3-none-any.whl", hash = "sha256:de7df1803967d2c2a98e4b11bb7d6bd9210474c46e8a0401514e3a42a75ebde4"},
+ {file = "urllib3-2.0.4.tar.gz", hash = "sha256:8d22f86aae8ef5e410d4f539fde9ce6b2113a001bb4d189e0aed70642d602b11"},
+]
+
+[package.extras]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"]
+socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
+zstd = ["zstandard (>=0.18.0)"]
+
+[[package]]
+name = "varname"
+version = "0.11.2"
+description = "Dark magics about variable names in python."
+optional = false
+python-versions = ">=3.8,<4.0"
+files = [
+ {file = "varname-0.11.2-py3-none-any.whl", hash = "sha256:0b251b7a5a470cfa32e276ce69856f5b52250bb2d955fdb80328255727bc85d0"},
+ {file = "varname-0.11.2.tar.gz", hash = "sha256:63338d0ff2420529353ba33120d980981d229e872ea1e3d191d414c540b51b80"},
+]
+
+[package.dependencies]
+executing = ">=1.2,<2.0"
+
+[package.extras]
+all = ["asttokens (==2.*)", "pure_eval (==0.*)"]
+
+[[package]]
+name = "virtualenv"
+version = "20.24.2"
+description = "Virtual Python Environment builder"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "virtualenv-20.24.2-py3-none-any.whl", hash = "sha256:43a3052be36080548bdee0b42919c88072037d50d56c28bd3f853cbe92b953ff"},
+ {file = "virtualenv-20.24.2.tar.gz", hash = "sha256:fd8a78f46f6b99a67b7ec5cf73f92357891a7b3a40fd97637c27f854aae3b9e0"},
+]
+
+[package.dependencies]
+distlib = ">=0.3.7,<1"
+filelock = ">=3.12.2,<4"
+platformdirs = ">=3.9.1,<4"
+
+[package.extras]
+docs = ["furo (>=2023.5.20)", "proselint (>=0.13)", "sphinx (>=7.0.1)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"]
+test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"]
+
+[[package]]
+name = "wandb"
+version = "0.15.7"
+description = "A CLI and library for interacting with the Weights and Biases API."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "wandb-0.15.7-py3-none-any.whl", hash = "sha256:65de59ea9c38e04536d59926487392fa87db76e3437b010cebe1b51a702b020c"},
+ {file = "wandb-0.15.7.tar.gz", hash = "sha256:af5adaf0529ed842d1facd31bc3839438e4ab397744a597f38327445edfaa89e"},
+]
+
+[package.dependencies]
+appdirs = ">=1.4.3"
+Click = ">=7.1,<8.0.0 || >8.0.0"
+docker-pycreds = ">=0.4.0"
+GitPython = ">=1.0.0,<3.1.29 || >3.1.29"
+pathtools = "*"
+protobuf = [
+ {version = ">=3.12.0,<4.21.0 || >4.21.0,<5", markers = "python_version < \"3.9\" and sys_platform == \"linux\""},
+ {version = ">=3.19.0,<4.21.0 || >4.21.0,<5", markers = "sys_platform != \"linux\""},
+]
+psutil = ">=5.0.0"
+PyYAML = "*"
+requests = ">=2.0.0,<3"
+sentry-sdk = ">=1.0.0"
+setproctitle = "*"
+setuptools = "*"
+typing-extensions = {version = "*", markers = "python_version < \"3.10\""}
+
+[package.extras]
+async = ["httpx (>=0.22.0)"]
+aws = ["boto3"]
+azure = ["azure-identity", "azure-storage-blob"]
+gcp = ["google-cloud-storage"]
+grpc = ["grpcio (>=1.27.2)"]
+kubeflow = ["google-cloud-storage", "kubernetes", "minio", "sh"]
+launch = ["awscli", "azure-containerregistry", "azure-identity", "azure-storage-blob", "boto3", "botocore", "chardet", "google-auth", "google-cloud-artifact-registry", "google-cloud-compute", "google-cloud-storage", "iso8601", "kubernetes", "nbconvert", "nbformat", "optuna", "typing-extensions"]
+media = ["bokeh", "moviepy", "numpy", "pillow", "plotly", "rdkit-pypi", "soundfile"]
+models = ["cloudpickle"]
+sweeps = ["sweeps (>=0.2.0)"]
+
+[[package]]
+name = "wcwidth"
+version = "0.2.6"
+description = "Measures the displayed width of unicode strings in a terminal"
+optional = false
+python-versions = "*"
+files = [
+ {file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"},
+ {file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"},
+]
+
+[[package]]
+name = "win32-setctime"
+version = "1.1.0"
+description = "A small Python utility to set file creation time on Windows"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "win32_setctime-1.1.0-py3-none-any.whl", hash = "sha256:231db239e959c2fe7eb1d7dc129f11172354f98361c4fa2d6d2d7e278baa8aad"},
+ {file = "win32_setctime-1.1.0.tar.gz", hash = "sha256:15cf5750465118d6929ae4de4eb46e8edae9a5634350c01ba582df868e932cb2"},
+]
+
+[package.extras]
+dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"]
+
+[[package]]
+name = "wrapt"
+version = "1.15.0"
+description = "Module for decorators, wrappers and monkey patching."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+files = [
+ {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"},
+ {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"},
+ {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"},
+ {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"},
+ {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"},
+ {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"},
+ {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"},
+ {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"},
+ {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"},
+ {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"},
+ {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"},
+ {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"},
+ {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"},
+ {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"},
+ {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"},
+ {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"},
+ {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"},
+ {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"},
+ {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"},
+ {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"},
+ {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"},
+ {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"},
+ {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"},
+ {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"},
+ {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"},
+ {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"},
+ {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"},
+]
+
+[[package]]
+name = "zipp"
+version = "3.16.2"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "zipp-3.16.2-py3-none-any.whl", hash = "sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0"},
+ {file = "zipp-3.16.2.tar.gz", hash = "sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
+
+[metadata]
+lock-version = "2.0"
+python-versions = "~3.8"
+content-hash = "c9d5950e919f48b5128d1818bf29b6b6f746a947318d9e659c7c5862399520f1"
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..ce05cd7
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,205 @@
+# Poetry pyproject.toml: https://python-poetry.org/docs/pyproject/
+[build-system]
+build-backend = "poetry.core.masonry.api"
+requires = ["poetry_core>=1.0.0"]
+
+[tool.poetry]
+authors = ["ezhang7423 ", "Ji4chenLi "]
+description = "Offline Reinforcement Learning with Closed-Form Policy Improvement Operators"
+homepage = "https://github.com/ezhang7423/cfpi"
+license = "MIT"
+name = "cfpi"
+readme = "README.md"
+repository = "https://github.com/ezhang7423/cfpi"
+version = "0.1.0"
+
+# Keywords description https://python-poetry.org/docs/pyproject/#keywords
+keywords = [
+ "offline-rl",
+ "rl",
+ "reinforcement-learning",
+ "closed-form",
+ "deep learning",
+ "deep-rl",
+]
+
+# Pypi classifiers: https://pypi.org/classifiers/
+classifiers = [
+ "Development Status :: 3 - Alpha",
+ "Intended Audience :: Developers",
+ "Operating System :: OS Independent",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "License :: OSI Approved :: MIT License",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.8",
+]
+
+[tool.poetry.scripts]
+# Entry points for the package https://python-poetry.org/docs/pyproject/#scripts
+"cfpi" = "cfpi.__main__:app"
+
+[tool.poetry.dependencies]
+python = "~3.8"
+
+cloudpickle = "^2.2.1"
+cvxpy = "^1.3.2"
+d4rl = {git = "https://github.com/ezhang7423/d4rl-installable.git"}
+ezjaxtyping = "^0.2.20"
+eztils = {extras = ["torch"], version = "^0.4.73"}
+gitpython = "^3.1.31"
+gtimer = "^1.0.0b5"
+gym = {extras = ["all"], version = "0.23.1"}
+matplotlib = "^3.7.1"
+mujoco = "^2.2.0"
+numpy = "^1.21.6"
+patchelf = "^0.17.2.1"
+portalocker = "^2.7.0"
+pydantic = "^1.9.1"
+pygame = "^2.1.0"
+pytest = "7.0.1"
+rich = "^13.4.2"
+swig = "^4.1.1"
+tensorboardx = "^2.6.1"
+torch = "^2.0.1"
+torchtyping = "^0.1.4"
+torchvision = "^0.15.2"
+tqdm = "^4.65.0"
+typer = {extras = ["all"], version = "^0.9.0"}
+types-python-dateutil = "^2.8.19.13"
+types-setuptools = "^68.0.0.1"
+wandb = "^0.15.5"
+
+[tool.poetry.dev-dependencies]
+bandit = "^1.7.1"
+black = {version = "22.3.0", allow-prereleases = true}
+coverage = "^6.1.2"
+coverage-badge = "^1.1.0"
+darglint = "^1.8.1"
+ipdb = "^0.13.13"
+ipython = "^7.34.0"
+isort = {extras = ["colors"], version = "^5.10.1"}
+mypy = "^0.910"
+mypy-extensions = "^0.4.3"
+pre-commit = "^2.15.0"
+pydocstyle = "^6.1.1"
+pyflyby = "^1.8.4"
+pylint = "^2.11.1"
+pytest-cov = "^3.0.0"
+pytest-html = "^3.1.1"
+pyupgrade = "^2.29.1"
+safety = "^2.3.5"
+
+[tool.black]
+# https://github.com/psf/black
+color = true
+line-length = 88
+target-version = ["py38"]
+
+exclude = '''
+/(
+ \.git
+ | \.hg
+ | \.mypy_cache
+ | \.tox
+ | \.venv
+ | _build
+ | buck-out
+ | build
+ | dist
+ | env
+ | venv
+)/
+'''
+
+[tool.isort]
+# https://github.com/timothycrosley/isort/
+line_length = 88
+py_version = 38
+
+color_output = true
+include_trailing_comma = true
+indent = 4
+known_typing = [
+ "typing",
+ "types",
+ "typing_extensions",
+ "mypy",
+ "mypy_extensions",
+]
+multi_line_output = 3
+profile = "black"
+sections = [
+ "FUTURE",
+ "TYPING",
+ "STDLIB",
+ "THIRDPARTY",
+ "FIRSTPARTY",
+ "LOCALFOLDER",
+]
+
+[tool.mypy]
+# https://mypy.readthedocs.io/en/latest/config_file.html#using-a-pyproject-toml-file
+color_output = true
+pretty = true
+python_version = "3.8"
+show_traceback = true
+
+allow_redefinition = false
+check_untyped_defs = true
+disallow_any_generics = true
+disallow_incomplete_defs = true
+ignore_missing_imports = true
+implicit_reexport = false
+no_implicit_optional = true
+show_column_numbers = true
+show_error_codes = true
+show_error_context = true
+strict_equality = true
+strict_optional = true
+warn_no_return = true
+warn_redundant_casts = true
+warn_return_any = true
+warn_unreachable = true
+warn_unused_configs = true
+warn_unused_ignores = true
+
+[tool.pytest.ini_options]
+# https://docs.pytest.org/en/6.2.x/customize.html#pyproject-toml
+# Directories that are not visited by pytest collector:
+doctest_optionflags = [
+ "NUMBER",
+ "NORMALIZE_WHITESPACE",
+ "IGNORE_EXCEPTION_DETAIL",
+]
+norecursedirs = [
+ "hooks",
+ "*.egg",
+ ".eggs",
+ "dist",
+ "build",
+ "docs",
+ ".tox",
+ ".git",
+ "__pycache__",
+]
+
+# Extra options:
+addopts = [
+ "--strict-markers",
+ "--tb=short",
+ "--doctest-modules",
+ "--doctest-continue-on-failure",
+]
+
+[tool.coverage.run]
+source = ["tests"]
+branch = true
+
+[tool.coverage.paths]
+source = ["cfpi"]
+
+# NOTE: coverage.py only reads [tool.coverage.*] tables from pyproject.toml.
+
+[tool.coverage.report]
+fail_under = 50
+show_missing = true
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..0de2237
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,111 @@
+absl-py==1.4.0 ; python_version >= "3.8" and python_version < "3.9"
+ale-py==0.7.5 ; python_version >= "3.8" and python_version < "3.9"
+appdirs==1.4.4 ; python_version >= "3.8" and python_version < "3.9"
+atomicwrites==1.4.1 ; python_version >= "3.8" and python_version < "3.9" and sys_platform == "win32"
+attrs==23.1.0 ; python_version >= "3.8" and python_version < "3.9"
+beartype==0.14.1 ; python_version >= "3.8" and python_version < "3.9"
+box2d-py==2.3.5 ; python_version >= "3.8" and python_version < "3.9"
+certifi==2023.7.22 ; python_version >= "3.8" and python_version < "3.9"
+cffi==1.15.1 ; python_version >= "3.8" and python_version < "3.9"
+charset-normalizer==3.2.0 ; python_version >= "3.8" and python_version < "3.9"
+click==8.1.6 ; python_version >= "3.8" and python_version < "3.9"
+cloudpickle==2.2.1 ; python_version >= "3.8" and python_version < "3.9"
+colorama==0.4.6 ; python_version >= "3.8" and python_version < "3.9" and (sys_platform == "win32" or platform_system == "Windows")
+contourpy==1.1.0 ; python_version >= "3.8" and python_version < "3.9"
+cvxpy==1.3.2 ; python_version >= "3.8" and python_version < "3.9"
+cycler==0.11.0 ; python_version >= "3.8" and python_version < "3.9"
+cython==3.0.0 ; python_version >= "3.8" and python_version < "3.9"
+d4rl @ git+https://github.com/ezhang7423/d4rl-installable.git@HEAD ; python_version >= "3.8" and python_version < "3.9"
+dm-control==1.0.14 ; python_version >= "3.8" and python_version < "3.9"
+dm-env==1.6 ; python_version >= "3.8" and python_version < "3.9"
+dm-tree==0.1.8 ; python_version >= "3.8" and python_version < "3.9"
+docker-pycreds==0.4.0 ; python_version >= "3.8" and python_version < "3.9"
+ecos==2.0.12 ; python_version >= "3.8" and python_version < "3.9"
+einops==0.6.1 ; python_version >= "3.8" and python_version < "3.9"
+executing==1.2.0 ; python_version >= "3.8" and python_version < "3.9"
+ezjaxtyping==0.2.20 ; python_version >= "3.8" and python_version < "3.9"
+eztils[torch]==0.4.73 ; python_version >= "3.8" and python_version < "3.9"
+filelock==3.12.2 ; python_version >= "3.8" and python_version < "3.9"
+fonttools==4.41.1 ; python_version >= "3.8" and python_version < "3.9"
+gitdb==4.0.10 ; python_version >= "3.8" and python_version < "3.9"
+gitpython==3.1.32 ; python_version >= "3.8" and python_version < "3.9"
+glfw==2.6.2 ; python_version >= "3.8" and python_version < "3.9"
+gtimer==1.0.0b5 ; python_version >= "3.8" and python_version < "3.9"
+gym-notices==0.0.8 ; python_version >= "3.8" and python_version < "3.9"
+gym==0.23.1 ; python_version >= "3.8" and python_version < "3.9"
+gym[all]==0.23.1 ; python_version >= "3.8" and python_version < "3.9"
+h5py==3.9.0 ; python_version >= "3.8" and python_version < "3.9"
+idna==3.4 ; python_version >= "3.8" and python_version < "3.9"
+imageio==2.31.1 ; python_version >= "3.8" and python_version < "3.9"
+importlib-metadata==6.8.0 ; python_version >= "3.8" and python_version < "3.9"
+importlib-resources==6.0.0 ; python_version >= "3.8" and python_version < "3.9"
+iniconfig==2.0.0 ; python_version >= "3.8" and python_version < "3.9"
+jinja2==3.1.2 ; python_version >= "3.8" and python_version < "3.9"
+kiwisolver==1.4.4 ; python_version >= "3.8" and python_version < "3.9"
+labmaze==1.0.6 ; python_version >= "3.8" and python_version < "3.9"
+lockfile==0.12.2 ; python_version >= "3.8" and python_version < "3.9"
+loguru==0.7.0 ; python_version >= "3.8" and python_version < "3.9"
+lxml==4.9.3 ; python_version >= "3.8" and python_version < "3.9"
+lz4==4.3.2 ; python_version >= "3.8" and python_version < "3.9"
+markdown-it-py==3.0.0 ; python_version >= "3.8" and python_version < "3.9"
+markupsafe==2.1.3 ; python_version >= "3.8" and python_version < "3.9"
+matplotlib==3.7.2 ; python_version >= "3.8" and python_version < "3.9"
+mdurl==0.1.2 ; python_version >= "3.8" and python_version < "3.9"
+mjrl @ git+https://github.com/aravindr93/mjrl@master ; python_version >= "3.8" and python_version < "3.9"
+mpmath==1.3.0 ; python_version >= "3.8" and python_version < "3.9"
+mujoco-py==1.50.1.68 ; python_version >= "3.8" and python_version < "3.9"
+mujoco==2.3.7 ; python_version >= "3.8" and python_version < "3.9"
+networkx==3.1 ; python_version >= "3.8" and python_version < "3.9"
+numpy==1.24.4 ; python_version >= "3.8" and python_version < "3.9"
+opencv-python==4.8.0.74 ; python_version >= "3.8" and python_version < "3.9"
+osqp==0.6.3 ; python_version >= "3.8" and python_version < "3.9"
+packaging==21.3 ; python_version >= "3.8" and python_version < "3.9"
+patchelf==0.17.2.1 ; python_version >= "3.8" and python_version < "3.9"
+pathtools==0.1.2 ; python_version >= "3.8" and python_version < "3.9"
+pillow==10.0.0 ; python_version >= "3.8" and python_version < "3.9"
+pluggy==1.2.0 ; python_version >= "3.8" and python_version < "3.9"
+portalocker==2.7.0 ; python_version >= "3.8" and python_version < "3.9"
+protobuf==4.23.4 ; python_version >= "3.8" and python_version < "3.9"
+psutil==5.9.5 ; python_version >= "3.8" and python_version < "3.9"
+py==1.11.0 ; python_version >= "3.8" and python_version < "3.9"
+pybullet==3.2.5 ; python_version >= "3.8" and python_version < "3.9"
+pycparser==2.21 ; python_version >= "3.8" and python_version < "3.9"
+pydantic==1.10.12 ; python_version >= "3.8" and python_version < "3.9"
+pygame==2.1.0 ; python_version >= "3.8" and python_version < "3.9"
+pygments==2.15.1 ; python_version >= "3.8" and python_version < "3.9"
+pyopengl==3.1.7 ; python_version >= "3.8" and python_version < "3.9"
+pyparsing==3.0.9 ; python_version >= "3.8" and python_version < "3.9"
+pytest==7.0.1 ; python_version >= "3.8" and python_version < "3.9"
+python-dateutil==2.8.2 ; python_version >= "3.8" and python_version < "3.9"
+pywin32==306 ; python_version >= "3.8" and python_version < "3.9" and platform_system == "Windows"
+pyyaml==6.0.1 ; python_version >= "3.8" and python_version < "3.9"
+qdldl==0.1.7.post0 ; python_version >= "3.8" and python_version < "3.9"
+requests==2.31.0 ; python_version >= "3.8" and python_version < "3.9"
+rich==13.4.2 ; python_version >= "3.8" and python_version < "3.9"
+scipy==1.10.1 ; python_version >= "3.8" and python_version < "3.9"
+scs==3.2.3 ; python_version >= "3.8" and python_version < "3.9"
+sentry-sdk==1.28.1 ; python_version >= "3.8" and python_version < "3.9"
+setproctitle==1.3.2 ; python_version >= "3.8" and python_version < "3.9"
+setuptools==68.0.0 ; python_version >= "3.8" and python_version < "3.9"
+shellingham==1.5.0.post1 ; python_version >= "3.8" and python_version < "3.9"
+six==1.16.0 ; python_version >= "3.8" and python_version < "3.9"
+smmap==5.0.0 ; python_version >= "3.8" and python_version < "3.9"
+swig==4.1.1 ; python_version >= "3.8" and python_version < "3.9"
+sympy==1.12 ; python_version >= "3.8" and python_version < "3.9"
+tensorboardx==2.6.1 ; python_version >= "3.8" and python_version < "3.9"
+termcolor==2.3.0 ; python_version >= "3.8" and python_version < "3.9"
+tomli==2.0.1 ; python_version >= "3.8" and python_version < "3.9"
+torch==2.0.1 ; python_version >= "3.8" and python_version < "3.9"
+torchtyping==0.1.4 ; python_version >= "3.8" and python_version < "3.9"
+torchvision==0.15.2 ; python_version >= "3.8" and python_version < "3.9"
+tqdm==4.65.0 ; python_version >= "3.8" and python_version < "3.9"
+typeguard==4.0.0 ; python_version >= "3.8" and python_version < "3.9"
+typer[all]==0.9.0 ; python_version >= "3.8" and python_version < "3.9"
+types-python-dateutil==2.8.19.14 ; python_version >= "3.8" and python_version < "3.9"
+types-setuptools==68.0.0.3 ; python_version >= "3.8" and python_version < "3.9"
+typing-extensions==4.7.1 ; python_version >= "3.8" and python_version < "3.9"
+urllib3==2.0.4 ; python_version >= "3.8" and python_version < "3.9"
+varname==0.11.2 ; python_version >= "3.8" and python_version < "3.9"
+wandb==0.15.7 ; python_version >= "3.8" and python_version < "3.9"
+win32-setctime==1.1.0 ; python_version >= "3.8" and python_version < "3.9" and sys_platform == "win32"
+zipp==3.16.2 ; python_version >= "3.8" and python_version < "3.9"
diff --git a/scripts/delmlock b/scripts/delmlock
new file mode 100755
index 0000000..69e6382
--- /dev/null
+++ b/scripts/delmlock
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+cd $(python -c 'import site; print(site.getsitepackages()[0])')/mujoco_py
+rm generated/*.lock
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..3c46a08
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,4 @@
+[darglint]
+# https://github.com/terrencepreilly/darglint
+strictness = long
+docstring_style = google