diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 36e949d..0000000 --- a/.gitmodules +++ /dev/null @@ -1,4 +0,0 @@ -[submodule "website-assets"] - path = website-assets - url = git@github.com:montrealrobotics/website-assets.git - branch = master diff --git a/404.html b/404.html new file mode 100644 index 0000000..4318e45 --- /dev/null +++ b/404.html @@ -0,0 +1,179 @@
+ [404.html: generated static error page; its HTML markup was lost in extraction. Recoverable page text:]
+ Robotics Group @ University of Montreal | 404 Not Found
+ Sorry! The server can't find that page.
+ Please consider trying to find what you need from the home page.
+ If you think something might be broken, please send an email to webmaster.
+ + + + + + + + + diff --git a/404.md b/404.md deleted file mode 100644 index 583b48a..0000000 --- a/404.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -layout: default -title: 404 Not Found ---- -Sorry! The server can't find that page. - -Please consider trying to find what you need from [the home page][home]. - -If you think something might be broken, please send an email to [webmaster][]. - -[home]: {{ site.base }}/ -[webmaster]: mailto:paulll@iro.umontreal.ca diff --git a/Gemfile.lock b/Gemfile.lock new file mode 100644 index 0000000..ca47030 --- /dev/null +++ b/Gemfile.lock @@ -0,0 +1,283 @@ +GEM + remote: https://rubygems.org/ + specs: + activesupport (7.0.7.2) + concurrent-ruby (~> 1.0, >= 1.0.2) + i18n (>= 1.6, < 2) + minitest (>= 5.1) + tzinfo (~> 2.0) + addressable (2.8.5) + public_suffix (>= 2.0.2, < 6.0) + bibtex-ruby (4.4.7) + latex-decode (~> 0.0) + citeproc (1.0.10) + namae (~> 1.0) + citeproc-ruby (1.1.14) + citeproc (~> 1.0, >= 1.0.9) + csl (~> 1.6) + coffee-script (2.4.1) + coffee-script-source + execjs + coffee-script-source (1.11.1) + colorator (1.1.0) + commonmarker (0.23.10) + concurrent-ruby (1.2.2) + csl (1.6.0) + namae (~> 1.0) + rexml + csl-styles (1.0.1.11) + csl (~> 1.0) + dnsruby (1.70.0) + simpleidn (~> 0.2.1) + em-websocket (0.5.3) + eventmachine (>= 0.12.9) + http_parser.rb (~> 0) + ethon (0.16.0) + ffi (>= 1.15.0) + eventmachine (1.2.7) + execjs (2.8.1) + faraday (2.7.10) + faraday-net_http (>= 2.0, < 3.1) + ruby2_keywords (>= 0.0.4) + faraday-net_http (3.0.2) + ffi (1.15.5) + forwardable-extended (2.6.0) + gemoji (3.0.1) + github-pages (228) + github-pages-health-check (= 1.17.9) + jekyll (= 3.9.3) + jekyll-avatar (= 0.7.0) + jekyll-coffeescript (= 1.1.1) + jekyll-commonmark-ghpages (= 0.4.0) + jekyll-default-layout (= 0.1.4) + jekyll-feed (= 0.15.1) + jekyll-gist (= 1.5.0) + jekyll-github-metadata (= 2.13.0) + jekyll-include-cache (= 0.2.1) + jekyll-mentions (= 1.6.0) + jekyll-optional-front-matter (= 0.3.2) + jekyll-paginate (= 1.1.0) + jekyll-readme-index (= 0.3.0) + jekyll-redirect-from (= 0.16.0) + jekyll-relative-links (= 0.6.1) + jekyll-remote-theme (= 0.4.3) + jekyll-sass-converter (= 1.5.2) + jekyll-seo-tag (= 2.8.0) + jekyll-sitemap (= 1.4.0) + jekyll-swiss (= 1.0.0) + jekyll-theme-architect (= 0.2.0) + jekyll-theme-cayman (= 0.2.0) + jekyll-theme-dinky (= 0.2.0) + jekyll-theme-hacker (= 0.2.0) + jekyll-theme-leap-day (= 0.2.0) + jekyll-theme-merlot (= 0.2.0) + jekyll-theme-midnight (= 0.2.0) + jekyll-theme-minimal (= 0.2.0) + jekyll-theme-modernist (= 0.2.0) + jekyll-theme-primer (= 0.6.0) + jekyll-theme-slate (= 0.2.0) + jekyll-theme-tactile (= 0.2.0) + jekyll-theme-time-machine (= 0.2.0) + jekyll-titles-from-headings (= 0.5.3) + jemoji (= 0.12.0) + kramdown (= 2.3.2) + kramdown-parser-gfm (= 1.1.0) + liquid (= 4.0.4) + mercenary (~> 0.3) + minima (= 2.5.1) + nokogiri (>= 1.13.6, < 2.0) + rouge (= 3.26.0) + terminal-table (~> 1.4) + github-pages-health-check (1.17.9) + addressable (~> 2.3) + dnsruby (~> 1.60) + octokit (~> 4.0) + public_suffix (>= 3.0, < 5.0) + typhoeus (~> 1.3) + html-pipeline (2.14.3) + activesupport (>= 2) + nokogiri (>= 1.4) + http_parser.rb (0.8.0) + i18n (1.14.1) + concurrent-ruby (~> 1.0) + jekyll (3.9.3) + addressable (~> 2.4) + colorator (~> 1.0) + em-websocket (~> 0.5) + i18n (>= 0.7, < 2) + jekyll-sass-converter (~> 1.0) + jekyll-watch (~> 2.0) + kramdown (>= 1.17, < 3) + liquid (~> 4.0) + mercenary (~> 0.3.3) + pathutil (~> 0.9) + rouge (>= 1.7, < 4) + safe_yaml (~> 1.0) + jekyll-avatar (0.7.0) + jekyll (>= 3.0, < 5.0) + 
jekyll-coffeescript (1.1.1) + coffee-script (~> 2.2) + coffee-script-source (~> 1.11.1) + jekyll-commonmark (1.4.0) + commonmarker (~> 0.22) + jekyll-commonmark-ghpages (0.4.0) + commonmarker (~> 0.23.7) + jekyll (~> 3.9.0) + jekyll-commonmark (~> 1.4.0) + rouge (>= 2.0, < 5.0) + jekyll-default-layout (0.1.4) + jekyll (~> 3.0) + jekyll-feed (0.15.1) + jekyll (>= 3.7, < 5.0) + jekyll-gist (1.5.0) + octokit (~> 4.2) + jekyll-github-metadata (2.13.0) + jekyll (>= 3.4, < 5.0) + octokit (~> 4.0, != 4.4.0) + jekyll-include-cache (0.2.1) + jekyll (>= 3.7, < 5.0) + jekyll-mentions (1.6.0) + html-pipeline (~> 2.3) + jekyll (>= 3.7, < 5.0) + jekyll-optional-front-matter (0.3.2) + jekyll (>= 3.0, < 5.0) + jekyll-paginate (1.1.0) + jekyll-readme-index (0.3.0) + jekyll (>= 3.0, < 5.0) + jekyll-redirect-from (0.16.0) + jekyll (>= 3.3, < 5.0) + jekyll-relative-links (0.6.1) + jekyll (>= 3.3, < 5.0) + jekyll-remote-theme (0.4.3) + addressable (~> 2.0) + jekyll (>= 3.5, < 5.0) + jekyll-sass-converter (>= 1.0, <= 3.0.0, != 2.0.0) + rubyzip (>= 1.3.0, < 3.0) + jekyll-sass-converter (1.5.2) + sass (~> 3.4) + jekyll-scholar (5.16.0) + bibtex-ruby (~> 4.0, >= 4.0.13) + citeproc-ruby (~> 1.0) + csl-styles (~> 1.0) + jekyll (~> 3.0) + jekyll-seo-tag (2.8.0) + jekyll (>= 3.8, < 5.0) + jekyll-sitemap (1.4.0) + jekyll (>= 3.7, < 5.0) + jekyll-swiss (1.0.0) + jekyll-theme-architect (0.2.0) + jekyll (> 3.5, < 5.0) + jekyll-seo-tag (~> 2.0) + jekyll-theme-cayman (0.2.0) + jekyll (> 3.5, < 5.0) + jekyll-seo-tag (~> 2.0) + jekyll-theme-dinky (0.2.0) + jekyll (> 3.5, < 5.0) + jekyll-seo-tag (~> 2.0) + jekyll-theme-hacker (0.2.0) + jekyll (> 3.5, < 5.0) + jekyll-seo-tag (~> 2.0) + jekyll-theme-leap-day (0.2.0) + jekyll (> 3.5, < 5.0) + jekyll-seo-tag (~> 2.0) + jekyll-theme-merlot (0.2.0) + jekyll (> 3.5, < 5.0) + jekyll-seo-tag (~> 2.0) + jekyll-theme-midnight (0.2.0) + jekyll (> 3.5, < 5.0) + jekyll-seo-tag (~> 2.0) + jekyll-theme-minimal (0.2.0) + jekyll (> 3.5, < 5.0) + jekyll-seo-tag (~> 2.0) + jekyll-theme-modernist (0.2.0) + jekyll (> 3.5, < 5.0) + jekyll-seo-tag (~> 2.0) + jekyll-theme-primer (0.6.0) + jekyll (> 3.5, < 5.0) + jekyll-github-metadata (~> 2.9) + jekyll-seo-tag (~> 2.0) + jekyll-theme-slate (0.2.0) + jekyll (> 3.5, < 5.0) + jekyll-seo-tag (~> 2.0) + jekyll-theme-tactile (0.2.0) + jekyll (> 3.5, < 5.0) + jekyll-seo-tag (~> 2.0) + jekyll-theme-time-machine (0.2.0) + jekyll (> 3.5, < 5.0) + jekyll-seo-tag (~> 2.0) + jekyll-titles-from-headings (0.5.3) + jekyll (>= 3.3, < 5.0) + jekyll-watch (2.2.1) + listen (~> 3.0) + jemoji (0.12.0) + gemoji (~> 3.0) + html-pipeline (~> 2.2) + jekyll (>= 3.0, < 5.0) + kramdown (2.3.2) + rexml + kramdown-parser-gfm (1.1.0) + kramdown (~> 2.0) + latex-decode (0.4.0) + liquid (4.0.4) + listen (3.8.0) + rb-fsevent (~> 0.10, >= 0.10.3) + rb-inotify (~> 0.9, >= 0.9.10) + mercenary (0.3.6) + minima (2.5.1) + jekyll (>= 3.5, < 5.0) + jekyll-feed (~> 0.9) + jekyll-seo-tag (~> 2.1) + minitest (5.20.0) + namae (1.1.1) + nokogiri (1.15.4-x86_64-linux) + racc (~> 1.4) + octokit (4.25.1) + faraday (>= 1, < 3) + sawyer (~> 0.9) + pathutil (0.16.2) + forwardable-extended (~> 2.6) + public_suffix (4.0.7) + racc (1.7.1) + rb-fsevent (0.11.2) + rb-inotify (0.10.1) + ffi (~> 1.0) + rexml (3.2.6) + rouge (3.26.0) + ruby2_keywords (0.0.5) + rubyzip (2.3.2) + safe_yaml (1.0.5) + sass (3.7.4) + sass-listen (~> 4.0.0) + sass-listen (4.0.0) + rb-fsevent (~> 0.9, >= 0.9.4) + rb-inotify (~> 0.9, >= 0.9.7) + sawyer (0.9.2) + addressable (>= 2.3.5) + faraday (>= 0.17.3, < 3) + simpleidn (0.2.1) + unf 
(~> 0.1.4) + terminal-table (1.8.0) + unicode-display_width (~> 1.1, >= 1.1.1) + typhoeus (1.4.0) + ethon (>= 0.9.0) + tzinfo (2.0.6) + concurrent-ruby (~> 1.0) + unf (0.1.4) + unf_ext + unf_ext (0.0.8.2) + unicode-display_width (1.8.0) + unicode_utils (1.4.0) + +PLATFORMS + x86_64-linux + +DEPENDENCIES + github-pages + jekyll + jekyll-paginate + jekyll-scholar + jemoji + unicode_utils + +BUNDLED WITH + 2.4.7 diff --git a/Makefile b/Makefile deleted file mode 100644 index 88f0c4b..0000000 --- a/Makefile +++ /dev/null @@ -1,25 +0,0 @@ -# targets that aren't filenames -.PHONY: all clean deploy build serve - -all: build - -build: - bundle exec jekyll build - -install: - bundle install - -# you can configure these at the shell, e.g.: -# SERVE_PORT=5001 make serve -SERVE_HOST ?= 127.0.0.1 -SERVE_PORT ?= 5000 - -serve: - bundle exec jekyll serve --port $(SERVE_PORT) --host $(SERVE_HOST) - -clean: - $(RM) -r _site - - -deploy: clean build - ./bin/deploy diff --git a/README.md b/README.md deleted file mode 100644 index 8b6cc9b..0000000 --- a/README.md +++ /dev/null @@ -1,125 +0,0 @@ -Research Group Web Site Template -================================ - -This is a [Jekyll][]-based Web site intended for research groups. Your group should be able to get up and running with minimal fuss. - -

-[screenshot of the template]

- -This project originated at the University of Washington. You can see the machinery working live at [our site][sampa]. - -This work is licensed under a [Creative Commons Attribution-NonCommercial 4.0 International License][license]. - -[sampa]: http://sampa.cs.washington.edu/ -[license]: https://creativecommons.org/licenses/by-nc/4.0/ - - -Features --------- - -* Thanks to [Jekyll][], content is just text files. So even faculty should be able to figure it out. -* Publications list generated from BibTeX. -* Personnel list. Organize your professors, students, staff, and alumni. -* Combined news stream and blog posts. -* Easily extensible navigation bar. -* Responsive (mobile-ready) design based on [Bootstrap][]. - -[Bootstrap]: http://getbootstrap.com/ - - -Setup ------ - -1. Install the dependencies. You will need [Python][], [bibble][] (`pip install bibble`), and [Jekyll][] (`gem install jekyll`). -2. [Fork][] this repository on GitHub. -3. Clone the fork to your own machine: `git clone git@github.com:yourgroup/research-group-web.git`. -4. Add an "upstream" remote for the original repository so you can stay abreast of bugfixes: `git remote add upstream git://github.com/uwsampa/research-group-web.git`. -5. Customize. Start with the `_config.yml` file, where you enter the name of the site and its URL. -6. Type `make` to build the site and then run `make serve` to view your site. -7. Keep adding content. See below for instructions for each of the various sections. -8. Periodically pull from the upstream repository: `git pull upstream master`. - -[Python]: https://www.python.org/ -[Fork]: https://github.com/uwsampa/research-group-web/fork - - -Publication List ----------------- - -The list of publications is in `bib/pubs.bib`. Typing `make` will generate `pubs.html`, which contains a pretty, sorted HTML-formatted list of papers. The public page, `publications.html`, also has a link to download the original BibTeX. - - -News Items and Blog Posts -------------------------- - -For both long-form blog posts and short news updates, we use Jekyll's blogging system. To post a new item of either type, you create a file in the `_posts` directory using the naming convention `YYYY-MM-DD-title-for-url.md`. The date part of the filename always matters; the title part is currently only used for full blog posts (but is still required for news updates). - -The file must begin with [YAML front matter][yfm]. For news updates, use this: - - --- - layout: post - shortnews: true - --- - -For full blog posts, use this format: - - --- - layout: post - title: "Some Great Title Here" - --- - -And concoct a page title for your post. The body of the post goes after the `---` in either case. - -You can also customize the icon that is displayed on the news feed. By default it's `newspaper-o`. We use icons from the [FontAwesome][fa] icon set. - -[yfm]: http://jekyllrb.com/docs/frontmatter/ -[fa]: http://fontawesome.io/icons/ - -Projects --------- - -To create a project, just create a markdown file in the `_projects` folder. Here are the things you can put in the YAML frontmatter: - -- `title:` The project title. -- `notitle:` Set this to `true` if you don't want a title displayed on the project card. Optional. -- `description:` The text shown in the project card. It supports markdown. -- `people:` The people working on the project. This is a list of keys from the `_data/people.yml` file. -- `layout: project` This sets the layout of the actual project page. It should be set to `project`. 
-- `image:` The URL of an image for the project. This is shown on both the project page and the project card. Optional. -- `last-updated:` Date in the format of `YYYY-MM-DD`. The project cards are sorted by this, most recent first. -- `status: inactive` Set this to `inactive` if you don't want the project to appear on the front page. Just ignore it otherwise. -- `link:` Set this to an external URL if this project has a page somewhere else on the web. If you don't have a `link:`, then the content of this markdown file (below the YAML frontmatter) will be this project's page. -- `no-link: true` Set this if you just don't want a project page for your project. - -Personnel ---------- - -People are listed in a [YAML][] file in `_data/people.yml`. You can list the name, link, bio, and role of each person. Roles (e.g., "Faculty", "Staff", and "Students") are defined in `_config.yml`. - -[YAML]: https://en.wikipedia.org/wiki/YAML - - -Building --------- - -The requirements for building the site are: - -* [Jekyll][]: run `gem install jekyll` -* [bibble][]: available on `pip` -* ssh and rsync, only if you want to deploy directly. - -`make` compiles the bibliography and the website content to the `_site` -directory. To preview the site, run `jekyll serve` and head to -http://0.0.0.0:5000. - - -Deploying to Your Server ------------------------- - -To set up deployments, edit the Makefile and look for the lines where `HOST` and `DIR` are defined. Change these to the host and directory where your HTML files should be copied. - -To upload a new version of the site via rsync over ssh, type `make deploy`. A web hook does this automatically when you push to GitHub. Be aware that the Makefile is configured to have rsync delete stray files from the destination directory. - -[Jekyll]: http://jekyllrb.com/ -[bibble]: https://github.com/sampsyo/bibble/ diff --git a/_bibliography/papers.bib b/_bibliography/papers.bib deleted file mode 100644 index ecaeab7..0000000 --- a/_bibliography/papers.bib +++ /dev/null @@ -1,567 +0,0 @@ -@article{conceptfusion, - author = {Jatavallabhula, {Krishna Murthy} and Kuwajerwala, Alihusein and Gu, Qiao and Omama, Mohd and Chen, Tao and Li, Shuang and Iyer, Ganesh and Saryazdi, Soroush and Keetha, Nikhil and Tewari, Ayush and Tenenbaum, {Joshua B.} and {de Melo}, {Celso Miguel} and Krishna, Madhava and Paull, Liam and Shkurti, Florian and Torralba, Antonio}, - title = {ConceptFusion: Open-set Multimodal 3D Mapping}, - journal = {Robotics: Science and Systems (RSS)}, - year = {2023}, - image = {papers/conceptfusion.gif}, - arxiv = {https://arxiv.org/abs/2302.07241}, - projectpage = {https://concept-fusion.github.io/}, - video = {https://www.youtube.com/watch?v=rkXgws8fiDs}, - abstract = { - Building 3D maps of the environment is central to robot navigation, planning, and interaction with objects in a scene. Most existing approaches that integrate semantic concepts with 3D maps largely remain confined to the closed-set setting: they can only reason about a finite set of concepts, pre-defined at training time. Further, these maps can only be queried using class labels, or in recent work, using text prompts. -We address both these issues with ConceptFusion, a scene representation that is (i) fundamentally open-set, enabling reasoning beyond a closed set of concepts and (ii) inherently multimodal, enabling a diverse range of possible queries to the 3D map, from language, to images, to audio, to 3D geometry, all working in concert.
ConceptFusion leverages the open-set capabilities of today's foundation models pre-trained on internet-scale data to reason about concepts across modalities such as natural language, images, and audio. We demonstrate that pixel-aligned open-set features can be fused into 3D maps via traditional SLAM and multi-view fusion approaches. This enables effective zero-shot spatial reasoning, not needing any additional training or finetuning, and retains long-tailed concepts better than supervised approaches, outperforming them by more than a 40 percent margin in 3D IoU. We extensively evaluate ConceptFusion on a number of real-world datasets, simulated home environments, a real-world tabletop manipulation task, and an autonomous driving platform. We showcase new avenues for blending foundation models with 3D open-set multimodal mapping. - }, - -} - -@inproceedings{quadsoccer, - author = {Yandong Ji and Zhongyu Li* and Yinan Sun and Xue Bin Peng and Sergey Levine and Glen Berseth and Koushil Sreenath}, - title = {Hierarchical Reinforcement Learning for Precise Soccer Shooting Skills using a Quadrupedal Robot}, - booktitle = {Proc. IEEE/RSJ Intl Conf on Intelligent Robots and Systems (IROS 2022)}, - arxiv = {https://arxiv.org/abs/2208.01160}, - image = {papers/a-reinforcement-learni-1.jpg}, - projectpage = {https://mila.quebec/en/article/anymorph-learning-transferable-policies-by-inferring-agent-morphology/}, - video = {https://www.youtube.com/watch?v=bteipHcJ8BM}, - abstract = {We address the problem of enabling quadrupedal robots to perform precise shooting skills in the real world using reinforcement learning. Developing algorithms to enable a legged robot to shoot a soccer ball to a given target is a challenging problem that combines robot motion control and planning into one task. To solve this problem, we need to consider the dynamics limitation and motion stability during the control of a dynamic legged robot. Moreover, we need to consider motion planning to shoot the hard-to-model deformable ball rolling on the ground with uncertain friction to a desired location. In this paper, we propose a hierarchical framework that leverages deep reinforcement learning to train (a) a robust motion control policy that can track arbitrary motions and (b) a planning policy to decide the desired kicking motion to shoot a soccer ball to a target. We deploy the proposed framework on an A1 quadrupedal robot and enable it to accurately shoot the ball to random targets in the real world.}, - year={2022} -} - -@article{Traboco2022, - title={AnyMorph: Learning Transferable Policies By Inferring Agent Morphology}, - author={Brandon Trabucco and Mariano Phielipp and Glen Berseth}, - journal = {International Conference on Machine Learning}, - arxiv = {https://arxiv.org/abs/2206.12279}, - image = {papers/anymorph_prompt.gif}, - projectpage = {https://mila.quebec/en/article/anymorph-learning-transferable-policies-by-inferring-agent-morphology/}, - abstract = {The prototypical approach to reinforcement learning involves training policies tailored to a particular agent from scratch for every new morphology. Recent work aims to eliminate the re-training of policies by investigating whether a morphology-agnostic policy, trained on a diverse set of agents with similar task objectives, can be transferred to new agents with unseen morphologies without re-training. This is a challenging problem that required previous approaches to use hand-designed descriptions of the new agent's morphology.
Instead of hand-designing this description, we propose a data-driven method that learns a representation of morphology directly from the reinforcement learning objective. Ours is the first reinforcement learning algorithm that can train a policy to generalize to new agent morphologies without requiring a description of the agent's morphology in advance. We evaluate our approach on the standard benchmark for agent-agnostic control, and improve over the current state of the art in zero-shot generalization to new agents. Importantly, our method attains good performance without an explicit description of morphology. }, - year={2022} -} - -@inproceedings{paull2016unified, - title={A Unified Resource-Constrained Framework for Graph SLAM}, - author={Paull, Liam and Huang, Guoquan and Leonard, John J}, - booktitle={IEEE International Conference on Robotics and Automation (ICRA)}, - pages={1--8}, - month={May}, - year={2016}, - image = {papers/paull2016unified.png}, - arxiv = {http://liampaull.ca/publications/Paull_ICRA_2016.pdf}, - code = {https://github.com/liampaull/Resource_Constrained}, - slides = {http://liampaull.ca/publications/Paull_ICRA_2016_presentation.pptx}, - poster = {http://liampaull.ca/publications/Paull_ICRA_2016_poster.pptx}, - abstract = {Graphical methods have proven an extremely useful tool employed by the mobile robotics community to frame estimation problems. Incremental solvers are able to process incoming sensor data and produce maximum a posteriori (MAP) estimates in realtime by exploiting the natural sparsity within the graph for reasonable-sized problems. However, to enable truly longterm operation in prior unknown environments requires algorithms whose computation, memory, and bandwidth (in the case of distributed systems) requirements scale constantly with time and environment size. Some recent approaches have addressed this problem through a two-step process - first the variables selected for removal are marginalized which induces density, and then the result is sparsified to maintain computational efficiency. Previous literature generally addresses only one of these two components. In this work, we attempt to explicitly connect all of the aforementioned resource constraint requirements by considering the node removal and sparsification pipeline in its entirety. We formulate the node selection problem as a minimization problem over the penalty to be paid in the resulting sparsification. As a result, we produce node subset selection strategies that are optimal in terms of minimizing the impact, in terms of Kullback-Leibler divergence (KLD), of approximating the dense distribution by a sparse one. We then show that one instantiation of this problem yields a computationally tractable formulation. Finally, we evaluate the method on standard datasets and show that the KLD is minimized as compared to other commonly-used heuristic node selection techniques.}, -} - -@inproceedings{mu2016iros, - title={SLAM with objects using a nonparametric pose graph}, - author={Mu, Beipeng and Liu, Shih-Yuan and Paull, Liam and Leonard, John and How, Jonathan P}, - booktitle={IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, - year={2016}, - month={Oct}, - image = {papers/mu2016iros.png}, - arxiv = {1704.05959}, - video = {https://www.youtube.com/watch?v=gOwMiFlj8KU}, - abstract = {Mapping and self-localization in unknown environments are fundamental capabilities in many robotic applications.
These tasks typically involve the identification of objects as unique features or landmarks, which requires the objects both to be detected and then assigned a unique identifier that can be maintained when viewed from different perspectives and in different images. The data association and simultaneous localization and mapping (SLAM) problems are, individually, well-studied in the literature. But these two problems are inherently tightly coupled, and that has not been well-addressed. Without accurate SLAM, possible data associations are combinatorial and become intractable easily. Without accurate data association, the error of SLAM algorithms diverge easily. This paper proposes a novel nonparametric pose graph that models data association and SLAM in a single framework. An algorithm is further introduced to alternate between inferring data association and performing SLAM. Experimental results show that our approach has the new capability of associating object detections and localizing objects at the same time, leading to significantly better performance on both the data association and SLAM problems than achieved by considering only one and ignoring imperfections in the other.}, -} - -@inproceedings{paull2017duckietown, - title={Duckietown: an open, inexpensive and flexible platform for autonomy education and research}, - author={Paull, Liam and Tani, Jacopo and Ahn, Heejin and Alonso-Mora, Javier and Carlone, Luca and Cap, Michal and Chen, Yu Fan and Choi, Changhyun and Dusek, Jeff and Fang, Yajun and others}, - booktitle={IEEE International Conference on Robotics and Automation (ICRA)}, - pages={1497--1504}, - year={2017}, - month={May}, - image = {papers/paull2017duckietown.png}, - arxiv = {http://www.mit.edu/~hangzhao/papers/duckietown.pdf}, - abstract = {Duckietown is an open, inexpensive and flexible platform for autonomy education and research. The platform comprises small autonomous vehicles (“Duckiebots”) built from off-the-shelf components, and cities (“Duckietowns”) complete with roads, signage, traffic lights, obstacles, and citizens (duckies) in need of transportation. The Duckietown platform offers a wide range of functionalities at a low cost. Duckiebots sense the world with only one monocular camera and perform all processing onboard with a Raspberry Pi 2, yet are able to: follow lanes while avoiding obstacles, pedestrians (duckies) and other Duckiebots, localize within a global map, navigate a city, and coordinate with other Duckiebots to avoid collisions. Duckietown is a useful tool since educators and researchers can save money and time by not having to develop all of the necessary supporting infrastructure and capabilities. All materials are available as open source, and the hope is that others in the community will adopt the platform for education and research.}, -} - -@inproceedings{rosman2017hybrid, - title={Hybrid control and learning with coresets for autonomous vehicles}, - author={Rosman, Guy and Paull, Liam and Rus, Daniela}, - booktitle={IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, - pages={6894--6901}, - year={2017}, - month={Oct}, - arxiv = {http://people.csail.mit.edu/rosman/papers/ctrl_embedding.pdf}, - image = {papers/rosman2017hybrid.png}, - abstract = {Modern autonomous systems such as driverless vehicles need to safely operate in a wide range of conditions. A potential solution is to employ a hybrid systems approach, where safety is guaranteed in each individual mode within the system. 
This offsets complexity and responsibility from the individual controllers onto the complexity of determining discrete mode transitions. In this work we propose an efficient framework based on recursive neural networks and coreset data summarization to learn the transitions between an arbitrary number of controller modes that can have arbitrary complexity. Our approach allows us to efficiently gather annotation data from the large-scale datasets that are required to train such hybrid nonlinear systems to be safe under all operating conditions, favoring underexplored parts of the data. We demonstrate the construction of the embedding, and efficient detection of switching points for autonomous and nonautonomous car data. We further show how our approach enables efficient sampling of training data, to further improve either our embedding or the controllers}, -} - -@inproceedings{paull2018probabilistic, - title={Probabilistic cooperative mobile robot area coverage and its application to autonomous seabed mapping}, - author={Paull, Liam and Seto, Mae and Leonard, John J and Li, Howard}, - journal={The International Journal of Robotics Research}, - volume={37}, - number={1}, - pages={21--45}, - year={2018}, - image = {papers/paull2018coverage.png}, - abstract = {There are many applications that require mobile robots to autonomously cover an entire area with a sensor or end effector. The vast majority of the literature on this subject is focused on addressing path planning for area coverage under the assumption that the robot’s pose is known or that error is bounded. In this work, we remove this assumption and develop a completely probabilistic representation of coverage. We show that coverage is guaranteed as long as the robot pose estimates are consistent, a much milder assumption than zero or bounded error. After formally connecting robot sensor uncertainty with area coverage, we propose an adaptive sliding window filter pose estimator that provides a close approximation to the full maximum a posteriori estimate with a computation cost that is bounded over time. Subsequently, an adaptive planning strategy is presented that automatically exploits conditions of low vehicle uncertainty to more efficiently cover an area. We further extend this approach to the multi-robot case where robots can communicate through a (possibly faulty and low-bandwidth) channel and make relative measurements of one another. In this case, area coverage is achieved more quickly since the uncertainty over the robots’ trajectories is reduced. We apply the framework to the scenario of mapping an area of seabed with an autonomous underwater vehicle. Experimental results support the claim that our method achieves guaranteed complete coverage notwithstanding poor navigational sensors and that resulting path lengths required to cover the entire area are shortest using the proposed cooperative and adaptive approach.}, -} - -@inproceedings{ort2018icra, - title={Autonomous Vehicle Navigation in Rural Environments without Detailed Prior Maps}, - author={Ort, Teddy and Paull, Liam and Rus, Daniela}, - booktitle={IEEE International Conference on Robotics and Automation (ICRA)}, - year={2018}, - month={May}, - image = {papers/ort2018icra.png}, - arxiv = {https://toyota.csail.mit.edu/sites/default/files/documents/papers/ICRA2018_AutonomousVehicleNavigationRuralEnvironment.pdf}, - abstract = {State-of-the-art autonomous driving systems rely heavily on detailed and highly accurate prior maps. 
However, outside of small urban areas, it is very challenging to build, store, and transmit detailed maps since the spatial scales are so large. Furthermore, maintaining detailed maps of large rural areas can be impracticable due to the rapid rate at which these environments can change. This is a significant limitation for the widespread applicability of autonomous driving technology, which has the potential for an incredibly positive societal impact. In this paper, we address the problem of autonomous navigation in rural environments through a novel mapless driving framework that combines sparse topological maps for global navigation with a sensor-based perception system for local navigation. First, a local navigation goal within the sensor view of the vehicle is chosen as a waypoint leading towards the global goal. Next, the local perception system generates a feasible trajectory in the vehicle frame to reach the waypoint while abiding by the rules of the road for the segment being traversed. These trajectories are updated to remain in the local frame using the vehicle’s odometry and the associated uncertainty based on the least-squares residual and a recursive filtering approach, which allows the vehicle to navigate road networks reliably, and at high speed, without detailed prior maps. We demonstrate the performance of the system on a full-scale autonomous vehicle navigating in a challenging rural environment and benchmark the system on a large amount of collected data.}, -} - -@inproceedings{mai2018local, - title={Local Positioning System Using UWB Range Measurements for an Unmanned Blimp}, - author={Mai, Vincent and Kamel, Mina and Krebs, Matthias and Schaffner, Andreas and Meier, Daniel and Paull, Liam and Siegwart, Roland}, - journal={IEEE Robotics and Automation Letters}, - volume={3}, - number={4}, - pages={2971--2978}, - year={2018}, - month={October}, - image = {papers/mai2018blimp.png}, - arxiv = {https://ieeexplore.ieee.org/document/8392389}, - abstract = {Unmanned blimps are a safe and reliable alternative to conventional drones when flying above people. On-board real-time tracking of their pose and velocities is a necessary step toward autonomous navigation. There is a need for an easily deployable technology that is able to accurately and robustly estimate the pose and velocities of a blimp in 6 DOF, as well as unexpected applied forces and torques, in an uncontrolled environment. We present two multiplicative extended Kalman filters using ultrawideband radio sensors and a gyroscope to address this challenge. One filter is updated using a dynamics model of the blimp, whereas the other uses a constant speed model. We describe a set of experiments in which these estimators have been implemented on an embedded flight controller. They were tested and compared in accuracy and robustness in a hardware-in-loop simulation as well as on a real blimp. 
This approach can be generalized to any lighter than air robot to track it with the necessary accuracy, precision, and robustness to allow autonomous navigation.}, -} - -@inproceedings{CTCNet, - title = {Geometric Consistency for Self-Supervised End-to-End Visual Odometry}, - author = {Iyer, Ganesh and Murthy, J Krishna and Gunshi Gupta, K and Paull, Liam}, - booktitle = {CVPR Workshop on Deep Learning for Visual SLAM}, - month = {June}, - year = {2018}, - arxiv = {1804.03789}, - projectpage = {https://krrish94.github.io/CTCNet-release/}, - image = {papers/ctcnet.png}, - abstract = {With the success of deep learning based approaches in tackling challenging problems in computer vision, a wide range of deep architectures have recently been proposed for the task of visual odometry (VO) estimation. Most of these proposed solutions rely on supervision, which requires the acquisition of precise ground-truth camera pose information, collected using expensive motion capture systems or high-precision IMU/GPS sensor rigs. In this work, we propose an unsupervised paradigm for deep visual odometry learning. We show that using a noisy teacher, which could be a standard VO pipeline, and by designing a loss term that enforces geometric consistency of the trajectory, we can train accurate deep models for VO that do not require ground-truth labels. We leverage geometry as a self-supervisory signal and propose "Composite Transformation Constraints (CTCs)", that automatically generate supervisory signals for training and enforce geometric consistency in the VO estimate. We also present a method of characterizing the uncertainty in VO estimates thus obtained. To evaluate our VO pipeline, we present exhaustive ablation studies that demonstrate the efficacy of end-to-end, self-supervised methodologies to train deep models for monocular VO. We show that leveraging concepts from geometry and incorporating them into the training of a recurrent neural network results in performance competitive to supervised deep VO methods.}, -} - -@inproceedings{amini2018learning, - title={Learning steering bounds for parallel autonomous systems}, - author={Amini, Alexander and Paull, Liam and Balch, Thomas and Karaman, Sertac and Rus, Daniela}, - booktitle={IEEE International Conference on Robotics and Automation (ICRA)}, - year={2018}, - month={May}, - arxiv = {https://dspace.mit.edu/handle/1721.1/117632}, - image = {papers/amini2018parallel.png}, - abstract = {Deep learning has been successfully applied to “end-to-end” learning of the autonomous driving task, where a deep neural network learns to predict steering control commands from camera data input. While these previous works support reactionary control, the representation learned is not usable for higher-level decision making required for autonomous navigation. This paper tackles the problem of learning a representation to predict a continuous control probability distribution, and thus steering control options and bounds for those options, which can be used for autonomous navigation. Each mode of the distribution encodes a possible macro-action that the system could execute at that instant, and the covariances of the modes place bounds on safe steering control values. Our approach has the added advantage of being trained on unlabeled data collected from inexpensive cameras. 
The deep neural network based algorithm generates a probability distribution over the space of steering angles, from which we leverage Variational Bayesian methods to extract a mixture model and compute the different possible actions in the environment. A bound, which the autonomous vehicle must respect in our parallel autonomy setting, is then computed for each of these actions. We evaluate our approach on a challenging dataset containing a wide variety of driving conditions, and show that our algorithm is capable of parameterizing Gaussian Mixture Models for possible actions, and extract steering bounds with a mean error of only 2 degrees. Additionally, we demonstrate our system working on a full scale autonomous vehicle and evaluate its ability to successful handle various different parallel autonomy situations.}, -} - -@inproceedings{bharadhwaj2018data, - title={A Data-Efficient Framework for Training and Sim-to-Real Transfer of Navigation Policies}, - author={Bharadhwaj, Homanga and Wang, Zihan and Bengio, Yoshua and Paull, Liam}, - booktitle={IEEE International Conference on Robotics and Automation (ICRA)}, - year={2019}, - month={May}, - arxiv = {1810.04871}, - image = {papers/homanga2019icra.png}, - abstract = {Learning effective visuomotor policies for robots purely from data is challenging, but also appealing since a learning-based system should not require manual tuning or calibration. In the case of a robot operating in a real environment the training process can be costly, time-consuming, and even dangerous since failures are common at the start of training. For this reason, it is desirable to be able to leverage \textit{simulation} and \textit{off-policy} data to the extent possible to train the robot. In this work, we introduce a robust framework that plans in simulation and transfers well to the real environment. Our model incorporates a gradient-descent based planning module, which, given the initial image and goal image, encodes the images to a lower dimensional latent state and plans a trajectory to reach the goal. The model, consisting of the encoder and planner modules, is trained through a meta-learning strategy in simulation first. We subsequently perform adversarial domain transfer on the encoder by using a bank of unlabelled but random images from the simulation and real environments to enable the encoder to map images from the real and simulated environments to a similarly distributed latent representation. By fine tuning the entire model (encoder + planner) with far fewer real world expert demonstrations, we show successful planning performances in different navigation tasks.}, -} - -@inproceedings{sai2019dal, - title = {Deep Active Localization}, - author = {Krishna, Sai and Seo, Keehong and Bhatt, Dhaivat and Mai, Vincent and Murthy, Krishna and Paull, Liam}, - booktitle = {IEEE Robotics and Automation Letters (RAL)}, - year = {2019}, - month = {May}, - arxiv = {1903.01669}, - code = {https://github.com/montrealrobotics/dal}, - image = {papers/dal.png}, - abstract = {Active localization is the problem of generating robot actions that allow it to maximally disambiguate its pose within a reference map. Traditional approaches to this use an information-theoretic criterion for action selection and hand-crafted perceptual models. In this work we propose an end-to-end differentiable method for learning to take informative actions that is trainable entirely in simulation and then transferable to real robot hardware with zero refinement. 
The system is composed of two modules: a convolutional neural network for perception, and a deep reinforcement learned planning module. We introduce a multi-scale approach to the learned perceptual model since the accuracy needed to perform action selection with reinforcement learning is much less than the accuracy needed for robot control. We demonstrate that the resulting system outperforms using the traditional approach for either perception or planning. We also demonstrate our approaches robustness to different map configurations and other nuisance parameters through the use of domain randomization in training. The code is also compatible with the OpenAI gym framework, as well as the Gazebo simulator.}, -} - -@inproceedings{adr, - title = {Active Domain Randomization}, - author = {Mehta, Bhairav and Diaz, Manfred and Golemo, Florian and Pal, Christopher and Paull, Liam}, - booktitle = {Conference on Robot Learning (CoRL)}, - year = {2019}, - arxiv = {1904.04762}, - code = {https://github.com/montrealrobotics/active-domainrand}, - image = {papers/adr.gif}, - abstract = {We tackle the uniform sampling assumption in domain randomization and learn a randomization strategy, looking for the most informative environments. Our method shows significant improvements in agent performance, agent generalization, sample complexity, and interpretability over the traditional domain and dynamics randomization strategies.}, -} - -@inproceedings{gradslam, - title = {gradSLAM: Dense SLAM meets automatic differentiation}, - author = {{Krishna Murthy}, Jatavallabhula and Iyer, Ganesh and Paull, Liam}, - booktitle = {International Conference on Robotics and Automation (ICRA)}, - year={2020}, - arxiv = {1910.10672}, - projectpage = {https://gradslam.github.io}, - code = {https://github.com/gradslam/gradslam}, - image = {papers/gradslam.png}, - abstract = {The question of "representation" is central in the context of dense simultaneous localization and mapping (SLAM). Newer learning-based approaches have the potential to leverage data or task performance to directly inform the choice of representation. However, learning representations for SLAM has been an open question, because traditional SLAM systems are not end-to-end differentiable.In this work, we present gradSLAM, a differentiable computational graph take on SLAM. Leveraging the automatic differentiation capabilities of computational graphs, gradSLAM enables the design of SLAM systems that allow for gradient-based learning across each of their components, or the system as a whole. This is achieved by creating differentiable alternatives for each non-differentiable component in a typical dense SLAM system. Specifically, we demonstrate how to design differentiable trust-region optimizers, surface measurement and fusion schemes, as well as differentiate over rays, without sacrificing performance. 
This amalgamation of dense SLAM with computational graphs enables us to backprop all the way from 3D maps to 2D pixels, opening up new possibilities in gradient-based learning for SLAM.}, -} - -@inproceedings{lamaml, - title = {La-MAML: Look-ahead Meta Learning for Continual Learning}, - author = {Gupta, Gunshi and Yadav, Karmesh and Paull, Liam}, - booktitle = {Neural Information Processing Systems (Neurips)}, - highlight = {Oral (top 1.1%)}, - year = {2020}, - arxiv = {2007.13904}, - projectpage = {https://mila.quebec/en/article/la-maml-look-ahead-meta-learning-for-continual-learning/}, - image = {papers/lamaml.png}, - abstract = {The continual learning problem involves training models with limited capacity to perform well on a set of an unknown number of sequentially arriving tasks. While meta-learning shows great potential for reducing interference between old and new tasks, the current training procedures tend to be either slow or offline, and sensitive to many hyper-parameters. In this work, we propose Look-ahead MAML (La-MAML), a fast optimisation-based meta-learning algorithm for online-continual learning, aided by a small episodic memory. Our proposed modulation of per-parameter learning rates in our meta-learning update allows us to draw connections to prior work on hypergradients and meta-descent. This provides a more flexible and efficient way to mitigate catastrophic forgetting compared to conventional prior-based methods. La-MAML achieves performance superior to other replay-based, prior-based and meta-learning based approaches for continual learning on real-world visual classification benchmarks.}, -} - -@inproceedings{che2020neurips, - title = {Your GAN is Secretly an Energy-based Model and You Should Use Discriminator Driven Latent Sampling}, - author = {Che, Tong and Zhang, Ruixiang and Sohl-Dickstein, Jascha and Larochelle, Hugo and Paull, Liam and Cao, Yuan and Bengio, Yoshua}, - booktitle = {Neural Information Processing Systems (Neurips)}, - year = {2020}, - image = {papers/gan.png}, - arxiv = {2003.06060}, - abstract = {We show that the sum of the implicit generator log-density of a GAN with the logit score of the discriminator defines an energy function which yields the true data density when the generator is imperfect but the discriminator is optimal, thus making it possible to improve on the typical generator. To make that practical, we show that sampling from this modified density can be achieved by sampling in latent space according to an energy-based model induced by the sum of the latent prior log-density and the discriminator output score. This can be achieved by running a Langevin MCMC in latent space and then applying the generator function, which we call Discriminator Driven Latent Sampling~(DDLS). We show that DDLS is highly efficient compared to previous methods which work in the high-dimensional pixel space and can be applied to improve on previously trained GANs of many types. We evaluate DDLS on both synthetic and real-world datasets qualitatively and quantitatively. On CIFAR-10, DDLS substantially improves the Inception Score of an off-the-shelf pre-trained SN-GAN from 8.22 to 9.09 which is even comparable to the class-conditional BigGAN model. 
This achieves a new state-of-the-art in the unconditional image synthesis setting without introducing extra parameters or additional training.}, -} - -@inproceedings{mehta2020curriculum, - title = {Curriculum in Gradient-Based Meta-Reinforcement Learning}, - author = {Mehta, Bhairav and Deleu, Tristan and Raparthy, {Sharath Chandra} and Pal, Christopher and Paull, Liam}, - booktitle = {BETR-RL Workshop}, - year = {2020}, - arxiv = {2002.07956}, - image = {papers/mehta2020curriculum.png}, - abstract = {Can Meta-RL use curriculum learning? In this work, we explore that question and find that curriculum learning stabilizes meta-RL in complex navigation and locomotion tasks. We also highlight issues with Meta-RL benchmarks by highlighting failure cases when we vary task distributions.}, -} - -@inproceedings{ssadr, - title = {Generating Automatic Curricula via Self-Supervised Active Domain Randomization}, - author = {Raparthy, {Sharath Chandra} and Mehta, Bhairav and Paull, Liam}, - booktitle = {BETR-RL Workshop}, - year = {2020}, - arxiv = {2002.07911}, - code = {https://github.com/montrealrobotics/unsupervised-adr}, - image = {papers/ssadr.png}, - abstract = {Can you learn domain randomization curricula with no rewards? We show that agents trained via self-play in the ADR framework outperform uniform domain randomization by magnitudes in both simulated and real-world transfer.}, -} - -@inproceedings{aido2018, - title = {The AI Driving Olympics at NeurIPS 2018}, - author = {Zilly, Julian and Tani, Jacopo and Considine, Breandan and Mehta, Bhairav and Daniele, {Andrea F} and Diaz, Manfred and Bernasconi, Gianmarco and Ruch, Claudio and Hakenberg, Jan and Golemo, Florian and Bowser, {A Kirsten} and Walter, {Matthew R} and Hristov, Ruslan and Mallya, Sunil and Frazzoli, Emilio and Censi, Andrea and Paull, Liam}, - booktitle = {Springer}, - year = {2020}, - arxiv = {1903.02503}, - image = {papers/aido18.png}, - abstract = {Despite recent breakthroughs, the ability of deep learning and reinforcement learning to outperform traditional approaches to control physically embodied robotic agents remains largely unproven. To help bridge this gap, we created the “AI Driving Olympics” (AI-DO), a competition with the objective of evaluating the state of the art in machine learning and artificial intelligence for mobile robotics. Based on the simple and well specified autonomous driving and navigation environment called “Duckietown,” AI-DO includes a series of tasks of increasing complexity – from simple lane-following to fleet management. For each task, we provide tools for competitors to use in the form of simulators, logs, code templates, baseline implementations and low-cost access to robotic hardware. We evaluate submissions in simulation online, on standardized hardware environments, and finally at the competition event. The first AI-DO, AI-DO 1, occurred at the Neural Information Processing Systems (NeurIPS) conference in December 2018.
The results of AI-DO 1 highlight the need for better benchmarks, which are lacking in robotics, as well as improved mechanisms to bridge the gap between simulation and reality.}, -} - -@inproceedings{probod, - title = {Probabilistic Object Detection: Strengths, Weaknesses, and Opportunities}, - author = {Bhatt, Dhaivat and Bansal, Dishank and Gupta, Gunshi and Jatavallabhula, {Krishna Murthy} and Lee, Hanju and Paull, Liam}, - booktitle = {ICML workshop on AI for autonomous driving}, - year = {2020}, - projectpage = {https://gunshigupta.netlify.app/publication/probod/}, - image = {papers/probod.png}, - abstract = {Deep neural networks are the de-facto standard for object detection in autonomous driving applications. However, neural networks cannot be blindly trusted even within the training data distribution, let alone outside it. This has paved the way for several probabilistic object detection techniques that measure uncertainty in the outputs of an object detector. Through this position paper, we serve three main purposes. First, we briefly sketch the landscape of current methods for probabilistic object detection. Second, we present the main shortcomings of these approaches. Finally, we present promising avenues for future research, and proof-of-concept results where applicable. Through this effort, we hope to bring the community one step closer to performing accurate, reliable, and consistent probabilistic object detection.}, -} - - -@inproceedings{biv, - title = {Batch Inverse-Variance Weighting: Deep Heteroscedastic Regression}, - author = {Mai, Vincent and Khamies, Waleed and Paull, Liam}, - booktitle = {ICML Workshop on Uncertainty & Robustness in Deep Learning}, - year = {2021}, - arxiv = {2107.04497}, - image = {papers/biv.png}, - abstract = {In the supervised learning task of heteroscedastic regression, each label is subject to noise from a different distribution. The label generator may estimate the variance of the noise distribution for each label, which is useful information to mitigate its impact. We adapt an inverse-variance weighted mean square error, based on the Gauss-Markov theorem, for gradient descent on neural networks. We introduce Batch Inverse-Variance, a loss function which is robust to near-ground truth samples, and allows control of the effective learning rate. Our experimental results show that BIV significantly improves the performance of the networks on two noisy datasets, compared to L2 loss, inverse-variance weighting, and a filtering-based baseline.}, -} - -@inproceedings{loco, - title = {LOCO: Adaptive exploration in reinforcement learning via local estimation of contraction coefficients}, - author = {Manfred Diaz and Liam Paull and Pablo Samuel Castro}, - booktitle = {Self-Supervision for Reinforcement Learning Workshop-ICLR 2021}, - year = {2021}, - openreview = {SmvsysIyHW-}, - image = {papers/loco.png}, - pdf = {diaz_loco.pdf}, - abstract = {We offer a novel approach to balance exploration and exploitation in reinforcement learning (RL). To do so, we characterize an environment’s exploration difficulty via the Second Largest Eigenvalue Modulus (SLEM) of the Markov chain induced by uniform stochastic behaviour. Specifically, we investigate the connection of state-space coverage with the SLEM of this Markov chain and use the theory of contraction coefficients to derive estimates of this eigenvalue of interest. Furthermore, we introduce a method for estimating the contraction coefficients on a local level and leverage it to design a novel exploration algorithm.
We evaluate our algorithm on a series of GridWorld tasks of varying sizes and complexity.}, -} - - -@inproceedings{schwarting2017parallel, - title={Parallel autonomy in automated vehicles: Safe motion generation with minimal intervention}, - author={Schwarting, Wilko and Alonso-Mora, Javier and Paull, Liam and Karaman, Sertac and Rus, Daniela}, - booktitle={Robotics and Automation (ICRA), 2017 IEEE International Conference on}, - pages={1928--1935}, - pdf = {schwarting_ICRA_2017.pdf}, - image = {papers/parallel_autonomy_ICRA.png}, - abstract = {Current state-of-the-art vehicle safety systems, such as assistive braking or automatic lane following, are still only able to help in relatively simple driving situations. We introduce a Parallel Autonomy shared-control framework that produces safe trajectories based on human inputs even in much more complex driving scenarios, such as those commonly encountered in an urban setting. We minimize the deviation from the human inputs while ensuring safety via a set of collision avoidance constraints. We develop a receding horizon planner formulated as a Non-linear Model Predictive Control (NMPC) including analytic descriptions of road boundaries, and the configurations and future uncertainties of other traffic participants, and directly supplying them to the optimizer without linearization. The NMPC operates over both steering and acceleration simultaneously. Furthermore, the proposed receding horizon planner also applies to fully autonomous vehicles. We validate the proposed approach through simulations in a wide variety of complex driving scenarios such as left-turns across traffic, passing on busy streets, and under dynamic constraints in sharp turns on a race track.}, - year={2017}, - organization={IEEE} -} - -@inproceedings{naser2017parallel, - title={A parallel autonomy research platform}, - author={Naser, Felix and Dorhout, David and Proulx, Stephen and Pendleton, Scott Drew and Andersen, Hans and Schwarting, Wilko and Paull, Liam and Alonso-Mora, Javier and Ang, Marcelo H and Karaman, Sertac and others}, - booktitle={2017 IEEE Intelligent Vehicles Symposium (IV)}, - pages={933--940}, - year={2017}, - abstract={We present the development of a full-scale “parallel autonomy” research platform including software and hardware. In the parallel autonomy paradigm, the control of the vehicle is shared; the human is still in control of the vehicle, but the autonomy system is always running in the background to prevent accidents. Our holistic approach includes: (1) a drive-by-wire conversion method only based on reverse engineering, (2) mounting of relatively inexpensive sensors onto the vehicle, (3) implementation of a localization and mapping system, (4) obstacle detection and (5) a shared controller as well as (6) integration with an advanced autonomy simulation system (Drake) for rapid development and testing. The system can operate in three modes: (a) manual driving, (b) full autonomy, where the system is in complete control of the vehicle and (c) parallel autonomy, where the shared controller is implemented.
We present results from extensive testing of a full-scale vehicle on closed tracks that demonstrate these capabilities.}, - pdf = {naser_IV_2017.pdf}, - image = {papers/prius.png}, - organization={IEEE} -} - -@article{schwarting2017safe, - title={Safe nonlinear trajectory generation for parallel autonomy with a dynamic vehicle model}, - author={Schwarting, Wilko and Alonso-Mora, Javier and Paull, Liam and Karaman, Sertac and Rus, Daniela}, - journal={IEEE Transactions on Intelligent Transportation Systems}, - volume={19}, - number={9}, - pages={2994--3008}, - year={2017}, - abstract={High-end vehicles are already equipped with safety systems, such as assistive braking and automatic lane following, enhancing vehicle safety. Yet, these current solutions can only help in low-complexity driving situations. In this paper, we introduce a parallel autonomy, or shared control, framework that computes safe trajectories for an automated vehicle, based on human inputs. We minimize the deviation from the human inputs while ensuring safety via a set of collision avoidance constraints. Our method achieves safe motion even in complex driving scenarios, such as those commonly encountered in an urban setting. We introduce a receding horizon planner formulated as nonlinear model predictive control (NMPC), which includes the analytic descriptions of road boundaries and the configuration and future uncertainties of other road participants. The NMPC operates over both steering and acceleration simultaneously. We introduce a nonslip model suitable for handling complex environments with dynamic obstacles, and a nonlinear combined slip vehicle model including normal load transfer capable of handling static environments. We validate the proposed approach in two complex driving scenarios. First, in an urban environment that includes a left-turn across traffic and passing on a busy street. And second, under snow conditions on a race track with sharp turns and under complex dynamic constraints. We evaluate the performance of the method with various human driving styles. We consequently observe that the method successfully avoids collisions and generates motions with minimal intervention for parallel autonomy. We note that the method can also be applied to generate safe motion for fully autonomous vehicles.}, - pdf={schwarting_TITS_2018.pdf}, - image={papers/parallel_autonomy_TITS.png}, - publisher={IEEE} -} - -@article{ort2020maplite, - title={MapLite: Autonomous Intersection Navigation without a Detailed Prior Map}, - author={Ort, Teddy and Murthy, Krishna and Banerjee, Rohan and Gottipati, Sai Krishna and Bhatt, Dhaivat and Gilitschenski, Igor and Paull, Liam and Rus, Daniela}, - journal={IEEE Robotics and Automation Letters}, - abstract={In this work, we present MapLite: a one-click -autonomous navigation system capable of piloting a vehicle to an -arbitrary desired destination point given only a sparse publicly -available topometric map (from OpenStreetMap). The onboard -sensors are used to segment the road region and register the -topometric map in order to fuse the high-level navigation goals -with a variational path planner in the vehicle frame. This enables -the system to plan trajectories that correctly navigate road -intersections without the use of an external localization system -such as GPS or a detailed prior map. Since the topometric -maps already exist for the vast majority of roads, this solution -greatly increases the geographical scope for autonomous mobility -solutions.
We implement MapLite on a full-scale autonomous -vehicle and exhaustively test it on over 15km of road including -over 100 autonomous intersection traversals. We further extend -these results through simulated testing to validate the system on -complex road junction topologies such as traffic circles.}, - image={papers/maplite_RAL.png}, - pdf={MapLite_RAL.pdf}, - year={2020} -} - -@inproceedings{tani2020integrated, - title={Integrated benchmarking and design for reproducible and accessible evaluation of robotic agents}, - author={Tani, Jacopo and Daniele, Andrea F and Bernasconi, Gianmarco and Camus, Amaury and Petrov, Aleksandar and Courchesne, Anthony and Mehta, Bhairav and Suri, Rohit and Zaluska, Tomasz and Walter, Matthew R and others}, - booktitle={2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, - pages={6229--6236}, - year={2020}, - arxiv={2009.04362}, - abstract={As robotics matures and increases in complexity, -it is more necessary than ever that robot autonomy research -be reproducible. Compared to other sciences, there are specific -challenges to benchmarking autonomy, such as the complexity -of the software stacks, the variability of the hardware and -the reliance on data-driven techniques, amongst others. In this -paper, we describe a new concept for reproducible robotics -research that integrates development and benchmarking, so -that reproducibility is obtained “by design” from the beginning -of the research/development processes. We first provide the -overall conceptual objectives to achieve this goal and then a -concrete instance that we have built: the DUCKIENet. One -of the central components of this setup is the Duckietown -Autolab, a remotely accessible standardized setup that is itself -also relatively low-cost and reproducible. When evaluating -agents, careful definition of interfaces allows users to choose -among local versus remote evaluation using simulation, logs, -or remote automated hardware setups. We validate the system -by analyzing the repeatability of experiments conducted using -the infrastructure and show that there is low variance across -different robot hardware and across different remote labs.}, - image={papers/autolab.png}, - organization={IEEE} -} - -@inproceedings{zhang2020perceptual, - title={Perceptual generative autoencoders}, - author={Zhang, Zijun and Zhang, Ruixiang and Li, Zongpeng and Bengio, Yoshua and Paull, Liam}, - booktitle={International Conference on Machine Learning}, - pages={11298--11306}, - year={2020}, - arxiv={1906.10335}, - abstract={Modern generative models are usually designed to match target distributions directly in the data space, where the intrinsic dimension of data can be much lower than the ambient dimension. We argue that this discrepancy may contribute to the difficulties in training generative models. We therefore propose to map both the generated and target distributions to a latent space using the encoder of a standard autoencoder, and train the generator (or decoder) to match the target distribution in the latent space. Specifically, we enforce the consistency in both the data space and the latent space with theoretically justified data and latent reconstruction losses. The resulting generative model, which we call a perceptual generative autoencoder (PGA), is then trained with a maximum likelihood or variational autoencoder (VAE) objective. 
With maximum likelihood, PGAs generalize the idea of reversible generative models to unrestricted neural network architectures and arbitrary number of latent dimensions. When combined with VAEs, PGAs substantially improve over the baseline VAEs in terms of sample quality. Compared to other autoencoder-based generative models using simple priors, PGAs achieve state-of-the-art FID scores on CIFAR-10 and CelebA.}, - image={papers/pga.png}, - organization={PMLR} -} - -@inproceedings{liu2021orthogonal, - title={Orthogonal over-parameterized training}, - author={Liu, Weiyang and Lin, Rongmei and Liu, Zhen and Rehg, James M and Paull, Liam and Xiong, Li and Song, Le and Weller, Adrian}, - booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, - pages={7251--7260}, - arxiv={2004.04690}, - abstract={The inductive bias of a neural network is largely determined by the architecture and the training algorithm. To achieve good generalization, how to effectively train a neural network is of great importance. We propose a novel orthogonal over-parameterized training (OPT) framework that can provably minimize the hyperspherical energy which characterizes the diversity of neurons on a hypersphere. By maintaining the minimum hyperspherical energy during training, OPT can greatly improve the empirical generalization. Specifically, OPT fixes the randomly initialized weights of the neurons and learns an orthogonal transformation that applies to these neurons. We consider multiple ways to learn such an orthogonal transformation, including unrolling orthogonalization algorithms, applying orthogonal parameterization, and designing orthogonality-preserving gradient descent. For better scalability, we propose the stochastic OPT which performs orthogonal transformation stochastically for partial dimensions of neurons. Interestingly, OPT reveals that learning a proper coordinate system for neurons is crucial to generalization. We provide some insights on why OPT yields better generalization. Extensive experiments validate the superiority of OPT over the standard training.}, - image={papers/orthogonal.png}, - year={2021} -} - -@inproceedings{murthy2020gradsim, - title={gradSim: Differentiable simulation for system identification and visuomotor control}, - author={Murthy, J Krishna and Macklin, Miles and Golemo, Florian and Voleti, Vikram and Petrini, Linda and Weiss, Martin and Considine, Breandan and Parent-L{\'e}vesque, J{\'e}r{\^o}me and Xie, Kevin and Erleben, Kenny and others}, - booktitle={International Conference on Learning Representations}, - abstract={We consider the problem of estimating an object's physical properties such as mass, friction, and elasticity directly from video sequences. Such a system identification problem is fundamentally ill-posed due to the loss of information during image formation. Current solutions require precise 3D labels which are labor-intensive to gather, and infeasible to create for many systems such as deformable solids or cloth. We present gradSim, a framework that overcomes the dependence on 3D supervision by leveraging differentiable multiphysics simulation and differentiable rendering to jointly model the evolution of scene dynamics and image formation. This novel combination enables backpropagation from pixels in a video sequence through to the underlying physical attributes that generated them. 
Moreover, our unified computation graph -- spanning from the dynamics and through the rendering process -- enables learning in challenging visuomotor control tasks, without relying on state-based (3D) supervision, while obtaining performance competitive to or better than techniques that rely on precise 3D labels.}, - arxiv={2104.02646}, - image={papers/gradsim.png}, - projectpage={https://gradsim.github.io/}, - year={2020} -} - -@inproceedings{diaz2021uncertainty, - title={Uncertainty-Aware Policy Sampling and Mixing for Safe Interactive Imitation Learning}, - author={Diaz, Manfred and Fevens, Thomas and Paull, Liam}, - booktitle={2021 18th Conference on Robots and Vision (CRV)}, - pages={72--78}, - year={2021}, - abstract={Teaching robots how to execute tasks through demonstrations is appealing since it sidesteps the need to explicitly specify a reward function. However, posing imitation learning as a simple supervised learning problem suffers from the well-known problem of distributional shift - the teacher will only demonstrate the optimal trajectory and therefore the learner is unable to recover if it deviates even slightly from this trajectory since it has no training data for this case. This problem has been overcome in the literature by some element of interactivity in the learning process - usually be somehow interleaving the execution of the learner and the teacher so that the teacher can demonstrate to the learner also how to recover from mistakes. In this paper, we consider the cases where the robot has the potential to do harm, and therefore safety must be imposed at every step in the learning process. We show that uncertainty is an appropriate measure of safety and that both the mixing of the policies and the data sampling procedure benefit from considering the uncertainty of both the learner and the teacher. Our method, uncertainty-aware policy sampling and mixing (UPMS), is used to teach an agent to drive down a lane with less safety violations and less queries to the teacher than state-of-the-art methods.}, - pdf={Diaz_CRV_2021.pdf}, - image={papers/diaz_CRV_2021.png}, - organization={IEEE} -} - -@inproceedings{laferriere2021deep, - title={Deep Koopman Representation for Control over Images (DKRCI)}, - author={Laferri{\`e}re, Philippe and Laferri{\`e}re, Samuel and Dahdah, Steven and Forbes, James Richard and Paull, Liam}, - booktitle={2021 18th Conference on Robots and Vision (CRV)}, - pages={158--164}, - year={2021}, - abstract={The Koopman operator provides a means to represent nonlinear systems as infinite dimensional linear systems in a lifted state space. This enables the application of linear control techniques to nonlinear systems. However, the choice of a finite number of lifting functions, or Koopman observables, is still an unresolved problem. Deep learning techniques have recently been used to jointly learn these lifting function along with the Koopman operator. However, these methods require knowledge of the system's state space. In this paper, we present a method to learn a Koopman representation directly from images and control inputs. 
We then demonstrate our deep learning architecture on a cart-pole system with external inputs.}, - image={papers/koopman.png}, - pdf={koopman.pdf}, - organization={IEEE} -} - -@inproceedings{agia2022taskography, - title={Taskography: Evaluating robot task planning over large 3D scene graphs}, - author={Agia, Christopher and Jatavallabhula, Krishna Murthy and Khodeir, Mohamed and Miksik, Ondrej and Vineet, Vibhav and Mukadam, Mustafa and Paull, Liam and Shkurti, Florian}, - booktitle={Conference on Robot Learning}, - pages={46--58}, - year={2021}, - projectpage={https://taskography.github.io/}, - image={papers/taskography.png}, - openreview={nWLt35BU1z_}, - pdf={taskography.pdf}, - abstract={3D scene graphs (3DSGs) are an emerging description; unifying symbolic, topological, and metric scene representations. However, typical 3DSGs -contain hundreds of objects and symbols even for small environments; rendering task planning on the full graph impractical. We construct TASKOGRAPHY, -the first large-scale robotic task planning benchmark over 3DSGs. While most -benchmarking efforts in this area focus on vision-based planning, we systematically study symbolic planning, to decouple planning performance from visual representation learning. We observe that, among existing methods, neither classical -nor learning-based planners are capable of real-time planning over full 3DSGs. -Enabling real-time planning demands progress on both (a) sparsifying 3DSGs -for tractable planning and (b) designing planners that better exploit 3DSG hierarchies. Towards the former goal, we propose SCRUB, a task-conditioned 3DSG -sparsification method; enabling classical planners to match and in some cases surpass state-of-the-art learning-based planners. Towards the latter goal, we propose -SEEK, a procedure enabling learning-based planners to exploit 3DSG structure, -reducing the number of replanning queries required by current best approaches by -an order of magnitude. We will open-source all code and baselines to spur further -research along the intersections of robot task planning, learning and 3DSGs.}, - organization={PMLR} -} - -@inproceedings{courchesne2021assessing, - title={On Assessing the Usefulness of Proxy Domains for Developing and Evaluating Embodied Agents}, - author={Courchesne, Anthony and Censi, Andrea and Paull, Liam}, - booktitle={2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, - pages={4298--4305}, - year={2021}, - arxiv={2109.14516}, - image={papers/Courchesne_IROS_2021.png}, - abstract={In many situations it is either impossible or -impractical to develop and evaluate agents entirely on the target -domain on which they will be deployed. This is particularly -true in robotics, where doing experiments on hardware is -much more arduous than in simulation. This has become -arguably more so in the case of learning-based agents. To this -end, considerable recent effort has been devoted to developing -increasingly realistic and higher fidelity simulators. However, -we lack any principled way to evaluate how good a “proxy -domain” is, specifically in terms of how useful it is in helping -us achieve our end objective of building an agent that performs -well in the target domain. In this work, we investigate methods -to address this need. We begin by clearly separating two uses of -proxy domains that are often conflated: 1) their ability to be a -faithful predictor of agent performance and 2) their ability to be -a useful tool for learning. 
In this paper, we attempt to clarify the -role of proxy domains and establish new proxy usefulness (PU) -metrics to compare the usefulness of different proxy domains. -We propose the relative predictive PU to assess the predictive -ability of a proxy domain and the learning PU to quantify -the usefulness of a proxy as a tool to generate learning data. -Furthermore, we argue that the value of a proxy is conditioned -on the task that it is being used to help solve. We demonstrate -how these new metrics can be used to optimize parameters of -the proxy domain for which obtaining ground truth via system -identification is not trivial.}, - organization={IEEE} -} - -@article{wiyatno2022lifelong, - title={Lifelong Topological Visual Navigation}, - author={Wiyatno, Rey Reza and Xu, Anqi and Paull, Liam}, - journal={IEEE Robotics and Automation Letters}, - volume={7}, - number={4}, - pages={9271--9278}, - arxiv={2110.08488}, - abstract={Commonly, learning-based topological navigation approaches produce a local policy while preserving some loose connectivity of the space through a topological map. Nevertheless, spurious or missing edges in the topological graph often lead to navigation failure. In this work, we propose a sampling-based graph building method, which results in sparser graphs yet with higher navigation performance compared to baseline methods. We also propose graph maintenance strategies that eliminate spurious edges and expand the graph as needed, which improves lifelong navigation performance. Unlike controllers that learn from fixed training environments, we show that our model can be fine-tuned using only a small number of collected trajectory images from a real-world environment where the agent is deployed. We demonstrate successful navigation after fine-tuning on real-world environments, and notably show significant navigation improvements over time by applying our lifelong graph maintenance strategies.}, - image={papers/ltvn.png}, - projectpage={https://montrealrobotics.ca/ltvn/}, - year={2022}, - publisher={IEEE} -} - -@article{liu2021iterative, - title={Iterative teaching by label synthesis}, - author={Liu, Weiyang and Liu, Zhen and Wang, Hanchen and Paull, Liam and Sch{\"o}lkopf, Bernhard and Weller, Adrian}, - journal={Advances in Neural Information Processing Systems (NeurIPS)}, - volume={34}, - pages={21681--21695}, - openreview={9rphbXqgmqM}, - abstract={In this paper, we consider the problem of iterative machine teaching, where a teacher provides examples sequentially based on the current iterative learner. In contrast to previous methods that have to scan over the entire pool and select teaching examples from it in each iteration, we propose a label synthesis teaching framework where the teacher randomly selects input teaching examples (e.g., images) and then synthesizes suitable outputs (e.g., labels) for them. We show that this framework can avoid costly example selection while still provably achieving exponential teachability. We propose multiple novel teaching algorithms in this framework. 
Finally, we empirically demonstrate the value of our framework.}, - image={papers/iterative.png}, - year={2021} -} - -@article{mai2022sample, - title={Sample efficient deep reinforcement learning via uncertainty estimation}, - author={Mai, Vincent and Mani, Kaustubh and Paull, Liam}, - journal={International Conference on Learning Representations (ICLR)}, - arxiv={2201.01666}, - abstract={In model-free deep reinforcement learning (RL) algorithms, using noisy value estimates to supervise policy evaluation and optimization is detrimental to the sample efficiency. As this noise is heteroscedastic, its effects can be mitigated using uncertainty-based weights in the optimization process. Previous methods rely on sampled ensembles, which do not capture all aspects of uncertainty. We provide a systematic analysis of the sources of uncertainty in the noisy supervision that occurs in RL, and introduce inverse-variance RL, a Bayesian framework which combines probabilistic ensembles and Batch Inverse Variance weighting. We propose a method whereby two complementary uncertainty estimation methods account for both the Q-value and the environment stochasticity to better mitigate the negative impacts of noisy supervision. Our results show significant improvement in terms of sample efficiency on discrete and continuous control tasks.}, - image={papers/mai_ICLR_2022.png}, - year={2022} -} - -@inproceedings{saavedra2022monocular, - title={Monocular Robot Navigation with Self-Supervised Pretrained Vision Transformers}, - author={Saavedra-Ruiz, Miguel and Morin, Sacha and Paull, Liam}, - journal={IEEE Conference on Robots and Vision}, - abstract={In this work, we consider the problem of learning a perception model for monocular robot navigation using few annotated images. Using a Vision Transformer (ViT) pretrained with a label-free self-supervised method, we successfully train a coarse image segmentation model for the Duckietown environment using 70 training images. Our model performs coarse image segmentation at the 8x8 patch level, and the inference resolution can be adjusted to balance prediction granularity and real-time perception constraints. We study how best to adapt a ViT to our task and environment, and find that some lightweight architectures can yield good single-image segmentation at a usable frame rate, even on CPU. The resulting perception model is used as the backbone for a simple yet robust visual servoing agent, which we deploy on a differential drive mobile robot to perform two tasks: lane following and obstacle avoidance.}, - arxiv={2203.03682}, - image={papers/transformers.png}, - year={2022} -} - -@inproceedings{diaz2022generalization, - title={Generalization Games for Reinforcement Learning}, - author={Diaz, Manfred and Gauthier, Charlie and Berseth, Glen and Paull, Liam}, - booktitle={ICLR 2022 Workshop on Gamification and Multiagent Solutions}, - openreview={HIc8rQv-LZq}, - abstract={In reinforcement learning (RL), the term generalization has either denoted introducing function approximation to reduce the intractability of problems with large state and action spaces or designated RL agents' ability to transfer learned experiences to one or more evaluation tasks. Recently, many subfields have emerged to understand how distributions of training tasks affect an RL agent's performance in unseen environments. While the field is extensive and ever-growing, recent research has underlined that variability among the different approaches is not as significant. 
We leverage this intuition to demonstrate how current methods for generalization in RL are specializations of a general framework. We obtain the fundamental aspects of this formulation by rebuilding a Markov Decision Process (MDP) from the ground up by resurfacing the game-theoretic framework of games against nature. The two-player game that arises from considering nature as a complete player in this formulation explains how existing methods rely on learned and randomized dynamics and initial state distributions. We develop this result further by drawing inspiration from mechanism design theory to introduce the role of a principal as a third player that can modify the payoff functions of the decision-making agent and nature. The games induced by playing against the principal extend our framework to explain how learned and randomized reward functions induce generalization in RL agents. The main contribution of our work is the complete description of the Generalization Games for Reinforcement Learning, a multiagent, multiplayer, game-theoretic formal approach to study generalization methods in RL. We offer a preliminary ablation experiment of the different components of the framework. We demonstrate that a more simplified composition of the objectives that we introduce for each player leads to comparable, and in some cases superior, zero-shot generalization compared to state-of-the-art methods, all while requiring almost two orders of magnitude fewer samples.}, - image={papers/generalization.png}, - year={2022} -} - -@inproceedings{bhatt2022f, - title={f-Cal: Aleatoric uncertainty quantification for robot perception via calibrated neural regression}, - author={Bhatt, Dhaivat and Mani, Kaustubh and Bansal, Dishank and Murthy, Krishna and Lee, Hanju and Paull, Liam}, - booktitle={2022 International Conference on Robotics and Automation (ICRA)}, - pages={6533--6539}, - year={2022}, - abstract={While modern deep neural networks are performant perception modules, performance (accuracy) alone is insufficient, particularly for safety-critical robotic applications such as self-driving vehicles. Robot autonomy stacks also require these otherwise blackbox models to produce reliable and calibrated measures of confidence on their predictions. Existing approaches estimate uncertainty from these neural network perception stacks by modifying network architectures, inference procedure, or loss functions. However, in general, these methods lack calibration, meaning that the predictive uncertainties do not faithfully represent the true underlying uncertainties (process noise). Our key insight is that calibration is only achieved by imposing constraints across multiple examples, such as those in a mini-batch; as opposed to existing approaches which only impose constraints per-sample, often leading to overconfident (thus miscalibrated) uncertainty estimates. By enforcing the distribution of outputs of a neural network to resemble a target distribution by minimizing an f -divergence, we obtain significantly better-calibrated models compared to prior approaches. 
Our approach, f-Cal, outperforms existing uncertainty calibration approaches on robot perception tasks such as object detection and monocular depth estimation over multiple real-world benchmarks.}, - projectpage={https://f-cal.github.io/}, - pdf={f-cal.pdf}, - image={papers/f-cal.png}, - organization={IEEE} -} diff --git a/_config.yml deleted file mode 100644 index e52d5c8..0000000 --- a/_config.yml +++ /dev/null @@ -1,115 +0,0 @@ -name: Robotics Group @ University of Montreal -description: The Robotics and Embodied AI Lab @ U de Montreal -url: https://montrealrobotics.github.io - -exclude: [bibble, README.md, Makefile, screenshot.png] - -# colorize code snippets with the pygments module -highlighter: rouge - -# The path structure for blog posts. -permalink: /blog/:year/:month/:day/:title.html - -# Sections on the "people" page. -roles: - - key: faculty - name: Faculty - - key: staff - name: Staff - - key: developer - name: Software Developers - - key: postdoc - name: Postdocs - - key: masters - name: Master's Students - - key: phd - name: PhD Students - - key: intern - name: Undergraduate Researchers and Interns - - key: postdoc-alum - name: Past Postdocs - - key: phd-alum - name: Past PhD Students - - key: masters-alum - name: Past Master's Students - - key: intern-alum - name: Past Undergraduate Researchers and Interns - - -includes_dir: website-assets/_includes -layouts_dir: website-assets/_layouts -sass: - sass_dir: website-assets/_sass -asset_dir: /website-assets/assets - -# Number of news stories on the front page. -front_page_news: 8 - -# Number of old projects on the front page -front_page_old_projects: 4 - -# Base pathname for links. -base: '' - -# make pages for the _projects folder -collections: - projects: - output: true - events: - output: true - -# Navigation bar links.
-navigation: - - title: Home - link: / - - title: People - link: /people.html - - title: Research - link: /research.html - - title: Publications - link: /publications/ - - title: Events - link: /events.html - - title: Contact - link: /contact.html - - title: Blog - link: /blog.html - -# Includes -include: ['_pages'] - -gems: - - jekyll-paginate - - jekyll/scholar - - jemoji - - -# Build settings -markdown: kramdown -highlighter: pygments - -# Jekyll-Scholar -scholar: - - style: apa - locale: en - - sort_by: year - order: descending - - source: /_bibliography/ - bibliography: pubs.bib - bibliography_template: bib - - replace_strings: true - join_strings: true - - details_dir: bibliography - details_layout: bibtex.html - details_link: Details - - query: "@*" - -jquery_version: "1.12.4" -katex_version: "0.7.1" -anchorjs_version: "3.2.2" diff --git a/_data/people.yml b/_data/people.yml deleted file mode 100644 index 19e4cad..0000000 --- a/_data/people.yml +++ /dev/null @@ -1,505 +0,0 @@ -#### Role keys: -# roles: -# - key: faculty -# name: Faculty -# - key: staff -# name: Staff -# - key: developer -# name: Software Developers -# - key: postdoc -# name: Postdocs -# - key: masters -# name: Master's Students -# - key: phd -# name: PhD Students -# - key: intern -# name: Undergraduate Researchers and Interns -# - key: postdoc-alum -# name: Past Postocs -# - key: phd-alum -# name: Past PhD Students -# - key: masters-alum -# name: Past Master's Students -# - key: intern-alum -# name: Past Undergraduate Researchers and Interns - -liam: - display_name: "Liam Paull" - webpage: "http://liampaull.ca" - image: /img/people/liam.jpg - role: faculty - interests: "Robot perception, uncertainty, sim2real, and robot benchmarking" - -glen: - display_name: "Glen Berseth" - webpage: "http://fracturedplane.com" - image: /img/people/glen.jpg - role: faculty - interests: "Reinforcement learning, robotics, machine learning, generalization, planning" - - -florian: - display_name: "Florian Golemo" - webpage: "https://fgolemo.github.io/" - role: postdoc-alum - coadvisor: Chris Pal - image: /img/people/florian.jpg - -ali: - display_name: "Ali Harakeh" - webpage: "https://www.aharakeh.com/" - interests: "Bayesian deep learning, conformal prediction, out-of-distribution generalization, and continual learning" - role: postdoc-alum - image: /img/people/ali-h.jpg - current_role: Senior Applied Research Scientist at Mila - -steven: - display_name: "Steven Parkison" - webpage: "https://saparkison.github.io/" - interests: "SLAM, optimization, robotic perception, and lukewarm coffee" - role: postdoc - image: /img/people/stevenhead.png - -kirsty: - display_name: "Kirsty Ellis" - role: developer - image: /img/people/kirsty.jpg - -roger: - display_name: "Roger Creus Castanyer" - role: masters - image: /img/people/rogercreus.jpeg - interests: "(Unsupervised) (Deep) reinforcement learning" - webpage: "https://roger-creus.github.io/" - -vincent: - display_name: "Vincent Mai" - role: phd-alum - image: /img/people/vincent.jpg - current_role: AI researcher at the Institut de Recherche d'Hydro Québec (IREQ) - -krishna: - display_name: "Krishna Murthy Jatavallabhula" - role: phd-alum - image: /img/people/krishna.jpg - webpage: "https://krrish94.github.io" - current_role: PostDoc at MIT with Antonio Torralba and Joshua Tenenbaum - -manfred: - display_name: "Manfred Diaz" - role: phd - image: /img/people/manfred.jpg - webpage: "https://manfreddiaz.github.io/" - -ruixiang: - display_name: "Ruixiang Zhang" - role: phd - image: 
/img/people/ruixiang.jpg - coadvisor: "Yoshua Bengio" - -zhen: - display_name: "Zhen Liu" - coadvisor: "Yoshua Bengio" - role: phd - image: /img/people/zhen.jpeg - website: "http://itszhen.com/" - -mostafa: - display_name: "Mostafa Elaraby" - role: phd - interests: "Continual learning, imitation learning" - image: /img/people/mostafa.jpeg - - -sacha: - display_name: "Sacha Morin" - role: phd - webpage: https://sachamorin.github.io/ - image: /img/people/sacha.jpg - coadvisor: "Guy Wolf" - -ali-k: - display_name: "Ali Kuwajerwala" - role: masters - image: /img/people/ali-k.jpeg - webpage: "https://alihkw.com/" - -dishank: - display_name: "Dishank Bansal" - role: masters - webpage: "http://dishank-b.github.io/" - image: /img/people/dishank.jpg - -charlie1: - display_name: "Charlie Gauthier" - role: masters-alum - image: /img/people/charlie.jpg - thesis: "Fear prediction for training robust RL agents" - thesis_link: "https://papyrus.bib.umontreal.ca/xmlui/handle/1866/28235" - -charlie2: - display_name: "Charlie Gauthier" - role: phd - image: /img/people/charlie.jpg - -miguel: - display_name: "Miguel Saavedra-Ruiz" - role: phd - webpage: "https://mikes96.github.io/" - image: /img/people/miguel.jpg - -simon: - display_name: "Simon Demeule" - role: masters - webpage: "https://simondemeule.com/" - image: /img/people/simon.jpg -adriana: - display_name: "Adriana Hugessen" - role: masters - image: /img/people/adriana.jpg -albert: - display_name: "Albert Zhan" - role: phd - image: /img/people/albert-z.jpg - webpage: "albertzhan.github.io" -raj: - display_name: "Raj Ghugare" - role: intern - image: /img/people/raj-g.png - webpage: "http://RajGhugare19.github.io/" - -atharva: - display_name: "Atharva Chandak" - role: intern - image: /img/people/atharva.jpeg - -bipasha: - display_name: "Bipasha Sen" - role: intern - image: /img/people/bipasha.jpeg - - -aditya: - display_name: "Aditya Agarwal" - role: intern - image: /img/people/aditya.png - - -sai: - display_name: "Sai Krishna G.V." 
- role: masters-alum - thesis: "Deep active localization" - thesis_link: "https://papyrus.bib.umontreal.ca/xmlui/handle/1866/22526" - current_role: "Reinforcement learning researcher at AI-Redefined" - image: /img/people/sai.jpg - webpage: "https://saikrishna-1996.github.io/" - - -gunshi: - display_name: "Gunshi Gupta" - role: masters-alum - thesis: "Look-ahead meta-learning for continual learning" - thesis_link: "https://papyrus.bib.umontreal.ca/xmlui/handle/1866/24315" - current_role: PhD student at Oxford - image: /img/people/gunshi.jpg - -nithin: - display_name: "Nithin Vasisth" - role: masters-alum - thesis: "Lifelong learning of concepts in CRAFT" - thesis_link: "https://papyrus.bib.umontreal.ca/xmlui/handle/1866/24335" - image: /img/people/nithin.jpg - -breandan: - display_name: "Breandan Considine" - role: masters-alum - thesis: "Programming tools for intelligent systems" - thesis_link: "https://papyrus.bib.umontreal.ca/xmlui/handle/1866/24310" - current_role: "PhD student at McGill" - coadvisor: "Michalis Famelis" - image: /img/people/breandan.jpg - -bhairav: - display_name: "Bhairav Mehta" - role: masters-alum - thesis: "On learning and generalization in unstructured task spaces" - thesis_link: "https://papyrus.bib.umontreal.ca/xmlui/handle/1866/24310" - current_role: "PhD student at MIT" - image: /img/people/bhairav.jpg - -anthony: - display_name: "Anthony Courchesne" - role: masters-alum - thesis: "On quantifying the value of simulation for training and evaluating robotic agents" - thesis_link: - image: /img/people/anthony.jpg - webpage: "https://helium.sparkpi.ca/" - current_role: Project manager at Institut du Vehicule Innovant (IVI) - - -dhaivat: - display_name: "Dhaivat Bhatt" - role: masters-alum - thesis: "Variational aleatoric uncertainty calibration in neural regression" - thesis_link: - current_role: Research engineer at Samsung - image: /img/people/dhaivat.jpeg - -rey: - display_name: "Rey Reza Wiyatno" - role: masters-alum - thesis: "Lifelong Topological Visual Navigation" - thesis_link: - current_role: - image: /img/people/rey.jpg - webpage: "http://rrwiyatn.github.io/" - - - - -kaustubh: - display_name: "Kaustubh Mani" - role: intern-alum - current_role: "PhD student at the University of Montreal" - -nikhil: - display_name: "Nikhil Varma Keetha" - role: intern-alum - current_role: - -harsha: - display_name: "Sai Sree Harsha" - role: intern-alum - current_role: - -abhishek: - display_name: "Abhishek Jain" - role: intern-alum - -sharath: - display_name: "Sharath Chandra Raparthy" - role: intern-alum - current_role: "PhD student at the University of Montreal" - -mark: - display_name: "Mark Van der Merwe" - role: intern-alum - current_role: "PhD student at the University of Michigan" - webpage: "https://mvandermerwe.github.io/" - -amrut: - display_name: "Amrut Sarangi" - role: intern-alum - - -rohan: - display_name: "Rohan Raj" - role: intern-alum - - -waleed: - display_name: "Waleed Khamies" - role: intern-alum - image: /img/people/waleed.jpg - webpage: "https://khamies.github.io/about" - -zihan: - display_name: "Zihan Wang" - role: intern-alum - image: /img/people/zihan.jpg - current_role: "Master's student at Stanford" - -homanga: - display_name: "Homanga Bharadhwaj" - role: intern-alum - current_role: "PhD student at the University of Toronto" - -adam: - display_name: "Adam Sigal" - role: intern-alum - current_role: "PhD student at McGill" - -sarthak: - display_name: "Sarthak Sharma" - role: intern-alum - current_role: "AI/ML Engineer at Verisk AI Lab" - - 
-keehong: - display_name: "Keehong Seo" - role: collab - -yoshua: - display_name: "Yoshua Bengio" - role: collab - -teddy: - display_name: "Teddy Ort" - role: collab - -daniela: - display_name: "Daniela Rus" - role: collab - -chris: - display_name: "Chris Pal" - role: collab - -michalis: - display_name: "Michalis Famelis" - role: collab - -sarath: - display_name: "Sarath Chandar" - role: collab - -ganesh: - display_name: "Ganesh Iyer" - role: collab - webpage: "https://epiception.github.io" - -soroush: - display_name: "Soroush Saryazdi" - role: collab - webpage: "https://saryazdi.github.io/" - -madhav: - display_name: "K. Madhava Krishna" - role: collab - webpage: "https://robotics.iiit.ac.in" - -anqi: - display_name: "Anqi Xu" - role: collab - webpage: "https://anqixu.me/" - -hanju: - display_name: "Hanju Lee" - role: collab - webpage: "https://www.linkedin.com/in/lee-hanju-1848323/?originalSubdomain=jp" - -agia: - display_name: Christopher Agia - role: collab - webpage: https://agiachris.github.io/ - -khodeir: - display_name: Mohamed Khodeir - role: collab - webpage: https://www.linkedin.com/in/khodeir/?originalSubdomain=ca - -miksik: - display_name: Ondrej Miksik - role: collab - webpage: https://www.microsoft.com/en-us/research/people/onmiksik/ - -vineet: - display_name: Vibhav Vineet - role: collab - webpage: http://vibhavvineet.info/ - -mukadam: - display_name: Mustafa Mukadam - role: collab - webpage: https://www.mustafamukadam.com/ - -shkurti: - display_name: Florian Shkurti - role: collab - webpage: http://www.cs.toronto.edu/~florian/ - -qiao: - display_name: Qiao Gu - role: collab - webpage: https://georgegu1997.github.io/ - -mohd: - display_name: Mohd Omama - role: collab - webpage: https://scholar.google.com/citations?user=jFH3ShsAAAAJ&hl=en - -tao: - display_name: Tao Chen - role: collab - webpage: https://taochenshh.github.io/ - -alaa: - display_name: Alaa Maalouf - role: collab - webpage: https://www.csail.mit.edu/person/alaa-maalouf - -shuang: - display_name: Shuang Li - role: collab - webpage: https://people.csail.mit.edu/lishuang/ - -ayush: - display_name: Ayush Tewari - role: collab - webpage: https://ayushtewari.com/ - -tenenbaum: - display_name: Joshua B. Tenenbaum - role: collab - webpage: http://web.mit.edu/cocosci/josh.html - -celso: - display_name: Celso Miguel de Melo - role: collab - webpage: https://celsodemelo.net/ - -torralba: - display_name: Antonio Torralba - role: collab - webpage: https://groups.csail.mit.edu/vision/torralbalab/ - - -macklin: - display_name: Miles Macklin - role: collab - - - -voleti: - display_name: Vikram Voleti - role: collab - - -petrini: - display_name: Linda Petrini - role: collab - - -weiss: - display_name: Martin Weiss - role: collab - - -parent: - display_name: Jerome Parent-Levesque - role: collab - - -xie: - display_name: Kevin Xie - role: collab - - - -nowrouzerzahrai: - display_name: Derek Nowrouzerzahrai - role: collab - - -fidler: - display_name: Sanja Fidler - role: collab - - -erleben: - display_name: Kenny Erleben - role: collab - -yadav: - display_name: Karmesh Yadav - role: collab diff --git a/_events/aido-6.md b/_events/aido-6.md deleted file mode 100644 index 81d25df..0000000 --- a/_events/aido-6.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: The 6th AI Driving Olympics Competition - -notitle: false - -description: | - The 6th iteration of the AI Driving Olympics, taking place virtually at NeurIPS 2021. 
The AI-DO serves to benchmark the state of the art of artificial intelligence in autonomous driving by providing standardized simulation and hardware environments for tasks related to multi-sensory perception and embodied AI. - -people: - - liam - - charlie - - mostafa - -image: /img/events/aido-6.jpg -link: "https://driving-olympics.ai" -date: 2021-12-10 --- - -# The AI Driving Olympics 6 - - - - -Duckietown traditionally hosts AI-DO competitions biannually, with finals events held at machine learning and robotics conferences such as the International Conference on Robotics and Automation (ICRA) and the Neural Information Processing Systems (NeurIPS). - -AI-DO 6 will be in conjunction with NeurIPS 2021 and have three leagues: urban driving, advanced perception, and racing. The winter champions will be announced during NeurIPS 2021, on December 10, 2021! diff --git a/_events/ecorl.md deleted file mode 100644 index 456f2c0..0000000 --- a/_events/ecorl.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Workshop on the Ecological Theory of RL - -notitle: false - -description: | - Workshop at NeurIPS 2021 - -people: - - manfred - -image: /img/events/ecorl.jpeg -link: "https://sites.google.com/view/ecorl2021/" -date: 2021-12-14 ---- - diff --git a/_events/iros2020.md deleted file mode 100644 index a05a634..0000000 --- a/_events/iros2020.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: IROS 2020 Workshop on Benchmarking Progress in Autonomous Driving - -notitle: false - -description: | - Autonomous driving has seen incredible progress of late. Recent workshops at top conferences in robotics, computer vision, and machine learning have primarily showcased the technological advancements in the field. This workshop provides a platform to investigate and discuss the methods by which progress in autonomous driving is evaluated, benchmarked, and verified. - -people: - - liam - -image: /img/events/iros2020-bpad.jpg -link: "https://montrealrobotics.ca/driving-benchmarks/" -date: 2020-10-25 ---- - diff --git a/_events/iros2021.md deleted file mode 100644 index 1404dcb..0000000 --- a/_events/iros2021.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: IROS 2021 Workshop on Evaluating the Broader Impacts of Self-Driving Cars - -notitle: false - -description: | - The primary objective of this workshop is to stimulate a conversation between roboticists, who focus on the development and implementation of autonomy algorithms, and regulators, economists, psychologists, and lawyers who are experts on the broader impacts that self-driving vehicles will have on society. - -people: - - liam - -image: /img/events/iros2021-bisc.jpg -link: "https://montrealrobotics.ca/broader-impacts-self-driving/" -date: 2021-09-27 ---- - -# IROS 2021 Workshop on Evaluating the Broader Impacts of Self-Driving Cars - -Self-driving cars have received significant attention in the last decade, and arguably have the potential to be the most impactful robotics application to date. The question that is usually asked by the public is “when are self-driving cars going to be here?” On one side, entrusting the entire driving problem to an autonomous agent seems frustratingly daunting. On the other side, we have started to see real deployments of autonomous vehicles in limited capacities, so perhaps there is reason for hope. - -Autonomous driving advancements are typically evaluated along well-defined, but potentially myopic performance criteria.
These metrics are reasonable in the sense that they do give us some quantitative measure that we can use for comparison. However, the true potential impact of this technology reaches far beyond these relatively simplistic measures. In this workshop we will take a broader perspective with respect to evaluating the progress that we have made towards making self-driving a reality. In the process, we will focus particularly on aspects of the integration of this technology that are rarely covered in technical papers on the subject. Specifically, we will focus on the following three objectives: - -The primary objective of this workshop is to stimulate a conversation between roboticists, who focus on the development and implementation of autonomy algorithms, and regulators, economists, psychologists, and lawyers who are experts on the broader impacts that self-driving vehicles will have on society. We feel that it is critical to foster a community of researchers and practitioners whose expertise extends beyond the algorithmic challenges of realizing self-driving vehicles. As roboticists, we are ill-equipped to understand the broad impacts of this technology in areas that include ethics, philosophy, psychology, regulations, legal policy, and risk, to name a few, and it is critical that technological development is guided by such impacts. We will achieve our objective by inviting speakers and panelists who are experts in these adjacent fields to stimulate a broader conversation around this technology. This objective would be considered achieved if participants take the new perspectives they were exposed to and consider them in their own specific field of interest. For roboticists, this means explicitly considering these broader issues in the development of their algorithms. A stretch goal would be to spawn research collaborations between roboticists and researchers from these adjacent fields. diff --git a/_events/learningseriesfall2020.md deleted file mode 100644 index 1ac43f1..0000000 --- a/_events/learningseriesfall2020.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Summer 2020 Robot Learning Seminar Series - -notitle: false - -description: | - The Robotics and Embodied AI Lab and Mila are hosting the Winter 2021 edition of robot learning seminar series; a set of virtual talks by researchers in this field. Speakers in this inaugural session include Stefani Tellex, Rika Antonova, Gunshi Gupta, Igor Gilitschenski, and Bhairav Mehta.
- -people: - - krishna - - florian - - dishank - - rey - - zhen - - liam - -image: /img/events/learningseries.jpg -link: "https://montrealrobotics.ca/robotlearningseries/fall2020/" -date: 2021-01-15 ---- - diff --git a/_events/learningseriessummer2020.md b/_events/learningseriessummer2020.md deleted file mode 100644 index cbc2485..0000000 --- a/_events/learningseriessummer2020.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Fall 2020 Robot Learning Seminar Series - -notitle: false - -description: | - The Robotics and Embodied AI Lab and Mila are hosting the Winter 2021 edition of robot learning seminar series; a set of virtual talks by researchers in this field. Speakers this session include Florian Shkurti, Valentin Peretroukhin, Ankur Handa, Shubham Tulsiani, Ronald Clark, Lerrel Pinto, Mustafa Mukadam, Shuran Song and Angela Shoellig. - -people: - - krishna - - florian - - dishank - - rey - - zhen - - liam - -image: /img/events/learningseries.jpg -link: "https://montrealrobotics.ca/robotlearningseries/summer2020" -date: 2020-09-4 ---- - diff --git a/_events/learningserieswinter2021.md b/_events/learningserieswinter2021.md deleted file mode 100644 index 0e0b754..0000000 --- a/_events/learningserieswinter2021.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Winter 2021 Robot Learning Seminar Series - -notitle: false - -description: | - The Robotics and Embodied AI Lab and Mila are hosting the Winter 2021 edition of robot learning seminar series; a set of virtual talks by researchers in this field. Speakers this session include Steven Waslander, Animesh Garg, Sylvia Herbert, Georgia Chalvatzaki, Deepak Pathak, Pulkit Agrawal, Lilian Weng, Kelsey Allen, Manolis Savva, and Jiajun Wu. - -people: - - krishna - - florian - - dishank - - rey - - zhen - - liam - -image: /img/events/learningseries.jpg -link: "https://montrealrobotics.ca/robotlearningseries/" -date: 2021-01-15 ---- - diff --git a/_events/mrss.md b/_events/mrss.md deleted file mode 100644 index b84b084..0000000 --- a/_events/mrss.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Montreal Robotics Summer School - -notitle: false - -description: | - Robotics is a rapidly growing field with interest from around the world. This summer school offers tutorials and lectures on state-of-the-art machine learning methods for training the next generation of learning robots. This summer school is an extension supported by the many robotics groups around Montreal. - -people: - - glen - - florian - - steven - -image: /img/events/mrss.png -link: "https://www.notion.so/fracturedplane/Montreal-Robotics-Summer-School-e9c969cc262b4f85aa17e5808a51e225" -date: 2022-08-22 ---- - - diff --git a/_events/physical-reasoning.md b/_events/physical-reasoning.md deleted file mode 100644 index 84939d7..0000000 --- a/_events/physical-reasoning.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Workshop on Physical Reasoning and Inductive Biases for the Real World - -notitle: false - -description: | - Workshop at NeurIPS 2021 - -people: - - krishna - -image: /img/events/physical-reasoning.jpg -link: "https://physical-reasoning.github.io/" -date: 2021-12-14 ---- \ No newline at end of file diff --git a/_pages/publications.md b/_pages/publications.md deleted file mode 100644 index 43cac85..0000000 --- a/_pages/publications.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -layout: default -permalink: /publications/ -title: Publications -description: Publications (reverse chronological order) -years: [2023, 2022, 2021, 2020, 2019, 2018, 2017, 2016] ---- -
-Publications
-
-{% for y in page.years %}
-{{y}}
-{% bibliography -f papers -q @*[year={{y}}]* %} -{% endfor %} diff --git a/_posts/2018-04-20-cvpr.md b/_posts/2018-04-20-cvpr.md deleted file mode 100644 index d0a2af6..0000000 --- a/_posts/2018-04-20-cvpr.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: post -shortnews: true -icon: newspaper ---- - -Paper on self-supervised visual odometry estimation accepted to CVPR workshop on SLAM. diff --git a/_posts/2018-09-05-welcomephd.md b/_posts/2018-09-05-welcomephd.md deleted file mode 100644 index 57ce5a3..0000000 --- a/_posts/2018-09-05-welcomephd.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: post -shortnews: true -icon: newspaper ---- - -Manfred Diaz and Ruixiang Zhang join the group as PhD students ... diff --git a/_posts/2018-11-28-image.md b/_posts/2018-11-28-image.md deleted file mode 100644 index a2588ac..0000000 --- a/_posts/2018-11-28-image.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: post -shortnews: true -icon: newspaper ---- - -Dhaivat Bhatt just joined our group as an intern. Welcome! diff --git a/_posts/2019-08-01-dal.md b/_posts/2019-08-01-dal.md deleted file mode 100644 index 51075d7..0000000 --- a/_posts/2019-08-01-dal.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: post -shortnews: true -icon: newspaper ---- - -Our paper _Deep Active Localization_ got accepted into Robotics and Automation Letters \ No newline at end of file diff --git a/_posts/2019-09-01-welcomeinterns.md b/_posts/2019-09-01-welcomeinterns.md deleted file mode 100644 index 2452d92..0000000 --- a/_posts/2019-09-01-welcomeinterns.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: post -shortnews: true -icon: newspaper ---- - -Sharath, Mark, Amrut, Rohan, and Dishank joined the group as interns. Welcome! diff --git a/_posts/2019-09-01-welcomestudents.md b/_posts/2019-09-01-welcomestudents.md deleted file mode 100644 index 8df6497..0000000 --- a/_posts/2019-09-01-welcomestudents.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: post -shortnews: true -icon: newspaper ---- - -Dhaivat, Rey, and Philippe joined the group as Masters' students. Welcome! diff --git a/_posts/2019-09-10-adrpaper.md b/_posts/2019-09-10-adrpaper.md deleted file mode 100644 index ea22f8a..0000000 --- a/_posts/2019-09-10-adrpaper.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: post -shortnews: true -icon: newspaper ---- - -The "Active Domain Randomization" paper got accepted to CoRL 2019. Congrats Bhairav, Manfred, and Florian. diff --git a/_posts/2020-01-20-gradslam-icra.md b/_posts/2020-01-20-gradslam-icra.md deleted file mode 100644 index 209a376..0000000 --- a/_posts/2020-01-20-gradslam-icra.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: post -shortnews: true -icon: newspaper ---- - -Check out our new ICRA 2020 paper [gradSLAM: Dense SLAM meets automatic differentiation](https://arxiv.org/abs/1910.10672) on fully differentiable dense SLAM: [Project page](http://montrealrobotics.ca/gradSLAM/), [Video](http://www.youtube.com/watch?feature=player_embedded&v=2ygtSJTmo08). diff --git a/_posts/2020-06-05-maplite-award.md b/_posts/2020-06-05-maplite-award.md deleted file mode 100644 index 3d56460..0000000 --- a/_posts/2020-06-05-maplite-award.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -layout: post -shortnews: true -icon: newspaper ---- - -Our paper [MapLite: Autonomous intersection navigation without detailed prior maps] was adjudged _best Robotics and Automation Letters (RAL) paper for 2019_! Check it out [here](https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=8936918). And, here's a short [video abstract](https://youtu.be/P6Kk5pB2gF4). 
diff --git a/_posts/2020-06-30-gunshi-graduates.md deleted file mode 100644 index 4b9193e..0000000 --- a/_posts/2020-06-30-gunshi-graduates.md +++ /dev/null @@ -1,7 +0,0 @@ --- -layout: post -shortnews: true -icon: newspaper --- - -Gunshi Gupta successfully completes her M.Sc. and joins Wayve as a deep learning researcher! diff --git a/_posts/2020-09-10-robotlearningseries.md deleted file mode 100644 index f9330bb..0000000 --- a/_posts/2020-09-10-robotlearningseries.md +++ /dev/null @@ -1,7 +0,0 @@ --- -layout: post -shortnews: true -icon: newspaper --- - -[Robot learning seminar series](http://montrealrobotics.ca/robotlearningseries/) launched! diff --git a/_posts/2020-10-10-neurips.md deleted file mode 100644 index 4d65b99..0000000 --- a/_posts/2020-10-10-neurips.md +++ /dev/null @@ -1,7 +0,0 @@ --- -layout: post -shortnews: true -icon: newspaper --- - -Two papers accepted to NeurIPS 2020 (one of them an oral - top 1.1%). Congratulations Gunshi and Ruixiang! diff --git a/_posts/2020-10-15-lamaml.md deleted file mode 100644 index 5c369d4..0000000 --- a/_posts/2020-10-15-lamaml.md +++ /dev/null @@ -1,7 +0,0 @@ --- -layout: post -shortnews: true -icon: newspaper --- - -Check out our new NeurIPS 2020 Oral paper [La-MAML: Look-Ahead Meta-Learning for Continual Learning](https://arxiv.org/abs/2007.13904) [[Code](https://github.com/montrealrobotics/La-MAML)], [[Short Video](https://www.youtube.com/watch?v=HzewyVu8LaY)]. diff --git a/_posts/2020-10-30-irosworkshop.md deleted file mode 100644 index 4709675..0000000 --- a/_posts/2020-10-30-irosworkshop.md +++ /dev/null @@ -1,7 +0,0 @@ --- -layout: post -shortnews: true -icon: newspaper --- - -We organized an IROS workshop on [Benchmarking progress in autonomous driving](http://montrealrobotics.ca/driving-benchmarks/) diff --git a/_posts/2020-11-30-gradslam.md deleted file mode 100644 index 5f93ee8..0000000 --- a/_posts/2020-11-30-gradslam.md +++ /dev/null @@ -1,7 +0,0 @@ --- -layout: post -shortnews: true -icon: newspaper --- - -We released [gradslam](http://gradslam.github.io/) - a differentiable dense SLAM framework for deep learning. Check it out! diff --git a/_posts/2020-12-05-krishna-fellowship.md deleted file mode 100644 index 523ca8e..0000000 --- a/_posts/2020-12-05-krishna-fellowship.md +++ /dev/null @@ -1,7 +0,0 @@ --- -layout: post -shortnews: true -icon: newspaper --- - -Krishna [won an NVIDIA fellowship](https://blogs.nvidia.com/blog/2020/12/04/graduate-fellowships-gpu-computing-research/) for 2021-22. Congratulations! diff --git a/_projects/01-gradslam.md deleted file mode 100644 index 85e53d7..0000000 --- a/_projects/01-gradslam.md +++ /dev/null @@ -1,26 +0,0 @@ --- -title: gradslam - -notitle: false - -description: | - gradslam is an open-source framework providing differentiable building blocks for simultaneous localization and mapping (SLAM) systems. We enable the usage of dense SLAM subsystems from the comfort of PyTorch.
- -people: - - krishna - - liam - -collaborators: - - ganesh - - soroush - - -layout: project -image: "https://gradslam.github.io/images/pointfusiondemo.gif" -link: https://gradslam.github.io/ -last-updated: 2020-12-06 ---- - -## gradslam - -gradslam is an open-source framework providing differentiable building blocks for simultaneous localization and mapping (SLAM) systems. We enable the usage of dense SLAM subsystems from the comfort of PyTorch. diff --git a/_projects/adr.md b/_projects/adr.md deleted file mode 100644 index ca0d78e..0000000 --- a/_projects/adr.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Active Domain Randomization - -description: Making sim-to-real transfer more efficient - -people: - - bhairav - - florian - - manfred - - liam - - -collaborators: - - chris - -layout: project -image: /img/papers/adr.gif - -last-updated: 2019-06-28 ---- - -## Active Domain Randomization - -Domain randomization is a popular technique for improving domain transfer, often used in a zero-shot setting when the target domain is unknown or cannot easily be used for training. In this work, we empirically examine the effects of domain randomization on agent generalization. Our experiments show that domain randomization may lead to suboptimal, high-variance policies, which we attribute to the uniform sampling of environment parameters. We propose Active Domain Randomization, a novel algorithm that learns a parameter sampling strategy. Our method looks for the most informative environment variations within the given randomization ranges by leveraging the discrepancies of policy rollouts in randomized and reference environment instances. We find that training more frequently on these instances leads to better overall agent generalization. Our experiments across various physics-based simulated and real-robot tasks show that this enhancement leads to more robust, consistent policies. diff --git a/_projects/conceptfusion.md b/_projects/conceptfusion.md deleted file mode 100644 index ff5062c..0000000 --- a/_projects/conceptfusion.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: "ConceptFusion: Open-set Multimodal 3D Mapping" - -# status: active - -notitle: false - -description: | - ConceptFusion builds open-set 3D maps that can be queried via text, click, image, or audio. Given a series of RGB-D images, our system builds a 3D scene representation, that is inherently multimodal by leveraging foundation models such as CLIP, and therefore doesn't require any additional training or finetuning. - -people: - - ali-k - - liam - -collaborators: - - krishna - - qiao - - mohd - - tao - - alaa - - shuang - - ganesh - - soroush - - nikhil - - ayush - - tenenbaum - - celso - - madhav - - shkurti - - torralba - -layout: project -image: /img/papers/conceptfusion.gif -link: https://concept-fusion.github.io/ -last-updated: 2023-06-16 ---- - -## ConceptFusion: Open-set Multimodal 3D Mapping - -Building 3D maps of the environment is central to robot navigation, planning, and interaction with objects in a scene. Most existing approaches that integrate semantic concepts with 3D maps largely remain confined to the closed-set setting: they can only reason about a finite set of concepts, pre-defined at training time. Further, these maps can only be queried using class labels, or in recent work, using text prompts. 
- -We address both these issues with ConceptFusion, a scene representation that is: (i) fundamentally open-set, enabling reasoning beyond a closed set of concepts, and (ii) inherently multi-modal, enabling a diverse range of possible queries to the 3D map, from language, to images, to audio, to 3D geometry, all working in concert. ConceptFusion leverages the open-set capabilities of today’s foundation models pre-trained on internet-scale data to reason about concepts across modalities such as natural language, images, and audio. We demonstrate that pixel-aligned open-set features can be fused into 3D maps via traditional SLAM and multi-view fusion approaches. This enables effective zero-shot spatial reasoning, not needing any additional training or finetuning, and retains long-tailed concepts better than supervised approaches, outperforming them by a margin of more than 40% on 3D IoU. We extensively evaluate ConceptFusion on a number of real-world datasets, simulated home environments, a real-world tabletop manipulation task, and an autonomous driving platform. We showcase new avenues for blending foundation models with 3D open-set multimodal mapping. diff --git a/_projects/ctcnet.md b/_projects/ctcnet.md deleted file mode 100644 index d361201..0000000 --- a/_projects/ctcnet.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Self-supervised visual odometry estimation - -description: | - A self-supervised deep network for visual odometry estimation from monocular imagery. - -people: - - krishna - - gunshi - - liam - -collaborators: - - ganesh - - madhav - -layout: project -link: "https://krrish94.github.io/CTCNet-release/" -image: /img/events/ctcnet.png -last-updated: 2019-01-01 ---- diff --git a/_projects/dal.md b/_projects/dal.md deleted file mode 100644 index 3a407d7..0000000 --- a/_projects/dal.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Deep Active Localization - -description: | - Learned active localization, implemented on "real" robots. - -people: - - sai - - dhaivat - - krishna - - vincent - - liam - -collaborators: - - keehong - -layout: project -last-updated: 2018-11-27 - -image: /img/papers/dal.jpg ---- - - -# Deep Active Localization - -Active localization is the problem of generating robot actions that allow it to maximally disambiguate its pose within a reference map. Traditional approaches to this use an information-theoretic criterion for action selection and hand-crafted perceptual models. In this work we propose an end-to-end differentiable method for learning to take informative actions that is trainable entirely in simulation and then transferable to real robot hardware with zero refinement. The system is composed of two modules: a convolutional neural network for perception, and a deep reinforcement learned planning module. We introduce a multi-scale approach to the learned perceptual model since the accuracy needed to perform action selection with reinforcement learning is much less than the accuracy needed for robot control. We demonstrate that the resulting system outperforms systems that use the traditional approach for either perception or planning. We also demonstrate our approach's robustness to different map configurations and other nuisance parameters through the use of domain randomization in training. The code is also compatible with the OpenAI gym framework, as well as the Gazebo simulator.
diff --git a/_projects/fcal.md b/_projects/fcal.md deleted file mode 100644 index b8f4293..0000000 --- a/_projects/fcal.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: f-Cal - Calibrated aleatoric uncertainty estimation from neural networks for robot perception - -notitle: false - -description: | - f-Cal is a calibration method for probabilistic regression networks. Typical Bayesian neural networks are shown to be overconfident in their predictions. To use the predictions for downstream tasks, reliable and calibrated uncertainty estimates are critical. f-Cal is a straightforward loss function, which can be employed to train any probabilistic neural regressor, and obtain calibrated uncertainty estimates. - -people: - - dhaivat - - kaustubh - - dishank - - krishna - - liam - -collaborators: - - hanju - - -layout: project -image: /img/papers/f_cal_od_gif1.gif -last-updated: 2022-04-20 -link: https://f-cal.github.io/ ---- - -## f-Cal - diff --git a/_projects/gradsim.md b/_projects/gradsim.md deleted file mode 100644 index 4e64160..0000000 --- a/_projects/gradsim.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -title: gradsim - -notitle: false - -description: | - gradSim is a framework that overcomes the dependence on 3D supervision by leveraging differentiable multiphysics simulation and differentiable rendering to jointly model the evolution of scene dynamics and image formation. - -people: - - krishna - - florian - - breandan - - liam - - -collaborators: - - macklin - - voleti - - petrini - - weiss - - parent - - xie - - erleben - - shkurti - - nowrouzerzahrai - - fidler - - -layout: project -image: /img/papers/walker.gif -link: https://gradslam.github.io/ -last-updated: 2021-03-29 ---- - -## gradsim - diff --git a/_projects/ivrl.md b/_projects/ivrl.md deleted file mode 100644 index 2eaae6a..0000000 --- a/_projects/ivrl.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Inverse Variance Reinforcement Learning -status: active - -notitle: false - -description: | - Improving sample efficiency in deep reinforcement learning by mitigating the impacts of heteroscedastic noise in the bootstrapped target using uncertainty estimation. - -people: - - vincent - - kaustubh - - waleed - - liam - -layout: project -image: /img/papers/mai2022ivrl_resized.jpg -link: https://montrealrobotics.ca/ivrl/ -last-updated: 2022-01-12 ---- - -## Inverse Variance Reinforcement Learning - -Most robotics problems can be written as (Partially Observable) Markov Decision Processes (MDPs), with discrete or continuous observation and action spaces. Deep Reinforcement Learning (DRL) is a powerful tool to find an optimal policy for these processes, based on experience acquired during the training process. The training of a DRL agent requires many trajectories, which can be arduous and expensive to produce in the real world. Indeed, the real world is not parallelizable, may require human efforts to reset, and comes with risks for the robot and the environment. Gathering sufficient experience is therefore one of the most important challenges when applying DRL to robotics. *The objective of this project is to reduce the number of samples necessary to train a DRL agent on a robot.* - -[Figure: a diagram representing the generation process of the noisy target.] - -DRL algorithms are complex processes. An important part of most model-free algorithms is learning the value function of a given state or state-action pair, i.e., the expected return given the current policy.
To do so, deep supervised learning components are used, where the input is the state(-action), and the label is called the target. The target T is a noisy sample of the value. Often, it is computed using the reward r and the next state s' sampled from experience, the next action a' based on s' and the current policy, and the value Q of the next state-action pair, which is bootstrapped from the current value estimator (this is the Temporal Difference target, T = r + γQ(s', a'), with γ the discount factor). The noise on the target negatively impacts the learning process: the networks learn from wrong data, which entails slower learning and instability. - -The key element in this project is the fact that the noise affecting the target, i.e. its difference from the true and unique value function, is heteroscedastic. This means that the distribution it is sampled from changes for each input and training step. Sometimes, this distribution has a very low variance: the target is close to the value. Sometimes, on the other hand, the target is subject to a lot of noise and does not contain useful information with respect to the value. Therefore, the value estimation task in DRL is a case of heteroscedastic regression. - -## Projects - -### Batch Inverse-Variance Weighting for Deep Heteroscedastic Regression - -Noisy labels slow the learning process in regression: the first part of this project was to show that the effect of noisy labels can be mitigated under the hypothesis that we know the variance of the noise distribution of each label. How can we include this additional information in heteroscedastic regression? Intuitively, we should give more weight to the labels we trust more. In linear regression, the Gauss-Markov theorem shows that the optimal solution is to weigh each sample by the inverse of the variance of its label noise. We show that adapting inverse-variance weighting to gradient-based optimization methods significantly improves the performance of the learning process (a minimal sketch of such a weighted loss follows the figure below). Our paper, [Batch Inverse-Variance Weighting: Deep Heteroscedastic Regression](https://arxiv.org/abs/2107.04497) (BIV), was presented at the [Uncertainty and Robustness in Deep Learning](https://sites.google.com/view/udlworkshop2021/home?authuser=0) workshop at ICML 2021.
[Figure: learning curves in which BIV outperforms the L2 loss and other baselines. Caption: BIV improves the learning performance with noisy labels compared to the L2 loss. Source: Batch Inverse-Variance Weighting: Deep Heteroscedastic Regression]
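To make the weighting concrete, here is a minimal sketch of a batch inverse-variance weighted regression loss written for this page (an illustration, not the released BIV code). It assumes every label comes with an estimate of its noise variance, and the `eps` stabilizer is a hypothetical choice.

```python
import torch

def biv_loss(pred, target, target_var, eps=1e-8):
    """Illustrative batch inverse-variance weighted MSE.

    pred, target, target_var: 1-D tensors of equal length, where target_var
    holds the (known or estimated) noise variance of each label.
    """
    # Gauss-Markov-style weights: low-variance labels are trusted more.
    weights = 1.0 / (target_var + eps)
    # Normalize over the minibatch so the loss scale does not depend on the
    # overall magnitude of the variances.
    weights = weights / weights.sum()
    return torch.sum(weights * (pred - target) ** 2)
```

Compared to a plain L2 loss, high-variance labels contribute proportionally less to the gradient, which is the behaviour BIV relies on to mitigate heteroscedastic label noise.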
- -### Inverse-Variance Reinforcement Learning - -See project page: https://montrealrobotics.ca/ivrl/ - -The second part of the project was to use this weighting scheme in a DRL setting. For this work, the challenge was to estimate the uncertainty of the target. A systematic analysis of the sources of uncertainty in the target generation process justifies the use of deep variance ensembles. These are used to estimate the variance due to the stochasticity of the environment and the policy, as well as the predictive uncertainty of the value prediction used to bootstrap the target (a rough sketch of such an ensemble-based variance estimate follows the figure below). As the variance output by these deep ensembles is also the result of a training process, the uncertainty estimation is subject to complex dynamics. We show that the BIV weighting scheme is robust to changes of scale in the variance estimation, and that combining BIV with deep variance ensembles in DRL algorithms such as DQN and SAC leads to significant improvements in sample efficiency. This framework, called Inverse-Variance Reinforcement Learning (IV-RL), is presented in our [Sample Efficient Deep Reinforcement Learning via Uncertainty Estimation](https://openreview.net/forum?id=vrW3tvDfOJQ) submission to ICLR 2022.
[Figure: learning curves in which IV-SAC outperforms DQN and other ensemble baselines. Caption: IV-RL on SAC improves the learning performance and the sample efficiency compared to other ensemble-based baselines. Source: Sample Efficient Deep Reinforcement Learning via Uncertainty Estimation]
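As a rough illustration of where such target variances could come from in the DRL setting, the hypothetical sketch below uses the disagreement of an ensemble of Q-networks as the variance fed to the weighted loss sketched earlier; the full IV-RL estimate described in the paper also accounts for environment and policy stochasticity, which this sketch omits.

```python
import torch

def td_target_with_variance(q_ensemble, reward, next_state, next_action, gamma=0.99):
    """Illustrative sketch: bootstrap a TD target from each ensemble member and
    use the spread across members as a variance estimate for that target."""
    with torch.no_grad():
        # Each member is assumed to map a (state, action) batch to a [batch] tensor of values.
        q_next = torch.stack([q(next_state, next_action) for q in q_ensemble])  # [ensemble, batch]
    target = reward + gamma * q_next.mean(dim=0)
    target_var = q_next.var(dim=0)
    return target, target_var
```

These targets and variances would then replace the uniform MSE target in the value-function update, for example by plugging them into the inverse-variance weighted loss sketched above.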
diff --git a/_projects/lamaml.md b/_projects/lamaml.md deleted file mode 100644 index ba7a007..0000000 --- a/_projects/lamaml.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: La-MAML - -notitle: false - -description: | - Look-ahead meta-learning for continual learning - -people: - - gunshi - - liam - - -collaborators: - - yadav - - -layout: project -image: "https://mila.quebec/wp-content/uploads/2020/11/lamaml_jpg.gif" -link: https://mila.quebec/en/article/la-maml-look-ahead-meta-learning-for-continual-learning/ -last-updated: 2020-11-19 ---- - -## La-MAML - diff --git a/_projects/ltvn.md b/_projects/ltvn.md deleted file mode 100644 index ea5ee16..0000000 --- a/_projects/ltvn.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -title: Lifelong Topological Visual Navigation -# status: active - -notitle: false - -description: | - A learning-based topological visual navigation method with graph update strategies that improves lifelong navigation performance over time. - -people: - - rey - - liam - -collaborators: - - anqi - -layout: project -image: /img/papers/ltvn.gif -link: https://montrealrobotics.ca/ltvn/ -last-updated: 2021-11-26 ---- - -## Lifelong Topological Visual Navigation - -See project page: https://montrealrobotics.ca/ltvn/ diff --git a/_projects/o4a.md b/_projects/o4a.md deleted file mode 100644 index 294577c..0000000 --- a/_projects/o4a.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: One-4-All - Neural Potential Fields for Embodied Navigation -# status: active - -notitle: false - -description: | - An end-to-end fully parametric method for image-goal navigation that leverages self-supervised and manifold learning to replace a topological graph with a geodesic regressor. During navigation, the geodesic regressor is used as an attractor in a potential function defined in latent space, allowing to frame navigation as a minimization problem. - -people: - - sacha - - miguel - - liam - -layout: project -image: /img/papers/o4a.gif -link: https://montrealrobotics.ca/o4a/ -last-updated: 2023-03-16 ---- - -## One-4-All: Neural Potential Fields for Embodied Navigation - -A fundamental task in robotics is to navigate between two locations. In particular, real-world navigation can require long-horizon planning using high-dimensional RGB images, which poses a substantial challenge for end-to-end learning-based approaches. Current semi-parametric methods instead achieve long-horizon navigation by combining learned modules with a topological memory of the environment, often represented as a graph over previously collected images. However, using these graphs in practice typically involves tuning a number of pruning heuristics to avoid spurious edges, limit runtime memory usage and allow reasonably fast graph queries. In this work, we present One-4-All (O4A), a method leveraging self-supervised and manifold learning to obtain a graph-free, end-to-end navigation pipeline in which the goal is specified as an image. Navigation is achieved by greedily minimizing a potential function defined continuously over the O4A latent space. Our system is trained offline on non-expert exploration sequences of RGB data and controls, and does not require any depth or pose measurements. We show that O4A can reach long-range goals in 8 simulated Gibson indoor environments, and further demonstrate successful real-world navigation using a Jackal UGV platform. 
diff --git a/_projects/taskography.md b/_projects/taskography.md deleted file mode 100644 index 41da054..0000000 --- a/_projects/taskography.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Taskography - Evaluating robot task planning over large 3D scene graphs - -notitle: false - -description: | - Taskography is the first large-scale robotic task planning benchmark over 3DSGs. While most benchmarking efforts in this area focus on vision-based planning, we systematically study symbolic planning, to decouple planning performance from visual representation learning. - -people: - - krishna - - liam - -collaborators: - - agia - - khodeir - - miksik - - vineet - - mukadam - - shkurti - - -layout: project -image: /img/papers/3dsg.png -last-updated: 2021-07-20 -link: https://taskography.github.io ---- - -## Taskography - diff --git a/assets/css/style.css b/assets/css/style.css new file mode 100644 index 0000000..bc2033b --- /dev/null +++ b/assets/css/style.css @@ -0,0 +1,2883 @@ +/*! normalize.css v4.1.1 | MIT License | github.com/necolas/normalize.css */ +/** 1. Change the default font family in all browsers (opinionated). 2. Prevent adjustments of font size after orientation changes in IE and iOS. */ +html { font-family: sans-serif; /* 1 */ -ms-text-size-adjust: 100%; /* 2 */ -webkit-text-size-adjust: 100%; /* 2 */ } + +/** Remove the margin in all browsers (opinionated). */ +body { margin: 0; } + +/* HTML5 display definitions ========================================================================== */ +/** Add the correct display in IE 9-. 1. Add the correct display in Edge, IE, and Firefox. 2. Add the correct display in IE. */ +article, aside, details, figcaption, figure, footer, header, main, menu, nav, section { /* 1 */ display: block; } + +summary { display: list-item; } + +/** Add the correct display in IE 9-. */ +audio, canvas, progress, video { display: inline-block; } + +/** Add the correct display in iOS 4-7. */ +audio:not([controls]) { display: none; height: 0; } + +/** Add the correct vertical alignment in Chrome, Firefox, and Opera. */ +progress { vertical-align: baseline; } + +/** Add the correct display in IE 10-. 1. Add the correct display in IE. */ +template, [hidden] { display: none !important; } + +/* Links ========================================================================== */ +/** Remove the gray background on active links in IE 10. */ +a { background-color: transparent; /* 1 */ } + +/** Remove the outline on focused links when they are also active or hovered in all browsers (opinionated). */ +a:active, a:hover { outline-width: 0; } + +/* Text-level semantics ========================================================================== */ +/** 1. Remove the bottom border in Firefox 39-. 2. Add the correct text decoration in Chrome, Edge, IE, Opera, and Safari. */ +abbr[title] { border-bottom: none; /* 1 */ text-decoration: underline; /* 2 */ text-decoration: underline dotted; /* 2 */ } + +/** Prevent the duplicate application of `bolder` by the next rule in Safari 6. */ +b, strong { font-weight: inherit; } + +/** Add the correct font weight in Chrome, Edge, and Safari. */ +b, strong { font-weight: bolder; } + +/** Add the correct font style in Android 4.3-. */ +dfn { font-style: italic; } + +/** Correct the font size and margin on `h1` elements within `section` and `article` contexts in Chrome, Firefox, and Safari. */ +h1 { font-size: 2em; margin: 0.67em 0; } + +/** Add the correct background and color in IE 9-. 
*/ +mark { background-color: #ff0; color: #000; } + +/** Add the correct font size in all browsers. */ +small { font-size: 80%; } + +/** Prevent `sub` and `sup` elements from affecting the line height in all browsers. */ +sub, sup { font-size: 75%; line-height: 0; position: relative; vertical-align: baseline; } + +sub { bottom: -0.25em; } + +sup { top: -0.5em; } + +/* Embedded content ========================================================================== */ +/** Remove the border on images inside links in IE 10-. */ +img { border-style: none; } + +/** Hide the overflow in IE. */ +svg:not(:root) { overflow: hidden; } + +/* Grouping content ========================================================================== */ +/** 1. Correct the inheritance and scaling of font size in all browsers. 2. Correct the odd `em` font sizing in all browsers. */ +code, kbd, pre, samp { font-family: monospace, monospace; /* 1 */ font-size: 1em; /* 2 */ } + +/** Add the correct margin in IE 8. */ +figure { margin: 1em 40px; } + +/** 1. Add the correct box sizing in Firefox. 2. Show the overflow in Edge and IE. */ +hr { box-sizing: content-box; /* 1 */ height: 0; /* 1 */ overflow: visible; /* 2 */ } + +/* Forms ========================================================================== */ +/** 1. Change font properties to `inherit` in all browsers (opinionated). 2. Remove the margin in Firefox and Safari. */ +button, input, select, textarea { font: inherit; /* 1 */ margin: 0; /* 2 */ } + +/** Restore the font weight unset by the previous rule. */ +optgroup { font-weight: bold; } + +/** Show the overflow in IE. 1. Show the overflow in Edge. */ +button, input { /* 1 */ overflow: visible; } + +/** Remove the inheritance of text transform in Edge, Firefox, and IE. 1. Remove the inheritance of text transform in Firefox. */ +button, select { /* 1 */ text-transform: none; } + +/** 1. Prevent a WebKit bug where (2) destroys native `audio` and `video` controls in Android 4. 2. Correct the inability to style clickable types in iOS and Safari. */ +button, html [type="button"], [type="reset"], [type="submit"] { -webkit-appearance: button; /* 2 */ } + +/** Remove the inner border and padding in Firefox. */ +button::-moz-focus-inner, [type="button"]::-moz-focus-inner, [type="reset"]::-moz-focus-inner, [type="submit"]::-moz-focus-inner { border-style: none; padding: 0; } + +/** Restore the focus styles unset by the previous rule. */ +button:-moz-focusring, [type="button"]:-moz-focusring, [type="reset"]:-moz-focusring, [type="submit"]:-moz-focusring { outline: 1px dotted ButtonText; } + +/** Change the border, margin, and padding in all browsers (opinionated). */ +fieldset { border: 1px solid #c0c0c0; margin: 0 2px; padding: 0.35em 0.625em 0.75em; } + +/** 1. Correct the text wrapping in Edge and IE. 2. Correct the color inheritance from `fieldset` elements in IE. 3. Remove the padding so developers are not caught out when they zero out `fieldset` elements in all browsers. */ +legend { box-sizing: border-box; /* 1 */ color: inherit; /* 2 */ display: table; /* 1 */ max-width: 100%; /* 1 */ padding: 0; /* 3 */ white-space: normal; /* 1 */ } + +/** Remove the default vertical scrollbar in IE. */ +textarea { overflow: auto; } + +/** 1. Add the correct box sizing in IE 10-. 2. Remove the padding in IE 10-. */ +[type="checkbox"], [type="radio"] { box-sizing: border-box; /* 1 */ padding: 0; /* 2 */ } + +/** Correct the cursor style of increment and decrement buttons in Chrome. 
*/ +[type="number"]::-webkit-inner-spin-button, [type="number"]::-webkit-outer-spin-button { height: auto; } + +/** 1. Correct the odd appearance in Chrome and Safari. 2. Correct the outline style in Safari. */ +[type="search"] { -webkit-appearance: textfield; /* 1 */ outline-offset: -2px; /* 2 */ } + +/** Remove the inner padding and cancel buttons in Chrome and Safari on OS X. */ +[type="search"]::-webkit-search-cancel-button, [type="search"]::-webkit-search-decoration { -webkit-appearance: none; } + +/** Correct the text style of placeholders in Chrome, Edge, and Safari. */ +::-webkit-input-placeholder { color: inherit; opacity: 0.54; } + +/** 1. Correct the inability to style clickable types in iOS and Safari. 2. Change font properties to `inherit` in Safari. */ +::-webkit-file-upload-button { -webkit-appearance: button; /* 1 */ font: inherit; /* 2 */ } + +* { box-sizing: border-box; } + +input, select, textarea, button { font-family: inherit; font-size: inherit; line-height: inherit; } + +body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; font-size: 14px; line-height: 1.5; color: #24292e; background-color: #fff; } + +a { color: #0366d6; text-decoration: none; } +a:hover { text-decoration: underline; } + +b, strong { font-weight: 600; } + +hr, .rule { height: 0; margin: 15px 0; overflow: hidden; background: transparent; border: 0; border-bottom: 1px solid #dfe2e5; } +hr::before, .rule::before { display: table; content: ""; } +hr::after, .rule::after { display: table; clear: both; content: ""; } + +table { border-spacing: 0; border-collapse: collapse; } + +td, th { padding: 0; } + +button { cursor: pointer; border-radius: 0; } + +[hidden][hidden] { display: none !important; } + +details summary { cursor: pointer; } +details:not([open]) > *:not(summary) { display: none !important; } + +h1, h2, h3, h4, h5, h6 { margin-top: 0; margin-bottom: 0; } + +h1 { font-size: 32px; font-weight: 600; } + +h2 { font-size: 24px; font-weight: 600; } + +h3 { font-size: 20px; font-weight: 600; } + +h4 { font-size: 16px; font-weight: 600; } + +h5 { font-size: 14px; font-weight: 600; } + +h6 { font-size: 12px; font-weight: 600; } + +p { margin-top: 0; margin-bottom: 10px; } + +small { font-size: 90%; } + +blockquote { margin: 0; } + +ul, ol { padding-left: 0; margin-top: 0; margin-bottom: 0; } + +ol ol, ul ol { list-style-type: lower-roman; } + +ul ul ol, ul ol ol, ol ul ol, ol ol ol { list-style-type: lower-alpha; } + +dd { margin-left: 0; } + +tt, code { font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; font-size: 12px; } + +pre { margin-top: 0; margin-bottom: 0; font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; font-size: 12px; } + +.octicon { vertical-align: text-bottom; } + +/* Fade in an element */ +.anim-fade-in { animation-name: fade-in; animation-duration: 1s; animation-timing-function: ease-in-out; } +.anim-fade-in.fast { animation-duration: 300ms; } + +@keyframes fade-in { 0% { opacity: 0; } + 100% { opacity: 1; } } +/* Fade out an element */ +.anim-fade-out { animation-name: fade-out; animation-duration: 1s; animation-timing-function: ease-out; } +.anim-fade-out.fast { animation-duration: 0.3s; } + +@keyframes fade-out { 0% { opacity: 1; } + 100% { opacity: 0; } } +/* Fade in and slide up an element */ +.anim-fade-up { opacity: 0; animation-name: fade-up; animation-duration: 0.3s; animation-fill-mode: forwards; 
animation-timing-function: ease-out; animation-delay: 1s; } + +@keyframes fade-up { 0% { opacity: 0.8; transform: translateY(100%); } + 100% { opacity: 1; transform: translateY(0); } } +/* Fade an element out and slide down */ +.anim-fade-down { animation-name: fade-down; animation-duration: 0.3s; animation-fill-mode: forwards; animation-timing-function: ease-in; } + +@keyframes fade-down { 0% { opacity: 1; transform: translateY(0); } + 100% { opacity: 0.5; transform: translateY(100%); } } +/* Grow an element width from 0 to 100% */ +.anim-grow-x { width: 0%; animation-name: grow-x; animation-duration: 0.3s; animation-fill-mode: forwards; animation-timing-function: ease; animation-delay: 0.5s; } + +@keyframes grow-x { to { width: 100%; } } +/* Shrink an element from 100% to 0% */ +.anim-shrink-x { animation-name: shrink-x; animation-duration: 0.3s; animation-fill-mode: forwards; animation-timing-function: ease-in-out; animation-delay: 0.5s; } + +@keyframes shrink-x { to { width: 0%; } } +/* Fade in an element and scale it fast */ +.anim-scale-in { animation-name: scale-in; animation-duration: 0.15s; animation-timing-function: cubic-bezier(0.2, 0, 0.13, 1.5); } + +@keyframes scale-in { 0% { opacity: 0; transform: scale(0.5); } + 100% { opacity: 1; transform: scale(1); } } +/* Pulse an element's opacity */ +.anim-pulse { animation-name: pulse; animation-duration: 2s; animation-timing-function: linear; animation-iteration-count: infinite; } + +@keyframes pulse { 0% { opacity: 0.3; } + 10% { opacity: 1; } + 100% { opacity: 0.3; } } +/* Pulse in an element */ +.anim-pulse-in { animation-name: pulse-in; animation-duration: 0.5s; } + +@keyframes pulse-in { 0% { transform: scale3d(1, 1, 1); } + 50% { transform: scale3d(1.1, 1.1, 1.1); } + 100% { transform: scale3d(1, 1, 1); } } +/* Increase scale of an element on hover */ +.hover-grow { transition: transform 0.3s; backface-visibility: hidden; } +.hover-grow:hover { transform: scale(1.025); } + +/* Add a gray border on all sides */ +.border { border: 1px #e1e4e8 solid !important; } + +/* Add a gray border to the left and right */ +.border-y { border-top: 1px #e1e4e8 solid !important; border-bottom: 1px #e1e4e8 solid !important; } + +/* Remove borders from all sides */ +.border-0 { border: 0 !important; } + +.border-dashed { border-style: dashed !important; } + +/* Use with .border to turn the border blue */ +.border-blue { border-color: #0366d6 !important; } + +/* Use with .border to turn the border blue-light */ +.border-blue-light { border-color: #c8e1ff !important; } + +/* Use with .border to turn the border green */ +.border-green { border-color: #34d058 !important; } + +/* Use with .border to turn the border green light */ +.border-green-light { border-color: #a2cbac !important; } + +/* Use with .border to turn the border red */ +.border-red { border-color: #d73a49 !important; } + +/* Use with .border to turn the border red-light */ +.border-red-light { border-color: #cea0a5 !important; } + +/* Use with .border to turn the border purple */ +.border-purple { border-color: #6f42c1 !important; } + +/* Use with .border to turn the border yellow */ +.border-yellow { border-color: #d9d0a5 !important; } + +/* Use with .border to turn the border gray-light */ +.border-gray-light { border-color: #eaecef !important; } + +/* Use with .border to turn the border gray-dark */ +.border-gray-dark { border-color: #d1d5da !important; } + +/* Use with .border to turn the border rgba black 0.15 */ +.border-black-fade { border-color: rgba(27, 31, 35, 0.15) 
!important; } + +/* Add a gray border */ +/* Add a gray border to the top */ +.border-top { border-top: 1px #e1e4e8 solid !important; } + +/* Add a gray border to the right */ +.border-right { border-right: 1px #e1e4e8 solid !important; } + +/* Add a gray border to the bottom */ +.border-bottom { border-bottom: 1px #e1e4e8 solid !important; } + +/* Add a gray border to the left */ +.border-left { border-left: 1px #e1e4e8 solid !important; } + +/* Remove the top border */ +.border-top-0 { border-top: 0 !important; } + +/* Remove the right border */ +.border-right-0 { border-right: 0 !important; } + +/* Remove the bottom border */ +.border-bottom-0 { border-bottom: 0 !important; } + +/* Remove the left border */ +.border-left-0 { border-left: 0 !important; } + +/* Remove the border-radius */ +.rounded-0 { border-radius: 0 !important; } + +/* Add a border-radius to all corners */ +.rounded-1 { border-radius: 3px !important; } + +/* Add a 2x border-radius to all corners */ +.rounded-2 { border-radius: 6px !important; } + +.rounded-top-0 { border-top-left-radius: 0 !important; border-top-right-radius: 0 !important; } + +.rounded-top-1 { border-top-left-radius: 3px !important; border-top-right-radius: 3px !important; } + +.rounded-top-2 { border-top-left-radius: 6px !important; border-top-right-radius: 6px !important; } + +.rounded-right-0 { border-top-right-radius: 0 !important; border-bottom-right-radius: 0 !important; } + +.rounded-right-1 { border-top-right-radius: 3px !important; border-bottom-right-radius: 3px !important; } + +.rounded-right-2 { border-top-right-radius: 6px !important; border-bottom-right-radius: 6px !important; } + +.rounded-bottom-0 { border-bottom-right-radius: 0 !important; border-bottom-left-radius: 0 !important; } + +.rounded-bottom-1 { border-bottom-right-radius: 3px !important; border-bottom-left-radius: 3px !important; } + +.rounded-bottom-2 { border-bottom-right-radius: 6px !important; border-bottom-left-radius: 6px !important; } + +.rounded-left-0 { border-bottom-left-radius: 0 !important; border-top-left-radius: 0 !important; } + +.rounded-left-1 { border-bottom-left-radius: 3px !important; border-top-left-radius: 3px !important; } + +.rounded-left-2 { border-bottom-left-radius: 6px !important; border-top-left-radius: 6px !important; } + +@media (min-width: 544px) { /* Add a gray border */ + /* Add a gray border to the top */ + .border-sm-top { border-top: 1px #e1e4e8 solid !important; } + /* Add a gray border to the right */ + .border-sm-right { border-right: 1px #e1e4e8 solid !important; } + /* Add a gray border to the bottom */ + .border-sm-bottom { border-bottom: 1px #e1e4e8 solid !important; } + /* Add a gray border to the left */ + .border-sm-left { border-left: 1px #e1e4e8 solid !important; } + /* Remove the top border */ + .border-sm-top-0 { border-top: 0 !important; } + /* Remove the right border */ + .border-sm-right-0 { border-right: 0 !important; } + /* Remove the bottom border */ + .border-sm-bottom-0 { border-bottom: 0 !important; } + /* Remove the left border */ + .border-sm-left-0 { border-left: 0 !important; } + /* Remove the border-radius */ + .rounded-sm-0 { border-radius: 0 !important; } + /* Add a border-radius to all corners */ + .rounded-sm-1 { border-radius: 3px !important; } + /* Add a 2x border-radius to all corners */ + .rounded-sm-2 { border-radius: 6px !important; } + .rounded-sm-top-0 { border-top-left-radius: 0 !important; border-top-right-radius: 0 !important; } + .rounded-sm-top-1 { border-top-left-radius: 3px !important; 
border-top-right-radius: 3px !important; } + .rounded-sm-top-2 { border-top-left-radius: 6px !important; border-top-right-radius: 6px !important; } + .rounded-sm-right-0 { border-top-right-radius: 0 !important; border-bottom-right-radius: 0 !important; } + .rounded-sm-right-1 { border-top-right-radius: 3px !important; border-bottom-right-radius: 3px !important; } + .rounded-sm-right-2 { border-top-right-radius: 6px !important; border-bottom-right-radius: 6px !important; } + .rounded-sm-bottom-0 { border-bottom-right-radius: 0 !important; border-bottom-left-radius: 0 !important; } + .rounded-sm-bottom-1 { border-bottom-right-radius: 3px !important; border-bottom-left-radius: 3px !important; } + .rounded-sm-bottom-2 { border-bottom-right-radius: 6px !important; border-bottom-left-radius: 6px !important; } + .rounded-sm-left-0 { border-bottom-left-radius: 0 !important; border-top-left-radius: 0 !important; } + .rounded-sm-left-1 { border-bottom-left-radius: 3px !important; border-top-left-radius: 3px !important; } + .rounded-sm-left-2 { border-bottom-left-radius: 6px !important; border-top-left-radius: 6px !important; } } +@media (min-width: 768px) { /* Add a gray border */ + /* Add a gray border to the top */ + .border-md-top { border-top: 1px #e1e4e8 solid !important; } + /* Add a gray border to the right */ + .border-md-right { border-right: 1px #e1e4e8 solid !important; } + /* Add a gray border to the bottom */ + .border-md-bottom { border-bottom: 1px #e1e4e8 solid !important; } + /* Add a gray border to the left */ + .border-md-left { border-left: 1px #e1e4e8 solid !important; } + /* Remove the top border */ + .border-md-top-0 { border-top: 0 !important; } + /* Remove the right border */ + .border-md-right-0 { border-right: 0 !important; } + /* Remove the bottom border */ + .border-md-bottom-0 { border-bottom: 0 !important; } + /* Remove the left border */ + .border-md-left-0 { border-left: 0 !important; } + /* Remove the border-radius */ + .rounded-md-0 { border-radius: 0 !important; } + /* Add a border-radius to all corners */ + .rounded-md-1 { border-radius: 3px !important; } + /* Add a 2x border-radius to all corners */ + .rounded-md-2 { border-radius: 6px !important; } + .rounded-md-top-0 { border-top-left-radius: 0 !important; border-top-right-radius: 0 !important; } + .rounded-md-top-1 { border-top-left-radius: 3px !important; border-top-right-radius: 3px !important; } + .rounded-md-top-2 { border-top-left-radius: 6px !important; border-top-right-radius: 6px !important; } + .rounded-md-right-0 { border-top-right-radius: 0 !important; border-bottom-right-radius: 0 !important; } + .rounded-md-right-1 { border-top-right-radius: 3px !important; border-bottom-right-radius: 3px !important; } + .rounded-md-right-2 { border-top-right-radius: 6px !important; border-bottom-right-radius: 6px !important; } + .rounded-md-bottom-0 { border-bottom-right-radius: 0 !important; border-bottom-left-radius: 0 !important; } + .rounded-md-bottom-1 { border-bottom-right-radius: 3px !important; border-bottom-left-radius: 3px !important; } + .rounded-md-bottom-2 { border-bottom-right-radius: 6px !important; border-bottom-left-radius: 6px !important; } + .rounded-md-left-0 { border-bottom-left-radius: 0 !important; border-top-left-radius: 0 !important; } + .rounded-md-left-1 { border-bottom-left-radius: 3px !important; border-top-left-radius: 3px !important; } + .rounded-md-left-2 { border-bottom-left-radius: 6px !important; border-top-left-radius: 6px !important; } } +@media (min-width: 1012px) { /* Add a 
gray border */ + /* Add a gray border to the top */ + .border-lg-top { border-top: 1px #e1e4e8 solid !important; } + /* Add a gray border to the right */ + .border-lg-right { border-right: 1px #e1e4e8 solid !important; } + /* Add a gray border to the bottom */ + .border-lg-bottom { border-bottom: 1px #e1e4e8 solid !important; } + /* Add a gray border to the left */ + .border-lg-left { border-left: 1px #e1e4e8 solid !important; } + /* Remove the top border */ + .border-lg-top-0 { border-top: 0 !important; } + /* Remove the right border */ + .border-lg-right-0 { border-right: 0 !important; } + /* Remove the bottom border */ + .border-lg-bottom-0 { border-bottom: 0 !important; } + /* Remove the left border */ + .border-lg-left-0 { border-left: 0 !important; } + /* Remove the border-radius */ + .rounded-lg-0 { border-radius: 0 !important; } + /* Add a border-radius to all corners */ + .rounded-lg-1 { border-radius: 3px !important; } + /* Add a 2x border-radius to all corners */ + .rounded-lg-2 { border-radius: 6px !important; } + .rounded-lg-top-0 { border-top-left-radius: 0 !important; border-top-right-radius: 0 !important; } + .rounded-lg-top-1 { border-top-left-radius: 3px !important; border-top-right-radius: 3px !important; } + .rounded-lg-top-2 { border-top-left-radius: 6px !important; border-top-right-radius: 6px !important; } + .rounded-lg-right-0 { border-top-right-radius: 0 !important; border-bottom-right-radius: 0 !important; } + .rounded-lg-right-1 { border-top-right-radius: 3px !important; border-bottom-right-radius: 3px !important; } + .rounded-lg-right-2 { border-top-right-radius: 6px !important; border-bottom-right-radius: 6px !important; } + .rounded-lg-bottom-0 { border-bottom-right-radius: 0 !important; border-bottom-left-radius: 0 !important; } + .rounded-lg-bottom-1 { border-bottom-right-radius: 3px !important; border-bottom-left-radius: 3px !important; } + .rounded-lg-bottom-2 { border-bottom-right-radius: 6px !important; border-bottom-left-radius: 6px !important; } + .rounded-lg-left-0 { border-bottom-left-radius: 0 !important; border-top-left-radius: 0 !important; } + .rounded-lg-left-1 { border-bottom-left-radius: 3px !important; border-top-left-radius: 3px !important; } + .rounded-lg-left-2 { border-bottom-left-radius: 6px !important; border-top-left-radius: 6px !important; } } +@media (min-width: 1280px) { /* Add a gray border */ + /* Add a gray border to the top */ + .border-xl-top { border-top: 1px #e1e4e8 solid !important; } + /* Add a gray border to the right */ + .border-xl-right { border-right: 1px #e1e4e8 solid !important; } + /* Add a gray border to the bottom */ + .border-xl-bottom { border-bottom: 1px #e1e4e8 solid !important; } + /* Add a gray border to the left */ + .border-xl-left { border-left: 1px #e1e4e8 solid !important; } + /* Remove the top border */ + .border-xl-top-0 { border-top: 0 !important; } + /* Remove the right border */ + .border-xl-right-0 { border-right: 0 !important; } + /* Remove the bottom border */ + .border-xl-bottom-0 { border-bottom: 0 !important; } + /* Remove the left border */ + .border-xl-left-0 { border-left: 0 !important; } + /* Remove the border-radius */ + .rounded-xl-0 { border-radius: 0 !important; } + /* Add a border-radius to all corners */ + .rounded-xl-1 { border-radius: 3px !important; } + /* Add a 2x border-radius to all corners */ + .rounded-xl-2 { border-radius: 6px !important; } + .rounded-xl-top-0 { border-top-left-radius: 0 !important; border-top-right-radius: 0 !important; } + .rounded-xl-top-1 { 
border-top-left-radius: 3px !important; border-top-right-radius: 3px !important; } + .rounded-xl-top-2 { border-top-left-radius: 6px !important; border-top-right-radius: 6px !important; } + .rounded-xl-right-0 { border-top-right-radius: 0 !important; border-bottom-right-radius: 0 !important; } + .rounded-xl-right-1 { border-top-right-radius: 3px !important; border-bottom-right-radius: 3px !important; } + .rounded-xl-right-2 { border-top-right-radius: 6px !important; border-bottom-right-radius: 6px !important; } + .rounded-xl-bottom-0 { border-bottom-right-radius: 0 !important; border-bottom-left-radius: 0 !important; } + .rounded-xl-bottom-1 { border-bottom-right-radius: 3px !important; border-bottom-left-radius: 3px !important; } + .rounded-xl-bottom-2 { border-bottom-right-radius: 6px !important; border-bottom-left-radius: 6px !important; } + .rounded-xl-left-0 { border-bottom-left-radius: 0 !important; border-top-left-radius: 0 !important; } + .rounded-xl-left-1 { border-bottom-left-radius: 3px !important; border-top-left-radius: 3px !important; } + .rounded-xl-left-2 { border-bottom-left-radius: 6px !important; border-top-left-radius: 6px !important; } } +/* Add a 50% border-radius to make something into a circle */ +.circle { border-radius: 50% !important; } + +.box-shadow { box-shadow: 0 1px 1px rgba(27, 31, 35, 0.1) !important; } + +.box-shadow-medium { box-shadow: 0 1px 5px rgba(27, 31, 35, 0.15) !important; } + +.box-shadow-large { box-shadow: 0 1px 15px rgba(27, 31, 35, 0.15) !important; } + +.box-shadow-extra-large { box-shadow: 0 10px 50px rgba(27, 31, 35, 0.07) !important; } + +.box-shadow-none { box-shadow: none !important; } + +/* Set the background to $bg-white */ +.bg-white { background-color: #fff !important; } + +/* Set the background to $bg-blue */ +.bg-blue { background-color: #0366d6 !important; } + +/* Set the background to $bg-blue-light */ +.bg-blue-light { background-color: #f1f8ff !important; } + +/* Set the background to $bg-gray-dark */ +.bg-gray-dark { background-color: #24292e !important; } + +/* Set the background to $bg-gray */ +.bg-gray { background-color: #f6f8fa !important; } + +/* Set the background to $bg-gray-light */ +.bg-gray-light { background-color: #fafbfc !important; } + +/* Set the background to $bg-green */ +.bg-green { background-color: #28a745 !important; } + +/* Set the background to $bg-green-light */ +.bg-green-light { background-color: #dcffe4 !important; } + +/* Set the background to $bg-red */ +.bg-red { background-color: #d73a49 !important; } + +/* Set the background to $bg-red-light */ +.bg-red-light { background-color: #ffdce0 !important; } + +/* Set the background to $bg-yellow */ +.bg-yellow { background-color: #ffd33d !important; } + +/* Set the background to $bg-yellow-light */ +.bg-yellow-light { background-color: #fff5b1 !important; } + +/* Set the background to $bg-purple */ +.bg-purple { background-color: #6f42c1 !important; } + +/* Set the background to $bg-purple-light */ +.bg-purple-light { background-color: #f5f0ff !important; } + +.bg-shade-gradient { background-image: linear-gradient(180deg, rgba(27, 31, 35, 0.065), rgba(27, 31, 35, 0)) !important; background-repeat: no-repeat !important; background-size: 100% 200px !important; } + +/* Set the text color to $text-blue */ +.text-blue { color: #0366d6 !important; } + +/* Set the text color to $text-red */ +.text-red { color: #cb2431 !important; } + +/* Set the text color to $text-gray-light */ +.text-gray-light { color: #6a737d !important; } + +/* Set the text color to 
$text-gray */ +.text-gray { color: #586069 !important; } + +/* Set the text color to $text-gray-dark */ +.text-gray-dark { color: #24292e !important; } + +/* Set the text color to $text-green */ +.text-green { color: #28a745 !important; } + +/* Set the text color to $text-orange */ +.text-orange { color: #a04100 !important; } + +/* Set the text color to $text-orange-light */ +.text-orange-light { color: #e36209 !important; } + +/* Set the text color to $text-purple */ +.text-purple { color: #6f42c1 !important; } + +/* Set the text color to $text-white */ +.text-white { color: #fff !important; } + +/* Set the text color to inherit */ +.text-inherit { color: inherit !important; } + +.text-pending { color: #b08800 !important; } + +.bg-pending { color: #dbab09 !important; } + +.link-gray { color: #586069 !important; } +.link-gray:hover { color: #0366d6 !important; } + +.link-gray-dark { color: #24292e !important; } +.link-gray-dark:hover { color: #0366d6 !important; } + +/* Set the link color to $text-blue on hover Useful when you want only part of a link to turn blue on hover */ +.link-hover-blue:hover { color: #0366d6 !important; } + +/* Make a link $text-gray, then $text-blue on hover and removes the underline */ +.muted-link { color: #586069 !important; } +.muted-link:hover { color: #0366d6 !important; text-decoration: none; } + +.details-overlay[open] > summary::before { position: fixed; top: 0; right: 0; bottom: 0; left: 0; z-index: 80; display: block; cursor: default; content: " "; background: transparent; } + +.details-overlay-dark[open] > summary::before { z-index: 99; background: rgba(27, 31, 35, 0.5); } + +.flex-row { flex-direction: row !important; } + +.flex-row-reverse { flex-direction: row-reverse !important; } + +.flex-column { flex-direction: column !important; } + +.flex-wrap { flex-wrap: wrap !important; } + +.flex-nowrap { flex-wrap: nowrap !important; } + +.flex-justify-start { justify-content: flex-start !important; } + +.flex-justify-end { justify-content: flex-end !important; } + +.flex-justify-center { justify-content: center !important; } + +.flex-justify-between { justify-content: space-between !important; } + +.flex-justify-around { justify-content: space-around !important; } + +.flex-items-start { align-items: flex-start !important; } + +.flex-items-end { align-items: flex-end !important; } + +.flex-items-center { align-items: center !important; } + +.flex-items-baseline { align-items: baseline !important; } + +.flex-items-stretch { align-items: stretch !important; } + +.flex-content-start { align-content: flex-start !important; } + +.flex-content-end { align-content: flex-end !important; } + +.flex-content-center { align-content: center !important; } + +.flex-content-between { align-content: space-between !important; } + +.flex-content-around { align-content: space-around !important; } + +.flex-content-stretch { align-content: stretch !important; } + +.flex-auto { flex: 1 1 auto !important; } + +.flex-shrink-0 { flex-shrink: 0 !important; } + +.flex-self-auto { align-self: auto !important; } + +.flex-self-start { align-self: flex-start !important; } + +.flex-self-end { align-self: flex-end !important; } + +.flex-self-center { align-self: center !important; } + +.flex-self-baseline { align-self: baseline !important; } + +.flex-self-stretch { align-self: stretch !important; } + +.flex-item-equal { flex-grow: 1; flex-basis: 0; } + +@media (min-width: 544px) { .flex-sm-row { flex-direction: row !important; } + .flex-sm-row-reverse { flex-direction: row-reverse 
!important; } + .flex-sm-column { flex-direction: column !important; } + .flex-sm-wrap { flex-wrap: wrap !important; } + .flex-sm-nowrap { flex-wrap: nowrap !important; } + .flex-sm-justify-start { justify-content: flex-start !important; } + .flex-sm-justify-end { justify-content: flex-end !important; } + .flex-sm-justify-center { justify-content: center !important; } + .flex-sm-justify-between { justify-content: space-between !important; } + .flex-sm-justify-around { justify-content: space-around !important; } + .flex-sm-items-start { align-items: flex-start !important; } + .flex-sm-items-end { align-items: flex-end !important; } + .flex-sm-items-center { align-items: center !important; } + .flex-sm-items-baseline { align-items: baseline !important; } + .flex-sm-items-stretch { align-items: stretch !important; } + .flex-sm-content-start { align-content: flex-start !important; } + .flex-sm-content-end { align-content: flex-end !important; } + .flex-sm-content-center { align-content: center !important; } + .flex-sm-content-between { align-content: space-between !important; } + .flex-sm-content-around { align-content: space-around !important; } + .flex-sm-content-stretch { align-content: stretch !important; } + .flex-sm-auto { flex: 1 1 auto !important; } + .flex-sm-shrink-0 { flex-shrink: 0 !important; } + .flex-sm-self-auto { align-self: auto !important; } + .flex-sm-self-start { align-self: flex-start !important; } + .flex-sm-self-end { align-self: flex-end !important; } + .flex-sm-self-center { align-self: center !important; } + .flex-sm-self-baseline { align-self: baseline !important; } + .flex-sm-self-stretch { align-self: stretch !important; } + .flex-sm-item-equal { flex-grow: 1; flex-basis: 0; } } +@media (min-width: 768px) { .flex-md-row { flex-direction: row !important; } + .flex-md-row-reverse { flex-direction: row-reverse !important; } + .flex-md-column { flex-direction: column !important; } + .flex-md-wrap { flex-wrap: wrap !important; } + .flex-md-nowrap { flex-wrap: nowrap !important; } + .flex-md-justify-start { justify-content: flex-start !important; } + .flex-md-justify-end { justify-content: flex-end !important; } + .flex-md-justify-center { justify-content: center !important; } + .flex-md-justify-between { justify-content: space-between !important; } + .flex-md-justify-around { justify-content: space-around !important; } + .flex-md-items-start { align-items: flex-start !important; } + .flex-md-items-end { align-items: flex-end !important; } + .flex-md-items-center { align-items: center !important; } + .flex-md-items-baseline { align-items: baseline !important; } + .flex-md-items-stretch { align-items: stretch !important; } + .flex-md-content-start { align-content: flex-start !important; } + .flex-md-content-end { align-content: flex-end !important; } + .flex-md-content-center { align-content: center !important; } + .flex-md-content-between { align-content: space-between !important; } + .flex-md-content-around { align-content: space-around !important; } + .flex-md-content-stretch { align-content: stretch !important; } + .flex-md-auto { flex: 1 1 auto !important; } + .flex-md-shrink-0 { flex-shrink: 0 !important; } + .flex-md-self-auto { align-self: auto !important; } + .flex-md-self-start { align-self: flex-start !important; } + .flex-md-self-end { align-self: flex-end !important; } + .flex-md-self-center { align-self: center !important; } + .flex-md-self-baseline { align-self: baseline !important; } + .flex-md-self-stretch { align-self: stretch !important; } + 
.flex-md-item-equal { flex-grow: 1; flex-basis: 0; } } +@media (min-width: 1012px) { .flex-lg-row { flex-direction: row !important; } + .flex-lg-row-reverse { flex-direction: row-reverse !important; } + .flex-lg-column { flex-direction: column !important; } + .flex-lg-wrap { flex-wrap: wrap !important; } + .flex-lg-nowrap { flex-wrap: nowrap !important; } + .flex-lg-justify-start { justify-content: flex-start !important; } + .flex-lg-justify-end { justify-content: flex-end !important; } + .flex-lg-justify-center { justify-content: center !important; } + .flex-lg-justify-between { justify-content: space-between !important; } + .flex-lg-justify-around { justify-content: space-around !important; } + .flex-lg-items-start { align-items: flex-start !important; } + .flex-lg-items-end { align-items: flex-end !important; } + .flex-lg-items-center { align-items: center !important; } + .flex-lg-items-baseline { align-items: baseline !important; } + .flex-lg-items-stretch { align-items: stretch !important; } + .flex-lg-content-start { align-content: flex-start !important; } + .flex-lg-content-end { align-content: flex-end !important; } + .flex-lg-content-center { align-content: center !important; } + .flex-lg-content-between { align-content: space-between !important; } + .flex-lg-content-around { align-content: space-around !important; } + .flex-lg-content-stretch { align-content: stretch !important; } + .flex-lg-auto { flex: 1 1 auto !important; } + .flex-lg-shrink-0 { flex-shrink: 0 !important; } + .flex-lg-self-auto { align-self: auto !important; } + .flex-lg-self-start { align-self: flex-start !important; } + .flex-lg-self-end { align-self: flex-end !important; } + .flex-lg-self-center { align-self: center !important; } + .flex-lg-self-baseline { align-self: baseline !important; } + .flex-lg-self-stretch { align-self: stretch !important; } + .flex-lg-item-equal { flex-grow: 1; flex-basis: 0; } } +@media (min-width: 1280px) { .flex-xl-row { flex-direction: row !important; } + .flex-xl-row-reverse { flex-direction: row-reverse !important; } + .flex-xl-column { flex-direction: column !important; } + .flex-xl-wrap { flex-wrap: wrap !important; } + .flex-xl-nowrap { flex-wrap: nowrap !important; } + .flex-xl-justify-start { justify-content: flex-start !important; } + .flex-xl-justify-end { justify-content: flex-end !important; } + .flex-xl-justify-center { justify-content: center !important; } + .flex-xl-justify-between { justify-content: space-between !important; } + .flex-xl-justify-around { justify-content: space-around !important; } + .flex-xl-items-start { align-items: flex-start !important; } + .flex-xl-items-end { align-items: flex-end !important; } + .flex-xl-items-center { align-items: center !important; } + .flex-xl-items-baseline { align-items: baseline !important; } + .flex-xl-items-stretch { align-items: stretch !important; } + .flex-xl-content-start { align-content: flex-start !important; } + .flex-xl-content-end { align-content: flex-end !important; } + .flex-xl-content-center { align-content: center !important; } + .flex-xl-content-between { align-content: space-between !important; } + .flex-xl-content-around { align-content: space-around !important; } + .flex-xl-content-stretch { align-content: stretch !important; } + .flex-xl-auto { flex: 1 1 auto !important; } + .flex-xl-shrink-0 { flex-shrink: 0 !important; } + .flex-xl-self-auto { align-self: auto !important; } + .flex-xl-self-start { align-self: flex-start !important; } + .flex-xl-self-end { align-self: flex-end !important; } + 
.flex-xl-self-center { align-self: center !important; } + .flex-xl-self-baseline { align-self: baseline !important; } + .flex-xl-self-stretch { align-self: stretch !important; } + .flex-xl-item-equal { flex-grow: 1; flex-basis: 0; } } +/* Set position to static */ +.position-static { position: static !important; } + +/* Set position to relative */ +.position-relative { position: relative !important; } + +/* Set position to absolute */ +.position-absolute { position: absolute !important; } + +/* Set position to fixed */ +.position-fixed { position: fixed !important; } + +/* Set top 0 */ +.top-0 { top: 0 !important; } + +/* Set right 0 */ +.right-0 { right: 0 !important; } + +/* Set bottom 0 */ +.bottom-0 { bottom: 0 !important; } + +/* Set left 0 */ +.left-0 { left: 0 !important; } + +/* Vertical align middle */ +.v-align-middle { vertical-align: middle !important; } + +/* Vertical align top */ +.v-align-top { vertical-align: top !important; } + +/* Vertical align bottom */ +.v-align-bottom { vertical-align: bottom !important; } + +/* Vertical align to the top of the text */ +.v-align-text-top { vertical-align: text-top !important; } + +/* Vertical align to the bottom of the text */ +.v-align-text-bottom { vertical-align: text-bottom !important; } + +/* Vertical align to the parent's baseline */ +.v-align-baseline { vertical-align: baseline !important; } + +/* Set the overflow hidden */ +.overflow-hidden { overflow: hidden !important; } + +/* Set the overflow scroll */ +.overflow-scroll { overflow: scroll !important; } + +/* Set the overflow auto */ +.overflow-auto { overflow: auto !important; } + +/* Clear floats around the element */ +.clearfix::before { display: table; content: ""; } +.clearfix::after { display: table; clear: both; content: ""; } + +/* Float to the left */ +.float-left { float: left !important; } + +/* Float to the right */ +.float-right { float: right !important; } + +/* No float */ +.float-none { float: none !important; } + +@media (min-width: 544px) { /* Float to the left */ + .float-sm-left { float: left !important; } + /* Float to the right */ + .float-sm-right { float: right !important; } + /* No float */ + .float-sm-none { float: none !important; } } +@media (min-width: 768px) { /* Float to the left */ + .float-md-left { float: left !important; } + /* Float to the right */ + .float-md-right { float: right !important; } + /* No float */ + .float-md-none { float: none !important; } } +@media (min-width: 1012px) { /* Float to the left */ + .float-lg-left { float: left !important; } + /* Float to the right */ + .float-lg-right { float: right !important; } + /* No float */ + .float-lg-none { float: none !important; } } +@media (min-width: 1280px) { /* Float to the left */ + .float-xl-left { float: left !important; } + /* Float to the right */ + .float-xl-right { float: right !important; } + /* No float */ + .float-xl-none { float: none !important; } } +/* Max width 100% */ +.width-fit { max-width: 100% !important; } + +/* Set the width to 100% */ +.width-full { width: 100% !important; } + +/* Max height 100% */ +.height-fit { max-height: 100% !important; } + +/* Set the height to 100% */ +.height-full { height: 100% !important; } + +/* Remove min-width from element */ +.min-width-0 { min-width: 0 !important; } + +/* Set the direction to rtl */ +.direction-rtl { direction: rtl !important; } + +/* Set the direction to ltr */ +.direction-ltr { direction: ltr !important; } + +@media (min-width: 544px) { /* Set the direction to rtl */ + .direction-sm-rtl { direction: rtl 
!important; } + /* Set the direction to ltr */ + .direction-sm-ltr { direction: ltr !important; } } +@media (min-width: 768px) { /* Set the direction to rtl */ + .direction-md-rtl { direction: rtl !important; } + /* Set the direction to ltr */ + .direction-md-ltr { direction: ltr !important; } } +@media (min-width: 1012px) { /* Set the direction to rtl */ + .direction-lg-rtl { direction: rtl !important; } + /* Set the direction to ltr */ + .direction-lg-ltr { direction: ltr !important; } } +@media (min-width: 1280px) { /* Set the direction to rtl */ + .direction-xl-rtl { direction: rtl !important; } + /* Set the direction to ltr */ + .direction-xl-ltr { direction: ltr !important; } } +/* Set a $size margin to all sides at $breakpoint */ +.m-0 { margin: 0 !important; } + +/* Set a $size margin on the top at $breakpoint */ +.mt-0 { margin-top: 0 !important; } + +/* Set a $size margin on the right at $breakpoint */ +.mr-0 { margin-right: 0 !important; } + +/* Set a $size margin on the bottom at $breakpoint */ +.mb-0 { margin-bottom: 0 !important; } + +/* Set a $size margin on the left at $breakpoint */ +.ml-0 { margin-left: 0 !important; } + +/* Set a $size margin on the left & right at $breakpoint */ +.mx-0 { margin-right: 0 !important; margin-left: 0 !important; } + +/* Set a $size margin on the top & bottom at $breakpoint */ +.my-0 { margin-top: 0 !important; margin-bottom: 0 !important; } + +/* Set a $size margin to all sides at $breakpoint */ +.m-1 { margin: 4px !important; } + +/* Set a $size margin on the top at $breakpoint */ +.mt-1 { margin-top: 4px !important; } + +/* Set a $size margin on the right at $breakpoint */ +.mr-1 { margin-right: 4px !important; } + +/* Set a $size margin on the bottom at $breakpoint */ +.mb-1 { margin-bottom: 4px !important; } + +/* Set a $size margin on the left at $breakpoint */ +.ml-1 { margin-left: 4px !important; } + +/* Set a negative $size margin on top at $breakpoint */ +.mt-n1 { margin-top: -4px !important; } + +/* Set a negative $size margin on the right at $breakpoint */ +.mr-n1 { margin-right: -4px !important; } + +/* Set a negative $size margin on the bottom at $breakpoint */ +.mb-n1 { margin-bottom: -4px !important; } + +/* Set a negative $size margin on the left at $breakpoint */ +.ml-n1 { margin-left: -4px !important; } + +/* Set a $size margin on the left & right at $breakpoint */ +.mx-1 { margin-right: 4px !important; margin-left: 4px !important; } + +/* Set a $size margin on the top & bottom at $breakpoint */ +.my-1 { margin-top: 4px !important; margin-bottom: 4px !important; } + +/* Set a $size margin to all sides at $breakpoint */ +.m-2 { margin: 8px !important; } + +/* Set a $size margin on the top at $breakpoint */ +.mt-2 { margin-top: 8px !important; } + +/* Set a $size margin on the right at $breakpoint */ +.mr-2 { margin-right: 8px !important; } + +/* Set a $size margin on the bottom at $breakpoint */ +.mb-2 { margin-bottom: 8px !important; } + +/* Set a $size margin on the left at $breakpoint */ +.ml-2 { margin-left: 8px !important; } + +/* Set a negative $size margin on top at $breakpoint */ +.mt-n2 { margin-top: -8px !important; } + +/* Set a negative $size margin on the right at $breakpoint */ +.mr-n2 { margin-right: -8px !important; } + +/* Set a negative $size margin on the bottom at $breakpoint */ +.mb-n2 { margin-bottom: -8px !important; } + +/* Set a negative $size margin on the left at $breakpoint */ +.ml-n2 { margin-left: -8px !important; } + +/* Set a $size margin on the left & right at $breakpoint */ +.mx-2 { 
margin-right: 8px !important; margin-left: 8px !important; } + +/* Set a $size margin on the top & bottom at $breakpoint */ +.my-2 { margin-top: 8px !important; margin-bottom: 8px !important; } + +/* Set a $size margin to all sides at $breakpoint */ +.m-3 { margin: 16px !important; } + +/* Set a $size margin on the top at $breakpoint */ +.mt-3 { margin-top: 16px !important; } + +/* Set a $size margin on the right at $breakpoint */ +.mr-3 { margin-right: 16px !important; } + +/* Set a $size margin on the bottom at $breakpoint */ +.mb-3 { margin-bottom: 16px !important; } + +/* Set a $size margin on the left at $breakpoint */ +.ml-3 { margin-left: 16px !important; } + +/* Set a negative $size margin on top at $breakpoint */ +.mt-n3 { margin-top: -16px !important; } + +/* Set a negative $size margin on the right at $breakpoint */ +.mr-n3 { margin-right: -16px !important; } + +/* Set a negative $size margin on the bottom at $breakpoint */ +.mb-n3 { margin-bottom: -16px !important; } + +/* Set a negative $size margin on the left at $breakpoint */ +.ml-n3 { margin-left: -16px !important; } + +/* Set a $size margin on the left & right at $breakpoint */ +.mx-3 { margin-right: 16px !important; margin-left: 16px !important; } + +/* Set a $size margin on the top & bottom at $breakpoint */ +.my-3 { margin-top: 16px !important; margin-bottom: 16px !important; } + +/* Set a $size margin to all sides at $breakpoint */ +.m-4 { margin: 24px !important; } + +/* Set a $size margin on the top at $breakpoint */ +.mt-4 { margin-top: 24px !important; } + +/* Set a $size margin on the right at $breakpoint */ +.mr-4 { margin-right: 24px !important; } + +/* Set a $size margin on the bottom at $breakpoint */ +.mb-4 { margin-bottom: 24px !important; } + +/* Set a $size margin on the left at $breakpoint */ +.ml-4 { margin-left: 24px !important; } + +/* Set a negative $size margin on top at $breakpoint */ +.mt-n4 { margin-top: -24px !important; } + +/* Set a negative $size margin on the right at $breakpoint */ +.mr-n4 { margin-right: -24px !important; } + +/* Set a negative $size margin on the bottom at $breakpoint */ +.mb-n4 { margin-bottom: -24px !important; } + +/* Set a negative $size margin on the left at $breakpoint */ +.ml-n4 { margin-left: -24px !important; } + +/* Set a $size margin on the left & right at $breakpoint */ +.mx-4 { margin-right: 24px !important; margin-left: 24px !important; } + +/* Set a $size margin on the top & bottom at $breakpoint */ +.my-4 { margin-top: 24px !important; margin-bottom: 24px !important; } + +/* Set a $size margin to all sides at $breakpoint */ +.m-5 { margin: 32px !important; } + +/* Set a $size margin on the top at $breakpoint */ +.mt-5 { margin-top: 32px !important; } + +/* Set a $size margin on the right at $breakpoint */ +.mr-5 { margin-right: 32px !important; } + +/* Set a $size margin on the bottom at $breakpoint */ +.mb-5 { margin-bottom: 32px !important; } + +/* Set a $size margin on the left at $breakpoint */ +.ml-5 { margin-left: 32px !important; } + +/* Set a negative $size margin on top at $breakpoint */ +.mt-n5 { margin-top: -32px !important; } + +/* Set a negative $size margin on the right at $breakpoint */ +.mr-n5 { margin-right: -32px !important; } + +/* Set a negative $size margin on the bottom at $breakpoint */ +.mb-n5 { margin-bottom: -32px !important; } + +/* Set a negative $size margin on the left at $breakpoint */ +.ml-n5 { margin-left: -32px !important; } + +/* Set a $size margin on the left & right at $breakpoint */ +.mx-5 { margin-right: 32px 
!important; margin-left: 32px !important; } + +/* Set a $size margin on the top & bottom at $breakpoint */ +.my-5 { margin-top: 32px !important; margin-bottom: 32px !important; } + +/* Set a $size margin to all sides at $breakpoint */ +.m-6 { margin: 40px !important; } + +/* Set a $size margin on the top at $breakpoint */ +.mt-6 { margin-top: 40px !important; } + +/* Set a $size margin on the right at $breakpoint */ +.mr-6 { margin-right: 40px !important; } + +/* Set a $size margin on the bottom at $breakpoint */ +.mb-6 { margin-bottom: 40px !important; } + +/* Set a $size margin on the left at $breakpoint */ +.ml-6 { margin-left: 40px !important; } + +/* Set a negative $size margin on top at $breakpoint */ +.mt-n6 { margin-top: -40px !important; } + +/* Set a negative $size margin on the right at $breakpoint */ +.mr-n6 { margin-right: -40px !important; } + +/* Set a negative $size margin on the bottom at $breakpoint */ +.mb-n6 { margin-bottom: -40px !important; } + +/* Set a negative $size margin on the left at $breakpoint */ +.ml-n6 { margin-left: -40px !important; } + +/* Set a $size margin on the left & right at $breakpoint */ +.mx-6 { margin-right: 40px !important; margin-left: 40px !important; } + +/* Set a $size margin on the top & bottom at $breakpoint */ +.my-6 { margin-top: 40px !important; margin-bottom: 40px !important; } + +/* responsive horizontal auto margins */ +.mx-auto { margin-right: auto !important; margin-left: auto !important; } + +@media (min-width: 544px) { /* Set a $size margin to all sides at $breakpoint */ + .m-sm-0 { margin: 0 !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-sm-0 { margin-top: 0 !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-sm-0 { margin-right: 0 !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-sm-0 { margin-bottom: 0 !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-sm-0 { margin-left: 0 !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-sm-0 { margin-right: 0 !important; margin-left: 0 !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-sm-0 { margin-top: 0 !important; margin-bottom: 0 !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-sm-1 { margin: 4px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-sm-1 { margin-top: 4px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-sm-1 { margin-right: 4px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-sm-1 { margin-bottom: 4px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-sm-1 { margin-left: 4px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-sm-n1 { margin-top: -4px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-sm-n1 { margin-right: -4px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-sm-n1 { margin-bottom: -4px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-sm-n1 { margin-left: -4px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-sm-1 { margin-right: 4px !important; margin-left: 4px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-sm-1 { margin-top: 4px !important; margin-bottom: 4px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-sm-2 { margin: 8px !important; } + /* Set a $size 
margin on the top at $breakpoint */ + .mt-sm-2 { margin-top: 8px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-sm-2 { margin-right: 8px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-sm-2 { margin-bottom: 8px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-sm-2 { margin-left: 8px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-sm-n2 { margin-top: -8px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-sm-n2 { margin-right: -8px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-sm-n2 { margin-bottom: -8px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-sm-n2 { margin-left: -8px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-sm-2 { margin-right: 8px !important; margin-left: 8px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-sm-2 { margin-top: 8px !important; margin-bottom: 8px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-sm-3 { margin: 16px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-sm-3 { margin-top: 16px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-sm-3 { margin-right: 16px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-sm-3 { margin-bottom: 16px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-sm-3 { margin-left: 16px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-sm-n3 { margin-top: -16px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-sm-n3 { margin-right: -16px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-sm-n3 { margin-bottom: -16px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-sm-n3 { margin-left: -16px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-sm-3 { margin-right: 16px !important; margin-left: 16px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-sm-3 { margin-top: 16px !important; margin-bottom: 16px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-sm-4 { margin: 24px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-sm-4 { margin-top: 24px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-sm-4 { margin-right: 24px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-sm-4 { margin-bottom: 24px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-sm-4 { margin-left: 24px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-sm-n4 { margin-top: -24px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-sm-n4 { margin-right: -24px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-sm-n4 { margin-bottom: -24px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-sm-n4 { margin-left: -24px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-sm-4 { margin-right: 24px !important; margin-left: 24px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-sm-4 { margin-top: 24px !important; margin-bottom: 24px !important; } + /* Set a $size margin to all sides at 
$breakpoint */ + .m-sm-5 { margin: 32px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-sm-5 { margin-top: 32px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-sm-5 { margin-right: 32px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-sm-5 { margin-bottom: 32px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-sm-5 { margin-left: 32px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-sm-n5 { margin-top: -32px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-sm-n5 { margin-right: -32px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-sm-n5 { margin-bottom: -32px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-sm-n5 { margin-left: -32px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-sm-5 { margin-right: 32px !important; margin-left: 32px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-sm-5 { margin-top: 32px !important; margin-bottom: 32px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-sm-6 { margin: 40px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-sm-6 { margin-top: 40px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-sm-6 { margin-right: 40px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-sm-6 { margin-bottom: 40px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-sm-6 { margin-left: 40px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-sm-n6 { margin-top: -40px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-sm-n6 { margin-right: -40px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-sm-n6 { margin-bottom: -40px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-sm-n6 { margin-left: -40px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-sm-6 { margin-right: 40px !important; margin-left: 40px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-sm-6 { margin-top: 40px !important; margin-bottom: 40px !important; } + /* responsive horizontal auto margins */ + .mx-sm-auto { margin-right: auto !important; margin-left: auto !important; } } +@media (min-width: 768px) { /* Set a $size margin to all sides at $breakpoint */ + .m-md-0 { margin: 0 !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-md-0 { margin-top: 0 !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-md-0 { margin-right: 0 !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-md-0 { margin-bottom: 0 !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-md-0 { margin-left: 0 !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-md-0 { margin-right: 0 !important; margin-left: 0 !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-md-0 { margin-top: 0 !important; margin-bottom: 0 !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-md-1 { margin: 4px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-md-1 { margin-top: 4px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-md-1 { margin-right: 4px 
!important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-md-1 { margin-bottom: 4px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-md-1 { margin-left: 4px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-md-n1 { margin-top: -4px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-md-n1 { margin-right: -4px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-md-n1 { margin-bottom: -4px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-md-n1 { margin-left: -4px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-md-1 { margin-right: 4px !important; margin-left: 4px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-md-1 { margin-top: 4px !important; margin-bottom: 4px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-md-2 { margin: 8px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-md-2 { margin-top: 8px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-md-2 { margin-right: 8px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-md-2 { margin-bottom: 8px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-md-2 { margin-left: 8px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-md-n2 { margin-top: -8px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-md-n2 { margin-right: -8px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-md-n2 { margin-bottom: -8px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-md-n2 { margin-left: -8px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-md-2 { margin-right: 8px !important; margin-left: 8px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-md-2 { margin-top: 8px !important; margin-bottom: 8px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-md-3 { margin: 16px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-md-3 { margin-top: 16px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-md-3 { margin-right: 16px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-md-3 { margin-bottom: 16px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-md-3 { margin-left: 16px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-md-n3 { margin-top: -16px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-md-n3 { margin-right: -16px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-md-n3 { margin-bottom: -16px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-md-n3 { margin-left: -16px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-md-3 { margin-right: 16px !important; margin-left: 16px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-md-3 { margin-top: 16px !important; margin-bottom: 16px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-md-4 { margin: 24px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-md-4 { margin-top: 24px !important; } + /* Set a $size margin on the 
right at $breakpoint */ + .mr-md-4 { margin-right: 24px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-md-4 { margin-bottom: 24px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-md-4 { margin-left: 24px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-md-n4 { margin-top: -24px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-md-n4 { margin-right: -24px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-md-n4 { margin-bottom: -24px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-md-n4 { margin-left: -24px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-md-4 { margin-right: 24px !important; margin-left: 24px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-md-4 { margin-top: 24px !important; margin-bottom: 24px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-md-5 { margin: 32px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-md-5 { margin-top: 32px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-md-5 { margin-right: 32px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-md-5 { margin-bottom: 32px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-md-5 { margin-left: 32px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-md-n5 { margin-top: -32px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-md-n5 { margin-right: -32px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-md-n5 { margin-bottom: -32px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-md-n5 { margin-left: -32px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-md-5 { margin-right: 32px !important; margin-left: 32px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-md-5 { margin-top: 32px !important; margin-bottom: 32px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-md-6 { margin: 40px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-md-6 { margin-top: 40px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-md-6 { margin-right: 40px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-md-6 { margin-bottom: 40px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-md-6 { margin-left: 40px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-md-n6 { margin-top: -40px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-md-n6 { margin-right: -40px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-md-n6 { margin-bottom: -40px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-md-n6 { margin-left: -40px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-md-6 { margin-right: 40px !important; margin-left: 40px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-md-6 { margin-top: 40px !important; margin-bottom: 40px !important; } + /* responsive horizontal auto margins */ + .mx-md-auto { margin-right: auto !important; margin-left: auto !important; } } +@media (min-width: 
1012px) { /* Set a $size margin to all sides at $breakpoint */ + .m-lg-0 { margin: 0 !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-lg-0 { margin-top: 0 !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-lg-0 { margin-right: 0 !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-lg-0 { margin-bottom: 0 !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-lg-0 { margin-left: 0 !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-lg-0 { margin-right: 0 !important; margin-left: 0 !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-lg-0 { margin-top: 0 !important; margin-bottom: 0 !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-lg-1 { margin: 4px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-lg-1 { margin-top: 4px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-lg-1 { margin-right: 4px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-lg-1 { margin-bottom: 4px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-lg-1 { margin-left: 4px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-lg-n1 { margin-top: -4px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-lg-n1 { margin-right: -4px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-lg-n1 { margin-bottom: -4px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-lg-n1 { margin-left: -4px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-lg-1 { margin-right: 4px !important; margin-left: 4px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-lg-1 { margin-top: 4px !important; margin-bottom: 4px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-lg-2 { margin: 8px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-lg-2 { margin-top: 8px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-lg-2 { margin-right: 8px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-lg-2 { margin-bottom: 8px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-lg-2 { margin-left: 8px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-lg-n2 { margin-top: -8px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-lg-n2 { margin-right: -8px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-lg-n2 { margin-bottom: -8px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-lg-n2 { margin-left: -8px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-lg-2 { margin-right: 8px !important; margin-left: 8px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-lg-2 { margin-top: 8px !important; margin-bottom: 8px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-lg-3 { margin: 16px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-lg-3 { margin-top: 16px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-lg-3 { margin-right: 16px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-lg-3 { margin-bottom: 16px !important; } + /* Set a 
$size margin on the left at $breakpoint */ + .ml-lg-3 { margin-left: 16px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-lg-n3 { margin-top: -16px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-lg-n3 { margin-right: -16px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-lg-n3 { margin-bottom: -16px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-lg-n3 { margin-left: -16px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-lg-3 { margin-right: 16px !important; margin-left: 16px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-lg-3 { margin-top: 16px !important; margin-bottom: 16px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-lg-4 { margin: 24px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-lg-4 { margin-top: 24px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-lg-4 { margin-right: 24px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-lg-4 { margin-bottom: 24px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-lg-4 { margin-left: 24px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-lg-n4 { margin-top: -24px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-lg-n4 { margin-right: -24px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-lg-n4 { margin-bottom: -24px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-lg-n4 { margin-left: -24px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-lg-4 { margin-right: 24px !important; margin-left: 24px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-lg-4 { margin-top: 24px !important; margin-bottom: 24px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-lg-5 { margin: 32px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-lg-5 { margin-top: 32px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-lg-5 { margin-right: 32px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-lg-5 { margin-bottom: 32px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-lg-5 { margin-left: 32px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-lg-n5 { margin-top: -32px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-lg-n5 { margin-right: -32px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-lg-n5 { margin-bottom: -32px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-lg-n5 { margin-left: -32px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-lg-5 { margin-right: 32px !important; margin-left: 32px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-lg-5 { margin-top: 32px !important; margin-bottom: 32px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-lg-6 { margin: 40px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-lg-6 { margin-top: 40px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-lg-6 { margin-right: 40px !important; } + /* Set a $size margin on the bottom 
at $breakpoint */ + .mb-lg-6 { margin-bottom: 40px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-lg-6 { margin-left: 40px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-lg-n6 { margin-top: -40px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-lg-n6 { margin-right: -40px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-lg-n6 { margin-bottom: -40px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-lg-n6 { margin-left: -40px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-lg-6 { margin-right: 40px !important; margin-left: 40px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-lg-6 { margin-top: 40px !important; margin-bottom: 40px !important; } + /* responsive horizontal auto margins */ + .mx-lg-auto { margin-right: auto !important; margin-left: auto !important; } } +@media (min-width: 1280px) { /* Set a $size margin to all sides at $breakpoint */ + .m-xl-0 { margin: 0 !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-xl-0 { margin-top: 0 !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-xl-0 { margin-right: 0 !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-xl-0 { margin-bottom: 0 !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-xl-0 { margin-left: 0 !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-xl-0 { margin-right: 0 !important; margin-left: 0 !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-xl-0 { margin-top: 0 !important; margin-bottom: 0 !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-xl-1 { margin: 4px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-xl-1 { margin-top: 4px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-xl-1 { margin-right: 4px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-xl-1 { margin-bottom: 4px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-xl-1 { margin-left: 4px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-xl-n1 { margin-top: -4px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-xl-n1 { margin-right: -4px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-xl-n1 { margin-bottom: -4px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-xl-n1 { margin-left: -4px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-xl-1 { margin-right: 4px !important; margin-left: 4px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-xl-1 { margin-top: 4px !important; margin-bottom: 4px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-xl-2 { margin: 8px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-xl-2 { margin-top: 8px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-xl-2 { margin-right: 8px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-xl-2 { margin-bottom: 8px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-xl-2 { margin-left: 8px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-xl-n2 { margin-top: -8px 
!important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-xl-n2 { margin-right: -8px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-xl-n2 { margin-bottom: -8px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-xl-n2 { margin-left: -8px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-xl-2 { margin-right: 8px !important; margin-left: 8px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-xl-2 { margin-top: 8px !important; margin-bottom: 8px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-xl-3 { margin: 16px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-xl-3 { margin-top: 16px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-xl-3 { margin-right: 16px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-xl-3 { margin-bottom: 16px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-xl-3 { margin-left: 16px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-xl-n3 { margin-top: -16px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-xl-n3 { margin-right: -16px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-xl-n3 { margin-bottom: -16px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-xl-n3 { margin-left: -16px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-xl-3 { margin-right: 16px !important; margin-left: 16px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-xl-3 { margin-top: 16px !important; margin-bottom: 16px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-xl-4 { margin: 24px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-xl-4 { margin-top: 24px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-xl-4 { margin-right: 24px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-xl-4 { margin-bottom: 24px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-xl-4 { margin-left: 24px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-xl-n4 { margin-top: -24px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-xl-n4 { margin-right: -24px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-xl-n4 { margin-bottom: -24px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-xl-n4 { margin-left: -24px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-xl-4 { margin-right: 24px !important; margin-left: 24px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-xl-4 { margin-top: 24px !important; margin-bottom: 24px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-xl-5 { margin: 32px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-xl-5 { margin-top: 32px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-xl-5 { margin-right: 32px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-xl-5 { margin-bottom: 32px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-xl-5 { margin-left: 32px !important; } + /* Set a negative 
$size margin on top at $breakpoint */ + .mt-xl-n5 { margin-top: -32px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-xl-n5 { margin-right: -32px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-xl-n5 { margin-bottom: -32px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-xl-n5 { margin-left: -32px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-xl-5 { margin-right: 32px !important; margin-left: 32px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-xl-5 { margin-top: 32px !important; margin-bottom: 32px !important; } + /* Set a $size margin to all sides at $breakpoint */ + .m-xl-6 { margin: 40px !important; } + /* Set a $size margin on the top at $breakpoint */ + .mt-xl-6 { margin-top: 40px !important; } + /* Set a $size margin on the right at $breakpoint */ + .mr-xl-6 { margin-right: 40px !important; } + /* Set a $size margin on the bottom at $breakpoint */ + .mb-xl-6 { margin-bottom: 40px !important; } + /* Set a $size margin on the left at $breakpoint */ + .ml-xl-6 { margin-left: 40px !important; } + /* Set a negative $size margin on top at $breakpoint */ + .mt-xl-n6 { margin-top: -40px !important; } + /* Set a negative $size margin on the right at $breakpoint */ + .mr-xl-n6 { margin-right: -40px !important; } + /* Set a negative $size margin on the bottom at $breakpoint */ + .mb-xl-n6 { margin-bottom: -40px !important; } + /* Set a negative $size margin on the left at $breakpoint */ + .ml-xl-n6 { margin-left: -40px !important; } + /* Set a $size margin on the left & right at $breakpoint */ + .mx-xl-6 { margin-right: 40px !important; margin-left: 40px !important; } + /* Set a $size margin on the top & bottom at $breakpoint */ + .my-xl-6 { margin-top: 40px !important; margin-bottom: 40px !important; } + /* responsive horizontal auto margins */ + .mx-xl-auto { margin-right: auto !important; margin-left: auto !important; } } +/* Set a $size padding to all sides at $breakpoint */ +.p-0 { padding: 0 !important; } + +/* Set a $size padding to the top at $breakpoint */ +.pt-0 { padding-top: 0 !important; } + +/* Set a $size padding to the right at $breakpoint */ +.pr-0 { padding-right: 0 !important; } + +/* Set a $size padding to the bottom at $breakpoint */ +.pb-0 { padding-bottom: 0 !important; } + +/* Set a $size padding to the left at $breakpoint */ +.pl-0 { padding-left: 0 !important; } + +/* Set a $size padding to the left & right at $breakpoint */ +.px-0 { padding-right: 0 !important; padding-left: 0 !important; } + +/* Set a $size padding to the top & bottom at $breakpoint */ +.py-0 { padding-top: 0 !important; padding-bottom: 0 !important; } + +/* Set a $size padding to all sides at $breakpoint */ +.p-1 { padding: 4px !important; } + +/* Set a $size padding to the top at $breakpoint */ +.pt-1 { padding-top: 4px !important; } + +/* Set a $size padding to the right at $breakpoint */ +.pr-1 { padding-right: 4px !important; } + +/* Set a $size padding to the bottom at $breakpoint */ +.pb-1 { padding-bottom: 4px !important; } + +/* Set a $size padding to the left at $breakpoint */ +.pl-1 { padding-left: 4px !important; } + +/* Set a $size padding to the left & right at $breakpoint */ +.px-1 { padding-right: 4px !important; padding-left: 4px !important; } + +/* Set a $size padding to the top & bottom at $breakpoint */ +.py-1 { padding-top: 4px !important; padding-bottom: 4px !important; } + +/* Set a $size padding to all 
sides at $breakpoint */ +.p-2 { padding: 8px !important; } + +/* Set a $size padding to the top at $breakpoint */ +.pt-2 { padding-top: 8px !important; } + +/* Set a $size padding to the right at $breakpoint */ +.pr-2 { padding-right: 8px !important; } + +/* Set a $size padding to the bottom at $breakpoint */ +.pb-2 { padding-bottom: 8px !important; } + +/* Set a $size padding to the left at $breakpoint */ +.pl-2 { padding-left: 8px !important; } + +/* Set a $size padding to the left & right at $breakpoint */ +.px-2 { padding-right: 8px !important; padding-left: 8px !important; } + +/* Set a $size padding to the top & bottom at $breakpoint */ +.py-2 { padding-top: 8px !important; padding-bottom: 8px !important; } + +/* Set a $size padding to all sides at $breakpoint */ +.p-3 { padding: 16px !important; } + +/* Set a $size padding to the top at $breakpoint */ +.pt-3 { padding-top: 16px !important; } + +/* Set a $size padding to the right at $breakpoint */ +.pr-3 { padding-right: 16px !important; } + +/* Set a $size padding to the bottom at $breakpoint */ +.pb-3 { padding-bottom: 16px !important; } + +/* Set a $size padding to the left at $breakpoint */ +.pl-3 { padding-left: 16px !important; } + +/* Set a $size padding to the left & right at $breakpoint */ +.px-3 { padding-right: 16px !important; padding-left: 16px !important; } + +/* Set a $size padding to the top & bottom at $breakpoint */ +.py-3 { padding-top: 16px !important; padding-bottom: 16px !important; } + +/* Set a $size padding to all sides at $breakpoint */ +.p-4 { padding: 24px !important; } + +/* Set a $size padding to the top at $breakpoint */ +.pt-4 { padding-top: 24px !important; } + +/* Set a $size padding to the right at $breakpoint */ +.pr-4 { padding-right: 24px !important; } + +/* Set a $size padding to the bottom at $breakpoint */ +.pb-4 { padding-bottom: 24px !important; } + +/* Set a $size padding to the left at $breakpoint */ +.pl-4 { padding-left: 24px !important; } + +/* Set a $size padding to the left & right at $breakpoint */ +.px-4 { padding-right: 24px !important; padding-left: 24px !important; } + +/* Set a $size padding to the top & bottom at $breakpoint */ +.py-4 { padding-top: 24px !important; padding-bottom: 24px !important; } + +/* Set a $size padding to all sides at $breakpoint */ +.p-5 { padding: 32px !important; } + +/* Set a $size padding to the top at $breakpoint */ +.pt-5 { padding-top: 32px !important; } + +/* Set a $size padding to the right at $breakpoint */ +.pr-5 { padding-right: 32px !important; } + +/* Set a $size padding to the bottom at $breakpoint */ +.pb-5 { padding-bottom: 32px !important; } + +/* Set a $size padding to the left at $breakpoint */ +.pl-5 { padding-left: 32px !important; } + +/* Set a $size padding to the left & right at $breakpoint */ +.px-5 { padding-right: 32px !important; padding-left: 32px !important; } + +/* Set a $size padding to the top & bottom at $breakpoint */ +.py-5 { padding-top: 32px !important; padding-bottom: 32px !important; } + +/* Set a $size padding to all sides at $breakpoint */ +.p-6 { padding: 40px !important; } + +/* Set a $size padding to the top at $breakpoint */ +.pt-6 { padding-top: 40px !important; } + +/* Set a $size padding to the right at $breakpoint */ +.pr-6 { padding-right: 40px !important; } + +/* Set a $size padding to the bottom at $breakpoint */ +.pb-6 { padding-bottom: 40px !important; } + +/* Set a $size padding to the left at $breakpoint */ +.pl-6 { padding-left: 40px !important; } + +/* Set a $size padding to the left & right 
at $breakpoint */ +.px-6 { padding-right: 40px !important; padding-left: 40px !important; } + +/* Set a $size padding to the top & bottom at $breakpoint */ +.py-6 { padding-top: 40px !important; padding-bottom: 40px !important; } + +@media (min-width: 544px) { /* Set a $size padding to all sides at $breakpoint */ + .p-sm-0 { padding: 0 !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-sm-0 { padding-top: 0 !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-sm-0 { padding-right: 0 !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-sm-0 { padding-bottom: 0 !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-sm-0 { padding-left: 0 !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-sm-0 { padding-right: 0 !important; padding-left: 0 !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-sm-0 { padding-top: 0 !important; padding-bottom: 0 !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-sm-1 { padding: 4px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-sm-1 { padding-top: 4px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-sm-1 { padding-right: 4px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-sm-1 { padding-bottom: 4px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-sm-1 { padding-left: 4px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-sm-1 { padding-right: 4px !important; padding-left: 4px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-sm-1 { padding-top: 4px !important; padding-bottom: 4px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-sm-2 { padding: 8px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-sm-2 { padding-top: 8px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-sm-2 { padding-right: 8px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-sm-2 { padding-bottom: 8px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-sm-2 { padding-left: 8px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-sm-2 { padding-right: 8px !important; padding-left: 8px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-sm-2 { padding-top: 8px !important; padding-bottom: 8px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-sm-3 { padding: 16px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-sm-3 { padding-top: 16px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-sm-3 { padding-right: 16px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-sm-3 { padding-bottom: 16px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-sm-3 { padding-left: 16px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-sm-3 { padding-right: 16px !important; padding-left: 16px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-sm-3 { padding-top: 16px !important; padding-bottom: 16px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-sm-4 { padding: 24px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-sm-4 { padding-top: 24px !important; } + 
/* Set a $size padding to the right at $breakpoint */ + .pr-sm-4 { padding-right: 24px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-sm-4 { padding-bottom: 24px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-sm-4 { padding-left: 24px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-sm-4 { padding-right: 24px !important; padding-left: 24px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-sm-4 { padding-top: 24px !important; padding-bottom: 24px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-sm-5 { padding: 32px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-sm-5 { padding-top: 32px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-sm-5 { padding-right: 32px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-sm-5 { padding-bottom: 32px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-sm-5 { padding-left: 32px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-sm-5 { padding-right: 32px !important; padding-left: 32px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-sm-5 { padding-top: 32px !important; padding-bottom: 32px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-sm-6 { padding: 40px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-sm-6 { padding-top: 40px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-sm-6 { padding-right: 40px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-sm-6 { padding-bottom: 40px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-sm-6 { padding-left: 40px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-sm-6 { padding-right: 40px !important; padding-left: 40px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-sm-6 { padding-top: 40px !important; padding-bottom: 40px !important; } } +@media (min-width: 768px) { /* Set a $size padding to all sides at $breakpoint */ + .p-md-0 { padding: 0 !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-md-0 { padding-top: 0 !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-md-0 { padding-right: 0 !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-md-0 { padding-bottom: 0 !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-md-0 { padding-left: 0 !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-md-0 { padding-right: 0 !important; padding-left: 0 !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-md-0 { padding-top: 0 !important; padding-bottom: 0 !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-md-1 { padding: 4px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-md-1 { padding-top: 4px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-md-1 { padding-right: 4px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-md-1 { padding-bottom: 4px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-md-1 { padding-left: 4px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-md-1 { padding-right: 4px !important; 
padding-left: 4px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-md-1 { padding-top: 4px !important; padding-bottom: 4px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-md-2 { padding: 8px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-md-2 { padding-top: 8px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-md-2 { padding-right: 8px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-md-2 { padding-bottom: 8px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-md-2 { padding-left: 8px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-md-2 { padding-right: 8px !important; padding-left: 8px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-md-2 { padding-top: 8px !important; padding-bottom: 8px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-md-3 { padding: 16px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-md-3 { padding-top: 16px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-md-3 { padding-right: 16px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-md-3 { padding-bottom: 16px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-md-3 { padding-left: 16px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-md-3 { padding-right: 16px !important; padding-left: 16px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-md-3 { padding-top: 16px !important; padding-bottom: 16px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-md-4 { padding: 24px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-md-4 { padding-top: 24px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-md-4 { padding-right: 24px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-md-4 { padding-bottom: 24px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-md-4 { padding-left: 24px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-md-4 { padding-right: 24px !important; padding-left: 24px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-md-4 { padding-top: 24px !important; padding-bottom: 24px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-md-5 { padding: 32px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-md-5 { padding-top: 32px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-md-5 { padding-right: 32px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-md-5 { padding-bottom: 32px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-md-5 { padding-left: 32px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-md-5 { padding-right: 32px !important; padding-left: 32px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-md-5 { padding-top: 32px !important; padding-bottom: 32px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-md-6 { padding: 40px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-md-6 { padding-top: 40px !important; } + /* Set a $size padding to the right at $breakpoint 
*/ + .pr-md-6 { padding-right: 40px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-md-6 { padding-bottom: 40px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-md-6 { padding-left: 40px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-md-6 { padding-right: 40px !important; padding-left: 40px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-md-6 { padding-top: 40px !important; padding-bottom: 40px !important; } } +@media (min-width: 1012px) { /* Set a $size padding to all sides at $breakpoint */ + .p-lg-0 { padding: 0 !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-lg-0 { padding-top: 0 !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-lg-0 { padding-right: 0 !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-lg-0 { padding-bottom: 0 !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-lg-0 { padding-left: 0 !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-lg-0 { padding-right: 0 !important; padding-left: 0 !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-lg-0 { padding-top: 0 !important; padding-bottom: 0 !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-lg-1 { padding: 4px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-lg-1 { padding-top: 4px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-lg-1 { padding-right: 4px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-lg-1 { padding-bottom: 4px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-lg-1 { padding-left: 4px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-lg-1 { padding-right: 4px !important; padding-left: 4px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-lg-1 { padding-top: 4px !important; padding-bottom: 4px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-lg-2 { padding: 8px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-lg-2 { padding-top: 8px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-lg-2 { padding-right: 8px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-lg-2 { padding-bottom: 8px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-lg-2 { padding-left: 8px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-lg-2 { padding-right: 8px !important; padding-left: 8px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-lg-2 { padding-top: 8px !important; padding-bottom: 8px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-lg-3 { padding: 16px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-lg-3 { padding-top: 16px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-lg-3 { padding-right: 16px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-lg-3 { padding-bottom: 16px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-lg-3 { padding-left: 16px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-lg-3 { padding-right: 16px !important; padding-left: 16px !important; } + /* Set a $size padding to 
the top & bottom at $breakpoint */ + .py-lg-3 { padding-top: 16px !important; padding-bottom: 16px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-lg-4 { padding: 24px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-lg-4 { padding-top: 24px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-lg-4 { padding-right: 24px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-lg-4 { padding-bottom: 24px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-lg-4 { padding-left: 24px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-lg-4 { padding-right: 24px !important; padding-left: 24px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-lg-4 { padding-top: 24px !important; padding-bottom: 24px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-lg-5 { padding: 32px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-lg-5 { padding-top: 32px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-lg-5 { padding-right: 32px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-lg-5 { padding-bottom: 32px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-lg-5 { padding-left: 32px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-lg-5 { padding-right: 32px !important; padding-left: 32px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-lg-5 { padding-top: 32px !important; padding-bottom: 32px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-lg-6 { padding: 40px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-lg-6 { padding-top: 40px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-lg-6 { padding-right: 40px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-lg-6 { padding-bottom: 40px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-lg-6 { padding-left: 40px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-lg-6 { padding-right: 40px !important; padding-left: 40px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-lg-6 { padding-top: 40px !important; padding-bottom: 40px !important; } } +@media (min-width: 1280px) { /* Set a $size padding to all sides at $breakpoint */ + .p-xl-0 { padding: 0 !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-xl-0 { padding-top: 0 !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-xl-0 { padding-right: 0 !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-xl-0 { padding-bottom: 0 !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-xl-0 { padding-left: 0 !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-xl-0 { padding-right: 0 !important; padding-left: 0 !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-xl-0 { padding-top: 0 !important; padding-bottom: 0 !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-xl-1 { padding: 4px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-xl-1 { padding-top: 4px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-xl-1 { padding-right: 4px !important; } 
+ /* Set a $size padding to the bottom at $breakpoint */ + .pb-xl-1 { padding-bottom: 4px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-xl-1 { padding-left: 4px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-xl-1 { padding-right: 4px !important; padding-left: 4px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-xl-1 { padding-top: 4px !important; padding-bottom: 4px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-xl-2 { padding: 8px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-xl-2 { padding-top: 8px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-xl-2 { padding-right: 8px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-xl-2 { padding-bottom: 8px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-xl-2 { padding-left: 8px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-xl-2 { padding-right: 8px !important; padding-left: 8px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-xl-2 { padding-top: 8px !important; padding-bottom: 8px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-xl-3 { padding: 16px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-xl-3 { padding-top: 16px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-xl-3 { padding-right: 16px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-xl-3 { padding-bottom: 16px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-xl-3 { padding-left: 16px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-xl-3 { padding-right: 16px !important; padding-left: 16px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-xl-3 { padding-top: 16px !important; padding-bottom: 16px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-xl-4 { padding: 24px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-xl-4 { padding-top: 24px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-xl-4 { padding-right: 24px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-xl-4 { padding-bottom: 24px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-xl-4 { padding-left: 24px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-xl-4 { padding-right: 24px !important; padding-left: 24px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-xl-4 { padding-top: 24px !important; padding-bottom: 24px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-xl-5 { padding: 32px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-xl-5 { padding-top: 32px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-xl-5 { padding-right: 32px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-xl-5 { padding-bottom: 32px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-xl-5 { padding-left: 32px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-xl-5 { padding-right: 32px !important; padding-left: 32px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-xl-5 { 
padding-top: 32px !important; padding-bottom: 32px !important; } + /* Set a $size padding to all sides at $breakpoint */ + .p-xl-6 { padding: 40px !important; } + /* Set a $size padding to the top at $breakpoint */ + .pt-xl-6 { padding-top: 40px !important; } + /* Set a $size padding to the right at $breakpoint */ + .pr-xl-6 { padding-right: 40px !important; } + /* Set a $size padding to the bottom at $breakpoint */ + .pb-xl-6 { padding-bottom: 40px !important; } + /* Set a $size padding to the left at $breakpoint */ + .pl-xl-6 { padding-left: 40px !important; } + /* Set a $size padding to the left & right at $breakpoint */ + .px-xl-6 { padding-right: 40px !important; padding-left: 40px !important; } + /* Set a $size padding to the top & bottom at $breakpoint */ + .py-xl-6 { padding-top: 40px !important; padding-bottom: 40px !important; } } +.p-responsive { padding-right: 16px !important; padding-left: 16px !important; } +@media (min-width: 544px) { .p-responsive { padding-right: 40px !important; padding-left: 40px !important; } } +@media (min-width: 1012px) { .p-responsive { padding-right: 16px !important; padding-left: 16px !important; } } + +/* Set the font size to 26px */ +.h1 { font-size: 26px !important; } +@media (min-width: 768px) { .h1 { font-size: 32px !important; } } + +/* Set the font size to 22px */ +.h2 { font-size: 22px !important; } +@media (min-width: 768px) { .h2 { font-size: 24px !important; } } + +/* Set the font size to 18px */ +.h3 { font-size: 18px !important; } +@media (min-width: 768px) { .h3 { font-size: 20px !important; } } + +/* Set the font size to 16px */ +.h4 { font-size: 16px !important; } + +/* Set the font size to 14px */ +.h5 { font-size: 14px !important; } + +/* Set the font size to 12px */ +.h6 { font-size: 12px !important; } + +.h1, .h2, .h3, .h4, .h5, .h6 { font-weight: 600 !important; } + +/* Set the font size to 26px */ +.f1 { font-size: 26px !important; } +@media (min-width: 768px) { .f1 { font-size: 32px !important; } } + +/* Set the font size to 22px */ +.f2 { font-size: 22px !important; } +@media (min-width: 768px) { .f2 { font-size: 24px !important; } } + +/* Set the font size to 18px */ +.f3 { font-size: 18px !important; } +@media (min-width: 768px) { .f3 { font-size: 20px !important; } } + +/* Set the font size to 16px */ +.f4 { font-size: 16px !important; } +@media (min-width: 768px) { .f4 { font-size: 16px !important; } } + +/* Set the font size to 14px */ +.f5 { font-size: 14px !important; } + +/* Set the font size to 12px */ +.f6 { font-size: 12px !important; } + +/* Set the font size to 40px and weight to light */ +.f00-light { font-size: 40px !important; font-weight: 300 !important; } +@media (min-width: 768px) { .f00-light { font-size: 48px !important; } } + +/* Set the font size to 32px and weight to light */ +.f0-light { font-size: 32px !important; font-weight: 300 !important; } +@media (min-width: 768px) { .f0-light { font-size: 40px !important; } } + +/* Set the font size to 26px and weight to light */ +.f1-light { font-size: 26px !important; font-weight: 300 !important; } +@media (min-width: 768px) { .f1-light { font-size: 32px !important; } } + +/* Set the font size to 22px and weight to light */ +.f2-light { font-size: 22px !important; font-weight: 300 !important; } +@media (min-width: 768px) { .f2-light { font-size: 24px !important; } } + +/* Set the font size to 18px and weight to light */ +.f3-light { font-size: 18px !important; font-weight: 300 !important; } +@media (min-width: 768px) { .f3-light { font-size: 20px 
!important; } } + +/* Set the font size to ${#h6-size} */ +.text-small { font-size: 12px !important; } + +/* Large leading paragraphs */ +.lead { margin-bottom: 30px; font-size: 20px; font-weight: 300; color: #586069; } + +/* Set the line height to ultra condensed */ +.lh-condensed-ultra { line-height: 1 !important; } + +/* Set the line height to condensed */ +.lh-condensed { line-height: 1.25 !important; } + +/* Set the line height to default */ +.lh-default { line-height: 1.5 !important; } + +/* Set the line height to zero */ +.lh-0 { line-height: 0 !important; } + +/* Text align to the right */ +.text-right { text-align: right !important; } + +/* Text align to the left */ +.text-left { text-align: left !important; } + +/* Text align to the center */ +.text-center { text-align: center !important; } + +@media (min-width: 544px) { /* Text align to the right */ + .text-sm-right { text-align: right !important; } + /* Text align to the left */ + .text-sm-left { text-align: left !important; } + /* Text align to the center */ + .text-sm-center { text-align: center !important; } } +@media (min-width: 768px) { /* Text align to the right */ + .text-md-right { text-align: right !important; } + /* Text align to the left */ + .text-md-left { text-align: left !important; } + /* Text align to the center */ + .text-md-center { text-align: center !important; } } +@media (min-width: 1012px) { /* Text align to the right */ + .text-lg-right { text-align: right !important; } + /* Text align to the left */ + .text-lg-left { text-align: left !important; } + /* Text align to the center */ + .text-lg-center { text-align: center !important; } } +@media (min-width: 1280px) { /* Text align to the right */ + .text-xl-right { text-align: right !important; } + /* Text align to the left */ + .text-xl-left { text-align: left !important; } + /* Text align to the center */ + .text-xl-center { text-align: center !important; } } +/* Set the font weight to normal */ +.text-normal { font-weight: 400 !important; } + +/* Set the font weight to bold */ +.text-bold { font-weight: 600 !important; } + +/* Set the font to italic */ +.text-italic { font-style: italic !important; } + +/* Make text uppercase */ +.text-uppercase { text-transform: uppercase !important; } + +/* Underline text */ +.text-underline { text-decoration: underline !important; } + +/* Don't underline text */ +.no-underline { text-decoration: none !important; } + +/* Don't wrap white space */ +.no-wrap { white-space: nowrap !important; } + +/* Normal white space */ +.ws-normal { white-space: normal !important; } + +/* Allow long lines with no spaces to line break */ +.wb-break-all { word-break: break-all !important; } + +.text-emphasized { font-weight: 600; color: #24292e; } + +.list-style-none { list-style: none !important; } + +/* Add a dark text shadow */ +.text-shadow-dark { text-shadow: 0 1px 1px rgba(27, 31, 35, 0.25), 0 1px 25px rgba(27, 31, 35, 0.75); } + +/* Add a light text shadow */ +.text-shadow-light { text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5); } + +/* Set to monospace font */ +.text-mono { font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, Courier, monospace; } + +/* Disallow user from selecting text */ +.user-select-none { user-select: none !important; } + +.d-block { display: block !important; } + +.d-flex { display: flex !important; } + +.d-inline { display: inline !important; } + +.d-inline-block { display: inline-block !important; } + +.d-inline-flex { display: inline-flex !important; } + +.d-none { display: none !important; 
} + +.d-table { display: table !important; } + +.d-table-cell { display: table-cell !important; } + +@media (min-width: 544px) { .d-sm-block { display: block !important; } + .d-sm-flex { display: flex !important; } + .d-sm-inline { display: inline !important; } + .d-sm-inline-block { display: inline-block !important; } + .d-sm-inline-flex { display: inline-flex !important; } + .d-sm-none { display: none !important; } + .d-sm-table { display: table !important; } + .d-sm-table-cell { display: table-cell !important; } } +@media (min-width: 768px) { .d-md-block { display: block !important; } + .d-md-flex { display: flex !important; } + .d-md-inline { display: inline !important; } + .d-md-inline-block { display: inline-block !important; } + .d-md-inline-flex { display: inline-flex !important; } + .d-md-none { display: none !important; } + .d-md-table { display: table !important; } + .d-md-table-cell { display: table-cell !important; } } +@media (min-width: 1012px) { .d-lg-block { display: block !important; } + .d-lg-flex { display: flex !important; } + .d-lg-inline { display: inline !important; } + .d-lg-inline-block { display: inline-block !important; } + .d-lg-inline-flex { display: inline-flex !important; } + .d-lg-none { display: none !important; } + .d-lg-table { display: table !important; } + .d-lg-table-cell { display: table-cell !important; } } +@media (min-width: 1280px) { .d-xl-block { display: block !important; } + .d-xl-flex { display: flex !important; } + .d-xl-inline { display: inline !important; } + .d-xl-inline-block { display: inline-block !important; } + .d-xl-inline-flex { display: inline-flex !important; } + .d-xl-none { display: none !important; } + .d-xl-table { display: table !important; } + .d-xl-table-cell { display: table-cell !important; } } +.v-hidden { visibility: hidden !important; } + +.v-visible { visibility: visible !important; } + +@media (max-width: 544px) { .hide-sm { display: none !important; } } +@media (min-width: 544px) and (max-width: 768px) { .hide-md { display: none !important; } } +@media (min-width: 768px) and (max-width: 1012px) { .hide-lg { display: none !important; } } +@media (min-width: 1012px) { .hide-xl { display: none !important; } } +/* Set the table-layout to fixed */ +.table-fixed { table-layout: fixed !important; } + +.sr-only { position: absolute; width: 1px; height: 1px; padding: 0; overflow: hidden; clip: rect(0, 0, 0, 0); word-wrap: normal; border: 0; } + +.show-on-focus { position: absolute; width: 1px; height: 1px; margin: 0; overflow: hidden; clip: rect(1px, 1px, 1px, 1px); } +.show-on-focus:focus { z-index: 20; width: auto; height: auto; clip: auto; } + +.container { width: 980px; margin-right: auto; margin-left: auto; } +.container::before { display: table; content: ""; } +.container::after { display: table; clear: both; content: ""; } + +.container-md { max-width: 768px; margin-right: auto; margin-left: auto; } + +.container-lg { max-width: 1012px; margin-right: auto; margin-left: auto; } + +.container-xl { max-width: 1280px; margin-right: auto; margin-left: auto; } + +.columns { margin-right: -10px; margin-left: -10px; } +.columns::before { display: table; content: ""; } +.columns::after { display: table; clear: both; content: ""; } + +.column { float: left; padding-right: 10px; padding-left: 10px; } + +.one-third { width: 33.333333%; } + +.two-thirds { width: 66.666667%; } + +.one-fourth { width: 25%; } + +.one-half { width: 50%; } + +.three-fourths { width: 75%; } + +.one-fifth { width: 20%; } + +.four-fifths { width: 80%; } 
+ +.centered { display: block; float: none; margin-right: auto; margin-left: auto; } + +.col-1 { width: 8.3333333333%; } + +.col-2 { width: 16.6666666667%; } + +.col-3 { width: 25%; } + +.col-4 { width: 33.3333333333%; } + +.col-5 { width: 41.6666666667%; } + +.col-6 { width: 50%; } + +.col-7 { width: 58.3333333333%; } + +.col-8 { width: 66.6666666667%; } + +.col-9 { width: 75%; } + +.col-10 { width: 83.3333333333%; } + +.col-11 { width: 91.6666666667%; } + +.col-12 { width: 100%; } + +@media (min-width: 544px) { .col-sm-1 { width: 8.3333333333%; } + .col-sm-2 { width: 16.6666666667%; } + .col-sm-3 { width: 25%; } + .col-sm-4 { width: 33.3333333333%; } + .col-sm-5 { width: 41.6666666667%; } + .col-sm-6 { width: 50%; } + .col-sm-7 { width: 58.3333333333%; } + .col-sm-8 { width: 66.6666666667%; } + .col-sm-9 { width: 75%; } + .col-sm-10 { width: 83.3333333333%; } + .col-sm-11 { width: 91.6666666667%; } + .col-sm-12 { width: 100%; } } +@media (min-width: 768px) { .col-md-1 { width: 8.3333333333%; } + .col-md-2 { width: 16.6666666667%; } + .col-md-3 { width: 25%; } + .col-md-4 { width: 33.3333333333%; } + .col-md-5 { width: 41.6666666667%; } + .col-md-6 { width: 50%; } + .col-md-7 { width: 58.3333333333%; } + .col-md-8 { width: 66.6666666667%; } + .col-md-9 { width: 75%; } + .col-md-10 { width: 83.3333333333%; } + .col-md-11 { width: 91.6666666667%; } + .col-md-12 { width: 100%; } } +@media (min-width: 1012px) { .col-lg-1 { width: 8.3333333333%; } + .col-lg-2 { width: 16.6666666667%; } + .col-lg-3 { width: 25%; } + .col-lg-4 { width: 33.3333333333%; } + .col-lg-5 { width: 41.6666666667%; } + .col-lg-6 { width: 50%; } + .col-lg-7 { width: 58.3333333333%; } + .col-lg-8 { width: 66.6666666667%; } + .col-lg-9 { width: 75%; } + .col-lg-10 { width: 83.3333333333%; } + .col-lg-11 { width: 91.6666666667%; } + .col-lg-12 { width: 100%; } } +@media (min-width: 1280px) { .col-xl-1 { width: 8.3333333333%; } + .col-xl-2 { width: 16.6666666667%; } + .col-xl-3 { width: 25%; } + .col-xl-4 { width: 33.3333333333%; } + .col-xl-5 { width: 41.6666666667%; } + .col-xl-6 { width: 50%; } + .col-xl-7 { width: 58.3333333333%; } + .col-xl-8 { width: 66.6666666667%; } + .col-xl-9 { width: 75%; } + .col-xl-10 { width: 83.3333333333%; } + .col-xl-11 { width: 91.6666666667%; } + .col-xl-12 { width: 100%; } } +.gutter { margin-right: -16px; margin-left: -16px; } +.gutter > [class*="col-"] { padding-right: 16px !important; padding-left: 16px !important; } + +.gutter-condensed { margin-right: -8px; margin-left: -8px; } +.gutter-condensed > [class*="col-"] { padding-right: 8px !important; padding-left: 8px !important; } + +.gutter-spacious { margin-right: -24px; margin-left: -24px; } +.gutter-spacious > [class*="col-"] { padding-right: 24px !important; padding-left: 24px !important; } + +@media (min-width: 544px) { .gutter-sm { margin-right: -16px; margin-left: -16px; } + .gutter-sm > [class*="col-"] { padding-right: 16px !important; padding-left: 16px !important; } + .gutter-sm-condensed { margin-right: -8px; margin-left: -8px; } + .gutter-sm-condensed > [class*="col-"] { padding-right: 8px !important; padding-left: 8px !important; } + .gutter-sm-spacious { margin-right: -24px; margin-left: -24px; } + .gutter-sm-spacious > [class*="col-"] { padding-right: 24px !important; padding-left: 24px !important; } } +@media (min-width: 768px) { .gutter-md { margin-right: -16px; margin-left: -16px; } + .gutter-md > [class*="col-"] { padding-right: 16px !important; padding-left: 16px !important; } + .gutter-md-condensed { margin-right: 
-8px; margin-left: -8px; } + .gutter-md-condensed > [class*="col-"] { padding-right: 8px !important; padding-left: 8px !important; } + .gutter-md-spacious { margin-right: -24px; margin-left: -24px; } + .gutter-md-spacious > [class*="col-"] { padding-right: 24px !important; padding-left: 24px !important; } } +@media (min-width: 1012px) { .gutter-lg { margin-right: -16px; margin-left: -16px; } + .gutter-lg > [class*="col-"] { padding-right: 16px !important; padding-left: 16px !important; } + .gutter-lg-condensed { margin-right: -8px; margin-left: -8px; } + .gutter-lg-condensed > [class*="col-"] { padding-right: 8px !important; padding-left: 8px !important; } + .gutter-lg-spacious { margin-right: -24px; margin-left: -24px; } + .gutter-lg-spacious > [class*="col-"] { padding-right: 24px !important; padding-left: 24px !important; } } +@media (min-width: 1280px) { .gutter-xl { margin-right: -16px; margin-left: -16px; } + .gutter-xl > [class*="col-"] { padding-right: 16px !important; padding-left: 16px !important; } + .gutter-xl-condensed { margin-right: -8px; margin-left: -8px; } + .gutter-xl-condensed > [class*="col-"] { padding-right: 8px !important; padding-left: 8px !important; } + .gutter-xl-spacious { margin-right: -24px; margin-left: -24px; } + .gutter-xl-spacious > [class*="col-"] { padding-right: 24px !important; padding-left: 24px !important; } } +.offset-1 { margin-left: 8.3333333333% !important; } + +.offset-2 { margin-left: 16.6666666667% !important; } + +.offset-3 { margin-left: 25% !important; } + +.offset-4 { margin-left: 33.3333333333% !important; } + +.offset-5 { margin-left: 41.6666666667% !important; } + +.offset-6 { margin-left: 50% !important; } + +.offset-7 { margin-left: 58.3333333333% !important; } + +.offset-8 { margin-left: 66.6666666667% !important; } + +.offset-9 { margin-left: 75% !important; } + +.offset-10 { margin-left: 83.3333333333% !important; } + +.offset-11 { margin-left: 91.6666666667% !important; } + +@media (min-width: 544px) { .offset-sm-1 { margin-left: 8.3333333333% !important; } + .offset-sm-2 { margin-left: 16.6666666667% !important; } + .offset-sm-3 { margin-left: 25% !important; } + .offset-sm-4 { margin-left: 33.3333333333% !important; } + .offset-sm-5 { margin-left: 41.6666666667% !important; } + .offset-sm-6 { margin-left: 50% !important; } + .offset-sm-7 { margin-left: 58.3333333333% !important; } + .offset-sm-8 { margin-left: 66.6666666667% !important; } + .offset-sm-9 { margin-left: 75% !important; } + .offset-sm-10 { margin-left: 83.3333333333% !important; } + .offset-sm-11 { margin-left: 91.6666666667% !important; } } +@media (min-width: 768px) { .offset-md-1 { margin-left: 8.3333333333% !important; } + .offset-md-2 { margin-left: 16.6666666667% !important; } + .offset-md-3 { margin-left: 25% !important; } + .offset-md-4 { margin-left: 33.3333333333% !important; } + .offset-md-5 { margin-left: 41.6666666667% !important; } + .offset-md-6 { margin-left: 50% !important; } + .offset-md-7 { margin-left: 58.3333333333% !important; } + .offset-md-8 { margin-left: 66.6666666667% !important; } + .offset-md-9 { margin-left: 75% !important; } + .offset-md-10 { margin-left: 83.3333333333% !important; } + .offset-md-11 { margin-left: 91.6666666667% !important; } } +@media (min-width: 1012px) { .offset-lg-1 { margin-left: 8.3333333333% !important; } + .offset-lg-2 { margin-left: 16.6666666667% !important; } + .offset-lg-3 { margin-left: 25% !important; } + .offset-lg-4 { margin-left: 33.3333333333% !important; } + .offset-lg-5 { margin-left: 
41.6666666667% !important; } + .offset-lg-6 { margin-left: 50% !important; } + .offset-lg-7 { margin-left: 58.3333333333% !important; } + .offset-lg-8 { margin-left: 66.6666666667% !important; } + .offset-lg-9 { margin-left: 75% !important; } + .offset-lg-10 { margin-left: 83.3333333333% !important; } + .offset-lg-11 { margin-left: 91.6666666667% !important; } } +@media (min-width: 1280px) { .offset-xl-1 { margin-left: 8.3333333333% !important; } + .offset-xl-2 { margin-left: 16.6666666667% !important; } + .offset-xl-3 { margin-left: 25% !important; } + .offset-xl-4 { margin-left: 33.3333333333% !important; } + .offset-xl-5 { margin-left: 41.6666666667% !important; } + .offset-xl-6 { margin-left: 50% !important; } + .offset-xl-7 { margin-left: 58.3333333333% !important; } + .offset-xl-8 { margin-left: 66.6666666667% !important; } + .offset-xl-9 { margin-left: 75% !important; } + .offset-xl-10 { margin-left: 83.3333333333% !important; } + .offset-xl-11 { margin-left: 91.6666666667% !important; } } +.markdown-body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; font-size: 16px; line-height: 1.5; word-wrap: break-word; } +.markdown-body::before { display: table; content: ""; } +.markdown-body::after { display: table; clear: both; content: ""; } +.markdown-body > *:first-child { margin-top: 0 !important; } +.markdown-body > *:last-child { margin-bottom: 0 !important; } +.markdown-body a:not([href]) { color: inherit; text-decoration: none; } +.markdown-body .absent { color: #cb2431; } +.markdown-body .anchor { float: left; padding-right: 4px; margin-left: -20px; line-height: 1; } +.markdown-body .anchor:focus { outline: none; } +.markdown-body p, .markdown-body blockquote, .markdown-body ul, .markdown-body ol, .markdown-body dl, .markdown-body table, .markdown-body pre { margin-top: 0; margin-bottom: 16px; } +.markdown-body hr { height: 0.25em; padding: 0; margin: 24px 0; background-color: #e1e4e8; border: 0; } +.markdown-body blockquote { padding: 0 1em; color: #6a737d; border-left: 0.25em solid #dfe2e5; } +.markdown-body blockquote > :first-child { margin-top: 0; } +.markdown-body blockquote > :last-child { margin-bottom: 0; } +.markdown-body kbd { display: inline-block; padding: 3px 5px; font-size: 11px; line-height: 10px; color: #444d56; vertical-align: middle; background-color: #fafbfc; border: solid 1px #c6cbd1; border-bottom-color: #959da5; border-radius: 3px; box-shadow: inset 0 -1px 0 #959da5; } + +.markdown-body h1, .markdown-body h2, .markdown-body h3, .markdown-body h4, .markdown-body h5, .markdown-body h6 { margin-top: 24px; margin-bottom: 16px; font-weight: 600; line-height: 1.25; } +.markdown-body h1 .octicon-link, .markdown-body h2 .octicon-link, .markdown-body h3 .octicon-link, .markdown-body h4 .octicon-link, .markdown-body h5 .octicon-link, .markdown-body h6 .octicon-link { color: #1b1f23; vertical-align: middle; visibility: hidden; } +.markdown-body h1:hover .anchor, .markdown-body h2:hover .anchor, .markdown-body h3:hover .anchor, .markdown-body h4:hover .anchor, .markdown-body h5:hover .anchor, .markdown-body h6:hover .anchor { text-decoration: none; } +.markdown-body h1:hover .anchor .octicon-link, .markdown-body h2:hover .anchor .octicon-link, .markdown-body h3:hover .anchor .octicon-link, .markdown-body h4:hover .anchor .octicon-link, .markdown-body h5:hover .anchor .octicon-link, .markdown-body h6:hover .anchor .octicon-link { visibility: visible; } +.markdown-body h1 
tt, .markdown-body h1 code, .markdown-body h2 tt, .markdown-body h2 code, .markdown-body h3 tt, .markdown-body h3 code, .markdown-body h4 tt, .markdown-body h4 code, .markdown-body h5 tt, .markdown-body h5 code, .markdown-body h6 tt, .markdown-body h6 code { font-size: inherit; } +.markdown-body h1 { padding-bottom: 0.3em; font-size: 2em; border-bottom: 1px solid #eaecef; } +.markdown-body h2 { padding-bottom: 0.3em; font-size: 1.5em; border-bottom: 1px solid #eaecef; } +.markdown-body h3 { font-size: 1.25em; } +.markdown-body h4 { font-size: 1em; } +.markdown-body h5 { font-size: 0.875em; } +.markdown-body h6 { font-size: 0.85em; color: #6a737d; } + +.markdown-body ul, .markdown-body ol { padding-left: 2em; } +.markdown-body ul.no-list, .markdown-body ol.no-list { padding: 0; list-style-type: none; } +.markdown-body ul ul, .markdown-body ul ol, .markdown-body ol ol, .markdown-body ol ul { margin-top: 0; margin-bottom: 0; } +.markdown-body li { word-wrap: break-all; } +.markdown-body li > p { margin-top: 16px; } +.markdown-body li + li { margin-top: 0.25em; } +.markdown-body dl { padding: 0; } +.markdown-body dl dt { padding: 0; margin-top: 16px; font-size: 1em; font-style: italic; font-weight: 600; } +.markdown-body dl dd { padding: 0 16px; margin-bottom: 16px; } + +.markdown-body table { display: block; width: 100%; overflow: auto; } +.markdown-body table th { font-weight: 600; } +.markdown-body table th, .markdown-body table td { padding: 6px 13px; border: 1px solid #dfe2e5; } +.markdown-body table tr { background-color: #fff; border-top: 1px solid #c6cbd1; } +.markdown-body table tr:nth-child(2n) { background-color: #f6f8fa; } +.markdown-body table img { background-color: transparent; } + +.markdown-body img { max-width: 100%; box-sizing: content-box; background-color: #fff; } +.markdown-body img[align=right] { padding-left: 20px; } +.markdown-body img[align=left] { padding-right: 20px; } +.markdown-body .emoji { max-width: none; vertical-align: text-top; background-color: transparent; } +.markdown-body span.frame { display: block; overflow: hidden; } +.markdown-body span.frame > span { display: block; float: left; width: auto; padding: 7px; margin: 13px 0 0; overflow: hidden; border: 1px solid #dfe2e5; } +.markdown-body span.frame span img { display: block; float: left; } +.markdown-body span.frame span span { display: block; padding: 5px 0 0; clear: both; color: #24292e; } +.markdown-body span.align-center { display: block; overflow: hidden; clear: both; } +.markdown-body span.align-center > span { display: block; margin: 13px auto 0; overflow: hidden; text-align: center; } +.markdown-body span.align-center span img { margin: 0 auto; text-align: center; } +.markdown-body span.align-right { display: block; overflow: hidden; clear: both; } +.markdown-body span.align-right > span { display: block; margin: 13px 0 0; overflow: hidden; text-align: right; } +.markdown-body span.align-right span img { margin: 0; text-align: right; } +.markdown-body span.float-left { display: block; float: left; margin-right: 13px; overflow: hidden; } +.markdown-body span.float-left span { margin: 13px 0 0; } +.markdown-body span.float-right { display: block; float: right; margin-left: 13px; overflow: hidden; } +.markdown-body span.float-right > span { display: block; margin: 13px auto 0; overflow: hidden; text-align: right; } + +.markdown-body code, .markdown-body tt { padding: 0.2em 0.4em; margin: 0; font-size: 85%; background-color: rgba(27, 31, 35, 0.05); border-radius: 3px; } +.markdown-body code br, 
.markdown-body tt br { display: none; } +.markdown-body del code { text-decoration: inherit; } +.markdown-body pre { word-wrap: normal; } +.markdown-body pre > code { padding: 0; margin: 0; font-size: 100%; word-break: normal; white-space: pre; background: transparent; border: 0; } +.markdown-body .highlight { margin-bottom: 16px; } +.markdown-body .highlight pre { margin-bottom: 0; word-break: normal; } +.markdown-body .highlight pre, .markdown-body pre { padding: 16px; overflow: auto; font-size: 85%; line-height: 1.45; background-color: #f6f8fa; border-radius: 3px; } +.markdown-body pre code, .markdown-body pre tt { display: inline; max-width: auto; padding: 0; margin: 0; overflow: visible; line-height: inherit; word-wrap: normal; background-color: transparent; border: 0; } + +.markdown-body .csv-data td, .markdown-body .csv-data th { padding: 5px; overflow: hidden; font-size: 12px; line-height: 1; text-align: left; white-space: nowrap; } +.markdown-body .csv-data .blob-num { padding: 10px 8px 9px; text-align: right; background: #fff; border: 0; } +.markdown-body .csv-data tr { border-top: 0; } +.markdown-body .csv-data th { font-weight: 600; background: #f6f8fa; border-top: 0; } + +.highlight table td { padding: 5px; } + +.highlight table pre { margin: 0; } + +.highlight .cm { color: #999988; font-style: italic; } + +.highlight .cp { color: #999999; font-weight: bold; } + +.highlight .c1 { color: #999988; font-style: italic; } + +.highlight .cs { color: #999999; font-weight: bold; font-style: italic; } + +.highlight .c, .highlight .cd { color: #999988; font-style: italic; } + +.highlight .err { color: #a61717; background-color: #e3d2d2; } + +.highlight .gd { color: #000000; background-color: #ffdddd; } + +.highlight .ge { color: #000000; font-style: italic; } + +.highlight .gr { color: #aa0000; } + +.highlight .gh { color: #999999; } + +.highlight .gi { color: #000000; background-color: #ddffdd; } + +.highlight .go { color: #888888; } + +.highlight .gp { color: #555555; } + +.highlight .gs { font-weight: bold; } + +.highlight .gu { color: #aaaaaa; } + +.highlight .gt { color: #aa0000; } + +.highlight .kc { color: #000000; font-weight: bold; } + +.highlight .kd { color: #000000; font-weight: bold; } + +.highlight .kn { color: #000000; font-weight: bold; } + +.highlight .kp { color: #000000; font-weight: bold; } + +.highlight .kr { color: #000000; font-weight: bold; } + +.highlight .kt { color: #445588; font-weight: bold; } + +.highlight .k, .highlight .kv { color: #000000; font-weight: bold; } + +.highlight .mf { color: #009999; } + +.highlight .mh { color: #009999; } + +.highlight .il { color: #009999; } + +.highlight .mi { color: #009999; } + +.highlight .mo { color: #009999; } + +.highlight .m, .highlight .mb, .highlight .mx { color: #009999; } + +.highlight .sb { color: #d14; } + +.highlight .sc { color: #d14; } + +.highlight .sd { color: #d14; } + +.highlight .s2 { color: #d14; } + +.highlight .se { color: #d14; } + +.highlight .sh { color: #d14; } + +.highlight .si { color: #d14; } + +.highlight .sx { color: #d14; } + +.highlight .sr { color: #009926; } + +.highlight .s1 { color: #d14; } + +.highlight .ss { color: #990073; } + +.highlight .s { color: #d14; } + +.highlight .na { color: #008080; } + +.highlight .bp { color: #999999; } + +.highlight .nb { color: #0086B3; } + +.highlight .nc { color: #445588; font-weight: bold; } + +.highlight .no { color: #008080; } + +.highlight .nd { color: #3c5d5d; font-weight: bold; } + +.highlight .ni { color: #800080; } + +.highlight .ne { 
color: #990000; font-weight: bold; } + +.highlight .nf { color: #990000; font-weight: bold; } + +.highlight .nl { color: #990000; font-weight: bold; } + +.highlight .nn { color: #555555; } + +.highlight .nt { color: #000080; } + +.highlight .vc { color: #008080; } + +.highlight .vg { color: #008080; } + +.highlight .vi { color: #008080; } + +.highlight .nv { color: #008080; } + +.highlight .ow { color: #000000; font-weight: bold; } + +.highlight .o { color: #000000; font-weight: bold; } + +.highlight .w { color: #bbbbbb; } + +.highlight { background-color: #f8f8f8; } diff --git a/blog.html b/blog.html index f5791f6..52030b8 100644 --- a/blog.html +++ b/blog.html @@ -1,9 +1,146 @@ ---- -layout: default -title: Blog ---- - -
+ + + + + + + + + Robotics Group @ University of Montreal | Blog + + + + + + + + + + + +
+ + + + + + + +
@@ -13,6 +150,451 @@
-{% for post in site.posts %} - {% include news-item.html item=post %} -{% endfor %} + + + + + + + + + +
+ + + December 05, 2020 + + +
+ + Krishna won an NVIDIA fellowship for 2021-22. Congratulations! + + +
+ + +
+
+ + + + + + + + + + +
+ + + November 30, 2020 + + +
+ + We released gradslam - a differentiable dense SLAM framework for deep learning. Check it out! + + +
+ + +
+
+ + + + + + + + + + +
+ + + October 30, 2020 + + +
+ + We organized an IROS workshop on Benchmarking progress in autonomous driving. + + +
+ + +
+
+ + + + + + + + + + +
+ + + October 15, 2020 + + +
+ + Check out our new NeurIPS 2020 oral paper La-MAML: Look-Ahead Meta-Learning for Continual Learning [Code], [Short Video]. + + +
+ + +
+
+ + + + + + + + + + +
+ + + October 10, 2020 + + +
+ + Two papers accepted to NeurIPS 2020 (one of them an oral - top 1.1%). Congratulations, Gunshi and Ruixiang! + + +
+ + +
+
+ + + + + + + + + + +
+ + + September 10, 2020 + + +
+ + Robot learning seminar series launched! + + +
+ + +
+
+ + + + + + + + + + +
+ + + June 30, 2020 + + +
+ + Gunshi Gupta successfully completes her M.Sc. and joins Wayve as a deep learning researcher! + + +
+ + +
+
+ + + + + + + + + + +
+ + + June 05, 2020 + + +
+ + Our paper [MapLite: Autonomous intersection navigation without detailed prior maps] was adjudged the best Robotics and Automation Letters (RAL) paper for 2019! Check it out here. And here’s a short video abstract. + + +
+ + +
+
+ + + + + + + + + + +
+ + + January 20, 2020 + + +
+ + Check out gradSLAM: Dense SLAM meets automatic differentiation, our new ICRA 2020 paper on fully differentiable dense SLAM: Project page, Video. + + +
+ + +
+
+ + + + + + + + + + +
+ + + September 10, 2019 + + +
+ + The “Active Domain Randomization” paper got accepted to CoRL 2019. Congrats Bhairav, Manfred, and Florian. + + +
+ + +
+
+ + + + + + + + + + +
+ + + September 01, 2019 + + +
+ + Dhaivat, Rey, and Philippe joined the group as Master’s students. Welcome! + + +
+ + +
+
+ + + + + + + + + + +
+ + + September 01, 2019 + + +
+ + Sharath, Mark, Amrut, Rohan, and Dishank joined the group as interns. Welcome! + + +
+ + +
+
+ + + + + + + + + + +
+ + + August 01, 2019 + + +
+ + Our paper Deep Active Localization was accepted to Robotics and Automation Letters. + + +
+ + +
+
+ + + + + + + + + + +
+ + + November 28, 2018 + + +
+ + Dhaivat Bhatt just joined our group as an intern. Welcome! + + +
+ + +
+
+ + + + + + + + + + +
+ + + September 05, 2018 + + +
+ + Manfred Diaz and Ruixiang Zhang join the group as PhD students … + + +
+ + +
+
+ + + + + + + + + + +
+ + + April 20, 2018 + + +
+ + Paper on self-supervised visual odometry estimation accepted to CVPR workshop on SLAM. + + +
+ + +
+
+ + + + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2018/04/20/cvpr.html b/blog/2018/04/20/cvpr.html new file mode 100644 index 0000000..f0d3adc --- /dev/null +++ b/blog/2018/04/20/cvpr.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Cvpr + + + + + + + + + + + +
+ + + + + + + +

April 20, 2018

+ +
+

Paper on self-supervised visual odometry estimation accepted to CVPR workshop on SLAM.

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2018/09/05/welcomephd.html b/blog/2018/09/05/welcomephd.html new file mode 100644 index 0000000..c02baf7 --- /dev/null +++ b/blog/2018/09/05/welcomephd.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Welcomephd + + + + + + + + + + + +
+ + + + + + + +

September 05, 2018

+ +
+

Manfred Diaz and Ruixiang Zhang join the group as PhD students …

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2018/11/28/image.html b/blog/2018/11/28/image.html new file mode 100644 index 0000000..3a87381 --- /dev/null +++ b/blog/2018/11/28/image.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Image + + + + + + + + + + + +
+ + + + + + + +

November 28, 2018

+ +
+

Dhaivat Bhatt just joined our group as an intern. Welcome!

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2019/08/01/dal.html b/blog/2019/08/01/dal.html new file mode 100644 index 0000000..160cf6e --- /dev/null +++ b/blog/2019/08/01/dal.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Dal + + + + + + + + + + + +
+ + + + + + + +

August 01, 2019

+ +
+

Our paper Deep Active Localization was accepted to Robotics and Automation Letters.

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2019/09/01/welcomeinterns.html b/blog/2019/09/01/welcomeinterns.html new file mode 100644 index 0000000..d4cc137 --- /dev/null +++ b/blog/2019/09/01/welcomeinterns.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Welcomeinterns + + + + + + + + + + + +
+ + + + + + + +

September 01, 2019

+ +
+

Sharath, Mark, Amrut, Rohan, and Dishank joined the group as interns. Welcome!

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2019/09/01/welcomestudents.html b/blog/2019/09/01/welcomestudents.html new file mode 100644 index 0000000..b11963e --- /dev/null +++ b/blog/2019/09/01/welcomestudents.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Welcomestudents + + + + + + + + + + + +
+ + + + + + + +

September 01, 2019

+ +
+

Dhaivat, Rey, and Philippe joined the group as Master’s students. Welcome!

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2019/09/10/adrpaper.html b/blog/2019/09/10/adrpaper.html new file mode 100644 index 0000000..310a19f --- /dev/null +++ b/blog/2019/09/10/adrpaper.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Adrpaper + + + + + + + + + + + +
+ + + + + + + +

September 10, 2019

+ +
+

The “Active Domain Randomization” paper got accepted to CoRL 2019. Congrats Bhairav, Manfred, and Florian.

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2020/01/20/gradslam-icra.html b/blog/2020/01/20/gradslam-icra.html new file mode 100644 index 0000000..1578ff5 --- /dev/null +++ b/blog/2020/01/20/gradslam-icra.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Gradslam Icra + + + + + + + + + + + +
+ + + + + + + +

January 20, 2020

+ +
+

Check out gradSLAM: Dense SLAM meets automatic differentiation, our new ICRA 2020 paper on fully differentiable dense SLAM: Project page, Video.

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2020/06/05/maplite-award.html b/blog/2020/06/05/maplite-award.html new file mode 100644 index 0000000..f4a8ef4 --- /dev/null +++ b/blog/2020/06/05/maplite-award.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Maplite Award + + + + + + + + + + + +
+ + + + + + + +

June 05, 2020

+ +
+

Our paper [MapLite: Autonomous intersection navigation without detailed prior maps] was adjudged the best Robotics and Automation Letters (RAL) paper for 2019! Check it out here. And here’s a short video abstract.

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2020/06/30/gunshi-graduates.html b/blog/2020/06/30/gunshi-graduates.html new file mode 100644 index 0000000..40f1a8e --- /dev/null +++ b/blog/2020/06/30/gunshi-graduates.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Gunshi Graduates + + + + + + + + + + + +
+ + + + + + + +

June 30, 2020

+ +
+

Gunshi Gupta successfully completes her M.Sc. and joins Wayve as a deep learning researcher!

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2020/09/10/robotlearningseries.html b/blog/2020/09/10/robotlearningseries.html new file mode 100644 index 0000000..d0b8de4 --- /dev/null +++ b/blog/2020/09/10/robotlearningseries.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Robotlearningseries + + + + + + + + + + + +
+ + + + + + + +

September 10, 2020

+ +
+

Robot learning seminar series launched!

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2020/10/10/neurips.html b/blog/2020/10/10/neurips.html new file mode 100644 index 0000000..690c4da --- /dev/null +++ b/blog/2020/10/10/neurips.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Neurips + + + + + + + + + + + +
+ + + + + + + +

October 10, 2020

+ +
+

Two papers accepted to NeurIPS 2020 (one of them an oral - top 1.1%). Congratulations, Gunshi and Ruixiang!

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2020/10/15/lamaml.html b/blog/2020/10/15/lamaml.html new file mode 100644 index 0000000..925f67f --- /dev/null +++ b/blog/2020/10/15/lamaml.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Lamaml + + + + + + + + + + + +
+ + + + + + + +

October 15, 2020

+ +
+

Check out our new NeurIPS 2020 oral paper La-MAML: Look-Ahead Meta-Learning for Continual Learning [Code], [Short Video].

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2020/10/30/irosworkshop.html b/blog/2020/10/30/irosworkshop.html new file mode 100644 index 0000000..03056e3 --- /dev/null +++ b/blog/2020/10/30/irosworkshop.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Irosworkshop + + + + + + + + + + + +
+ + + + + + + +

October 30, 2020

+ +
+

We organized an IROS workshop on Benchmarking progress in autonomous driving.

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2020/11/30/gradslam.html b/blog/2020/11/30/gradslam.html new file mode 100644 index 0000000..7787d65 --- /dev/null +++ b/blog/2020/11/30/gradslam.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Gradslam + + + + + + + + + + + +
+ + + + + + + +

November 30, 2020

+ +
+

We released gradslam - a differentiable dense SLAM framework for deep learning. Check it out!

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/blog/2020/12/05/krishna-fellowship.html b/blog/2020/12/05/krishna-fellowship.html new file mode 100644 index 0000000..326a521 --- /dev/null +++ b/blog/2020/12/05/krishna-fellowship.html @@ -0,0 +1,179 @@ + + + + + + + + + Robotics Group @ University of Montreal | Krishna Fellowship + + + + + + + + + + + +
+ + + + + + + +

December 05, 2020

+ +
+

Krishna won an NVIDIA fellowship for 2021-22. Congratulations!

+ +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/code.html b/code.html new file mode 100644 index 0000000..df0a553 --- /dev/null +++ b/code.html @@ -0,0 +1,177 @@ + + + + + + + + + Robotics Group @ University of Montreal | Code + + + + + + + + + + + +
+ + + + + + + +

Please see our published articles for their corresponding code repos. In general, our repositories can be found here:

+ +

https://github.com/montrealrobotics

+ + + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/code.md b/code.md deleted file mode 100644 index bd6e76c..0000000 --- a/code.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -layout: default -title: Code ---- -Please see our published articles for their corresponding code repos. In general, our repositories can be found here: - -https://github.com/montrealrobotics - diff --git a/contact.html b/contact.html new file mode 100644 index 0000000..c9dc554 --- /dev/null +++ b/contact.html @@ -0,0 +1,216 @@ + + + + + + + + + Robotics Group @ University of Montreal | Contact + + + + + + + + + + + +
+ + + + + + + +

Joining Robotics and Embodied AI Lab

+ +
+
+
+
+
Work with us
+
At REAL, we are always happy to take in talented individuals as full-time students (for M.Sc./Ph.D. positions), or for short-term (intern/visitor) roles. If that's you, here is how to get started.
+
+
+
+
+ +
+ +
+

Full-time students (M.Sc./Ph.D.)

+
+ +
+ +

+ If you are looking to join an M.Sc. or a Ph.D. program at the Université de Montréal or Mila, and would like to work at REAL, please apply through Mila admissions. Indicate either Liam Paull or Glen Berseth as one of your faculty advisors of choice. +

+
+
+ +
+ +
+

Interns

+
+ +
+

+ If you are looking to intern with REAL, please fill out our internship application form. +

+

+ We are recruiting up to two interns for Winter 2021 (Jan-May). Please see this listing if interested +

+
+
+ + + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/contact.md b/contact.md deleted file mode 100644 index baa4364..0000000 --- a/contact.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -layout: default -title: Contact ---- - - -

Joining Robotics and Embodied AI Lab

- -
-
-
-
-
Work with us
-
At REAL, we are always happy to take in talented individuals as full-time students (for M.Sc./Ph.D. positions), or for short-term (intern/visitor) roles. If that's you, here is how to get started.
-
-
-
-
- -
- -
-

Full-time students (M.Sc./Ph.D.)

-
- -
- -

- If you are looking to join an M.Sc. or a Ph.D. program at Universite de Montreal or Mila, and would like to work at REAL, please apply through Mila admissions. Indicate either Liam Paull or Glen Berseth as some of your faculty advisors of choice. -

-
-
- - - -
- -
-

Interns

-
- -
-

- If you are looking to intern with REAL, please fill out our internship application form. -

-

- We are recruiting up to two interns for Winter 2021 (Jan-May). Please see this listing if interested -

-
-
- diff --git a/events.html b/events.html index 414faf6..c2b861d 100644 --- a/events.html +++ b/events.html @@ -1,9 +1,146 @@ ---- -layout: default -title: Events ---- + + + + + + + + + Robotics Group @ University of Montreal | Events + + + + + + + + -
+ + +
+ + + + + + + +
@@ -14,12 +151,1170 @@
- {% comment %} - Sort the events by date, putting those without dates last - {% endcomment %} - {% assign events_by_date = site.events | sort: 'date', 'first' %} - {% assign events_by_date = events_by_date | reverse %} - {% for p in events_by_date %} - {% include event-card.html event=p %} - {% endfor %} + + + + + + + + + + +
+
+
+ + + + + + Montreal Robotics Summer School + + +
+
+
+ +

+ + Montreal Robotics Summer School + + +

+ + +

Robotics is a rapidly growing field with interest from around the world. This summer school offers tutorials and lectures on state-of-the-art machine learning methods for training the next generation of learning robots. It is supported by the many robotics groups around Montreal.

+

+ + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + Workshop on Physical Reasoning and Inductive Biases for the Real World + + +
+
+
+ +

+ + Workshop on Physical Reasoning and Inductive Biases for the Real World + + +

+ + +

Workshop at NeurIPS 2021

+

+ + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + Workshop on the Ecological Theory of RL + + +
+
+
+ +

+ + Workshop on the Ecological Theory of RL + + +

+ + +

Workshop at NeurIPS 2021

+

+ + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + The 6th AI Driving Olympics Competition + + +
+
+
+ +

+ + The 6th AI Driving Olympics Competition + + +

+ + +

The 6th iteration of the AI Driving Olympics, taking place virtually at NeurIPS 2021. The AI-DO serves to benchmark the state of the art of artificial intelligence in autonomous driving by providing standardized simulation and hardware environments for tasks related to multi-sensory perception and embodied AI.

+

+ + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + IROS 2021 Workshop on Evaluating the Broader Impacts of Self-Driving Cars + + +
+
+
+ +

+ + IROS 2021 Workshop on Evaluating the Broader Impacts of Self-Driving Cars + + +

+ + +

The primary objective of this workshop is to stimulate a conversation between roboticists, who focus on the development and implementation of autonomy algorithms, and regulators, economists, psychologists, and lawyers who are experts on the broader impacts that self-driving vehicles will have on society.

+

+ + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + Winter 2021 Robot Learning Seminar Series + + +
+
+
+ +

+ + Winter 2021 Robot Learning Seminar Series + + +

+ + +

The Robotics and Embodied AI Lab and Mila are hosting the Winter 2021 edition of the robot learning seminar series: a set of virtual talks by researchers in this field. Speakers this session include Steven Waslander, Animesh Garg, Sylvia Herbert, Georgia Chalvatzaki, Deepak Pathak, Pulkit Agrawal, Lilian Weng, Kelsey Allen, Manolis Savva, and Jiajun Wu.

+

+ + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + Summer 2020 Robot Learning Seminar Series + + +
+
+
+ +

+ + Summer 2020 Robot Learning Seminar Series + + +

+ + +

The Robotics and Embodied AI Lab and Mila are hosting the Summer 2020 edition of the robot learning seminar series: a set of virtual talks by researchers in this field. Speakers in this inaugural session include Stefani Tellex, Rika Antonova, Gunshi Gupta, Igor Gilitschenski, and Bhairav Mehta.

+

+ + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + IROS 2020 Workshop on Benchmarking Progress in Autonomous Driving + + +
+
+
+ +

+ + IROS 2020 Workshop on Benchmarking Progress in Autonomous Driving + + +

+ + +

Autonomous driving has seen incredible progress of late. Recent workshops at top conferences in robotics, computer vision, and machine learning have primarily showcased the technological advancements in the field. This workshop provides a platform to investigate and discuss the methods by which progress in autonomous driving is evaluated, benchmarked, and verified.

+

+ + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + Fall 2020 Robot Learning Seminar Series + + +
+
+
+ +

+ + Fall 2020 Robot Learning Seminar Series + + +

+ + +

The Robotics and Embodied AI Lab and Mila are hosting the Fall 2020 edition of the robot learning seminar series: a set of virtual talks by researchers in this field. Speakers this session include Florian Shkurti, Valentin Peretroukhin, Ankur Handa, Shubham Tulsiani, Ronald Clark, Lerrel Pinto, Mustafa Mukadam, Shuran Song, and Angela Schoellig.

+

+ + + + + +
+ + + +
+
+
+
+ + + + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/events/aido-6.html b/events/aido-6.html new file mode 100644 index 0000000..0c25288 --- /dev/null +++ b/events/aido-6.html @@ -0,0 +1,5 @@ +

The AI Driving Olympics 6

+ +

Duckietown traditionally hosts AI-DO competitions biannually, with finals events held at machine learning and robotics conferences such as the International Conference on Robotics and Automation (ICRA) and the Conference on Neural Information Processing Systems (NeurIPS).

+ +

AI-DO 6 will be held in conjunction with NeurIPS 2021 and will have three leagues: urban driving, advanced perception, and racing. The winter champions will be announced during NeurIPS 2021, on December 10, 2021!

diff --git a/events/ecorl.html b/events/ecorl.html new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/events/ecorl.html @@ -0,0 +1 @@ + diff --git a/events/iros2020.html b/events/iros2020.html new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/events/iros2020.html @@ -0,0 +1 @@ + diff --git a/events/iros2021.html b/events/iros2021.html new file mode 100644 index 0000000..fcef413 --- /dev/null +++ b/events/iros2021.html @@ -0,0 +1,11 @@ +

IROS 2021 Workshop on Evaluating the Broader Impacts of Self-Driving Cars

+ +

Self-driving cars have received significant attention in the last decade, and arguably have the potential to be the most impactful robotics application to date. The question that is usually asked by the public is “when are self-driving cars going to be here?” On one side, entrusting the entire driving problem to an autonomous agent seems frustratingly daunting. On the other side, we have started to see real deployments of autonomous vehicles in limited capacities, so perhaps there is reason for hope.

+ +

Autonomous driving advancements are typically evaluated along well-defined, but potentially myopic performance criteria. These metrics are reasonable in the sense that they do give us some quantitative measure that we can use for comparison. However, the true potential impact of this technology reaches far beyond these relatively simplistic measures. In this workshop we will take a broader perspective with respect to evaluating the progress that we have made towards making self-driving a reality. In the process, we will focus particularly on aspects of the integration of this technology that are rarely covered in technical papers on the subject. Specifically, we will focus on the following three objectives:

+ +

The primary objective of this workshop is to stimulate a conversation between roboticists, who focus on the development and implementation of autonomy algorithms, and regulators, economists, psychologists, and lawyers who are experts on the broader impacts that self-driving vehicles will have on society. We feel that it is critical to foster a community of researchers and practitioners whose expertise extends beyond the algorithmic challenges of realizing self-driving vehicles. As roboticists, we are ill-equipped to understand the broad impacts of this technology in areas that include ethics, philosophy, psychology, regulations, legal policy, and risk, to name a few, and it is critical that technological development is guided by such impacts. We will achieve our objective by inviting speakers and panelists who are experts in these adjacent fields to stimulate a broader conversation around this technology. This objective would be considered achieved if participants take the new perspectives they were exposed to and consider them in their own specific field of interest. For roboticists, this means explicitly considering these broader issues in the development of their algorithms. A stretch goal would be to spawn research collaborations between roboticists and researchers from these adjacent fields.

+ +

Duckietown traditionally hosts AI-DO competitions biannually, with finals events held at machine learning and robotics conferences such as the International Conference on Robotics and Automation (ICRA) and the Conference on Neural Information Processing Systems (NeurIPS).

+ +

AI-DO 6 will be held in conjunction with NeurIPS 2021 and will have three leagues: urban driving, advanced perception, and racing. The winter champions will be announced during NeurIPS 2021, on December 10, 2021!

diff --git a/events/learningseriesfall2020.html b/events/learningseriesfall2020.html new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/events/learningseriesfall2020.html @@ -0,0 +1 @@ + diff --git a/events/learningseriessummer2020.html b/events/learningseriessummer2020.html new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/events/learningseriessummer2020.html @@ -0,0 +1 @@ + diff --git a/events/learningserieswinter2021.html b/events/learningserieswinter2021.html new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/events/learningserieswinter2021.html @@ -0,0 +1 @@ + diff --git a/events/mrss.html b/events/mrss.html new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/events/mrss.html @@ -0,0 +1 @@ + diff --git a/events/physical-reasoning.html b/events/physical-reasoning.html new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/events/physical-reasoning.html @@ -0,0 +1 @@ + diff --git a/index.html b/index.html index 2c1384d..a2dc916 100644 --- a/index.html +++ b/index.html @@ -1,33 +1,458 @@ ---- -layout: default -title: Home -notitle: true - -# groups of columns of {roles: list, width: num, image: bool} -role-tables: -- - roles: [faculty, postdoc] - width: 4 - image: true - - roles: [phd, masters, developer] - width: 8 - image: true -- - roles: [intern, intern-alum, collab] - width: 4 - image: false -carousels: - - images: - - image: /img/slider/group1.jpg - - image: /img/slider/group2.jpg - - image: /img/slider/group3.jpg ---- - -
- {% include carousel.html height="50" unit="%" duration="4" number="1" %} + + + + + + + + + Robotics Group @ University of Montreal + + + + + + + + + + + +
+ + + + + + + +
+ + + + + + + +

Robotics and Embodied AI Lab (REAL)

The Robotics and Embodied AI Lab (REAL) is a research lab in DIRO at the Université de Montréal and is also affiliated with Mila. REAL is dedicated to making generalist robots and other embodied agents.

We are always looking for talented students to join us as full-time students or visitors. To learn more, click the link below.

- Learn more + Learn more
@@ -38,18 +463,224 @@
News
- {% for post in site.posts limit: site.front_page_news %} - {% include news-item.html item=post %} - {% endfor %} - {% assign numposts = site.posts | size %} - {% if numposts >= 1 %} - + + + + + + + + + + +
+ + + + + + + + + + +
+ + + November 30, 2020 + + +
+ + We released gradslam - a differentiable dense SLAM framework for deep learning. Check it out! + + +
+ + +
+
+ + + + + + + + + + +
+ + + October 30, 2020 + + +
+ + We organized an IROS workshop on Benchmarking progress in autonomous driving + + +
+ + +
+
+ + + + + + + + + + +
+ + + October 15, 2020 + + +
+ + Check out our new NeurIPS 2020 oral paper La-MAML: Look-Ahead Meta-Learning for Continual Learning [Code], [Short Video]. + +
+ + +
+
+ + + + + + + + + + +
+ + + October 10, 2020 + + +
+ + Two papers accepted to NeurIPS 2020 (one of them an oral, top 1.1%). Congratulations, Gunshi and Ruixiang! + +
+ + +
+
+ + + + + + + + + + +
+ + + September 10, 2020 + + +
+ + Robot learning seminar series launched! + + +
+ + +
+
+ + + + + + + + + + +
+ + + June 30, 2020 + + +
+ + Gunshi Gupta successfully completes her M.Sc. and joins Wayve as a deep learning researcher! + +
+ + +
+
+ + + + + + + + + + +
+ + + June 05, 2020 + + +
+ + Our paper [MapLite: Autonomous intersection navigation without detailed prior maps] was adjudged best Robotics and Automation Letters (RAL) paper for 2019! Check it out here. And, here’s a short video abstract. + + +
+ + +
+
+ + + + + More news … - {% endif %} +
@@ -60,25 +691,1981 @@
News
Projects
- {% comment %} - Sort the projects by date, putting those without dates last - {% endcomment %} - {% assign projects_by_date = site.projects | sort: 'last-updated', 'first' %} - {% assign projects_by_date = projects_by_date | reverse %} - {% for p in projects_by_date %} - {% include project-card.html project=p %} - {% endfor %} -

- - - - - All projects… - -

+ + + + + + + + + + +
+
+
+ + + + + + ConceptFusion: Open-set Multimodal 3D Mapping + + +
+
+
+ +

+ + ConceptFusion: Open-set Multimodal 3D Mapping + +

+ + +

ConceptFusion builds open-set 3D maps that can be queried via text, click, image, or audio. Given a series of RGB-D images, our system builds a 3D scene representation that is inherently multimodal by leveraging foundation models such as CLIP, and therefore doesn’t require any additional training or finetuning.

+

+ + + + + Collaborators: + + + + +
+ + + +
+
+
-
- + + + + + + + +
+
+
+ + + + + + One-4-All - Neural Potential Fields for Embodied Navigation + + +
+
+
+ +

+ + One-4-All - Neural Potential Fields for Embodied Navigation + +

+ + +

An end-to-end, fully parametric method for image-goal navigation that leverages self-supervised and manifold learning to replace a topological graph with a geodesic regressor. During navigation, the geodesic regressor is used as an attractor in a potential function defined in latent space, allowing navigation to be framed as a minimization problem.

+

+ + + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + f-Cal - Calibrated aleatoric uncertainty estimation from neural networks for robot perception + + +
+
+
+ +

+ + f-Cal - Calibrated aleatoric uncertainty estimation from neural networks for robot perception + +

+ + +

f-Cal is a calibration method for probabilistic regression networks. Typical Bayesian neural networks are shown to be overconfident in their predictions, yet reliable, calibrated uncertainty estimates are critical for downstream tasks. f-Cal is a straightforward loss function that can be employed to train any probabilistic neural regressor and obtain calibrated uncertainty estimates.

+

+ + + + + Collaborators: + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + Inverse Variance Reinforcement Learning + + +
+
+
+ +

+ + Inverse Variance Reinforcement Learning + +

+ + +

Improving sample efficiency in deep reinforcement learning by mitigating the impact of heteroscedastic noise in the bootstrapped target using uncertainty estimation.

+

+ + + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + Lifelong Topological Visual Navigation + + +
+
+
+ +

+ + Lifelong Topological Visual Navigation + +

+ + +

A learning-based topological visual navigation method with graph update strategies that improves lifelong navigation performance over time.

+

+ + + + + Collaborators: + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + Taskography - Evaluating robot task planning over large 3D scene graphs + + +
+
+
+ +

+ + Taskography - Evaluating robot task planning over large 3D scene graphs + +

+ + +

Taskography is the first large-scale robotic task planning benchmark over 3D scene graphs (3DSGs). While most benchmarking efforts in this area focus on vision-based planning, we systematically study symbolic planning to decouple planning performance from visual representation learning.

+

+ + + + + Collaborators: + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + gradsim + + +
+
+
+ +

+ + gradsim + +

+ + +

gradSim is a framework that overcomes the dependence on 3D supervision by leveraging differentiable multiphysics simulation and differentiable rendering to jointly model the evolution of scene dynamics and image formation.

+

+ + + + + Collaborators: +
    + + +
  • + + Miles Macklin + +
  • + + +
  • + + Vikram Voleti + +
  • + + +
  • + + Linda Petrini + +
  • + + +
  • + + Martin Weiss + +
  • + + +
  • + + Jerome Parent-Levesque + +
  • + + +
  • + + Kevin Xie + +
  • + + +
  • + + Kenny Erleben + +
  • + + +
  • + + + Florian Shkurti + + +
  • + + +
  • + + Derek Nowrouzerzahrai + +
  • + + +
  • + + Sanja Fidler + +
  • + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + gradslam + + +
+
+
+ +

+ + gradslam + +

+ + +

gradslam is an open-source framework providing differentiable building blocks for simultaneous localization and mapping (SLAM) systems. It enables the use of dense SLAM subsystems from the comfort of PyTorch.

+

+ + + + + Collaborators: + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + La-MAML + + +
+
+
+ +

+ + La-MAML + +

+ + +

Look-ahead meta-learning for continual learning

+

+ + + + + Collaborators: +
    + + +
  • + + Karmesh Yadav + +
  • + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + Active Domain Randomization + + +
+
+
+ +

+ + Active Domain Randomization + +

+ + +

Making sim-to-real transfer more efficient

+

+ + + + + Collaborators: +
    + + +
  • + + Chris Pal + +
  • + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + Self-supervised visual odometry estimation + + +
+
+
+ +

+ + Self-supervised visual odometry estimation + +

+ + +

A self-supervised deep network for visual odometry estimation from monocular imagery.

+

+ + + + + Collaborators: + + + + +
+ + + +
+
+
+
+ + + + + + + + +
+
+
+ + + + + + Deep Active Localization + + +
+
+
+ +

+ + Deep Active Localization + +

+ + +

Learned active localization, implemented on “real” robots.

+

+ + + + + Collaborators: +
    + + +
  • + + Keehong Seo + +
  • + +
+ + + +
+ + + +
+
+
+
+ + +

+ + + + + All projects… + +

+
+ +
+ + + + + +
+

+ + + + | + + + + | + + + + + +

+
+
+ + + + + + + + diff --git a/interns-winter-2021.html b/interns-winter-2021.html new file mode 100644 index 0000000..a0b66e6 --- /dev/null +++ b/interns-winter-2021.html @@ -0,0 +1,198 @@ + + + + + + + + + Robotics Group @ University of Montreal + + + + + + + + + + + +
+ + + + + + + +

Up to two internships are available at the Robotics and Embodied AI Lab (REAL) at the Université de Montréal and Mila. Internship roles are full-time (40 hrs/week) with the possibility of remote work. Preferred duration: Jan - May 2021.

+ +

Note: Students at both the undergraduate and graduate levels are welcome to apply, too.

+ +

Selected interns will leverage end-to-end differentiable SLAM frameworks (such as gradslam) to implement deep learning solutions for 3D perception, navigation, and manipulation. They will have the opportunity to collaborate with the vibrant research community at Mila and publish at leading robotics/vision/ML venues. Candidates will be compensated at a rate competitive with graduate student salaries in the Montreal area.

+ +

Requirements:

+
    +
  • Must be available for full-time roles for winter 2021 (Jan-May 2021)
  • +
  • Experience in one or more of the following: 3D reconstruction/mapping, deep learning for 3D perception, deep reinforcement learning, SLAM
  • +
  • Strong programming skills (prior PyTorch experience required)
  • +
+ +

Desired:

+
    +
  • Experience with at least one of: differentiable computer vision (see Kornia), differentiable rendering, or designing and implementing RL environments and agents
  • +
  • Prior publication record at robotics/vision/ML venues
  • +
  • Prior open-source project contributions/management
  • +
+ +

How to apply: Please fill out this form.

+ +

In case of questions, feel free to write to Krishna.

+ +

We will continue to process applications on a first-come-first-served basis until the positions are filled.

+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/people.html b/people.html index 8c5c6db..47aaeb7 100644 --- a/people.html +++ b/people.html @@ -1,25 +1,146 @@ ---- -layout: default -title: People - -# groups of columns of {roles: list, width: num, image: bool} -member-tables: -- - roles: [faculty, postdoc] - width: 4 - image: true - - roles: [phd, masters, intern, developer] - width: 4 - image: true + + + + + + + + + Robotics Group @ University of Montreal | People + + + + + + + + + + +
+ + -alumni-tables: -- - roles: [postdoc-alum, phd-alum, masters-alum, intern-alum] - width: 4 - image: false + ---- + -
+
@@ -32,41 +153,4881 @@
- {% for role-table in page.member-tables %} - {% for role-column in role-table %} - {% for role in role-column.roles %} - {% include role-people.html role=role image=role-column.image %} -
- {% endfor %} - {% endfor %} - {% endfor %} -
-
-
+ + + + -
+ + +
-
-
-
Alumni
+
+
+
Faculty
+
+
+ + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Glen Berseth + + + + + + +

    + Glen Berseth +


+
+ +
Interests: Reinforcement learning, robotics, machine learning, generalization, planning + + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
-
+
+ + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + + +
+
+
+
+
Postdocs
+
+
+ + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Steven Parkison + + + + + + +

    + Steven Parkison +


+
+ +
Interests: SLAM, optimization, robotic perception, and lukewarm coffee + + + + +
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+ + + + + + + + + + + + + + + + -
-
-
- {% for role-table in page.alumni-tables %} - {% for role-column in role-table %} - {% for role in role-column.roles %} - {% include role-people.html role=role image=role-column.image %}
- {% endfor %} - {% endfor %} + + + + + + + + + + + + + + + + + + +
+
+
+
+
PhD Students
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Manfred Diaz + + + + + + +

    + Manfred Diaz +


+
+ + + + +
- {% endfor %}
-
-
+ + +
+
+ + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Ruixiang Zhang + + + + + +

    Ruixiang Zhang


+
+ + +
Coadvisor: Yoshua Bengio     + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Zhen Liu + + + + + +

    Zhen Liu


+
+ + +
Coadvisor: Yoshua Bengio     + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Mostafa Elaraby + + + + + +

    Mostafa Elaraby


+
+ +
Interests: Continual learning, imitation learning + + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Sacha Morin + + + + + + +

    + Sacha Morin +


+
+ + +
Coadvisor: Guy Wolf     + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Charlie Gauthier + + + + + +

    Charlie Gauthier


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Miguel Saavedra-Ruiz + + + + + + +

    + Miguel Saavedra-Ruiz +


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Albert Zhan + + + + + + +

    + Albert Zhan +


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+ + + + + + + + + + + + + +
+ + + + + + + + + + + + + + +
+
+
+
+
Master's Students
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Roger Creus Castanyer + + + + + + +

    + Roger Creus Castanyer +


+
+ +
Interests: (Unsupervised) (Deep) reinforcement learning + + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Ali Kuwajerwala + + + + + + +

    + Ali Kuwajerwala +


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Dishank Bansal + + + + + + +

    + Dishank Bansal +


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Simon Demeule + + + + + + +

    + Simon Demeule +


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Adriana Hugessen + + + + + +

    Adriana Hugessen


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+ + + + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + +
+
+
+
+
Undergraduate Researchers and Interns
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Raj Ghugare + + + + + + +

    + Raj Ghugare +


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Atharva Chandak + + + + + +

    Atharva Chandak


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Bipasha Sen + + + + + +

    Bipasha Sen


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Aditya Agarwal + + + + + +

    Aditya Agarwal


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+ + + + + + + + + + + +
+ + + + + + + + + + +
+
+
+
+
Software Developers
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Kirsty Ellis + + + + + +

    Kirsty Ellis


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+ + + + + + + + + + + + + + + + + + + +
+ + + +
+
+ + + +
+
+
+
+
Alumni
+
+
+
+
+ +
+
+
+ + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
Past Postdocs
+
+
+ + + + + + + + + + + + + + + + + +
+
+ + +

    + Florian Golemo +


+
+ + +
Coadvisor: Chris Pal     + + + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    + Ali Harakeh +


+
+ +
Interests: Bayesian deep learning, conformal prediction, out-of-distribution generalization, and continual learning + + + + +
Current Position: Senior Applied Research Scientist at Mila + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+ + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
Past PhD Students
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +

    Vincent Mai


+
+ + + + +
Current Position: AI researcher at the Institut de Recherche d'Hydro Québec (IREQ) + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    + Krishna Murthy Jatavallabhula +


+
+ + + + +
Current Position: PostDoc at MIT with Antonio Torralba and Joshua Tenenbaum + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+ + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
Past Master's Students
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +

    Charlie Gauthier


+
+ + + +
Thesis: Fear prediction for training robust RL agents + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +

    + Sai Krishna G.V. +


+
+ + + +
Thesis: Deep active localization + + +
Current Position: Reinforcement learning researcher at AI-Redefined + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Gunshi Gupta


+
+ + + +
Thesis: Look-ahead meta-learning for continual learning + + +
Current Position: PhD student at Oxford + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Nithin Vasisth


+
+ + + +
Thesis: Lifelong learning of concepts in CRAFT + + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Breandan Considine


+
+ + +
Coadvisor: Michalis Famelis     + + +
Thesis: Programming tools for intelligent systems + + +
Current Position: PhD student at McGill + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Bhairav Mehta


+
+ + + +
Thesis: On learning and generalization in unstructured task spaces + + +
Current Position: PhD student at MIT + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    + Anthony Courchesne +


+
+ + + +
Thesis: On quantifying the value of simulation for training and evaluating robotic agents + + +
Current Position: Project manager at Institut du Vehicule Innovant (IVI) + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Dhaivat Bhatt


+
+ + + +
Thesis: Variational aleatoric uncertainty calibration in neural regression + + +
Current Position: Research engineer at Samsung + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    + Rey Reza Wiyatno +


+
+ + + +
Thesis: Lifelong Topological Visual Navigation + + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+ + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
Past Undergraduate Researchers and Interns
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + +

    Kaustubh Mani


+
+ + + + +
Current Position: PhD student at the University of Montreal + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Nikhil Varma Keetha


+
+ + + + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Sai Sree Harsha


+
+ + + + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Abhishek Jain


+
+ + + + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Sharath Chandra Raparthy


+
+ + + + +
Current Position: PhD student at the University of Montreal + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    + Mark Van der Merwe +


+
+ + + + +
Current Position: PhD student at the University of Michigan + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Amrut Sarangi


+
+ + + + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Rohan Raj


+
+ + + + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    + Waleed Khamies +


+
+ + + + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Zihan Wang


+
+ + + + +
Current Position: Master's student at Stanford + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Homanga Bharadhwaj


+
+ + + + +
Current Position: PhD student at the University of Toronto + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Adam Sigal


+
+ + + + +
Current Position: PhD student at McGill + +
+ +
+
+ + + + + + + + + + + + + + + +
+
+ + +

    Sarthak Sharma


+
+ + + + +
Current Position: AI/ML Engineer at Verisk AI Lab + +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+
+
+ + + +
+ + + + +
+
+
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/projects/01-gradslam.html b/projects/01-gradslam.html new file mode 100644 index 0000000..59416ae --- /dev/null +++ b/projects/01-gradslam.html @@ -0,0 +1,301 @@ + + + + + + + + + Robotics Group @ University of Montreal | gradslam + + + + + + + + + + + +
+ + + + + gradslam + + + + +

gradslam

+ +

gradslam is an open-source framework providing differentiable building blocks for simultaneous localization and mapping (SLAM) systems. It enables the use of dense SLAM subsystems from the comfort of PyTorch.
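To make the "differentiable building block" idea concrete, here is a minimal sketch (not the gradslam API; the backproject helper and all shapes are illustrative): map construction stays on the autograd graph, so a loss computed on the map can send gradients back to the raw inputs.

```python
import torch

def backproject(depth, intrinsics):
    """Lift an HxW depth map to an (H*W, 3) point cloud, differentiably."""
    H, W = depth.shape
    fx, fy, cx, cy = intrinsics
    v, u = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
    x = (u - cx) / fx * depth
    y = (v - cy) / fy * depth
    return torch.stack([x, y, depth], dim=-1).reshape(-1, 3)

depth = torch.rand(48, 64, requires_grad=True)         # toy depth frame
points = backproject(depth, (60.0, 60.0, 32.0, 24.0))  # camera intrinsics (fx, fy, cx, cy)
global_map = torch.cat([torch.zeros(0, 3), points])    # naive "fusion": append to the map

loss = global_map.norm(dim=-1).mean()                  # any downstream objective on the map
loss.backward()                                        # gradients flow back to the raw depth
print(depth.grad.shape)                                # torch.Size([48, 64])
```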

+ + +

People

+
+ + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Krishna Murthy Jatavallabhula + + + + + + +

    + Krishna Murthy Jatavallabhula +


+
+ + + + +
Current Position: PostDoc at MIT with Antonio Torralba and Joshua Tenenbaum + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/projects/adr.html b/projects/adr.html new file mode 100644 index 0000000..fd6c1f0 --- /dev/null +++ b/projects/adr.html @@ -0,0 +1,410 @@ + + + + + + + + + Robotics Group @ University of Montreal | Active Domain Randomization + + + + + + + + + + + +
+ + + + + Active Domain Randomization + + + + +

Active Domain Randomization

+ +

Domain randomization is a popular technique for improving domain transfer, often used in a zero-shot setting when the target domain is unknown or cannot easily be used for training. In this work, we empirically examine the effects of domain randomization on agent generalization. Our experiments show that domain randomization may lead to suboptimal, high-variance policies, which we attribute to the uniform sampling of environment parameters. We propose Active Domain Randomization, a novel algorithm that learns a parameter sampling strategy. Our method looks for the most informative environment variations within the given randomization ranges by leveraging the discrepancies of policy rollouts in randomized and reference environment instances. We find that training more frequently on these instances leads to better overall agent generalization. Our experiments across various physics-based simulated and real-robot tasks show that this enhancement leads to more robust, consistent policies.
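A minimal sketch of that sampling loop, under the assumptions that a single scalar parameter is randomized and that rollout_return stands in for a full policy rollout (neither is the paper's code):

```python
import numpy as np

rng = np.random.default_rng(0)
candidates = np.linspace(0.5, 2.0, num=16)   # discretized randomization range (e.g., friction)
reference_param = 1.0                        # the reference (default) environment instance
weights = np.ones_like(candidates)           # learned preference over environment instances

def rollout_return(param):
    """Stand-in for rolling out the current policy in an environment with this parameter."""
    return -abs(param - reference_param) + rng.normal(scale=0.05)

for step in range(200):
    probs = weights / weights.sum()
    idx = rng.choice(len(candidates), p=probs)

    # Discrepancy between randomized and reference rollouts = how informative this instance is.
    discrepancy = abs(rollout_return(candidates[idx]) - rollout_return(reference_param))
    weights[idx] += discrepancy              # sample informative instances more often
    # train_agent_on(candidates[idx])        # the agent is then trained more on these instances
```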

+ + +

People

+
+ + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Bhairav Mehta + + + + + +

    Bhairav Mehta


+
+ + + +
Thesis: On learning and generalization in unstructured task spaces + + +
Current Position: PhD student at MIT + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Florian Golemo + + + + + + +

    + Florian Golemo +


+
+ + +
Coadvisor: Chris Pal     + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Manfred Diaz + + + + + + +

    + Manfred Diaz +


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/projects/conceptfusion.html b/projects/conceptfusion.html new file mode 100644 index 0000000..5f090f7 --- /dev/null +++ b/projects/conceptfusion.html @@ -0,0 +1,301 @@ + + + + + + + + + Robotics Group @ University of Montreal | ConceptFusion: Open-set Multimodal 3D Mapping + + + + + + + + + + + +
+ + + + + ConceptFusion: Open-set Multimodal 3D Mapping + + + + +

ConceptFusion: Open-set Multimodal 3D Mapping

+ +

Building 3D maps of the environment is central to robot navigation, planning, and interaction with objects in a scene. Most existing approaches that integrate semantic concepts with 3D maps largely remain confined to the closed-set setting: they can only reason about a finite set of concepts, pre-defined at training time. Further, these maps can only be queried using class labels, or in recent work, using text prompts.

+ +

We address both of these issues with ConceptFusion, a scene representation that is: (i) fundamentally open-set, enabling reasoning beyond a closed set of concepts, and (ii) inherently multimodal, enabling a diverse range of possible queries to the 3D map, from language to images to audio to 3D geometry, all working in concert. ConceptFusion leverages the open-set capabilities of today’s foundation models pre-trained on internet-scale data to reason about concepts across modalities such as natural language, images, and audio. We demonstrate that pixel-aligned open-set features can be fused into 3D maps via traditional SLAM and multi-view fusion approaches. This enables effective zero-shot spatial reasoning without any additional training or finetuning, and retains long-tailed concepts better than supervised approaches, outperforming them by a margin of more than 40% in 3D IoU. We extensively evaluate ConceptFusion on a number of real-world datasets, simulated home environments, a real-world tabletop manipulation task, and an autonomous driving platform. We showcase new avenues for blending foundation models with 3D open-set multimodal mapping.
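A minimal sketch of the open-set querying step this enables, assuming per-point fused features already exist and using a placeholder embed_text in place of a real foundation-model encoder such as CLIP (illustrative only, not ConceptFusion's code):

```python
import torch
import torch.nn.functional as F

num_points, dim = 10_000, 512
point_features = F.normalize(torch.randn(num_points, dim), dim=-1)  # fused per-point features

def embed_text(prompt: str) -> torch.Tensor:
    """Placeholder for a foundation-model text encoder (e.g., CLIP)."""
    torch.manual_seed(abs(hash(prompt)) % (2 ** 31))
    return F.normalize(torch.randn(dim), dim=-1)

query = embed_text("a potted plant")
relevance = point_features @ query                 # cosine similarity per 3D map point
mask = relevance > relevance.quantile(0.99)        # keep the most relevant points
print(int(mask.sum()), "points matched the query")
```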

+ + +

People

+
+ + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Ali Kuwajerwala + + + + + + +

    + Ali Kuwajerwala +


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/projects/ctcnet.html b/projects/ctcnet.html new file mode 100644 index 0000000..24d45a7 --- /dev/null +++ b/projects/ctcnet.html @@ -0,0 +1,353 @@ + + + + + + + + + Robotics Group @ University of Montreal | Self-supervised visual odometry estimation + + + + + + + + + + + +
+ + + + + Self-supervised visual odometry estimation + + + + + + + +

People

+
+ + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Krishna Murthy Jatavallabhula + + + + + + +

    + Krishna Murthy Jatavallabhula +


+
+ + + + +
Current Position: PostDoc at MIT with Antonio Torralba and Joshua Tenenbaum + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Gunshi Gupta + + + + + +

    Gunshi Gupta


+
+ + + +
Thesis: Look-ahead meta-learning for continual learning + + +
Current Position: PhD student at Oxford + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/projects/dal.html b/projects/dal.html new file mode 100644 index 0000000..f648d94 --- /dev/null +++ b/projects/dal.html @@ -0,0 +1,466 @@ + + + + + + + + + Robotics Group @ University of Montreal | Deep Active Localization + + + + + + + + + + + +
+ + + + + Deep Active Localization + + + + +

Deep Active Localization

+ +

Active localization is the problem of generating actions that allow a robot to maximally disambiguate its pose within a reference map. Traditional approaches to this use an information-theoretic criterion for action selection and hand-crafted perceptual models. In this work we propose an end-to-end differentiable method for learning to take informative actions that is trainable entirely in simulation and then transferable to real robot hardware with zero refinement. The system is composed of two modules: a convolutional neural network for perception, and a planning module learned with deep reinforcement learning. We introduce a multi-scale approach to the learned perceptual model, since the accuracy needed to perform action selection with reinforcement learning is much less than the accuracy needed for robot control. We demonstrate that the resulting system outperforms the traditional approach for either perception or planning. We also demonstrate our approach’s robustness to different map configurations and other nuisance parameters through the use of domain randomization in training. The code is also compatible with the OpenAI Gym framework, as well as the Gazebo simulator.
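A minimal sketch of that two-module structure (the shapes, names, and grid-based belief are illustrative stand-ins, not the paper's code):

```python
import torch
import torch.nn as nn

class PerceptionNet(nn.Module):
    """CNN perceptual model: observation + local map -> coarse belief over candidate poses."""
    def __init__(self, grid=16):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(2, 16, 3, padding=1), nn.ReLU(),
            nn.Conv2d(16, 1, 3, padding=1), nn.Flatten(),
        )

    def forward(self, obs_and_map):
        return self.net(obs_and_map).softmax(dim=-1)   # belief over grid*grid poses

class Planner(nn.Module):
    """Policy (trained with RL) that picks the action that best disambiguates the pose."""
    def __init__(self, grid=16, num_actions=4):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(grid * grid, 64), nn.ReLU(), nn.Linear(64, num_actions))

    def forward(self, belief):
        return self.net(belief)                        # action logits

perception, planner = PerceptionNet(), Planner()
obs_and_map = torch.rand(1, 2, 16, 16)                 # one observation channel + one map channel
belief = perception(obs_and_map)
action = planner(belief).argmax(dim=-1)
print("chosen action:", action.item())
```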

+ + +

People

+
+ + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Sai Krishna G.V. + + + + + + +

    + Sai Krishna G.V. +


+
+ + + +
Thesis: Deep active localization + + +
Current Position: Reinforcement learning researcher at AI-Redefined + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Dhaivat Bhatt + + + + + +

    Dhaivat Bhatt


+
+ + + +
Thesis: Variational aleatoric uncertainty calibration in neural regression + + +
Current Position: Research engineer at Samsung + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Krishna Murthy Jatavallabhula + + + + + + +

    + Krishna Murthy Jatavallabhula +


+
+ + + + +
Current Position: PostDoc at MIT with Antonio Torralba and Joshua Tenenbaum + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Vincent Mai + + + + + +

    Vincent Mai


+
+ + + + +
Current Position: AI researcher at the Institut de Recherche d'Hydro Québec (IREQ) + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/projects/fcal.html b/projects/fcal.html new file mode 100644 index 0000000..d7efe57 --- /dev/null +++ b/projects/fcal.html @@ -0,0 +1,468 @@ + + + + + + + + + Robotics Group @ University of Montreal | f-Cal - Calibrated aleatoric uncertainty estimation from neural networks for robot perception + + + + + + + + + + + +
+ + + + + f-Cal - Calibrated aleatoric uncertainty estimation from neural networks for robot perception + + + + +

f-cal

+ + + +

People

+
+ + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Dhaivat Bhatt + + + + + +

    Dhaivat Bhatt


+
+ + + +
Thesis: Variational aleatoric uncertainty calibration in neural regression + + +
Current Position: Research engineer at Samsung + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Kaustubh Mani + + + K + + + + + + + +

    Kaustubh Mani


+
+ + + + +
Current Position: PhD student at the University of Montreal + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Dishank Bansal + + + + + + +

    + Dishank Bansal +


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Krishna Murthy Jatavallabhula + + + + + + +

    + Krishna Murthy Jatavallabhula +


+
+ + + + +
Current Position: PostDoc at MIT with Antonio Torralba and Joshua Tenenbaum + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/projects/gradsim.html b/projects/gradsim.html new file mode 100644 index 0000000..ac54f58 --- /dev/null +++ b/projects/gradsim.html @@ -0,0 +1,413 @@ + + + + + + + + + Robotics Group @ University of Montreal | gradsim + + + + + + + + + + + +
+ + + + + gradsim + + + + +

gradsim

+ + + +

People

+
+ + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Krishna Murthy Jatavallabhula + + + + + + +

    + Krishna Murthy Jatavallabhula +


+
+ + + + +
Current Position: PostDoc at MIT with Antonio Torralba and Joshua Tenenbaum + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Florian Golemo + + + + + + +

    + Florian Golemo +


+
+ + +
Coadvisor: Chris Pal     + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Breandan Considine + + + + + +

    Breandan Considine


+
+ + +
Coadvisor: Michalis Famelis     + + +
Thesis: Programming tools for intelligent systems + + +
Current Position: PhD student at McGill + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/projects/ivrl.html b/projects/ivrl.html new file mode 100644 index 0000000..dcc9df0 --- /dev/null +++ b/projects/ivrl.html @@ -0,0 +1,438 @@ + + + + + + + + + Robotics Group @ University of Montreal | Inverse Variance Reinforcement Learning + + + + + + + + + + + +
+ + + + + Inverse Variance Reinforcement Learning + + + + +

Inverse Variance Reinforcement Learning

+ +

Most robotics problems can be written as (Partially Observable) Markov Decision Processes (MDPs), with discrete or continuous observation and action spaces. Deep Reinforcement Learning (DRL) is a powerful tool to find an optimal policy for these processes, based on experience acquired during training. The training of a DRL agent requires many trajectories, which can be arduous and expensive to produce in the real world. Indeed, the real world is not parallelizable, may require human effort to reset, and comes with risks for the robot and the environment. Gathering sufficient experience is therefore one of the most important challenges when applying DRL to robotics. The objective of this project is to reduce the number of samples necessary to train a DRL agent on a robot.

+ +

A diagram representing the generation process of the noisy target.

+ +

DRL algorithms are complex processes. An important part of most model-free algorithms is learning the value function of a given state or state-action pair, i.e., the expected return given the current policy. To do so, deep supervised learning components are used, where the input is the state(-action), and the label is called the target. The target T is a noisy sample of the value. Often, it is computed using the reward r and the next state s’ sampled from experience, the next action a’ based on s’ and the current policy, and the value Q of the next state-action pair, which is bootstrapped from the current value estimator (this is the Temporal Difference target). The noise on the target negatively impacts the learning process: the networks learn from wrong data, which leads to slower learning and instability.
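Written out for a single transition, the Temporal Difference target described above is T = r + γ·Q(s’, a’). A small sketch (the q_net here is a toy stand-in for the current value estimator):

```python
import torch

gamma = 0.99
q_net = torch.nn.Linear(4 + 1, 1)             # toy Q(s, a) estimator: 4-dim state, 1-dim action

def td_target(r, s_next, a_next):
    with torch.no_grad():                      # the bootstrapped term is not differentiated through
        q_next = q_net(torch.cat([s_next, a_next]))
    return r + gamma * q_next                  # T = r + gamma * Q(s', a')

r = torch.tensor([0.5])
s_next, a_next = torch.rand(4), torch.rand(1)
print(td_target(r, s_next, a_next))            # a noisy sample of the value
```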

+ +

The key element in this project is the fact that the noise affecting the target, i.e., its difference from the true and unique value function, is heteroscedastic. This means that the distribution it is sampled from changes for each input and training step. Sometimes, this distribution has a very low variance: the target is close to the value. Sometimes, on the other hand, the target is subject to a lot of noise and it does not contain useful information with respect to the value. Therefore, the value estimation task in DRL is a case of heteroscedastic regression.

+ +

Projects

+ +

Batch Inverse-Variance Weighting for Deep Heteroscedastic Regression

+ +

Noisy labels slow the learning process in regression: the first part of this project was to prove that the effect of noisy labels can be mitigated given the hypothesis that we know the variance of the noise distribution of each label. How can we include this additional information for heteroscedastic regression? Intuitively, we should give more weight to the labels we trust more. In linear regression, the Gauss-Markov theorem shows that the optimal solution is to weigh each sample by the inverse of the variance of the label noise. We show that adapting inverse-variance weighting to gradient-based optimization methods significantly improves the performance of the learning process. Our paper, Batch Inverse-Variance Weighting: Deep Heteroscedastic Regression (BIV), was presented at the Uncertainty and Robustness in Deep Learning workshop at ICML 2021.
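A minimal sketch of the resulting weighted loss, under the stated hypothesis that each label's noise variance is known (eps is a stabilizing constant; the exact normalization used in the paper may differ):

```python
import torch

def biv_loss(pred, label, label_var, eps=1e-2):
    weights = 1.0 / (label_var + eps)      # trust labels with low noise variance more
    weights = weights / weights.sum()      # normalize over the mini-batch
    return (weights * (pred - label) ** 2).sum()

pred = torch.randn(8, requires_grad=True)
label = torch.randn(8)
label_var = torch.rand(8)                  # known per-label noise variance
biv_loss(pred, label, label_var).backward()
```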

+ +
+ A plot showing learning curves, where BIV is doing better than L2 and some baselines. +
BIV improves the learning performance with noisy labels compared to the L2 loss. Source: Batch Inverse-Variance Weighting: Deep Heteroscedastic Regression
+
+ +

Inverse-Variance Reinforcement Learning

+ +

See project page: https://montrealrobotics.ca/ivrl/

+ +

The second part of the project was to use this weighting scheme in a DRL setting. For this work, the challenge was to estimate the uncertainty of the target. A systematic analysis of the sources of uncertainty in the target generation process justifies the use of deep variance ensembles. These are used to estimate the variance due to the stochasticity of the environment and the policy, as well as the predictive uncertainty of the value prediction used to bootstrap the target. As the variance output by these deep ensembles is also the result of a training process, the uncertainty estimation is subject to complex dynamics. We show that the BIV weighting scheme is robust to changes of scale in the variance estimation, and that combining BIV with deep variance ensembles in DRL algorithms such as DQN and SAC leads to significant improvements in sample efficiency. This framework, called Inverse-Variance Reinforcement Learning (IV-RL), is presented in our Sample Efficient Deep Reinforcement Learning via Uncertainty Estimation submission to ICLR 2022.
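A minimal sketch of how a small ensemble can provide the target variance that feeds those weights (illustrative only, not the IV-RL implementation):

```python
import torch

gamma = 0.99
ensemble = [torch.nn.Linear(5, 1) for _ in range(5)]    # toy ensemble of Q(s, a) estimators

def target_and_variance(r, s_next, a_next):
    with torch.no_grad():
        q_values = torch.stack([q(torch.cat([s_next, a_next])) for q in ensemble])
    targets = r + gamma * q_values
    return targets.mean(), targets.var()                 # target estimate and its spread

r, s_next, a_next = torch.tensor(0.3), torch.rand(4), torch.rand(1)
target, variance = target_and_variance(r, s_next, a_next)
print(float(target), float(variance))                    # the variance drives a BIV-style weight
```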

+ +
+ A plot showing learning curves, where IV-SAC is doing better than DQN and other ensemble baselines. +
IV-RL on SAC improves the learning performance and the sample efficiency compared to other ensemble-based baselines. Source: Sample Efficient Deep Reinforcement Learning via Uncertainty Estimation
+
+ + +

People

+
+ + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Vincent Mai + + + + + +

    Vincent Mai


+
+ + + + +
Current Position: AI researcher at the Institut de Recherche d'Hydro Québec (IREQ) + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Kaustubh Mani + + + K + + + + + + + +

    Kaustubh Mani


+
+ + + + +
Current Position: PhD student at the University of Montreal + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Waleed Khamies + + + + + + +

    + Waleed Khamies +


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/projects/lamaml.html b/projects/lamaml.html new file mode 100644 index 0000000..53f349d --- /dev/null +++ b/projects/lamaml.html @@ -0,0 +1,297 @@ + + + + + + + + + Robotics Group @ University of Montreal | La-MAML + + + + + + + + + + + +
+ + + + + La-MAML + + + + +

La-MAML

+ + + +

People

+
+ + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + Gunshi Gupta + + + + + +

    Gunshi Gupta


+
+ + + +
Thesis: Look-ahead meta-learning for continual learning + + +
Current Position: PhD student at Oxford + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/projects/ltvn.html b/projects/ltvn.html new file mode 100644 index 0000000..a86e8b3 --- /dev/null +++ b/projects/ltvn.html @@ -0,0 +1,301 @@ + + + + + + + + + Robotics Group @ University of Montreal | Lifelong Topological Visual Navigation + + + + + + + + + + + +
+ + + + + Lifelong Topological Visual Navigation + + + + +

Lifelong Topological Visual Navigation

+ +

See project page: https://montrealrobotics.ca/ltvn/

+ + +

People

+
+ + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Rey Reza Wiyatno + + + + + + +

    + Rey Reza Wiyatno +


+
+ + + +
Thesis: Lifelong Topological Visual Navigation + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/projects/o4a.html b/projects/o4a.html new file mode 100644 index 0000000..1288bef --- /dev/null +++ b/projects/o4a.html @@ -0,0 +1,356 @@ + + + + + + + + + Robotics Group @ University of Montreal | One-4-All - Neural Potential Fields for Embodied Navigation + + + + + + + + + + + +
+ + + + + One-4-All - Neural Potential Fields for Embodied Navigation + + + + +

One-4-All: Neural Potential Fields for Embodied Navigation

+ +

A fundamental task in robotics is to navigate between two locations. In particular, real-world navigation can require long-horizon planning using high-dimensional RGB images, which poses a substantial challenge for end-to-end learning-based approaches. Current semi-parametric methods instead achieve long-horizon navigation by combining learned modules with a topological memory of the environment, often represented as a graph over previously collected images. However, using these graphs in practice typically involves tuning a number of pruning heuristics to avoid spurious edges, limit runtime memory usage and allow reasonably fast graph queries. In this work, we present One-4-All (O4A), a method leveraging self-supervised and manifold learning to obtain a graph-free, end-to-end navigation pipeline in which the goal is specified as an image. Navigation is achieved by greedily minimizing a potential function defined continuously over the O4A latent space. Our system is trained offline on non-expert exploration sequences of RGB data and controls, and does not require any depth or pose measurements. We show that O4A can reach long-range goals in 8 simulated Gibson indoor environments, and further demonstrate successful real-world navigation using a Jackal UGV platform.
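A minimal sketch of navigation as potential minimization in latent space, as described above (the encoder, geodesic regressor, and the enumeration of candidate next observations are toy stand-ins, not the O4A code):

```python
import torch
import torch.nn as nn

encoder = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 16))          # image -> latent code
geodesic = nn.Sequential(nn.Linear(32, 32), nn.ReLU(), nn.Linear(32, 1))   # latent pair -> potential

def potential(z, z_goal):
    """Learned potential: low near the goal, acting as an attractor in latent space."""
    return geodesic(torch.cat([z, z_goal], dim=-1))

def greedy_action(goal_image, candidate_next_images):
    """Pick the action whose predicted next observation has the lowest potential."""
    z_goal = encoder(goal_image)
    values = torch.stack([potential(encoder(img), z_goal) for img in candidate_next_images])
    return int(values.argmin())

goal = torch.rand(1, 3, 32, 32)
candidates = [torch.rand(1, 3, 32, 32) for _ in range(4)]   # imagined outcome of 4 actions
print("action:", greedy_action(goal, candidates))
```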

+ + +

People

+
+ + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Sacha Morin + + + + + + +

    + Sacha Morin +


+
+ + +
Coadvisor: Guy Wolf     + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Miguel Saavedra-Ruiz + + + + + + +

    + Miguel Saavedra-Ruiz +


+
+ + + + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/projects/taskography.html b/projects/taskography.html new file mode 100644 index 0000000..3e22da0 --- /dev/null +++ b/projects/taskography.html @@ -0,0 +1,300 @@ + + + + + + + + + Robotics Group @ University of Montreal | Taskography - Evaluating robot task planning over large 3D scene graphs + + + + + + + + + + + +
+ + + + + Taskography - Evaluating robot task planning over large 3D scene graphs + + + + +

Taskography

+ + + +

People

+
+ + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Krishna Murthy Jatavallabhula + + + + + + +

    + Krishna Murthy Jatavallabhula +


+
+ + + + +
Current Position: PostDoc at MIT with Antonio Torralba and Joshua Tenenbaum + +
+ +
+ + +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ + + + + + + + + + + Liam Paull + + + + + + +

    + Liam Paull +


+
+ +
Interests: Robot perception, uncertainty, sim2real, and robot benchmarking + + + + +
+ +
+ + +
+
+ + + + +
+ + +
+

+ + + + | + + + + | + + + + + +

+
+ +
+ + + + + + + + + diff --git a/publications/index.html b/publications/index.html new file mode 100644 index 0000000..96c6f55 --- /dev/null +++ b/publications/index.html @@ -0,0 +1,7381 @@ + + + + + + + + + Robotics Group @ University of Montreal | Publications + + + + + + + + + + + +
+ + + + + + + +
+
+
+
+
Publications
+
+
+
+
+ +
+
+
2023
+
+
+
  1. + + + + + + + +
    + +
    + + + + + +
    + + + + + + ConceptFusion: Open-set Multimodal 3D Mapping + + + + + + + Krishna Murthy Jatavallabhula, + + + + + + + + + Alihusein Kuwajerwala, + + + + + + + + + Qiao Gu, + + + + + + + + + Mohd Omama, + + + + + + + + + Tao Chen, + + + + + + + + + Shuang Li, + + + + + + + + + Ganesh Iyer, + + + + + + + + + Soroush Saryazdi, + + + + + + + + + Nikhil Keetha, + + + + + + + + + Ayush Tewari, + + + + + + + + + Joshua B. Tenenbaum, + + + + + + + + + Celso Miguel de Melo, + + + + + + + + + Madhava Krishna, + + + + + + + + + Liam Paull, + + + + + + + + + Florian Shkurti, + + + + + + + and + + + Antonio Torralba + + + + + + + + + Robotics: Science and Systems (RSS) + + + 2023 + + + + + + + + Abstract + + + arXiv + + + + + + + + + Project Page + + + + Video + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
+ +
+
+
2022
+
+
+
  1. + + + + + + + +
    + +
    + + + + + +
    + + + + + + Hierarchical Reinforcement Learning for Precise Soccer Shooting Skills using a Quadrupedal Robot + + + + + + + Yandong Ji, + + + + + + + + + Zhongyu Li*, + + + + + + + + + Yinan Sun, + + + + + + + + + Xue Bin Peng, + + + + + + + + + Sergey Levine, + + + + + + + + + Glen Berseth, + + + + + + + and + + + Koushil Sreenath + + + + + + + + + In Proc. IEEE/RSJ Intl Conf on Intelligent Robots and Systems (IROS 2022) + + + 2022 + + + + + + + + Abstract + + + arXiv + + + + + + + + + Project Page + + + + Video + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
  2. +
  3. + + + + + + + +
    + +
    + + + + + +
+ + AnyMorph: Learning Transferable Policies By Inferring Agent Morphology + + Brandon Trabucco, + + Mariano Phielipp, + + and + + Glen Berseth + + International Conference on Machine Learning + + 2022 + + Abstract + + arXiv + + Project Page + + BibTeX + +
    + +
    + + + +
    + +
    + +
    + +
    +
  4. +
  5. + + + + + + + +
    + +
    + + + + + +
    + + + + + + Lifelong Topological Visual Navigation + + + + + + + Rey Reza Wiyatno, + + + + + + + + + Anqi Xu, + + + + + + + and + + + Liam Paull + + + + + + + + + IEEE Robotics and Automation Letters + + + 2022 + + + + + + + + Abstract + + + arXiv + + + + + + + + + Project Page + + + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
  6. +
  7. + + + + + + + +
    + +
    + + + + + +
    + + + + + + Sample efficient deep reinforcement learning via uncertainty estimation + + + + + + + Vincent Mai, + + + + + + + + + Kaustubh Mani, + + + + + + + and + + + Liam Paull + + + + + + + + + International Conference on Learning Representations (ICLR) + + + 2022 + + + + + + + + Abstract + + + arXiv + + + + + + + + + + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
  8. +
  9. + + + + + + + +
    + +
    + + + + + +
    + + + + + + Monocular Robot Navigation with Self-Supervised Pretrained Vision Transformers + + + + + + + Miguel Saavedra-Ruiz, + + + + + + + + + Sacha Morin, + + + + + + + and + + + Liam Paull + + + + + + + + + In + + + 2022 + + + + + + + + Abstract + + + arXiv + + + + + + + + + + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
  10. +
  11. + + + + + + + +
    + +
    + + + + + +
    + + + + + + Generalization Games for Reinforcement Learning + + + + + + + Manfred Diaz, + + + + + + + + + Charlie Gauthier, + + + + + + + + + Glen Berseth, + + + + + + + and + + + Liam Paull + + + + + + + + + In ICLR 2022 Workshop on Gamification and Multiagent Solutions + + + 2022 + + + + + + + + Abstract + + + + OpenReview + + + + + + + + + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
  12. +
  13. + + + + + + + +
    + +
    + + + + + +
    + + + + + + f-Cal: Aleatoric uncertainty quantification for robot perception via calibrated neural regression + + + + + + + Dhaivat Bhatt, + + + + + + + + + Kaustubh Mani, + + + + + + + + + Dishank Bansal, + + + + + + + + + Krishna Murthy, + + + + + + + + + Hanju Lee, + + + + + + + and + + + Liam Paull + + + + + + + + + In 2022 International Conference on Robotics and Automation (ICRA) + + + 2022 + + + + + + + + Abstract + + + + + + PDF + + + + + + Project Page + + + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
+ +
+
+
2021
+
+
+
  1. + + + + + + + +
    + +
    + + + + + +
    + + + + + + Batch Inverse-Variance Weighting: Deep Heteroscedastic Regression + + + + + + + Vincent Mai, + + + + + + + + + Waleed Khamies, + + + + + + + and + + + Liam Paull + + + + + + + + + In ICML Workshop on Uncertainty & Robustness in Deep Learning + + + 2021 + + + + + + + + Abstract + + + arXiv + + + + + + + + + + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
  2. +
  3. + + + + + + + +
    + +
    + + + + + +
+ + LOCO: Adaptive exploration in reinforcement learning via local estimation of contraction coefficients + + Manfred Diaz + + and + + Pablo Samuel Castro + + In Self-Supervision for Reinforcement Learning Workshop-ICLR 2021 + + 2021 + + Abstract + + OpenReview + + PDF + + BibTeX + +
    + +
    + + + +
    + +
    + +
    + +
    +
  4. +
  5. + + + + + + + +
    + +
    + + + + + +
    + + + + + + Orthogonal over-parameterized training + + + + + + + Weiyang Liu, + + + + + + + + + Rongmei Lin, + + + + + + + + + Zhen Liu, + + + + + + + + + James M Rehg, + + + + + + + + + Liam Paull, + + + + + + + + + Li Xiong, + + + + + + + + + Le Song, + + + + + + + and + + + Adrian Weller + + + + + + + + + In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition + + + 2021 + + + + + + + + Abstract + + + arXiv + + + + + + + + + + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
  6. +
  7. + + + + + + + +
    + +
    + + + + + +
    + + + + + + Uncertainty-Aware Policy Sampling and Mixing for Safe Interactive Imitation Learning + + + + + + + Manfred Diaz, + + + + + + + + + Thomas Fevens, + + + + + + + and + + + Liam Paull + + + + + + + + + In 2021 18th Conference on Robots and Vision (CRV) + + + 2021 + + + + + + + + Abstract + + + + + + PDF + + + + + + + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
  8. +
  9. + + + + + + + +
    + +
    + + + + + +
    + + + + + + Deep Koopman Representation for Control over Images (DKRCI) + + + + + + + Philippe Laferrière, + + + + + + + + + Samuel Laferrière, + + + + + + + + + Steven Dahdah, + + + + + + + + + James Richard Forbes, + + + + + + + and + + + Liam Paull + + + + + + + + + In 2021 18th Conference on Robots and Vision (CRV) + + + 2021 + + + + + + + + Abstract + + + + + + PDF + + + + + + + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
  10. +
  11. + + + + + + + +
    + +
    + + + + + +
    + + + + + + Taskography: Evaluating robot task planning over large 3D scene graphs + + + + + + + Christopher Agia, + + + + + + + + + Krishna Murthy Jatavallabhula, + + + + + + + + + Mohamed Khodeir, + + + + + + + + + Ondrej Miksik, + + + + + + + + + Vibhav Vineet, + + + + + + + + + Mustafa Mukadam, + + + + + + + + + Liam Paull, + + + + + + + and + + + Florian Shkurti + + + + + + + + + In Conference on Robot Learning + + + 2021 + + + + + + + + Abstract + + + + OpenReview + + + + PDF + + + + + + Project Page + + + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
  12. +
  13. + + + + + + + +
    + +
    + + + + + +
    + + + + + + On Assessing the Usefulness of Proxy Domains for Developing and Evaluating Embodied Agents + + + + + + + Anthony Courchesne, + + + + + + + + + Andrea Censi, + + + + + + + and + + + Liam Paull + + + + + + + + + In 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) + + + 2021 + + + + + + + + Abstract + + + arXiv + + + + + + + + + + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
  14. +
  15. + + + + + + + +
    + +
    + + + + + +
    + + + + + + Iterative teaching by label synthesis + + + + + + + Weiyang Liu, + + + + + + + + + Zhen Liu, + + + + + + + + + Hanchen Wang, + + + + + + + + + Liam Paull, + + + + + + + + + Bernhard Schölkopf, + + + + + + + and + + + Adrian Weller + + + + + + + + + Advances in Neural Information Processing Systems (NeurIPS) + + + 2021 + + + + + + + + Abstract + + + + OpenReview + + + + + + + + + + + + BibTeX + + + + + +
    + +
    + + + +
    + +
    + +
    + +
    +
+ +
+
+
2020
- gradSLAM: Dense SLAM meets automatic differentiation. Jatavallabhula Krishna Murthy, Ganesh Iyer, and Liam Paull. In International Conference on Robotics and Automation (ICRA), 2020. Links: Abstract, arXiv, Project Page, Code, BibTeX.
- La-MAML: Look-ahead Meta Learning for Continual Learning. Gunshi Gupta, Karmesh Yadav, and Liam Paull. In Neural Information Processing Systems (NeurIPS), 2020. Oral (top 1.1%). Links: Abstract, arXiv, Project Page, BibTeX.
- Your GAN is Secretly an Energy-based Model and You Should Use Discriminator Driven Latent Sampling. Tong Che, Ruixiang Zhang, Jascha Sohl-Dickstein, Hugo Larochelle, Liam Paull, Yuan Cao, and Yoshua Bengio. In Neural Information Processing Systems (NeurIPS), 2020. Links: Abstract, arXiv, BibTeX.
- Curriculum in Gradient-Based Meta-Reinforcement Learning. Bhairav Mehta, Tristan Deleu, Sharath Chandra Raparthy, Christopher Pal, and Liam Paull. In BETR-RL Workshop, 2020. Links: Abstract, arXiv, BibTeX.
- Generating Automatic Curricula via Self-Supervised Active Domain Randomization. Sharath Chandra Raparthy, Bhairav Mehta, and Liam Paull. In BETR-RL Workshop, 2020. Links: Abstract, arXiv, Code, BibTeX.
- The AI Driving Olympics at NeurIPS 2018. Julian Zilly, Jacopo Tani, Breandan Considine, Bhairav Mehta, Andrea F Daniele, Manfred Diaz, Gianmarco Bernasconi, Jan Ruch, Florian Golemo, A Kirsten Bowser, Matthew R Walter, Ruslan Hristov, Sunil Mallya, Emilio Frazzoli, Andrea Censi, and Liam Paull. Springer, 2020. Links: Abstract, arXiv, BibTeX.
- Probabilistic Object Detection: Strengths, Weaknesses, and Opportunities. Dhaivat Bhatt, Dishank Bansal, Gunshi Gupta, Krishna Murthy Jatavallabhula, Hanju Lee, and Liam Paull. In ICML Workshop on AI for Autonomous Driving, 2020. Links: Abstract, Project Page, BibTeX.
- MapLite: Autonomous Intersection Navigation without a Detailed Prior Map. Teddy Ort, Krishna Murthy, Rohan Banerjee, Sai Krishna Gottipati, Dhaivat Bhatt, Igor Gilitschenski, Liam Paull, and Daniela Rus. IEEE Robotics and Automation Letters, 2020. Links: Abstract, PDF, BibTeX.
- Integrated benchmarking and design for reproducible and accessible evaluation of robotic agents. Jacopo Tani, Andrea F Daniele, Gianmarco Bernasconi, Amaury Camus, Aleksandar Petrov, Anthony Courchesne, Bhairav Mehta, Rohit Suri, Tomasz Zaluska, Matthew R Walter, and others. In 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2020. Links: Abstract, arXiv, BibTeX.
- Perceptual generative autoencoders. Zijun Zhang, Ruixiang Zhang, Zongpeng Li, Yoshua Bengio, and Liam Paull. In International Conference on Machine Learning, 2020. Links: Abstract, arXiv, BibTeX.
- gradSim: Differentiable simulation for system identification and visuomotor control. J Krishna Murthy, Miles Macklin, Florian Golemo, Vikram Voleti, Linda Petrini, Martin Weiss, Breandan Considine, Jérôme Parent-Lévesque, Kevin Xie, Kenny Erleben, and others. In International Conference on Learning Representations, 2020. Links: Abstract, arXiv, Project Page, BibTeX.
2019
- A Data-Efficient Framework for Training and Sim-to-Real Transfer of Navigation Policies. Homanga Bharadhwaj, Zihan Wang, Yoshua Bengio, and Liam Paull. In IEEE International Conference on Robotics and Automation (ICRA), 2019. Links: Abstract, arXiv, BibTeX.
- Deep Active Localization. Sai Krishna, Keehong Seo, Dhaivat Bhatt, Vincent Mai, Krishna Murthy, and Liam Paull. In IEEE Robotics and Automation Letters (RAL), 2019. Links: Abstract, arXiv, Code, BibTeX.
- Active Domain Randomization. Bhairav Mehta, Manfred Diaz, Florian Golemo, Christopher Pal, and Liam Paull. In Conference on Robot Learning (CoRL), 2019. Links: Abstract, arXiv, Code, BibTeX.
2018
- Probabilistic cooperative mobile robot area coverage and its application to autonomous seabed mapping. Liam Paull, Mae Seto, John J Leonard, and Howard Li. 2018. Links: Abstract, BibTeX.
- Autonomous Vehicle Navigation in Rural Environments without Detailed Prior Maps. Teddy Ort, Liam Paull, and Daniela Rus. In IEEE International Conference on Robotics and Automation (ICRA), 2018. Links: Abstract, arXiv, BibTeX.
- Local Positioning System Using UWB Range Measurements for an Unmanned Blimp. Vincent Mai, Mina Kamel, Matthias Krebs, Andreas Schaffner, Daniel Meier, Liam Paull, and Roland Siegwart. 2018. Links: Abstract, arXiv, BibTeX.
- Geometric Consistency for Self-Supervised End-to-End Visual Odometry. Ganesh Iyer, J Krishna Murthy, K Gunshi Gupta, and Liam Paull. In CVPR Workshop on Deep Learning for Visual SLAM, 2018. Links: Abstract, arXiv, Project Page, BibTeX.
- Learning steering bounds for parallel autonomous systems. Alexander Amini, Liam Paull, Thomas Balch, Sertac Karaman, and Daniela Rus. In IEEE International Conference on Robotics and Automation (ICRA), 2018. Links: Abstract, arXiv, BibTeX.
2017
- Duckietown: an open, inexpensive and flexible platform for autonomy education and research. Liam Paull, Jacopo Tani, Heejin Ahn, Javier Alonso-Mora, Luca Carlone, Michal Cap, Yu Fan Chen, Changhyun Choi, Jeff Dusek, Yajun Fang, and others. In IEEE International Conference on Robotics and Automation (ICRA), 2017. Links: Abstract, arXiv, BibTeX.
- Hybrid control and learning with coresets for autonomous vehicles. Guy Rosman, Liam Paull, and Daniela Rus. In IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2017. Links: Abstract, arXiv, BibTeX.
- Parallel autonomy in automated vehicles: Safe motion generation with minimal intervention. Wilko Schwarting, Javier Alonso-Mora, Liam Paull, Sertac Karaman, and Daniela Rus. In IEEE International Conference on Robotics and Automation (ICRA), 2017. Links: Abstract, PDF, BibTeX.
- A parallel autonomy research platform. Felix Naser, David Dorhout, Stephen Proulx, Scott Drew Pendleton, Hans Andersen, Wilko Schwarting, Liam Paull, Javier Alonso-Mora, Marcelo H Ang, Sertac Karaman, and others. In 2017 IEEE Intelligent Vehicles Symposium (IV), 2017. Links: Abstract, PDF, BibTeX.
- Safe nonlinear trajectory generation for parallel autonomy with a dynamic vehicle model. Wilko Schwarting, Javier Alonso-Mora, Liam Paull, Sertac Karaman, and Daniela Rus. IEEE Transactions on Intelligent Transportation Systems, 2017. Links: PDF, BibTeX.
2016
- A Unified Resource-Constrained Framework for Graph SLAM. Liam Paull, Guoquan Huang, and John J Leonard. In IEEE International Conference on Robotics and Automation (ICRA), 2016. Links: Abstract, arXiv, Poster, Slides, Code, BibTeX.
- SLAM with objects using a nonparametric pose graph. Beipeng Mu, Shih-Yuan Liu, Liam Paull, John Leonard, and Jonathan P How. In IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2016. Links: Abstract, arXiv, Video, BibTeX.
diff --git a/research.html b/research.html
index c184b5a..c637d21 100644
--- a/research.html
+++ b/research.html
@@ -1,8 +1,146 @@
----
-layout: default
-title: Research Projects
----
-
Robotics Group @ University of Montreal | Research Projects
@@ -13,12 +151,1968 @@
- {% comment %}
- Sort the projects by date, putting those without dates last
- {% endcomment %}
- {% assign projects_by_date = site.projects | sort: 'last-updated', 'first' %}
- {% assign projects_by_date = projects_by_date | reverse %}
- {% for p in projects_by_date %}
- {% include project-card.html project=p %}
- {% endfor %}
ConceptFusion: Open-set Multimodal 3D Mapping
ConceptFusion builds open-set 3D maps that can be queried via text, click, image, or audio. Given a series of RGB-D images, our system builds a 3D scene representation that is inherently multimodal by leveraging foundation models such as CLIP, and therefore does not require any additional training or fine-tuning.
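To make the "query by text" idea concrete, here is a minimal sketch (not the authors' implementation) of ranking fused map points against a CLIP text embedding; `point_features` and `text_embedding` are assumed to be precomputed by the encoder and fusion stages described above.

```python
import numpy as np

def text_query(point_features, text_embedding):
    """Rank fused map points by cosine similarity to a CLIP text embedding.

    point_features: (N, D) array of per-point fused features (assumed precomputed).
    text_embedding: (D,) embedding of the query string.
    Returns per-point similarity scores in [-1, 1]; higher means more relevant.
    """
    pf = point_features / np.linalg.norm(point_features, axis=1, keepdims=True)
    te = text_embedding / np.linalg.norm(text_embedding)
    return pf @ te
```

The same similarity ranking applies to click, image, or audio queries, as long as the query is embedded into the same feature space as the map points.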
One-4-All: Neural Potential Fields for Embodied Navigation
An end-to-end, fully parametric method for image-goal navigation that leverages self-supervised and manifold learning to replace a topological graph with a geodesic regressor. During navigation, the geodesic regressor is used as an attractor in a potential function defined in latent space, allowing navigation to be framed as a minimization problem.
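A hedged sketch of how a learned geodesic regressor can act as a potential function during navigation; `encoder`, `geodesic_regressor`, and the candidate observations are hypothetical stand-ins for the learned modules described above, not the released code.

```python
import torch

def choose_next_action(encoder, geodesic_regressor, goal_img, candidate_next_imgs):
    """Greedy potential-descent step: pick the candidate observation whose latent
    is predicted to be geodesically closest to the goal latent."""
    with torch.no_grad():
        z_goal = encoder(goal_img)
        costs = [geodesic_regressor(encoder(img), z_goal) for img in candidate_next_imgs]
    return int(torch.argmin(torch.stack(costs)))  # index of the best candidate
```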
f-Cal: Calibrated aleatoric uncertainty estimation from neural networks for robot perception
f-Cal is a calibration method for probabilistic regression networks. Typical Bayesian neural networks are shown to be overconfident in their predictions, yet reliable, calibrated uncertainty estimates are critical if the predictions are to be used in downstream tasks. f-Cal is a straightforward loss function that can be employed to train any probabilistic neural regressor and obtain calibrated uncertainty estimates.
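As an illustration of what a calibration-oriented training objective can look like, the sketch below combines a Gaussian negative log-likelihood with a penalty that pushes the normalized residuals toward a standard normal distribution. This is one plausible distribution-matching form, not necessarily the exact f-Cal objective.

```python
import torch

def calibration_regularized_nll(mu, sigma, y, lam=0.1):
    """Gaussian NLL plus a simple distribution-matching penalty on the z-scores.
    A plausible sketch of the idea (drive (y - mu) / sigma toward N(0, 1)),
    not necessarily the exact f-Cal loss."""
    z = (y - mu) / sigma
    nll = 0.5 * (z ** 2 + 2 * torch.log(sigma)).mean()
    moment_penalty = z.mean() ** 2 + (z.var() - 1.0) ** 2  # match first two moments of N(0, 1)
    return nll + lam * moment_penalty
```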
Inverse Variance Reinforcement Learning
Improving sample efficiency in deep reinforcement learning by mitigating the impact of heteroscedastic noise in the bootstrapped target using uncertainty estimation.
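The core idea, down-weighting TD errors whose bootstrapped targets are noisy, can be sketched as inverse-variance weighting of the per-sample loss. The variance estimate itself (for example, from an ensemble of target networks) is assumed to be given; this is a generic sketch, not the paper's exact algorithm.

```python
import torch

def inverse_variance_td_loss(q_pred, td_target, target_var, eps=1e-6):
    """Weight each sample's squared TD error by the inverse of the estimated
    variance of its bootstrapped target, so noisy targets contribute less."""
    weights = 1.0 / (target_var + eps)
    weights = weights / weights.sum()  # normalize so the loss scale stays comparable
    return (weights * (q_pred - td_target) ** 2).sum()
```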
Lifelong Topological Visual Navigation
A learning-based topological visual navigation method with graph update strategies that improves lifelong navigation performance over time.
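One plausible form of a graph update rule (an illustrative assumption, not necessarily the paper's exact strategy): track traversal failures per edge and prune edges that keep failing, so the topological map improves as the robot keeps navigating.

```python
import networkx as nx

def update_edge(graph: nx.Graph, u, v, traversal_succeeded: bool, fail_limit: int = 3):
    """Reset the failure count on success; prune the edge after repeated failures."""
    if traversal_succeeded:
        graph.edges[u, v]["failures"] = 0
        return
    graph.edges[u, v]["failures"] = graph.edges[u, v].get("failures", 0) + 1
    if graph.edges[u, v]["failures"] >= fail_limit:
        graph.remove_edge(u, v)
```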
Taskography: Evaluating robot task planning over large 3D scene graphs
Taskography is the first large-scale robotic task planning benchmark over 3D scene graphs (3DSGs). While most benchmarking efforts in this area focus on vision-based planning, we systematically study symbolic planning in order to decouple planning performance from visual representation learning.
gradSim
gradSim is a framework that overcomes the dependence on 3D supervision by leveraging differentiable multiphysics simulation and differentiable rendering to jointly model the evolution of scene dynamics and image formation.
Collaborators: Miles Macklin, Vikram Voleti, Linda Petrini, Martin Weiss, Jerome Parent-Levesque, Kevin Xie, Kenny Erleben, Florian Shkurti, Derek Nowrouzerzahrai, Sanja Fidler
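The general recipe, backpropagating a pixel-space loss through a differentiable simulator and renderer to recover physical parameters from video, can be sketched as follows. Here `sim_step` and `render` are hypothetical differentiable modules standing in for the physics and rendering stages, not the gradSim API.

```python
import torch

def identify_parameters(sim_step, render, target_video, theta_init, steps=200, lr=1e-2):
    """Recover physical parameters from video alone by gradient descent on a
    photometric loss computed through differentiable simulation and rendering."""
    theta = theta_init.clone().requires_grad_(True)
    opt = torch.optim.Adam([theta], lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        states = sim_step(theta, horizon=target_video.shape[0])  # simulate dynamics
        frames = render(states)                                  # differentiable image formation
        loss = ((frames - target_video) ** 2).mean()             # compare to observed video
        loss.backward()                                          # gradients flow through sim + render
        opt.step()
    return theta.detach()
```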
gradslam
gradslam is an open-source framework providing differentiable building blocks for simultaneous localization and mapping (SLAM) systems. We enable the use of dense SLAM subsystems from the comfort of PyTorch.
La-MAML
Look-ahead meta-learning for continual learning.
Collaborators: Karmesh Yadav
Active Domain Randomization
Making sim-to-real transfer more efficient.
Collaborators: Chris Pal
Self-supervised visual odometry estimation
A self-supervised deep network for visual odometry estimation from monocular imagery.
Deep Active Localization
Learned active localization, implemented on "real" robots.
Collaborators: Keehong Seo
diff --git a/screenshot.png b/screenshot.png
deleted file mode 100644
index 3063a2a..0000000
Binary files a/screenshot.png and /dev/null differ