From 1e9f838269d2eadb6c1658db62e77ad9c6133182 Mon Sep 17 00:00:00 2001 From: Avasam Date: Fri, 20 Oct 2023 15:34:24 -0400 Subject: [PATCH 1/5] Doc & config update --- .github/workflows/lint-and-build.yml | 258 ++++++------ .sonarcloud.properties | 2 +- README.md | 560 +++++++++++++-------------- docs/build instructions.md | 70 ++-- pyproject.toml | 3 +- 5 files changed, 446 insertions(+), 447 deletions(-) diff --git a/.github/workflows/lint-and-build.yml b/.github/workflows/lint-and-build.yml index ff334daa..ed8ebdb1 100644 --- a/.github/workflows/lint-and-build.yml +++ b/.github/workflows/lint-and-build.yml @@ -1,129 +1,129 @@ -# https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions -name: Lint and build -on: - workflow_dispatch: # Allows manual builds - inputs: - excludeBuildNumber: - description: "Exclude build number" - required: true - default: false - type: boolean - push: - branches: - - main - - master - - dev* - paths: - - "**.py" - - "**.ui" - - ".github/workflows/lint-and-build.yml" - - "**/requirements.txt" - pull_request: - branches: - - main - - master - - dev* - paths: - - "**.py" - - "**.pyi" - - "**.ui" - - ".github/workflows/lint-and-build.yml" - - "**/requirements*.txt" - -env: - GITHUB_HEAD_REPOSITORY: ${{ github.event.pull_request.head.repo.full_name }} - GITHUB_EXCLUDE_BUILD_NUMBER: ${{ inputs.excludeBuildNumber }} - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - ruff: - runs-on: windows-latest - strategy: - fail-fast: false - # Ruff is version and platform sensible - matrix: - python-version: ["3.9", "3.10", "3.11"] - steps: - - name: Checkout ${{ github.repository }}/${{ github.ref }} - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - cache: "pip" - cache-dependency-path: "scripts/requirements*.txt" - - run: 
scripts/install.ps1 - shell: pwsh - - run: ruff check . - add-trailing-comma: - runs-on: windows-latest - steps: - - name: Checkout ${{ github.repository }}/${{ github.ref }} - uses: actions/checkout@v3 - - name: Set up Python 3.11 - uses: actions/setup-python@v4 - with: - python-version: "3.11" - - run: pip install add-trailing-comma - - name: Analysing the code with add-trailing-comma - run: add-trailing-comma $(git ls-files '**.py*') - Pyright: - runs-on: windows-latest - strategy: - fail-fast: false - # Pyright is version and platform sensible - matrix: - python-version: ["3.9", "3.10", "3.11"] - steps: - - name: Checkout ${{ github.repository }}/${{ github.ref }} - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - cache: "pip" - cache-dependency-path: "scripts/requirements*.txt" - - run: scripts/install.ps1 - shell: pwsh - - name: Analysing the code with Pyright - uses: jakebailey/pyright-action@v1 - with: - working-directory: src/ - Build: - runs-on: windows-latest - strategy: - fail-fast: false - # Only the Python version we plan on shipping matters. 
- matrix: - python-version: ["3.11"] - steps: - - name: Checkout ${{ github.repository }}/${{ github.ref }} - uses: actions/checkout@v3 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - cache: "pip" - cache-dependency-path: "scripts/requirements.txt" - - run: scripts/install.ps1 - shell: pwsh - - run: scripts/build.ps1 - shell: pwsh - - name: Upload Build Artifact - uses: actions/upload-artifact@v3 - with: - name: AutoSplit (Python ${{ matrix.python-version }}) - path: dist/AutoSplit* - if-no-files-found: error - - name: Upload Build logs - uses: actions/upload-artifact@v3 - with: - name: Build logs (Python ${{ matrix.python-version }}) - path: | - build/AutoSplit/*.toc - build/AutoSplit/*.txt - build/AutoSplit/*.html - if-no-files-found: error +# https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions +name: Lint and build +on: + workflow_dispatch: # Allows manual builds + inputs: + excludeBuildNumber: + description: "Exclude build number" + required: true + default: false + type: boolean + push: + branches: + - main + - master + - dev* + paths: + - "**.py" + - "**.ui" + - ".github/workflows/lint-and-build.yml" + - "**/requirements.txt" + pull_request: + branches: + - main + - master + - dev* + paths: + - "**.py" + - "**.pyi" + - "**.ui" + - ".github/workflows/lint-and-build.yml" + - "**/requirements*.txt" + +env: + GITHUB_HEAD_REPOSITORY: ${{ github.event.pull_request.head.repo.full_name }} + GITHUB_EXCLUDE_BUILD_NUMBER: ${{ inputs.excludeBuildNumber }} + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + ruff: + runs-on: windows-latest + strategy: + fail-fast: false + # Ruff is version and platform sensible + matrix: + python-version: ["3.10", "3.11"] + steps: + - name: Checkout ${{ github.repository }}/${{ github.ref }} + uses: actions/checkout@v3 + - name: Set up 
Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + cache-dependency-path: "scripts/requirements*.txt" + - run: scripts/install.ps1 + shell: pwsh + - run: ruff check . + add-trailing-comma: + runs-on: windows-latest + steps: + - name: Checkout ${{ github.repository }}/${{ github.ref }} + uses: actions/checkout@v3 + - name: Set up Python 3.11 + uses: actions/setup-python@v4 + with: + python-version: "3.11" + - run: pip install add-trailing-comma + - name: Analysing the code with add-trailing-comma + run: add-trailing-comma $(git ls-files '**.py*') + Pyright: + runs-on: windows-latest + strategy: + fail-fast: false + # Pyright is version and platform sensible + matrix: + python-version: ["3.10", "3.11"] + steps: + - name: Checkout ${{ github.repository }}/${{ github.ref }} + uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + cache-dependency-path: "scripts/requirements*.txt" + - run: scripts/install.ps1 + shell: pwsh + - name: Analysing the code with Pyright + uses: jakebailey/pyright-action@v1 + with: + working-directory: src/ + Build: + runs-on: windows-latest + strategy: + fail-fast: false + # Only the Python version we plan on shipping matters. 
+ matrix: + python-version: ["3.11"] + steps: + - name: Checkout ${{ github.repository }}/${{ github.ref }} + uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + cache: "pip" + cache-dependency-path: "scripts/requirements.txt" + - run: scripts/install.ps1 + shell: pwsh + - run: scripts/build.ps1 + shell: pwsh + - name: Upload Build Artifact + uses: actions/upload-artifact@v3 + with: + name: AutoSplit (Python ${{ matrix.python-version }}) + path: dist/AutoSplit* + if-no-files-found: error + - name: Upload Build logs + uses: actions/upload-artifact@v3 + with: + name: Build logs (Python ${{ matrix.python-version }}) + path: | + build/AutoSplit/*.toc + build/AutoSplit/*.txt + build/AutoSplit/*.html + if-no-files-found: error diff --git a/.sonarcloud.properties b/.sonarcloud.properties index ff239edf..64b00174 100644 --- a/.sonarcloud.properties +++ b/.sonarcloud.properties @@ -1 +1 @@ -sonar.python.version=3.9, 3.10, 3.11 +sonar.python.version=3.10, 3.11 diff --git a/README.md b/README.md index 25e54138..2eb0a087 100644 --- a/README.md +++ b/README.md @@ -1,280 +1,280 @@ - -# LiveSplit AutoSplit [![CodeQL](/../../actions/workflows/codeql-analysis.yml/badge.svg)](/../../actions/workflows/codeql-analysis.yml) [![Lint and build](/../../actions/workflows/lint-and-build.yml/badge.svg)](/../../actions/workflows/lint-and-build.yml) - -[![Maintainability Rating](https://sonarcloud.io/api/project_badges/measure?project=Avasam_AutoSplit&metric=sqale_rating)](https://sonarcloud.io/dashboard?id=Avasam_AutoSplit) -[![Reliability Rating](https://sonarcloud.io/api/project_badges/measure?project=Avasam_AutoSplit&metric=reliability_rating)](https://sonarcloud.io/dashboard?id=Avasam_AutoSplit) -[![Security Rating](https://sonarcloud.io/api/project_badges/measure?project=Avasam_AutoSplit&metric=security_rating)](https://sonarcloud.io/dashboard?id=Avasam_AutoSplit) -[![Code 
Smells](https://sonarcloud.io/api/project_badges/measure?project=Avasam_AutoSplit&metric=code_smells)](https://sonarcloud.io/summary/new_code?id=Avasam_AutoSplit) -[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=Avasam_AutoSplit&metric=alert_status)](https://sonarcloud.io/summary/new_code?id=Avasam_AutoSplit) -[![SemVer](https://badgen.net/badge/_/SemVer%20compliant/grey?label)](https://semver.org/) -[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff) -[![autopep8](https://badgen.net/badge/code%20style/autopep8/blue)](https://github.com/hhatto/autopep8) -[![Checked with pyright](https://microsoft.github.io/pyright/img/pyright_badge.svg)](https://microsoft.github.io/pyright/) -[![Checked with mypy](https://www.mypy-lang.org/static/mypy_badge.svg)](https://mypy-lang.org/) - -Easy to use image comparison based auto splitter for speedrunning on console or PC. - -This program can be used to automatically start, split, and reset your preferred speedrun timer by comparing images to a capture region. This allows you to focus more on your speedrun and less on managing your timer. It also improves the accuracy of your splits. It can be used in tandem with any speedrun timer that accepts hotkeys (LiveSplit, wsplit, etc.), and can be integrated with LiveSplit. - -![Example](/docs/2.0.0_gif.gif) - -# TUTORIAL - -## DOWNLOAD AND OPEN - -- Download the [latest version](/../../releases/latest) -- You can also check out the [latest dev builds](/../../actions/workflows/lint-and-build.yml?query=event%3Apush+is%3Asuccess) (requires a GitHub account) - (If you don't have a GitHub account, you can try [nightly.link](https://nightly.link/Toufool/AutoSplit/workflows/lint-and-build/dev)) - -### Compatibility - -- Windows 10 and 11. -- Python 3.9+ (Not required for normal use. 
Refer to the [build instructions](/docs/build%20instructions.md) if you'd like run the application directly in Python). - -## OPTIONS - -#### Split Image Folder - -- Supported image file types: PNG, JPEG, bitmaps, WebP, and [more](https://docs.opencv.org/4.8.0/d4/da8/group__imgcodecs.html#imread). -- Images can be any size and ratio. -- Images are matched in alphanumerical order. -- Recommended filenaming convention: `001_SplitName.png, 002_SplitName.png, 003_SplitName.png`... -- Custom split image settings are handled in the filename. See how [here](#custom-split-image-settings). -- To create split images, it is recommended to use AutoSplit's Take Screenshot button for accuracy. However, images can be created using any method including Print Screen and [Snipping Tool](https://support.microsoft.com/en-us/help/4027213/windows-10-open-snipping-tool-and-take-a-screenshot). - -#### Capture Region - -- This is the region that your split images are compared to. Usually, this is going to be the full game screen. -- Click "Select Region". -- Click and drag to form a rectangle over the region you want to capture. -- Adjust the x, y, width, and height of the capture region manually to make adjustments as needed. -- If you want to align your capture region by using a reference image, click "Align Region". -- You can freely move the window that the program is capturing, but resizing the window will cause the capture region to change. -- Once you are happy with your capture region, you may unselect Live Capture Region to decrease CPU usage if you wish. -- You can save a screenshot of the capture region to your split image folder using the Take Screenshot button. - -#### Avg. FPS - -- Calculates the average comparison rate of the capture region to split images. This value will likely be much higher than needed, so it is highly recommended to limit your FPS depending on the frame rate of the game you are capturing. 
- -### Settings - -#### Comparison Method - -- There are three comparison methods to choose from: L2 Norm, Histograms, and Perceptual Hash (or pHash). - - L2 Norm: This method should be fine to use for most cases. It finds the difference between each pixel, squares it, sums it over the entire image and takes the square root. This is very fast but is a problem if your image is high frequency. Any translational movement or rotation can cause similarity to be very different. - - Histograms: An explanation on Histograms comparison can be found [here](https://mpatacchiola.github.io/blog/2016/11/12/the-simplest-classifier-histogram-intersection.html). This is a great method to use if you are using several masked images. - > This algorithm is particular reliable when the colour is a strong predictor of the object identity. The histogram intersection [...] is robust to occluding objects in the foreground. - - Perceptual Hash: An explanation on pHash comparison can be found [here](http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html). It is highly recommended to NOT use pHash if you use masked images, or it'll be very inaccurate. - -#### Capture Method - - -- **Windows Graphics Capture** (fast, most compatible, capped at 60fps) - Only available in Windows 10.0.17134 and up. - Due to current technical limitations, Windows versions below 10.0.0.17763 require having at least one audio or video Capture Device connected and enabled. - Allows recording UWP apps, Hardware Accelerated and Exclusive Fullscreen windows. - Adds a yellow border on Windows 10 (not on Windows 11). - Caps at around 60 FPS. -- **BitBlt** (fastest, least compatible) - The best option when compatible. But it cannot properly record OpenGL, Hardware Accelerated or Exclusive Fullscreen windows. - The smaller the selected region, the more efficient it is. -- **Direct3D Desktop Duplication** (slower, bound to display) - Duplicates the desktop using Direct3D. 
- It can record OpenGL and Hardware Accelerated windows. - About 10-15x slower than BitBlt. Not affected by window size. - Overlapping windows will show up and can't record across displays. - This option may not be available for hybrid GPU laptops, see [D3DDD-Note-Laptops.md](/docs/D3DDD-Note-Laptops.md) for a solution. -- **Force Full Content Rendering** (very slow, can affect rendering) - Uses BitBlt behind the scene, but passes a special flag to PrintWindow to force rendering the entire desktop. - About 10-15x slower than BitBlt based on original window size and can mess up some applications' rendering pipelines. -- **Video Capture Device** - Uses a Video Capture Device, like a webcam, virtual cam, or capture card. - -#### Capture Device - -Select the Video Capture Device that you wanna use if selecting the `Video Capture Device` Capture Method. - - -#### Show Live Similarity - -- Displays the live similarity between the capture region and the current split image. This number is between 0 and 1, with 1 being a perfect match. - -#### Show Highest Similarity - -- Shows the highest similarity between the capture region and current split image. - -#### Current Similarity Threshold - -- When the live similarity goes above this value, the program hits your split hotkey and moves to the next split image. - -#### Default Similarity Threshold - -- This value will be set as the threshold for an image if there is no custom threshold set for that image. - -#### Default Delay Time - -- Time in milliseconds that the program waits before hitting the split hotkey for that specific split if there is no custom Delay Time set for that image. - -#### Default Pause Time - -- Time in seconds that the program stops comparison after a split if there is no custom Pause Time set for that image. Useful for if you have two of the same split images in a row and want to avoid double-splitting. Also useful for reducing CPU usage. 
- -#### Dummy splits when undoing / skipping - -AutoSplit will group dummy splits together with a real split when undoing/skipping. This basically allows you to tie one or more dummy splits to a real split to keep it as in sync as possible with the real splits in LiveSplit/wsplit. If they are out of sync, you can always use "Previous Image" and "Next Image". - -Examples: -Given these splits: 1 dummy, 2 normal, 3 dummy, 4 dummy, 5 normal, 6 normal. - -In this situation you would have only 3 splits in LiveSplit/wsplit (even though there are 6 split images, only 3 are "real" splits). This basically results in 3 groups of splits: 1st split is images 1 and 2. 2nd split is images 3, 4 and 5. 3rd split is image 6. - -- If you are in the 1st or 2nd image and press the skip key, it will end up on the 3rd image -- If you are in the 3rd, 4th or 5th image and press the undo key, it will end up on the 2nd image -- If you are in the 3rd, 4th or 5th image and press the skip key, it will end up on the 6th image -- If you are in the 6th image and press the undo key, it will end up on the 5th image - -#### Loop last Split Image to first Split Image - -If this option is enabled, when the last split meets the threshold and splits, AutoSplit will loop back to the first split image and continue comparisons. -If this option is disabled, when the last split meets the threshold and splits, AutoSplit will stop running comparisons. -This option does not loop single, specific images. See the Custom Split Image Settings section above for this feature. - -#### Start also Resets - -If this option is enabled, a "Start" command (ie: from the Start Image) will also send the "Reset" command. This is useful if you want to automatically restart your timer using the Start Image. Since AutoSplit won't be running and won't be checking for the Reset Image. - -Having the reset image check be active at all time would be a better, more organic solution in the future. 
But that is dependent on migrating to an observer pattern () and being able to reload all images. - -#### Enable auto Reset Image - -This option is mainly meant to be toggled with the `Toggle auto Reset Image` hotkey. You can enable it to temporarily disable the Reset Image if you make a mistake in your run that would cause the Reset Image to trigger. Like exiting back to the game's menu (aka Save&Quit). - -### Custom Split Image Settings - -- Each split image can have different thresholds, pause times, delay split times, loop amounts, and can be flagged. -- These settings are handled in the image's filename. -- **Custom thresholds** are place between parenthesis `()` in the filename. This value will override the default threshold. -- **Custom pause times** are placed between square brackets `[]` in the filename. This value will override the default pause time. -- **Custom delay times** are placed between hash signs `##` in the filename. Note that these are in milliseconds. For example, a 10 second split delay would be `#10000#`. You cannot skip or undo splits during split delays. -- A different **comparison method** can be specified with their 0-base index between carets `^^`: - - `^0^`: L2 Norm - - `^1^`: Histogram - - `^2^`: Perceptual Hash -- **Image loop** amounts are placed between at symbols `@@` in the filename. For example, a specific image that you want to split 5 times in a row would be `@5@`. The current loop # is conveniently located beneath the current split image. -- **Flags** are placed between curly brackets `{}` in the filename. Multiple flags are placed in the same set of curly brackets. Current available flags: - - `{d}` **dummy split image**. When matched, it moves to the next image without hitting your split hotkey. - - `{b}` split when **similarity goes below** the threshold rather than above. 
When a split image filename has this flag, the split image similarity will go above the threshold, do nothing, and then split the next time the similarity goes below the threshold. - - `{p}` **pause flag**. When a split image filename has this flag, it will hit your pause hotkey rather than your split hokey. -- Filename examples: - - `001_SplitName_(0.9)_[10].png` is a split image with a threshold of 0.9 and a pause time of 10 seconds. - - `002_SplitName_(0.9)_[10]_{d}.png` is the second split image with a threshold of 0.9, pause time of 10, and is a dummy split. - - `003_SplitName_(0.85)_[20]_#3500#.png` is the third split image with a threshold of 0.85, pause time of 20 and has a delay split time of 3.5 seconds. - - `004_SplitName_(0.9)_[10]_#3500#_@3@_{b}.png` is the fourth split image with a threshold of 0.9, pause time of 10 seconds, delay split time of 3.5 seconds, will loop 3 times, and will split when similarity is below the threshold rather than above. - -## Special images - -### How to Create a Masked Image - -Masked images are very useful if only a certain part of the capture region is consistent (for example, consistent text on the screen, but the background is always different). Histogram or L2 norm comparison is recommended if you use any masked images. It is highly recommended that you do NOT use pHash comparison if you use any masked images, or it'll be very inaccurate. - -The best way to create a masked image is to set your capture region as the entire game screen, take a screenshot, and use a program like [paint.net](https://www.getpaint.net/) to "erase" (make transparent) everything you don't want the program to compare. More on creating images with transparency using paint.net can be found in [this tutorial](https://www.youtube.com/watch?v=v53kkUYFVn8). For visualization, here is what the capture region compared to a masked split image looks like if you would want to split on "Shine Get!" 
text in Super Mario Sunshine: - -![Mask Example](/docs/mask_example_image.png) - -### Reset Image - -You can have one (and only one) image with the keyword `reset` in its name. AutoSplit will press the reset button when it finds this image. This image will only be used for resets and it will not be tied to any split. You can set a threshold and pause time for it. The pause time is the amount of seconds AutoSplit will wait before checking for the Reset Image once the run starts. For example: `Reset_(0.95)_[10].png`. - -### Start Image - -The Start Image is similar to the Reset Image. You can only have one Start Image with the keyword `start_auto_splitter`.You can reload the image using the "`Reload Start Image`" button. The pause time is the amount of seconds AutoSplit will wait before starting comparisons of the first split image. Delay times will be used to delay starting your timer after the threshold is met. - -### Profiles - - -- Profiles use the extension `.toml`. Profiles can be saved and loaded by using `File -> Save Profile As...` and `File -> Load Profile`. -- The profile contains all of your settings, including information about the capture region. -- You can save multiple profiles, which is useful if you speedrun multiple games. -- If you change your display setup (like using a new monitor, or upgrading to Windows 11), you may need to readjust or reselect your Capture Region. - -## Timer Integration - -### Timer Global Hotkeys - -- Click "Set Hotkey" on each hotkey to set the hotkeys to AutoSplit. The Start / Split hotkey and Pause hotkey must be the same as the one used in your preferred timer program in order for the splitting/pausing to work properly. -- Make sure that Global Hotkeys are enabled in your speedrun timer. -- All of these actions can also be handled by their corresponding buttons. -- Note that pressing your Pause Hotkey does not serve any function in AutoSplit itself and is strictly used for the Pause flag. 
- -### LiveSplit Integration - -The AutoSplit LiveSplit Component will directly connect AutoSplit with LiveSplit. LiveSplit integration is only supported in AutoSplit v1.6.0 or higher. This integration will allow you to: - -- Use hotkeys directly from LiveSplit to control AutoSplit and LiveSplit together -- Load AutoSplit and any AutoSplit profile automatically when opening a LiveSplit layout. - -#### LiveSplit Integration Tutorial - -- Click [here](https://github.com/Toufool/LiveSplit.AutoSplitIntegration/raw/main/update/Components/LiveSplit.AutoSplitIntegration.dll) to download the latest component. -- Place the .dll file into your `[...]\LiveSplit\Components` folder. -- Open LiveSplit -> Right Click -> Edit Layout -> Plus Button -> Control -> AutoSplit Integration. -- Click Layout Settings -> AutoSplit Integration -- Click the Browse buttons to locate your AutoSplit Path (path to AutoSplit executable) and Profile Path (path to your AutoSplit `.toml` profile file) respectively. - - If you have not yet set saved a profile, you can do so using AutoSplit, and then go back and set your Settings Path. -- Once set, click OK, and then OK again to close the Layout Editor. Right click LiveSplit -> Save Layout to save your layout. AutoSplit and your selected profile will now open automatically when opening that LiveSplit Layout `.lsl` file. - -## Known Limitations - -- For many games, it will be difficult to find a split image for the last split of the run. -- The window of the capture region cannot be minimized. - -## Resources - -Still need help? - -- [Check if your issue already exists](../../issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc) - - If it does, upvote it 👍 - - If it doesn't, create a new one -- Join the [AutoSplit Discord -![AutoSplit Discord](https://badgen.net/discord/members/Qcbxv9y)](https://discord.gg/Qcbxv9y) - -## Contributing - -See [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for our contributing standards. 
-Refer to the [build instructions](/docs/build%20instructions.md) if you're interested in building the application yourself or running it in Python. - -Not a developer? You can still help through the following methods: - -- Donating (see link below) -- [Upvoting feature requests](../../issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc+label%3Aenhancement) you are interested in -- Sharing AutoSplit with other speedrunners -- Upvoting the following upstream issues in libraries and tools we use: - - - - - - - - - - - - - - - - - - - - - - - - - -## Credits - -- Created by [Toufool](https://twitter.com/Toufool) and [Faschz](https://twitter.com/faschz). -- [Harutaka Kawamura](https://github.com/harupy/) for the snipping tool code that I used to integrate into the autosplitter. -- [amaringos](https://twitter.com/amaringos) for the icon. -- [Zana_G](https://www.twitch.tv/zana_g) for motivating me to start this project back up and for all of the time spent testing and suggesting improvements. -- [Avasam](https://twitter.com/Avasam06) for their continued work on making an incredible amount of improvements and changes to AutoSplit while I have not had the time/motivation to do so. -- [KaDiWa](https://github.com/KaDiWa4) for the LiveSplit integration. -- [Tyron18](https://twitter.com/Tyron18_) for assisting with Windows 11 testing. - -## Donate - -If you enjoy using the program, please consider donating. Thank you! 
- -[![paypal](https://www.paypalobjects.com/en_US/i/btn/btn_donateCC_LG.gif)](https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=BYRHQG69YRHBA&item_name=AutoSplit+development¤cy_code=USD&source=url) + +# LiveSplit AutoSplit [![CodeQL](/../../actions/workflows/codeql-analysis.yml/badge.svg)](/../../actions/workflows/codeql-analysis.yml) [![Lint and build](/../../actions/workflows/lint-and-build.yml/badge.svg)](/../../actions/workflows/lint-and-build.yml) + +[![Maintainability Rating](https://sonarcloud.io/api/project_badges/measure?project=Avasam_AutoSplit&metric=sqale_rating)](https://sonarcloud.io/dashboard?id=Avasam_AutoSplit) +[![Reliability Rating](https://sonarcloud.io/api/project_badges/measure?project=Avasam_AutoSplit&metric=reliability_rating)](https://sonarcloud.io/dashboard?id=Avasam_AutoSplit) +[![Security Rating](https://sonarcloud.io/api/project_badges/measure?project=Avasam_AutoSplit&metric=security_rating)](https://sonarcloud.io/dashboard?id=Avasam_AutoSplit) +[![Code Smells](https://sonarcloud.io/api/project_badges/measure?project=Avasam_AutoSplit&metric=code_smells)](https://sonarcloud.io/summary/new_code?id=Avasam_AutoSplit) +[![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=Avasam_AutoSplit&metric=alert_status)](https://sonarcloud.io/summary/new_code?id=Avasam_AutoSplit) +[![SemVer](https://badgen.net/badge/_/SemVer%20compliant/grey?label)](https://semver.org/) +[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff) +[![autopep8](https://badgen.net/badge/code%20style/autopep8/blue)](https://github.com/hhatto/autopep8) +[![Checked with pyright](https://microsoft.github.io/pyright/img/pyright_badge.svg)](https://microsoft.github.io/pyright/) +[![Checked with mypy](https://www.mypy-lang.org/static/mypy_badge.svg)](https://mypy-lang.org/) + +Easy to use image comparison based auto splitter for speedrunning on 
console or PC. + +This program can be used to automatically start, split, and reset your preferred speedrun timer by comparing images to a capture region. This allows you to focus more on your speedrun and less on managing your timer. It also improves the accuracy of your splits. It can be used in tandem with any speedrun timer that accepts hotkeys (LiveSplit, wsplit, etc.), and can be integrated with LiveSplit. + +![Example](/docs/2.0.0_gif.gif) + +# TUTORIAL + +## DOWNLOAD AND OPEN + +- Download the [latest version](/../../releases/latest) +- You can also check out the [latest dev builds](/../../actions/workflows/lint-and-build.yml?query=event%3Apush+is%3Asuccess) (requires a GitHub account) + (If you don't have a GitHub account, you can try [nightly.link](https://nightly.link/Toufool/AutoSplit/workflows/lint-and-build/dev)) + +### Compatibility + +- Windows 10 and 11. +- Python 3.10+ (Not required for normal use. Refer to the [build instructions](/docs/build%20instructions.md) if you'd like run the application directly in Python). + +## OPTIONS + +#### Split Image Folder + +- Supported image file types: PNG, JPEG, bitmaps, WebP, and [more](https://docs.opencv.org/4.8.0/d4/da8/group__imgcodecs.html#imread). +- Images can be any size and ratio. +- Images are matched in alphanumerical order. +- Recommended filenaming convention: `001_SplitName.png, 002_SplitName.png, 003_SplitName.png`... +- Custom split image settings are handled in the filename. See how [here](#custom-split-image-settings). +- To create split images, it is recommended to use AutoSplit's Take Screenshot button for accuracy. However, images can be created using any method including Print Screen and [Snipping Tool](https://support.microsoft.com/en-us/help/4027213/windows-10-open-snipping-tool-and-take-a-screenshot). + +#### Capture Region + +- This is the region that your split images are compared to. Usually, this is going to be the full game screen. +- Click "Select Region". 
+- Click and drag to form a rectangle over the region you want to capture. +- Adjust the x, y, width, and height of the capture region manually to make adjustments as needed. +- If you want to align your capture region by using a reference image, click "Align Region". +- You can freely move the window that the program is capturing, but resizing the window will cause the capture region to change. +- Once you are happy with your capture region, you may unselect Live Capture Region to decrease CPU usage if you wish. +- You can save a screenshot of the capture region to your split image folder using the Take Screenshot button. + +#### Avg. FPS + +- Calculates the average comparison rate of the capture region to split images. This value will likely be much higher than needed, so it is highly recommended to limit your FPS depending on the frame rate of the game you are capturing. + +### Settings + +#### Comparison Method + +- There are three comparison methods to choose from: L2 Norm, Histograms, and Perceptual Hash (or pHash). + - L2 Norm: This method should be fine to use for most cases. It finds the difference between each pixel, squares it, sums it over the entire image and takes the square root. This is very fast but is a problem if your image is high frequency. Any translational movement or rotation can cause similarity to be very different. + - Histograms: An explanation on Histograms comparison can be found [here](https://mpatacchiola.github.io/blog/2016/11/12/the-simplest-classifier-histogram-intersection.html). This is a great method to use if you are using several masked images. + > This algorithm is particular reliable when the colour is a strong predictor of the object identity. The histogram intersection [...] is robust to occluding objects in the foreground. + - Perceptual Hash: An explanation on pHash comparison can be found [here](http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html). 
It is highly recommended to NOT use pHash if you use masked images, or it'll be very inaccurate.
+
+#### Capture Method
+
+
+- **Windows Graphics Capture** (fast, most compatible, capped at 60fps)
+  Only available in Windows 10.0.17134 and up.
+  Due to current technical limitations, Windows versions below 10.0.17763 require having at least one audio or video Capture Device connected and enabled.
+  Allows recording UWP apps, Hardware Accelerated and Exclusive Fullscreen windows.
+  Adds a yellow border on Windows 10 (not on Windows 11).
+  Caps at around 60 FPS.
+- **BitBlt** (fastest, least compatible)
+  The best option when compatible. But it cannot properly record OpenGL, Hardware Accelerated or Exclusive Fullscreen windows.
+  The smaller the selected region, the more efficient it is.
+- **Direct3D Desktop Duplication** (slower, bound to display)
+  Duplicates the desktop using Direct3D.
+  It can record OpenGL and Hardware Accelerated windows.
+  About 10-15x slower than BitBlt. Not affected by window size.
+  Overlapping windows will show up and can't record across displays.
+  This option may not be available for hybrid GPU laptops, see [D3DDD-Note-Laptops.md](/docs/D3DDD-Note-Laptops.md) for a solution.
+- **Force Full Content Rendering** (very slow, can affect rendering)
+  Uses BitBlt behind the scenes, but passes a special flag to PrintWindow to force rendering the entire desktop.
+  About 10-15x slower than BitBlt based on original window size and can mess up some applications' rendering pipelines.
+- **Video Capture Device**
+  Uses a Video Capture Device, like a webcam, virtual cam, or capture card.
+
+#### Capture Device
+
+Select the Video Capture Device that you want to use if selecting the `Video Capture Device` Capture Method.
+
+
+#### Show Live Similarity
+
+- Displays the live similarity between the capture region and the current split image. This number is between 0 and 1, with 1 being a perfect match. 
+
+#### Show Highest Similarity
+
+- Shows the highest similarity between the capture region and current split image.
+
+#### Current Similarity Threshold
+
+- When the live similarity goes above this value, the program hits your split hotkey and moves to the next split image.
+
+#### Default Similarity Threshold
+
+- This value will be set as the threshold for an image if there is no custom threshold set for that image.
+
+#### Default Delay Time
+
+- Time in milliseconds that the program waits before hitting the split hotkey for that specific split if there is no custom Delay Time set for that image.
+
+#### Default Pause Time
+
+- Time in seconds that the program stops comparison after a split if there is no custom Pause Time set for that image. Useful if you have two of the same split images in a row and want to avoid double-splitting. Also useful for reducing CPU usage.
+
+#### Dummy splits when undoing / skipping
+
+AutoSplit will group dummy splits together with a real split when undoing/skipping. This basically allows you to tie one or more dummy splits to a real split to keep it as in sync as possible with the real splits in LiveSplit/wsplit. If they are out of sync, you can always use "Previous Image" and "Next Image".
+
+Examples:
+Given these splits: 1 dummy, 2 normal, 3 dummy, 4 dummy, 5 normal, 6 normal.
+
+In this situation you would have only 3 splits in LiveSplit/wsplit (even though there are 6 split images, only 3 are "real" splits). This basically results in 3 groups of splits: 1st split is images 1 and 2. 2nd split is images 3, 4 and 5. 3rd split is image 6. 
+
+- If you are in the 1st or 2nd image and press the skip key, it will end up on the 3rd image
+- If you are in the 3rd, 4th or 5th image and press the undo key, it will end up on the 2nd image
+- If you are in the 3rd, 4th or 5th image and press the skip key, it will end up on the 6th image
+- If you are in the 6th image and press the undo key, it will end up on the 5th image
+
+#### Loop last Split Image to first Split Image
+
+If this option is enabled, when the last split meets the threshold and splits, AutoSplit will loop back to the first split image and continue comparisons.
+If this option is disabled, when the last split meets the threshold and splits, AutoSplit will stop running comparisons.
+This option does not loop single, specific images. See the Custom Split Image Settings section below for this feature.
+
+#### Start also Resets
+
+If this option is enabled, a "Start" command (i.e. from the Start Image) will also send the "Reset" command. This is useful if you want to automatically restart your timer using the Start Image, since AutoSplit won't be running and won't be checking for the Reset Image.
+
+Having the reset image check be active at all times would be a better, more organic solution in the future. But that is dependent on migrating to an observer pattern () and being able to reload all images.
+
+#### Enable auto Reset Image
+
+This option is mainly meant to be toggled with the `Toggle auto Reset Image` hotkey. You can enable it to temporarily disable the Reset Image if you make a mistake in your run that would cause the Reset Image to trigger. Like exiting back to the game's menu (aka Save&Quit).
+
+### Custom Split Image Settings
+
+- Each split image can have different thresholds, pause times, delay split times, loop amounts, and can be flagged.
+- These settings are handled in the image's filename.
+- **Custom thresholds** are placed between parentheses `()` in the filename. This value will override the default threshold. 
+- **Custom pause times** are placed between square brackets `[]` in the filename. This value will override the default pause time.
+- **Custom delay times** are placed between hash signs `##` in the filename. Note that these are in milliseconds. For example, a 10 second split delay would be `#10000#`. You cannot skip or undo splits during split delays.
+- A different **comparison method** can be specified with their 0-based index between carets `^^`:
+  - `^0^`: L2 Norm
+  - `^1^`: Histogram
+  - `^2^`: Perceptual Hash
+- **Image loop** amounts are placed between at symbols `@@` in the filename. For example, a specific image that you want to split 5 times in a row would be `@5@`. The current loop # is conveniently located beneath the current split image.
+- **Flags** are placed between curly brackets `{}` in the filename. Multiple flags are placed in the same set of curly brackets. Current available flags:
+  - `{d}` **dummy split image**. When matched, it moves to the next image without hitting your split hotkey.
+  - `{b}` split when **similarity goes below** the threshold rather than above. When a split image filename has this flag, the split image similarity will go above the threshold, do nothing, and then split the next time the similarity goes below the threshold.
+  - `{p}` **pause flag**. When a split image filename has this flag, it will hit your pause hotkey rather than your split hotkey.
+- Filename examples:
+  - `001_SplitName_(0.9)_[10].png` is a split image with a threshold of 0.9 and a pause time of 10 seconds.
+  - `002_SplitName_(0.9)_[10]_{d}.png` is the second split image with a threshold of 0.9, pause time of 10, and is a dummy split.
+  - `003_SplitName_(0.85)_[20]_#3500#.png` is the third split image with a threshold of 0.85, pause time of 20 and has a delay split time of 3.5 seconds. 
+  - `004_SplitName_(0.9)_[10]_#3500#_@3@_{b}.png` is the fourth split image with a threshold of 0.9, pause time of 10 seconds, delay split time of 3.5 seconds, will loop 3 times, and will split when similarity is below the threshold rather than above.
+
+## Special images
+
+### How to Create a Masked Image
+
+Masked images are very useful if only a certain part of the capture region is consistent (for example, consistent text on the screen, but the background is always different). Histogram or L2 norm comparison is recommended if you use any masked images. It is highly recommended that you do NOT use pHash comparison if you use any masked images, or it'll be very inaccurate.
+
+The best way to create a masked image is to set your capture region as the entire game screen, take a screenshot, and use a program like [paint.net](https://www.getpaint.net/) to "erase" (make transparent) everything you don't want the program to compare. More on creating images with transparency using paint.net can be found in [this tutorial](https://www.youtube.com/watch?v=v53kkUYFVn8). For visualization, here is what the capture region compared to a masked split image looks like if you would want to split on "Shine Get!" text in Super Mario Sunshine:
+
+![Mask Example](/docs/mask_example_image.png)
+
+### Reset Image
+
+You can have one (and only one) image with the keyword `reset` in its name. AutoSplit will press the reset button when it finds this image. This image will only be used for resets and it will not be tied to any split. You can set a threshold and pause time for it. The pause time is the number of seconds AutoSplit will wait before checking for the Reset Image once the run starts. For example: `Reset_(0.95)_[10].png`.
+
+### Start Image
+
+The Start Image is similar to the Reset Image. You can only have one Start Image with the keyword `start_auto_splitter`. You can reload the image using the "`Reload Start Image`" button. 
The pause time is the amount of seconds AutoSplit will wait before starting comparisons of the first split image. Delay times will be used to delay starting your timer after the threshold is met. + +### Profiles + + +- Profiles use the extension `.toml`. Profiles can be saved and loaded by using `File -> Save Profile As...` and `File -> Load Profile`. +- The profile contains all of your settings, including information about the capture region. +- You can save multiple profiles, which is useful if you speedrun multiple games. +- If you change your display setup (like using a new monitor, or upgrading to Windows 11), you may need to readjust or reselect your Capture Region. + +## Timer Integration + +### Timer Global Hotkeys + +- Click "Set Hotkey" on each hotkey to set the hotkeys to AutoSplit. The Start / Split hotkey and Pause hotkey must be the same as the one used in your preferred timer program in order for the splitting/pausing to work properly. +- Make sure that Global Hotkeys are enabled in your speedrun timer. +- All of these actions can also be handled by their corresponding buttons. +- Note that pressing your Pause Hotkey does not serve any function in AutoSplit itself and is strictly used for the Pause flag. + +### LiveSplit Integration + +The AutoSplit LiveSplit Component will directly connect AutoSplit with LiveSplit. LiveSplit integration is only supported in AutoSplit v1.6.0 or higher. This integration will allow you to: + +- Use hotkeys directly from LiveSplit to control AutoSplit and LiveSplit together +- Load AutoSplit and any AutoSplit profile automatically when opening a LiveSplit layout. + +#### LiveSplit Integration Tutorial + +- Click [here](https://github.com/Toufool/LiveSplit.AutoSplitIntegration/raw/main/update/Components/LiveSplit.AutoSplitIntegration.dll) to download the latest component. +- Place the .dll file into your `[...]\LiveSplit\Components` folder. 
+- Open LiveSplit -> Right Click -> Edit Layout -> Plus Button -> Control -> AutoSplit Integration.
+- Click Layout Settings -> AutoSplit Integration.
+- Click the Browse buttons to locate your AutoSplit Path (path to AutoSplit executable) and Profile Path (path to your AutoSplit `.toml` profile file) respectively.
+  - If you have not yet saved a profile, you can do so using AutoSplit, and then go back and set your Settings Path.
+- Once set, click OK, and then OK again to close the Layout Editor. Right click LiveSplit -> Save Layout to save your layout. AutoSplit and your selected profile will now open automatically when opening that LiveSplit Layout `.lsl` file.
+
+## Known Limitations
+
+- For many games, it will be difficult to find a split image for the last split of the run.
+- The window of the capture region cannot be minimized.
+
+## Resources
+
+Still need help?
+
+- [Check if your issue already exists](../../issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc)
+  - If it does, upvote it 👍
+  - If it doesn't, create a new one
+- Join the [AutoSplit Discord
+![AutoSplit Discord](https://badgen.net/discord/members/Qcbxv9y)](https://discord.gg/Qcbxv9y)
+
+## Contributing
+
+See [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for our contributing standards.
+Refer to the [build instructions](/docs/build%20instructions.md) if you're interested in building the application yourself or running it in Python.
+
+Not a developer? You can still help through the following methods:
+
+- Donating (see link below)
+- [Upvoting feature requests](../../issues?q=is%3Aissue+is%3Aopen+sort%3Areactions-%2B1-desc+label%3Aenhancement) you are interested in
+- Sharing AutoSplit with other speedrunners
+- Upvoting the following upstream issues in libraries and tools we use:
+  - 
+  - 
+  - 
+  - 
+  - 
+  - 
+  - 
+  - 
+  - 
+  - 
+  - 
+  - 
+
+## Credits
+
+- Created by [Toufool](https://twitter.com/Toufool) and [Faschz](https://twitter.com/faschz). 
+- [Harutaka Kawamura](https://github.com/harupy/) for the snipping tool code that I used to integrate into the autosplitter. +- [amaringos](https://twitter.com/amaringos) for the icon. +- [Zana_G](https://www.twitch.tv/zana_g) for motivating me to start this project back up and for all of the time spent testing and suggesting improvements. +- [Avasam](https://twitter.com/Avasam06) for their continued work on making an incredible amount of improvements and changes to AutoSplit while I have not had the time/motivation to do so. +- [KaDiWa](https://github.com/KaDiWa4) for the LiveSplit integration. +- [Tyron18](https://twitter.com/Tyron18_) for assisting with Windows 11 testing. + +## Donate + +If you enjoy using the program, please consider donating. Thank you! + +[![paypal](https://www.paypalobjects.com/en_US/i/btn/btn_donateCC_LG.gif)](https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=BYRHQG69YRHBA&item_name=AutoSplit+development¤cy_code=USD&source=url) diff --git a/docs/build instructions.md b/docs/build instructions.md index 49bc2004..6c483dc5 100644 --- a/docs/build instructions.md +++ b/docs/build instructions.md @@ -1,35 +1,35 @@ -# Install and Build instructions - -## Requirements - -### Windows - -- Microsoft Visual C++ 14.0 or greater may be required to build the executable. Get it with [Microsoft C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/). - -### All platforms - -- [Python](https://www.python.org/downloads/) 3.9+. -- [Node](https://nodejs.org) is optional, but required for complete linting. - - Alternatively you can install the [pyright python wrapper](https://pypi.org/project/pyright/) which has a bit of an overhead delay. -- [PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell) -- [VSCode](https://code.visualstudio.com/Download) is not required, but highly recommended. 
- - Everything already configured in the workspace, including Run (F5) and Build (Ctrl+Shift+B) commands, default shell, and recommended extensions. - - [PyCharm](https://www.jetbrains.com/pycharm/) is also a good Python IDE, but nothing is configured. If you are a PyCharm user, feel free to open a PR with all necessary workspace configurations! - -## Install and Build steps - -- Create and activate a virtual environment: - - Windows / PowerShell: - - `python -m venv .venv` - - `& ./.venv/Scripts/Activate.ps1` - - Unix / Bash: - - `python3 -m venv .venv` - - `source .venv/bin/activate` -- Run `./scripts/install.ps1` to install all dependencies. - - If you're having issues with the PySide generated code, you might want to first run `pip uninstall -y shiboken6 PySide PySide-Essentials` -- Run the app directly with `./scripts/start.ps1 [--auto-controlled]`. - - Or debug by pressing `F5` in VSCode. - - The `--auto-controlled` flag is passed when AutoSplit is started by LiveSplit. -- Run `./scripts/build.ps1` or press `CTRL+Shift+B` in VSCode to build an executable. -- Optional: Recompile resources after modifications by running `./scripts/compile_resources.ps1`. - - This should be done automatically by other scripts +# Install and Build instructions + +## Requirements + +### Windows + +- Microsoft Visual C++ 14.0 or greater may be required to build the executable. Get it with [Microsoft C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools/). + +### All platforms + +- [Python](https://www.python.org/downloads/) 3.10+. +- [Node](https://nodejs.org) is optional, but required for complete linting. + - Alternatively you can install the [pyright python wrapper](https://pypi.org/project/pyright/) which has a bit of an overhead delay. +- [PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell) +- [VSCode](https://code.visualstudio.com/Download) is not required, but highly recommended. 
+ - Everything already configured in the workspace, including Run (F5) and Build (Ctrl+Shift+B) commands, default shell, and recommended extensions. + - [PyCharm](https://www.jetbrains.com/pycharm/) is also a good Python IDE, but nothing is configured. If you are a PyCharm user, feel free to open a PR with all necessary workspace configurations! + +## Install and Build steps + +- Create and activate a virtual environment: + - Windows / PowerShell: + - `python -m venv .venv` + - `& ./.venv/Scripts/Activate.ps1` + - Unix / Bash: + - `python3 -m venv .venv` + - `source .venv/bin/activate` +- Run `./scripts/install.ps1` to install all dependencies. + - If you're having issues with the PySide generated code, you might want to first run `pip uninstall -y shiboken6 PySide PySide-Essentials` +- Run the app directly with `./scripts/start.ps1 [--auto-controlled]`. + - Or debug by pressing `F5` in VSCode. + - The `--auto-controlled` flag is passed when AutoSplit is started by LiveSplit. +- Run `./scripts/build.ps1` or press `CTRL+Shift+B` in VSCode to build an executable. +- Optional: Recompile resources after modifications by running `./scripts/compile_resources.ps1`. + - This should be done automatically by other scripts diff --git a/pyproject.toml b/pyproject.toml index 6eba3768..5db20d84 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ # https://docs.astral.sh/ruff/configuration/ [tool.ruff] -target-version = "py39" +target-version = "py310" line-length = 120 select = ["ALL"] preview = true @@ -85,7 +85,6 @@ allow-multiline = false [tool.ruff.isort] combine-as-imports = true split-on-trailing-comma = false -required-imports = ["from __future__ import annotations"] # Unlike isort, Ruff only counts relative imports as local-folder by default for know. 
# https://github.com/astral-sh/ruff/issues/3115 known-local-folder = [ From 895643af91b03809350a5384aa57c79b1b4f0234 Mon Sep 17 00:00:00 2001 From: Avasam Date: Fri, 20 Oct 2023 15:38:38 -0400 Subject: [PATCH 2/5] Re-lint --- pyproject.toml | 2 + .../DesktopDuplicationCaptureMethod.py | 130 +++++++++--------- src/utils.py | 3 +- typings/cv2/mat_wrapper/__init__.pyi | 3 +- 4 files changed, 70 insertions(+), 68 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 5db20d84..f4273448 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,6 +21,8 @@ ignore = [ "ERA001", # eradicate: commented-out-code # contextlib.suppress is roughly 3x slower than try/except "SIM105", # flake8-simplify: use-contextlib-suppress + # Negative performance impact + "UP038", # non-pep604-isinstance # Checked by type-checker (pyright) "ANN", # flake-annotations "PGH003", # blanket-type-ignore diff --git a/src/capture_method/DesktopDuplicationCaptureMethod.py b/src/capture_method/DesktopDuplicationCaptureMethod.py index ee07ac11..6dd0056b 100644 --- a/src/capture_method/DesktopDuplicationCaptureMethod.py +++ b/src/capture_method/DesktopDuplicationCaptureMethod.py @@ -1,65 +1,65 @@ -from __future__ import annotations - -import ctypes -from typing import TYPE_CHECKING, Union, cast - -import cv2 -import d3dshot -import numpy as np -import win32con -from typing_extensions import override -from win32 import win32gui - -from capture_method.BitBltCaptureMethod import BitBltCaptureMethod -from utils import GITHUB_REPOSITORY, get_window_bounds - -if TYPE_CHECKING: - from AutoSplit import AutoSplit - - -class DesktopDuplicationCaptureMethod(BitBltCaptureMethod): - name = "Direct3D Desktop Duplication" - short_description = "slower, bound to display" - description = ( - "\nDuplicates the desktop using Direct3D. " - + "\nIt can record OpenGL and Hardware Accelerated windows. " - + "\nAbout 10-15x slower than BitBlt. Not affected by window size. 
" - + "\nOverlapping windows will show up and can't record across displays. " - + "\nThis option may not be available for hybrid GPU laptops, " - + "\nsee D3DDD-Note-Laptops.md for a solution. " - + f"\nhttps://www.github.com/{GITHUB_REPOSITORY}#capture-method " - ) - - def __init__(self, autosplit: AutoSplit | None): - super().__init__(autosplit) - # Must not set statically as some laptops will throw an error - self.desktop_duplication = d3dshot.create(capture_output="numpy") - - @override - def get_frame(self, autosplit: AutoSplit): - selection = autosplit.settings_dict["capture_region"] - hwnd = autosplit.hwnd - hmonitor = ctypes.windll.user32.MonitorFromWindow(hwnd, win32con.MONITOR_DEFAULTTONEAREST) - if not hmonitor or not self.check_selected_region_exists(autosplit): - return None, False - - left_bounds, top_bounds, *_ = get_window_bounds(hwnd) - self.desktop_duplication.display = next( - display for display - in self.desktop_duplication.displays - if display.hmonitor == hmonitor - ) - offset_x, offset_y, *_ = win32gui.GetWindowRect(hwnd) - offset_x -= self.desktop_duplication.display.position["left"] - offset_y -= self.desktop_duplication.display.position["top"] - left = selection["x"] + offset_x + left_bounds - top = selection["y"] + offset_y + top_bounds - right = selection["width"] + left - bottom = selection["height"] + top - screenshot = cast( - Union[np.ndarray[int, np.dtype[np.generic]], None], - self.desktop_duplication.screenshot((left, top, right, bottom)), - ) - if screenshot is None: - return None, False - return cv2.cvtColor(screenshot, cv2.COLOR_RGB2BGRA), False +from __future__ import annotations + +import ctypes +from typing import TYPE_CHECKING, cast + +import cv2 +import d3dshot +import numpy as np +import win32con +from typing_extensions import override +from win32 import win32gui + +from capture_method.BitBltCaptureMethod import BitBltCaptureMethod +from utils import GITHUB_REPOSITORY, get_window_bounds + +if TYPE_CHECKING: + from 
AutoSplit import AutoSplit + + +class DesktopDuplicationCaptureMethod(BitBltCaptureMethod): + name = "Direct3D Desktop Duplication" + short_description = "slower, bound to display" + description = ( + "\nDuplicates the desktop using Direct3D. " + + "\nIt can record OpenGL and Hardware Accelerated windows. " + + "\nAbout 10-15x slower than BitBlt. Not affected by window size. " + + "\nOverlapping windows will show up and can't record across displays. " + + "\nThis option may not be available for hybrid GPU laptops, " + + "\nsee D3DDD-Note-Laptops.md for a solution. " + + f"\nhttps://www.github.com/{GITHUB_REPOSITORY}#capture-method " + ) + + def __init__(self, autosplit: AutoSplit | None): + super().__init__(autosplit) + # Must not set statically as some laptops will throw an error + self.desktop_duplication = d3dshot.create(capture_output="numpy") + + @override + def get_frame(self, autosplit: AutoSplit): + selection = autosplit.settings_dict["capture_region"] + hwnd = autosplit.hwnd + hmonitor = ctypes.windll.user32.MonitorFromWindow(hwnd, win32con.MONITOR_DEFAULTTONEAREST) + if not hmonitor or not self.check_selected_region_exists(autosplit): + return None, False + + left_bounds, top_bounds, *_ = get_window_bounds(hwnd) + self.desktop_duplication.display = next( + display for display + in self.desktop_duplication.displays + if display.hmonitor == hmonitor + ) + offset_x, offset_y, *_ = win32gui.GetWindowRect(hwnd) + offset_x -= self.desktop_duplication.display.position["left"] + offset_y -= self.desktop_duplication.display.position["top"] + left = selection["x"] + offset_x + left_bounds + top = selection["y"] + offset_y + top_bounds + right = selection["width"] + left + bottom = selection["height"] + top + screenshot = cast( + np.ndarray[int, np.dtype[np.generic]] | None, + self.desktop_duplication.screenshot((left, top, right, bottom)), + ) + if screenshot is None: + return None, False + return cv2.cvtColor(screenshot, cv2.COLOR_RGB2BGRA), False diff --git 
a/src/utils.py b/src/utils.py index 1851cdec..f913c059 100644 --- a/src/utils.py +++ b/src/utils.py @@ -10,11 +10,10 @@ from itertools import chain from platform import version from threading import Thread -from typing import TYPE_CHECKING, Any, TypeVar +from typing import TYPE_CHECKING, Any, TypeGuard, TypeVar import win32ui from cv2.typing import MatLike -from typing_extensions import TypeGuard from win32 import win32gui from winsdk.windows.ai.machinelearning import LearningModelDevice, LearningModelDeviceKind from winsdk.windows.media.capture import MediaCapture diff --git a/typings/cv2/mat_wrapper/__init__.pyi b/typings/cv2/mat_wrapper/__init__.pyi index 3d3906b4..0bfcd316 100644 --- a/typings/cv2/mat_wrapper/__init__.pyi +++ b/typings/cv2/mat_wrapper/__init__.pyi @@ -1,6 +1,7 @@ +from typing import TypeAlias + import numpy as np from _typeshed import Unused -from typing_extensions import TypeAlias __all__: list[str] = [] _NDArray: TypeAlias = np.ndarray[float, np.dtype[np.generic]] From aa2746da2c4138b89c62a6be9da49d9310492ba0 Mon Sep 17 00:00:00 2001 From: Avasam Date: Fri, 20 Oct 2023 15:39:49 -0400 Subject: [PATCH 3/5] Drop `from __future__ import annotations` --- PyInstaller/hooks/hook-requests.py | 10 +- src/AutoControlledThread.py | 90 +- src/AutoSplit.py | 18 +- src/AutoSplitImage.py | 344 ++++---- src/capture_method/BitBltCaptureMethod.py | 172 ++-- src/capture_method/CaptureMethodBase.py | 84 +- .../DesktopDuplicationCaptureMethod.py | 2 - .../ForceFullContentRenderingCaptureMethod.py | 28 +- .../VideoCaptureDeviceCaptureMethod.py | 290 ++++--- .../WindowsGraphicsCaptureMethod.py | 310 ++++--- src/capture_method/__init__.py | 4 +- src/compare.py | 236 +++--- src/error_messages.py | 392 +++++---- src/hotkeys.py | 624 +++++++------- src/menu_bar.py | 802 +++++++++--------- src/region_selection.py | 796 +++++++++-------- src/split_parser.py | 500 ++++++----- src/user_profile.py | 20 +- src/utils.py | 380 +++++---- .../test_cases/check_pipe_connections.py 
| 52 +- 20 files changed, 2557 insertions(+), 2597 deletions(-) diff --git a/PyInstaller/hooks/hook-requests.py b/PyInstaller/hooks/hook-requests.py index e1a554d0..2f2f5bc2 100644 --- a/PyInstaller/hooks/hook-requests.py +++ b/PyInstaller/hooks/hook-requests.py @@ -1,6 +1,4 @@ -from __future__ import annotations - -from PyInstaller.utils.hooks import collect_data_files - -# Get the cacert.pem -datas = collect_data_files("certifi") +from PyInstaller.utils.hooks import collect_data_files + +# Get the cacert.pem +datas = collect_data_files("certifi") diff --git a/src/AutoControlledThread.py b/src/AutoControlledThread.py index f5e518a8..153049cd 100644 --- a/src/AutoControlledThread.py +++ b/src/AutoControlledThread.py @@ -1,46 +1,44 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - -from PySide6 import QtCore - -import error_messages -import user_profile - -if TYPE_CHECKING: - from AutoSplit import AutoSplit - - -class AutoControlledThread(QtCore.QThread): - def __init__(self, autosplit: AutoSplit): - self.autosplit = autosplit - super().__init__() - - @QtCore.Slot() - def run(self): - while True: - try: - line = input() - except RuntimeError: - self.autosplit.show_error_signal.emit(error_messages.stdin_lost) - break - except EOFError: - continue - # This is for use in a Development environment - if line == "kill": - self.autosplit.closeEvent() - break - if line == "start": - self.autosplit.start_auto_splitter() - elif line in {"split", "skip"}: - self.autosplit.skip_split_signal.emit() - elif line == "undo": - self.autosplit.undo_split_signal.emit() - elif line == "reset": - self.autosplit.reset_signal.emit() - elif line.startswith("settings"): - # Allow for any split character between "settings" and the path - user_profile.load_settings(self.autosplit, line[9:]) - # TODO: Not yet implemented in AutoSplit Integration - # elif line == 'pause': - # self.pause_signal.emit() +from typing import TYPE_CHECKING + +from PySide6 import QtCore + 
+import error_messages +import user_profile + +if TYPE_CHECKING: + from AutoSplit import AutoSplit + + +class AutoControlledThread(QtCore.QThread): + def __init__(self, autosplit: AutoSplit): + self.autosplit = autosplit + super().__init__() + + @QtCore.Slot() + def run(self): + while True: + try: + line = input() + except RuntimeError: + self.autosplit.show_error_signal.emit(error_messages.stdin_lost) + break + except EOFError: + continue + # This is for use in a Development environment + if line == "kill": + self.autosplit.closeEvent() + break + if line == "start": + self.autosplit.start_auto_splitter() + elif line in {"split", "skip"}: + self.autosplit.skip_split_signal.emit() + elif line == "undo": + self.autosplit.undo_split_signal.emit() + elif line == "reset": + self.autosplit.reset_signal.emit() + elif line.startswith("settings"): + # Allow for any split character between "settings" and the path + user_profile.load_settings(self.autosplit, line[9:]) + # TODO: Not yet implemented in AutoSplit Integration + # elif line == 'pause': + # self.pause_signal.emit() diff --git a/src/AutoSplit.py b/src/AutoSplit.py index fb83abe2..ab588411 100644 --- a/src/AutoSplit.py +++ b/src/AutoSplit.py @@ -1,6 +1,4 @@ #!/usr/bin/python3 -from __future__ import annotations - import ctypes import os import signal @@ -887,11 +885,11 @@ def exit_program() -> NoReturn: if user_profile.have_settings_changed(self): # Give a different warning if there was never a settings file that was loaded successfully, # and "save as" instead of "save". 
- settings_file_name = ( - "Untitled" - if not self.last_successfully_loaded_settings_file_path + settings_file_name = ( + "Untitled" + if not self.last_successfully_loaded_settings_file_path else os.path.basename(self.last_successfully_loaded_settings_file_path) - ) + ) warning = QMessageBox.warning( self, @@ -930,10 +928,10 @@ def set_preview_image(qlabel: QLabel, image: MatLike | None): qimage = QtGui.QImage( capture.data, # pyright: ignore[reportGeneralTypeIssues] # https://bugreports.qt.io/browse/PYSIDE-2476 - width, - height, - width * channels, - image_format, + width, + height, + width * channels, + image_format, ) qlabel.setPixmap( QtGui.QPixmap(qimage).scaled( diff --git a/src/AutoSplitImage.py b/src/AutoSplitImage.py index bb994581..ff7ef5a6 100644 --- a/src/AutoSplitImage.py +++ b/src/AutoSplitImage.py @@ -1,173 +1,171 @@ -from __future__ import annotations - -import os -from enum import IntEnum -from math import sqrt -from typing import TYPE_CHECKING - -import cv2 -import numpy as np -from cv2.typing import MatLike - -import error_messages -from compare import COMPARE_METHODS_BY_INDEX, check_if_image_has_transparency -from utils import BGR_CHANNEL_COUNT, MAXBYTE, ColorChannel, ImageShape, is_valid_image - -if TYPE_CHECKING: - from AutoSplit import AutoSplit - - -# Resize to these width and height so that FPS performance increases -COMPARISON_RESIZE_WIDTH = 320 -COMPARISON_RESIZE_HEIGHT = 240 -COMPARISON_RESIZE = (COMPARISON_RESIZE_WIDTH, COMPARISON_RESIZE_HEIGHT) -COMPARISON_RESIZE_AREA = COMPARISON_RESIZE_WIDTH * COMPARISON_RESIZE_HEIGHT -MASK_LOWER_BOUND = np.array([0, 0, 0, 1], dtype="uint8") -MASK_UPPER_BOUND = np.array([MAXBYTE, MAXBYTE, MAXBYTE, MAXBYTE], dtype="uint8") -START_KEYWORD = "start_auto_splitter" -RESET_KEYWORD = "reset" - - -class ImageType(IntEnum): - SPLIT = 0 - RESET = 1 - START = 2 - - -class AutoSplitImage: - path: str - filename: str - flags: int - loops: int - image_type: ImageType - byte_array: MatLike | None = None - mask: 
MatLike | None = None - # This value is internal, check for mask instead - _has_transparency = False - # These values should be overriden by some Defaults if None. Use getters instead - __delay_time: float | None = None - __comparison_method: int | None = None - __pause_time: float | None = None - __similarity_threshold: float | None = None - - def get_delay_time(self, default: AutoSplit | int): - """Get image's delay time or fallback to the default value from spinbox.""" - if self.__delay_time is not None: - return self.__delay_time - if isinstance(default, int): - return default - return default.settings_dict["default_delay_time"] - - def __get_comparison_method(self, default: AutoSplit | int): - """Get image's comparison or fallback to the default value from combobox.""" - if self.__comparison_method is not None: - return self.__comparison_method - if isinstance(default, int): - return default - return default.settings_dict["default_comparison_method"] - - def get_pause_time(self, default: AutoSplit | float): - """Get image's pause time or fallback to the default value from spinbox.""" - if self.__pause_time is not None: - return self.__pause_time - if isinstance(default, (float, int)): - return default - return default.settings_dict["default_pause_time"] - - def get_similarity_threshold(self, default: AutoSplit | float): - """Get image's similarity threshold or fallback to the default value from spinbox.""" - if self.__similarity_threshold is not None: - return self.__similarity_threshold - if isinstance(default, (float, int)): - return default - return default.settings_dict["default_similarity_threshold"] - - def __init__(self, path: str): - self.path = path - self.filename = os.path.split(path)[-1].lower() - self.flags = flags_from_filename(self.filename) - self.loops = loop_from_filename(self.filename) - self.__delay_time = delay_time_from_filename(self.filename) - self.__comparison_method = comparison_method_from_filename(self.filename) - self.__pause_time 
= pause_from_filename(self.filename) - self.__similarity_threshold = threshold_from_filename(self.filename) - self.__read_image_bytes(path) - - if START_KEYWORD in self.filename: - self.image_type = ImageType.START - elif RESET_KEYWORD in self.filename: - self.image_type = ImageType.RESET - else: - self.image_type = ImageType.SPLIT - - def __read_image_bytes(self, path: str): - image = cv2.imread(path, cv2.IMREAD_UNCHANGED) - if not is_valid_image(image): - self.byte_array = None - error_messages.image_type(path) - return - - self._has_transparency = check_if_image_has_transparency(image) - # If image has transparency, create a mask - if self._has_transparency: - # Adaptively determine the target size according to - # the number of nonzero elements in the alpha channel of the split image. - # This may result in images bigger than COMPARISON_RESIZE if there's plenty of transparency. - # Which wouldn't incur any performance loss in methods where masked regions are ignored. - scale = min(1, sqrt(COMPARISON_RESIZE_AREA / cv2.countNonZero(image[:, :, ColorChannel.Alpha]))) - - image = cv2.resize( - image, - dsize=None, - fx=scale, - fy=scale, - interpolation=cv2.INTER_NEAREST, - ) - - # Mask based on adaptively resized, nearest neighbor interpolated split image - self.mask = cv2.inRange(image, MASK_LOWER_BOUND, MASK_UPPER_BOUND) - else: - image = cv2.resize(image, COMPARISON_RESIZE, interpolation=cv2.INTER_NEAREST) - # Add Alpha channel if missing - if image.shape[ImageShape.Channels] == BGR_CHANNEL_COUNT: - image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA) - - self.byte_array = image - - def check_flag(self, flag: int): - return self.flags & flag == flag - - def compare_with_capture( - self, - default: AutoSplit | int, - capture: MatLike | None, - ): - """Compare image with capture using image's comparison method. 
Falls back to combobox.""" - if not is_valid_image(self.byte_array) or not is_valid_image(capture): - return 0.0 - resized_capture = cv2.resize(capture, self.byte_array.shape[1::-1]) - comparison_method = self.__get_comparison_method(default) - - return COMPARE_METHODS_BY_INDEX.get( - comparison_method, - compare_dummy, - )( - self.byte_array, - resized_capture, - self.mask, - ) - - -def compare_dummy(*_: object): - return 0.0 - - -if True: - from split_parser import ( - comparison_method_from_filename, - delay_time_from_filename, - flags_from_filename, - loop_from_filename, - pause_from_filename, - threshold_from_filename, - ) +import os +from enum import IntEnum +from math import sqrt +from typing import TYPE_CHECKING + +import cv2 +import numpy as np +from cv2.typing import MatLike + +import error_messages +from compare import COMPARE_METHODS_BY_INDEX, check_if_image_has_transparency +from utils import BGR_CHANNEL_COUNT, MAXBYTE, ColorChannel, ImageShape, is_valid_image + +if TYPE_CHECKING: + from AutoSplit import AutoSplit + + +# Resize to these width and height so that FPS performance increases +COMPARISON_RESIZE_WIDTH = 320 +COMPARISON_RESIZE_HEIGHT = 240 +COMPARISON_RESIZE = (COMPARISON_RESIZE_WIDTH, COMPARISON_RESIZE_HEIGHT) +COMPARISON_RESIZE_AREA = COMPARISON_RESIZE_WIDTH * COMPARISON_RESIZE_HEIGHT +MASK_LOWER_BOUND = np.array([0, 0, 0, 1], dtype="uint8") +MASK_UPPER_BOUND = np.array([MAXBYTE, MAXBYTE, MAXBYTE, MAXBYTE], dtype="uint8") +START_KEYWORD = "start_auto_splitter" +RESET_KEYWORD = "reset" + + +class ImageType(IntEnum): + SPLIT = 0 + RESET = 1 + START = 2 + + +class AutoSplitImage: + path: str + filename: str + flags: int + loops: int + image_type: ImageType + byte_array: MatLike | None = None + mask: MatLike | None = None + # This value is internal, check for mask instead + _has_transparency = False + # These values should be overriden by some Defaults if None. 
Use getters instead + __delay_time: float | None = None + __comparison_method: int | None = None + __pause_time: float | None = None + __similarity_threshold: float | None = None + + def get_delay_time(self, default: AutoSplit | int): + """Get image's delay time or fallback to the default value from spinbox.""" + if self.__delay_time is not None: + return self.__delay_time + if isinstance(default, int): + return default + return default.settings_dict["default_delay_time"] + + def __get_comparison_method(self, default: AutoSplit | int): + """Get image's comparison or fallback to the default value from combobox.""" + if self.__comparison_method is not None: + return self.__comparison_method + if isinstance(default, int): + return default + return default.settings_dict["default_comparison_method"] + + def get_pause_time(self, default: AutoSplit | float): + """Get image's pause time or fallback to the default value from spinbox.""" + if self.__pause_time is not None: + return self.__pause_time + if isinstance(default, (float, int)): + return default + return default.settings_dict["default_pause_time"] + + def get_similarity_threshold(self, default: AutoSplit | float): + """Get image's similarity threshold or fallback to the default value from spinbox.""" + if self.__similarity_threshold is not None: + return self.__similarity_threshold + if isinstance(default, (float, int)): + return default + return default.settings_dict["default_similarity_threshold"] + + def __init__(self, path: str): + self.path = path + self.filename = os.path.split(path)[-1].lower() + self.flags = flags_from_filename(self.filename) + self.loops = loop_from_filename(self.filename) + self.__delay_time = delay_time_from_filename(self.filename) + self.__comparison_method = comparison_method_from_filename(self.filename) + self.__pause_time = pause_from_filename(self.filename) + self.__similarity_threshold = threshold_from_filename(self.filename) + self.__read_image_bytes(path) + + if START_KEYWORD in 
self.filename: + self.image_type = ImageType.START + elif RESET_KEYWORD in self.filename: + self.image_type = ImageType.RESET + else: + self.image_type = ImageType.SPLIT + + def __read_image_bytes(self, path: str): + image = cv2.imread(path, cv2.IMREAD_UNCHANGED) + if not is_valid_image(image): + self.byte_array = None + error_messages.image_type(path) + return + + self._has_transparency = check_if_image_has_transparency(image) + # If image has transparency, create a mask + if self._has_transparency: + # Adaptively determine the target size according to + # the number of nonzero elements in the alpha channel of the split image. + # This may result in images bigger than COMPARISON_RESIZE if there's plenty of transparency. + # Which wouldn't incur any performance loss in methods where masked regions are ignored. + scale = min(1, sqrt(COMPARISON_RESIZE_AREA / cv2.countNonZero(image[:, :, ColorChannel.Alpha]))) + + image = cv2.resize( + image, + dsize=None, + fx=scale, + fy=scale, + interpolation=cv2.INTER_NEAREST, + ) + + # Mask based on adaptively resized, nearest neighbor interpolated split image + self.mask = cv2.inRange(image, MASK_LOWER_BOUND, MASK_UPPER_BOUND) + else: + image = cv2.resize(image, COMPARISON_RESIZE, interpolation=cv2.INTER_NEAREST) + # Add Alpha channel if missing + if image.shape[ImageShape.Channels] == BGR_CHANNEL_COUNT: + image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA) + + self.byte_array = image + + def check_flag(self, flag: int): + return self.flags & flag == flag + + def compare_with_capture( + self, + default: AutoSplit | int, + capture: MatLike | None, + ): + """Compare image with capture using image's comparison method. 
Falls back to combobox.""" + if not is_valid_image(self.byte_array) or not is_valid_image(capture): + return 0.0 + resized_capture = cv2.resize(capture, self.byte_array.shape[1::-1]) + comparison_method = self.__get_comparison_method(default) + + return COMPARE_METHODS_BY_INDEX.get( + comparison_method, + compare_dummy, + )( + self.byte_array, + resized_capture, + self.mask, + ) + + +def compare_dummy(*_: object): + return 0.0 + + +if True: + from split_parser import ( + comparison_method_from_filename, + delay_time_from_filename, + flags_from_filename, + loop_from_filename, + pause_from_filename, + threshold_from_filename, + ) diff --git a/src/capture_method/BitBltCaptureMethod.py b/src/capture_method/BitBltCaptureMethod.py index 8455f4ad..7bf10677 100644 --- a/src/capture_method/BitBltCaptureMethod.py +++ b/src/capture_method/BitBltCaptureMethod.py @@ -1,87 +1,85 @@ -from __future__ import annotations - -import ctypes -import ctypes.wintypes -from typing import TYPE_CHECKING, cast - -import numpy as np -import pywintypes -import win32con -import win32ui -from cv2.typing import MatLike -from typing_extensions import override -from win32 import win32gui - -from capture_method.CaptureMethodBase import CaptureMethodBase -from utils import BGRA_CHANNEL_COUNT, get_window_bounds, is_valid_hwnd, try_delete_dc - -if TYPE_CHECKING: - from AutoSplit import AutoSplit - -# This is an undocumented nFlag value for PrintWindow -PW_RENDERFULLCONTENT = 0x00000002 - - -class BitBltCaptureMethod(CaptureMethodBase): - name = "BitBlt" - short_description = "fastest, least compatible" - description = ( - "\nThe best option when compatible. But it cannot properly record " - + "\nOpenGL, Hardware Accelerated or Exclusive Fullscreen windows. " - + "\nThe smaller the selected region, the more efficient it is. 
" - ) - - _render_full_content = False - - @override - def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: - selection = autosplit.settings_dict["capture_region"] - hwnd = autosplit.hwnd - image: MatLike | None = None - - if not self.check_selected_region_exists(autosplit): - return None, False - - # If the window closes while it's being manipulated, it could cause a crash - try: - window_dc = win32gui.GetWindowDC(hwnd) - dc_object = win32ui.CreateDCFromHandle(window_dc) - - # Causes a 10-15x performance drop. But allows recording hardware accelerated windows - if self._render_full_content: - ctypes.windll.user32.PrintWindow(hwnd, dc_object.GetSafeHdc(), PW_RENDERFULLCONTENT) - - # On Windows there is a shadow around the windows that we need to account for. - left_bounds, top_bounds, *_ = get_window_bounds(hwnd) - - compatible_dc = dc_object.CreateCompatibleDC() - bitmap = win32ui.CreateBitmap() - bitmap.CreateCompatibleBitmap(dc_object, selection["width"], selection["height"]) - compatible_dc.SelectObject(bitmap) - compatible_dc.BitBlt( - (0, 0), - (selection["width"], selection["height"]), - dc_object, - (selection["x"] + left_bounds, selection["y"] + top_bounds), - win32con.SRCCOPY, - ) - image = np.frombuffer(cast(bytes, bitmap.GetBitmapBits(True)), dtype=np.uint8) - image.shape = (selection["height"], selection["width"], BGRA_CHANNEL_COUNT) - except (win32ui.error, pywintypes.error): - # Invalid handle or the window was closed while it was being manipulated - return None, False - - # Cleanup DC and handle - try_delete_dc(dc_object) - try_delete_dc(compatible_dc) - win32gui.ReleaseDC(hwnd, window_dc) - win32gui.DeleteObject(bitmap.GetHandle()) - return image, False - - @override - def recover_window(self, captured_window_title: str, autosplit: AutoSplit): - hwnd = win32gui.FindWindow(None, captured_window_title) - if not is_valid_hwnd(hwnd): - return False - autosplit.hwnd = hwnd - return self.check_selected_region_exists(autosplit) 
+import ctypes +import ctypes.wintypes +from typing import TYPE_CHECKING, cast + +import numpy as np +import pywintypes +import win32con +import win32ui +from cv2.typing import MatLike +from typing_extensions import override +from win32 import win32gui + +from capture_method.CaptureMethodBase import CaptureMethodBase +from utils import BGRA_CHANNEL_COUNT, get_window_bounds, is_valid_hwnd, try_delete_dc + +if TYPE_CHECKING: + from AutoSplit import AutoSplit + +# This is an undocumented nFlag value for PrintWindow +PW_RENDERFULLCONTENT = 0x00000002 + + +class BitBltCaptureMethod(CaptureMethodBase): + name = "BitBlt" + short_description = "fastest, least compatible" + description = ( + "\nThe best option when compatible. But it cannot properly record " + + "\nOpenGL, Hardware Accelerated or Exclusive Fullscreen windows. " + + "\nThe smaller the selected region, the more efficient it is. " + ) + + _render_full_content = False + + @override + def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: + selection = autosplit.settings_dict["capture_region"] + hwnd = autosplit.hwnd + image: MatLike | None = None + + if not self.check_selected_region_exists(autosplit): + return None, False + + # If the window closes while it's being manipulated, it could cause a crash + try: + window_dc = win32gui.GetWindowDC(hwnd) + dc_object = win32ui.CreateDCFromHandle(window_dc) + + # Causes a 10-15x performance drop. But allows recording hardware accelerated windows + if self._render_full_content: + ctypes.windll.user32.PrintWindow(hwnd, dc_object.GetSafeHdc(), PW_RENDERFULLCONTENT) + + # On Windows there is a shadow around the windows that we need to account for. 
+ left_bounds, top_bounds, *_ = get_window_bounds(hwnd) + + compatible_dc = dc_object.CreateCompatibleDC() + bitmap = win32ui.CreateBitmap() + bitmap.CreateCompatibleBitmap(dc_object, selection["width"], selection["height"]) + compatible_dc.SelectObject(bitmap) + compatible_dc.BitBlt( + (0, 0), + (selection["width"], selection["height"]), + dc_object, + (selection["x"] + left_bounds, selection["y"] + top_bounds), + win32con.SRCCOPY, + ) + image = np.frombuffer(cast(bytes, bitmap.GetBitmapBits(True)), dtype=np.uint8) + image.shape = (selection["height"], selection["width"], BGRA_CHANNEL_COUNT) + except (win32ui.error, pywintypes.error): + # Invalid handle or the window was closed while it was being manipulated + return None, False + + # Cleanup DC and handle + try_delete_dc(dc_object) + try_delete_dc(compatible_dc) + win32gui.ReleaseDC(hwnd, window_dc) + win32gui.DeleteObject(bitmap.GetHandle()) + return image, False + + @override + def recover_window(self, captured_window_title: str, autosplit: AutoSplit): + hwnd = win32gui.FindWindow(None, captured_window_title) + if not is_valid_hwnd(hwnd): + return False + autosplit.hwnd = hwnd + return self.check_selected_region_exists(autosplit) diff --git a/src/capture_method/CaptureMethodBase.py b/src/capture_method/CaptureMethodBase.py index f8f3db7b..d470776c 100644 --- a/src/capture_method/CaptureMethodBase.py +++ b/src/capture_method/CaptureMethodBase.py @@ -1,43 +1,41 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - -from cv2.typing import MatLike - -from utils import is_valid_hwnd - -if TYPE_CHECKING: - from AutoSplit import AutoSplit - - -class CaptureMethodBase: - name = "None" - short_description = "" - description = "" - - def __init__(self, autosplit: AutoSplit | None): - # Some capture methods don't need an initialization process - pass - - def reinitialize(self, autosplit: AutoSplit): - self.close(autosplit) - self.__init__(autosplit) # type: ignore[misc] - - def close(self, 
autosplit: AutoSplit): - # Some capture methods don't need an initialization process - pass - - def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: # noqa: PLR6301 - """ - Captures an image of the region for a window matching the given - parameters of the bounding box. - - @return: The image of the region in the window in BGRA format - """ - return None, False - - def recover_window(self, captured_window_title: str, autosplit: AutoSplit) -> bool: # noqa: PLR6301 - return False - - def check_selected_region_exists(self, autosplit: AutoSplit) -> bool: # noqa: PLR6301 - return is_valid_hwnd(autosplit.hwnd) +from typing import TYPE_CHECKING + +from cv2.typing import MatLike + +from utils import is_valid_hwnd + +if TYPE_CHECKING: + from AutoSplit import AutoSplit + + +class CaptureMethodBase: + name = "None" + short_description = "" + description = "" + + def __init__(self, autosplit: AutoSplit | None): + # Some capture methods don't need an initialization process + pass + + def reinitialize(self, autosplit: AutoSplit): + self.close(autosplit) + self.__init__(autosplit) # type: ignore[misc] + + def close(self, autosplit: AutoSplit): + # Some capture methods don't need an initialization process + pass + + def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: # noqa: PLR6301 + """ + Captures an image of the region for a window matching the given + parameters of the bounding box. 
+ + @return: The image of the region in the window in BGRA format + """ + return None, False + + def recover_window(self, captured_window_title: str, autosplit: AutoSplit) -> bool: # noqa: PLR6301 + return False + + def check_selected_region_exists(self, autosplit: AutoSplit) -> bool: # noqa: PLR6301 + return is_valid_hwnd(autosplit.hwnd) diff --git a/src/capture_method/DesktopDuplicationCaptureMethod.py b/src/capture_method/DesktopDuplicationCaptureMethod.py index 6dd0056b..1ee63eb4 100644 --- a/src/capture_method/DesktopDuplicationCaptureMethod.py +++ b/src/capture_method/DesktopDuplicationCaptureMethod.py @@ -1,5 +1,3 @@ -from __future__ import annotations - import ctypes from typing import TYPE_CHECKING, cast diff --git a/src/capture_method/ForceFullContentRenderingCaptureMethod.py b/src/capture_method/ForceFullContentRenderingCaptureMethod.py index 6bbcd70e..f7e0bea8 100644 --- a/src/capture_method/ForceFullContentRenderingCaptureMethod.py +++ b/src/capture_method/ForceFullContentRenderingCaptureMethod.py @@ -1,15 +1,13 @@ -from __future__ import annotations - -from capture_method.BitBltCaptureMethod import BitBltCaptureMethod - - -class ForceFullContentRenderingCaptureMethod(BitBltCaptureMethod): - name = "Force Full Content Rendering" - short_description = "very slow, can affect rendering" - description = ( - "\nUses BitBlt behind the scene, but passes a special flag " - + "\nto PrintWindow to force rendering the entire desktop. " - + "\nAbout 10-15x slower than BitBlt based on original window size " - + "\nand can mess up some applications' rendering pipelines. 
" - ) - _render_full_content = True +from capture_method.BitBltCaptureMethod import BitBltCaptureMethod + + +class ForceFullContentRenderingCaptureMethod(BitBltCaptureMethod): + name = "Force Full Content Rendering" + short_description = "very slow, can affect rendering" + description = ( + "\nUses BitBlt behind the scene, but passes a special flag " + + "\nto PrintWindow to force rendering the entire desktop. " + + "\nAbout 10-15x slower than BitBlt based on original window size " + + "\nand can mess up some applications' rendering pipelines. " + ) + _render_full_content = True diff --git a/src/capture_method/VideoCaptureDeviceCaptureMethod.py b/src/capture_method/VideoCaptureDeviceCaptureMethod.py index df9cbe75..44988992 100644 --- a/src/capture_method/VideoCaptureDeviceCaptureMethod.py +++ b/src/capture_method/VideoCaptureDeviceCaptureMethod.py @@ -1,146 +1,144 @@ -from __future__ import annotations - -from threading import Event, Thread -from typing import TYPE_CHECKING - -import cv2 -import cv2.Error -import numpy as np -from cv2.typing import MatLike -from pygrabber.dshow_graph import FilterGraph -from typing_extensions import override - -from capture_method.CaptureMethodBase import CaptureMethodBase -from error_messages import CREATE_NEW_ISSUE_MESSAGE, exception_traceback -from utils import ImageShape, is_valid_image - -if TYPE_CHECKING: - from AutoSplit import AutoSplit - -OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL = [127, 129, 128] - - -def is_blank(image: MatLike): - # Running np.all on the entire array or looping manually through the - # entire array is extremely slow when we can't stop early. 
- # Instead we check for a few key pixels, in this case, corners - return np.all( - image[ - :: image.shape[ImageShape.Y] - 1, - :: image.shape[ImageShape.X] - 1, - ] - == OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL, - ) - - -class VideoCaptureDeviceCaptureMethod(CaptureMethodBase): - name = "Video Capture Device" - short_description = "see below" - description = ( - "\nUses a Video Capture Device, like a webcam, virtual cam, or capture card. " - + "\nYou can select one below. " - ) - - capture_device: cv2.VideoCapture - capture_thread: Thread | None = None - stop_thread: Event - last_captured_frame: MatLike | None = None - is_old_image = False - - def __read_loop(self, autosplit: AutoSplit): - try: - while not self.stop_thread.is_set(): - try: - result, image = self.capture_device.read() - except cv2.error as cv2_error: - if not ( - cv2_error.code == cv2.Error.STS_ERROR - and ( - # Likely means the camera is occupied OR the camera index is out of range (like -1) - cv2_error.msg.endswith("in function 'cv::VideoCapture::grab'\n") - # Some capture cards we cannot use directly - # https://github.com/opencv/opencv/issues/23539 - or cv2_error.msg.endswith("in function 'cv::VideoCapture::retrieve'\n") - ) - ): - raise - result = False - image = None - if not result: - image = None - - # Blank frame. Reuse the previous one. - if image is not None and is_blank(image): - continue - - self.last_captured_frame = image - self.is_old_image = False - except Exception as exception: # noqa: BLE001 # We really want to catch everything here - error = exception - self.capture_device.release() - autosplit.show_error_signal.emit( - lambda: exception_traceback( - error, - "AutoSplit encountered an unhandled exception while " - + "trying to grab a frame and has stopped capture. 
" - + CREATE_NEW_ISSUE_MESSAGE, - ), - ) - - def __init__(self, autosplit: AutoSplit): - super().__init__(autosplit) - self.capture_device = cv2.VideoCapture(autosplit.settings_dict["capture_device_id"]) - self.capture_device.setExceptionMode(True) - self.stop_thread = Event() - - # The video capture device isn't accessible, don't bother with it. - if not self.capture_device.isOpened(): - return - - filter_graph = FilterGraph() - filter_graph.add_video_input_device(autosplit.settings_dict["capture_device_id"]) - width, height = filter_graph.get_input_device().get_current_format() - filter_graph.remove_filters() - - # Ensure we're using the right camera size. And not OpenCV's default 640x480 - try: - self.capture_device.set(cv2.CAP_PROP_FRAME_WIDTH, width) - self.capture_device.set(cv2.CAP_PROP_FRAME_HEIGHT, height) - except cv2.error: - # Some cameras don't allow changing the resolution - pass - self.capture_thread = Thread(target=lambda: self.__read_loop(autosplit)) - self.capture_thread.start() - - @override - def close(self, autosplit: AutoSplit): - self.stop_thread.set() - if self.capture_thread: - self.capture_thread.join() - self.capture_thread = None - self.capture_device.release() - - @override - def get_frame(self, autosplit: AutoSplit): - if not self.check_selected_region_exists(autosplit): - return None, False - - image = self.last_captured_frame - is_old_image = self.is_old_image - self.is_old_image = True - if not is_valid_image(image): - return None, is_old_image - - selection = autosplit.settings_dict["capture_region"] - # Ensure we can't go OOB of the image - y = min(selection["y"], image.shape[ImageShape.Y] - 1) - x = min(selection["x"], image.shape[ImageShape.X] - 1) - image = image[ - y: y + selection["height"], - x: x + selection["width"], - ] - return cv2.cvtColor(image, cv2.COLOR_BGR2BGRA), is_old_image - - @override - def check_selected_region_exists(self, autosplit: AutoSplit): - return bool(self.capture_device.isOpened()) +from threading 
import Event, Thread +from typing import TYPE_CHECKING + +import cv2 +import cv2.Error +import numpy as np +from cv2.typing import MatLike +from pygrabber.dshow_graph import FilterGraph +from typing_extensions import override + +from capture_method.CaptureMethodBase import CaptureMethodBase +from error_messages import CREATE_NEW_ISSUE_MESSAGE, exception_traceback +from utils import ImageShape, is_valid_image + +if TYPE_CHECKING: + from AutoSplit import AutoSplit + +OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL = [127, 129, 128] + + +def is_blank(image: MatLike): + # Running np.all on the entire array or looping manually through the + # entire array is extremely slow when we can't stop early. + # Instead we check for a few key pixels, in this case, corners + return np.all( + image[ + :: image.shape[ImageShape.Y] - 1, + :: image.shape[ImageShape.X] - 1, + ] + == OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL, + ) + + +class VideoCaptureDeviceCaptureMethod(CaptureMethodBase): + name = "Video Capture Device" + short_description = "see below" + description = ( + "\nUses a Video Capture Device, like a webcam, virtual cam, or capture card. " + + "\nYou can select one below. " + ) + + capture_device: cv2.VideoCapture + capture_thread: Thread | None = None + stop_thread: Event + last_captured_frame: MatLike | None = None + is_old_image = False + + def __read_loop(self, autosplit: AutoSplit): + try: + while not self.stop_thread.is_set(): + try: + result, image = self.capture_device.read() + except cv2.error as cv2_error: + if not ( + cv2_error.code == cv2.Error.STS_ERROR + and ( + # Likely means the camera is occupied OR the camera index is out of range (like -1) + cv2_error.msg.endswith("in function 'cv::VideoCapture::grab'\n") + # Some capture cards we cannot use directly + # https://github.com/opencv/opencv/issues/23539 + or cv2_error.msg.endswith("in function 'cv::VideoCapture::retrieve'\n") + ) + ): + raise + result = False + image = None + if not result: + image = None + + # Blank frame. 
Reuse the previous one. + if image is not None and is_blank(image): + continue + + self.last_captured_frame = image + self.is_old_image = False + except Exception as exception: # noqa: BLE001 # We really want to catch everything here + error = exception + self.capture_device.release() + autosplit.show_error_signal.emit( + lambda: exception_traceback( + error, + "AutoSplit encountered an unhandled exception while " + + "trying to grab a frame and has stopped capture. " + + CREATE_NEW_ISSUE_MESSAGE, + ), + ) + + def __init__(self, autosplit: AutoSplit): + super().__init__(autosplit) + self.capture_device = cv2.VideoCapture(autosplit.settings_dict["capture_device_id"]) + self.capture_device.setExceptionMode(True) + self.stop_thread = Event() + + # The video capture device isn't accessible, don't bother with it. + if not self.capture_device.isOpened(): + return + + filter_graph = FilterGraph() + filter_graph.add_video_input_device(autosplit.settings_dict["capture_device_id"]) + width, height = filter_graph.get_input_device().get_current_format() + filter_graph.remove_filters() + + # Ensure we're using the right camera size. 
And not OpenCV's default 640x480 + try: + self.capture_device.set(cv2.CAP_PROP_FRAME_WIDTH, width) + self.capture_device.set(cv2.CAP_PROP_FRAME_HEIGHT, height) + except cv2.error: + # Some cameras don't allow changing the resolution + pass + self.capture_thread = Thread(target=lambda: self.__read_loop(autosplit)) + self.capture_thread.start() + + @override + def close(self, autosplit: AutoSplit): + self.stop_thread.set() + if self.capture_thread: + self.capture_thread.join() + self.capture_thread = None + self.capture_device.release() + + @override + def get_frame(self, autosplit: AutoSplit): + if not self.check_selected_region_exists(autosplit): + return None, False + + image = self.last_captured_frame + is_old_image = self.is_old_image + self.is_old_image = True + if not is_valid_image(image): + return None, is_old_image + + selection = autosplit.settings_dict["capture_region"] + # Ensure we can't go OOB of the image + y = min(selection["y"], image.shape[ImageShape.Y] - 1) + x = min(selection["x"], image.shape[ImageShape.X] - 1) + image = image[ + y: y + selection["height"], + x: x + selection["width"], + ] + return cv2.cvtColor(image, cv2.COLOR_BGR2BGRA), is_old_image + + @override + def check_selected_region_exists(self, autosplit: AutoSplit): + return bool(self.capture_device.isOpened()) diff --git a/src/capture_method/WindowsGraphicsCaptureMethod.py b/src/capture_method/WindowsGraphicsCaptureMethod.py index 7691641c..04fed3de 100644 --- a/src/capture_method/WindowsGraphicsCaptureMethod.py +++ b/src/capture_method/WindowsGraphicsCaptureMethod.py @@ -1,156 +1,154 @@ -from __future__ import annotations - -import asyncio -from typing import TYPE_CHECKING, cast - -import numpy as np -from cv2.typing import MatLike -from typing_extensions import override -from win32 import win32gui -from winsdk.windows.graphics import SizeInt32 -from winsdk.windows.graphics.capture import Direct3D11CaptureFramePool, GraphicsCaptureSession -from 
winsdk.windows.graphics.capture.interop import create_for_window -from winsdk.windows.graphics.directx import DirectXPixelFormat -from winsdk.windows.graphics.imaging import BitmapBufferAccessMode, SoftwareBitmap - -from capture_method.CaptureMethodBase import CaptureMethodBase -from utils import BGRA_CHANNEL_COUNT, WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, get_direct3d_device, is_valid_hwnd - -if TYPE_CHECKING: - - from AutoSplit import AutoSplit - -WGC_NO_BORDER_MIN_BUILD = 20348 -LEARNING_MODE_DEVICE_BUILD = 17763 -"""https://learn.microsoft.com/en-us/uwp/api/windows.ai.machinelearning.learningmodeldevice""" - - -class WindowsGraphicsCaptureMethod(CaptureMethodBase): - name = "Windows Graphics Capture" - short_description = "fast, most compatible, capped at 60fps" - description = ( - f"\nOnly available in Windows 10.0.{WGC_MIN_BUILD} and up. " - + f"\nDue to current technical limitations, Windows versions below 10.0.0.{LEARNING_MODE_DEVICE_BUILD}" - + "\nrequire having at least one audio or video Capture Device connected and enabled." - + "\nAllows recording UWP apps, Hardware Accelerated and Exclusive Fullscreen windows. " - + "\nAdds a yellow border on Windows 10 (not on Windows 11)." - + "\nCaps at around 60 FPS. 
" - ) - - size: SizeInt32 - frame_pool: Direct3D11CaptureFramePool | None = None - session: GraphicsCaptureSession | None = None - """This is stored to prevent session from being garbage collected""" - last_captured_frame: MatLike | None = None - - def __init__(self, autosplit: AutoSplit): - super().__init__(autosplit) - if not is_valid_hwnd(autosplit.hwnd): - return - - item = create_for_window(autosplit.hwnd) - frame_pool = Direct3D11CaptureFramePool.create_free_threaded( - get_direct3d_device(), - DirectXPixelFormat.B8_G8_R8_A8_UINT_NORMALIZED, - 1, - item.size, - ) - if not frame_pool: - raise OSError("Unable to create a frame pool for a capture session.") - session = frame_pool.create_capture_session(item) - if not session: - raise OSError("Unable to create a capture session.") - session.is_cursor_capture_enabled = False - if WINDOWS_BUILD_NUMBER >= WGC_NO_BORDER_MIN_BUILD: - session.is_border_required = False - session.start_capture() - - self.session = session - self.size = item.size - self.frame_pool = frame_pool - - @override - def close(self, autosplit: AutoSplit): - if self.frame_pool: - self.frame_pool.close() - self.frame_pool = None - if self.session: - try: - self.session.close() - except OSError: - # OSError: The application called an interface that was marshalled for a different thread - # This still seems to close the session and prevent the following hard crash in LiveSplit - # "AutoSplit.exe " # noqa: E501 - pass - self.session = None - - @override - def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: - selection = autosplit.settings_dict["capture_region"] - # We still need to check the hwnd because WGC will return a blank black image - if not ( - self.check_selected_region_exists(autosplit) - # Only needed for the type-checker - and self.frame_pool - ): - return None, False - - try: - frame = self.frame_pool.try_get_next_frame() - # Frame pool is closed - except OSError: - return None, False - - async def coroutine(): - # 
We were too fast and the next frame wasn't ready yet - if not frame: - return None - return await (SoftwareBitmap.create_copy_from_surface_async(frame.surface) or asyncio.sleep(0, None)) - - try: - software_bitmap = asyncio.run(coroutine()) - except SystemError as exception: - # HACK: can happen when closing the GraphicsCapturePicker - if str(exception).endswith("returned a result with an error set"): - return self.last_captured_frame, True - raise - - if not software_bitmap: - # HACK: Can happen when starting the region selector - return self.last_captured_frame, True - # raise ValueError("Unable to convert Direct3D11CaptureFrame to SoftwareBitmap.") - bitmap_buffer = software_bitmap.lock_buffer(BitmapBufferAccessMode.READ_WRITE) - if not bitmap_buffer: - raise ValueError("Unable to obtain the BitmapBuffer from SoftwareBitmap.") - reference = bitmap_buffer.create_reference() - image = np.frombuffer(cast(bytes, reference), dtype=np.uint8) - image.shape = (self.size.height, self.size.width, BGRA_CHANNEL_COUNT) - image = image[ - selection["y"]: selection["y"] + selection["height"], - selection["x"]: selection["x"] + selection["width"], - ] - self.last_captured_frame = image - return image, False - - @override - def recover_window(self, captured_window_title: str, autosplit: AutoSplit): - hwnd = win32gui.FindWindow(None, captured_window_title) - if not is_valid_hwnd(hwnd): - return False - autosplit.hwnd = hwnd - try: - self.reinitialize(autosplit) - # Unrecordable hwnd found as the game is crashing - except OSError as exception: - if str(exception).endswith("The parameter is incorrect"): - return False - raise - return self.check_selected_region_exists(autosplit) - - @override - def check_selected_region_exists(self, autosplit: AutoSplit): - return bool( - is_valid_hwnd(autosplit.hwnd) - and self.frame_pool - and self.session, - ) +import asyncio +from typing import TYPE_CHECKING, cast + +import numpy as np +from cv2.typing import MatLike +from typing_extensions 
import override +from win32 import win32gui +from winsdk.windows.graphics import SizeInt32 +from winsdk.windows.graphics.capture import Direct3D11CaptureFramePool, GraphicsCaptureSession +from winsdk.windows.graphics.capture.interop import create_for_window +from winsdk.windows.graphics.directx import DirectXPixelFormat +from winsdk.windows.graphics.imaging import BitmapBufferAccessMode, SoftwareBitmap + +from capture_method.CaptureMethodBase import CaptureMethodBase +from utils import BGRA_CHANNEL_COUNT, WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, get_direct3d_device, is_valid_hwnd + +if TYPE_CHECKING: + + from AutoSplit import AutoSplit + +WGC_NO_BORDER_MIN_BUILD = 20348 +LEARNING_MODE_DEVICE_BUILD = 17763 +"""https://learn.microsoft.com/en-us/uwp/api/windows.ai.machinelearning.learningmodeldevice""" + + +class WindowsGraphicsCaptureMethod(CaptureMethodBase): + name = "Windows Graphics Capture" + short_description = "fast, most compatible, capped at 60fps" + description = ( + f"\nOnly available in Windows 10.0.{WGC_MIN_BUILD} and up. " + + f"\nDue to current technical limitations, Windows versions below 10.0.0.{LEARNING_MODE_DEVICE_BUILD}" + + "\nrequire having at least one audio or video Capture Device connected and enabled." + + "\nAllows recording UWP apps, Hardware Accelerated and Exclusive Fullscreen windows. " + + "\nAdds a yellow border on Windows 10 (not on Windows 11)." + + "\nCaps at around 60 FPS. 
" + ) + + size: SizeInt32 + frame_pool: Direct3D11CaptureFramePool | None = None + session: GraphicsCaptureSession | None = None + """This is stored to prevent session from being garbage collected""" + last_captured_frame: MatLike | None = None + + def __init__(self, autosplit: AutoSplit): + super().__init__(autosplit) + if not is_valid_hwnd(autosplit.hwnd): + return + + item = create_for_window(autosplit.hwnd) + frame_pool = Direct3D11CaptureFramePool.create_free_threaded( + get_direct3d_device(), + DirectXPixelFormat.B8_G8_R8_A8_UINT_NORMALIZED, + 1, + item.size, + ) + if not frame_pool: + raise OSError("Unable to create a frame pool for a capture session.") + session = frame_pool.create_capture_session(item) + if not session: + raise OSError("Unable to create a capture session.") + session.is_cursor_capture_enabled = False + if WINDOWS_BUILD_NUMBER >= WGC_NO_BORDER_MIN_BUILD: + session.is_border_required = False + session.start_capture() + + self.session = session + self.size = item.size + self.frame_pool = frame_pool + + @override + def close(self, autosplit: AutoSplit): + if self.frame_pool: + self.frame_pool.close() + self.frame_pool = None + if self.session: + try: + self.session.close() + except OSError: + # OSError: The application called an interface that was marshalled for a different thread + # This still seems to close the session and prevent the following hard crash in LiveSplit + # "AutoSplit.exe " # noqa: E501 + pass + self.session = None + + @override + def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: + selection = autosplit.settings_dict["capture_region"] + # We still need to check the hwnd because WGC will return a blank black image + if not ( + self.check_selected_region_exists(autosplit) + # Only needed for the type-checker + and self.frame_pool + ): + return None, False + + try: + frame = self.frame_pool.try_get_next_frame() + # Frame pool is closed + except OSError: + return None, False + + async def coroutine(): + # 
We were too fast and the next frame wasn't ready yet + if not frame: + return None + return await (SoftwareBitmap.create_copy_from_surface_async(frame.surface) or asyncio.sleep(0, None)) + + try: + software_bitmap = asyncio.run(coroutine()) + except SystemError as exception: + # HACK: can happen when closing the GraphicsCapturePicker + if str(exception).endswith("returned a result with an error set"): + return self.last_captured_frame, True + raise + + if not software_bitmap: + # HACK: Can happen when starting the region selector + return self.last_captured_frame, True + # raise ValueError("Unable to convert Direct3D11CaptureFrame to SoftwareBitmap.") + bitmap_buffer = software_bitmap.lock_buffer(BitmapBufferAccessMode.READ_WRITE) + if not bitmap_buffer: + raise ValueError("Unable to obtain the BitmapBuffer from SoftwareBitmap.") + reference = bitmap_buffer.create_reference() + image = np.frombuffer(cast(bytes, reference), dtype=np.uint8) + image.shape = (self.size.height, self.size.width, BGRA_CHANNEL_COUNT) + image = image[ + selection["y"]: selection["y"] + selection["height"], + selection["x"]: selection["x"] + selection["width"], + ] + self.last_captured_frame = image + return image, False + + @override + def recover_window(self, captured_window_title: str, autosplit: AutoSplit): + hwnd = win32gui.FindWindow(None, captured_window_title) + if not is_valid_hwnd(hwnd): + return False + autosplit.hwnd = hwnd + try: + self.reinitialize(autosplit) + # Unrecordable hwnd found as the game is crashing + except OSError as exception: + if str(exception).endswith("The parameter is incorrect"): + return False + raise + return self.check_selected_region_exists(autosplit) + + @override + def check_selected_region_exists(self, autosplit: AutoSplit): + return bool( + is_valid_hwnd(autosplit.hwnd) + and self.frame_pool + and self.session, + ) diff --git a/src/capture_method/__init__.py b/src/capture_method/__init__.py index e68093db..5e28b8e5 100644 --- 
a/src/capture_method/__init__.py +++ b/src/capture_method/__init__.py @@ -1,5 +1,3 @@ -from __future__ import annotations - import asyncio from collections import OrderedDict from dataclasses import dataclass @@ -65,7 +63,7 @@ def __hash__(self): # https://github.com/python/typeshed/issues/10428 @override def _generate_next_value_( # type:ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - name: str | CaptureMethodEnum, # noqa: N805 + name: "str | CaptureMethodEnum", # noqa: N805 *_, ): return name diff --git a/src/compare.py b/src/compare.py index 494381f4..0668f9ee 100644 --- a/src/compare.py +++ b/src/compare.py @@ -1,119 +1,117 @@ -from __future__ import annotations - -from math import sqrt - -import cv2 -import imagehash -from cv2.typing import MatLike -from PIL import Image - -from utils import BGRA_CHANNEL_COUNT, MAXBYTE, ColorChannel, ImageShape, is_valid_image - -MAXRANGE = MAXBYTE + 1 -CHANNELS = [ColorChannel.Red.value, ColorChannel.Green.value, ColorChannel.Blue.value] -HISTOGRAM_SIZE = [8, 8, 8] -RANGES = [0, MAXRANGE, 0, MAXRANGE, 0, MAXRANGE] -MASK_SIZE_MULTIPLIER = ColorChannel.Alpha * MAXBYTE * MAXBYTE - - -def compare_histograms(source: MatLike, capture: MatLike, mask: MatLike | None = None): - """ - Compares two images by calculating their histograms, normalizing - them, and then comparing them using Bhattacharyya distance. - - @param source: RGB or BGR image of any given width and height - @param capture: An image matching the shape, dimensions and format of the source - @param mask: An image matching the dimensions of the source, but 1 channel grayscale - @return: The similarity between the histograms as a number 0 to 1. 
- """ - source_hist = cv2.calcHist([source], CHANNELS, mask, HISTOGRAM_SIZE, RANGES) - capture_hist = cv2.calcHist([capture], CHANNELS, mask, HISTOGRAM_SIZE, RANGES) - - cv2.normalize(source_hist, source_hist) - cv2.normalize(capture_hist, capture_hist) - - return 1 - cv2.compareHist(source_hist, capture_hist, cv2.HISTCMP_BHATTACHARYYA) - - -def compare_l2_norm(source: MatLike, capture: MatLike, mask: MatLike | None = None): - """ - Compares two images by calculating the L2 Error (square-root of sum of squared error) - @param source: Image of any given shape - @param capture: Image matching the dimensions of the source - @param mask: An image matching the dimensions of the source, but 1 channel grayscale - @return: The similarity between the images as a number 0 to 1. - """ - error = cv2.norm(source, capture, cv2.NORM_L2, mask) - - # The L2 Error is summed across all pixels, so this normalizes - max_error = sqrt(source.size) * MAXBYTE \ - if not is_valid_image(mask)\ - else sqrt(cv2.countNonZero(mask) * MASK_SIZE_MULTIPLIER) - - if not max_error: - return 0.0 - return 1 - (error / max_error) - - -def compare_template(source: MatLike, capture: MatLike, mask: MatLike | None = None): - """ - Checks if the source is located within the capture by using the sum of square differences. - The mask is used to search for non-rectangular images within the capture. - - @param source: The subsection being searched for within the capture - @param capture: Capture of an image larger than the source - @param mask: The mask of the source with the same dimensions - @return: The best similarity for a region found in the image. This is - represented as a number from 0 to 1. - """ - result = cv2.matchTemplate(capture, source, cv2.TM_SQDIFF, mask=mask) - min_val, *_ = cv2.minMaxLoc(result) - - # matchTemplate returns the sum of square differences, this is the max - # that the value can be. Used for normalizing from 0 to 1. 
- max_error = source.size * MAXBYTE * MAXBYTE \ - if not is_valid_image(mask) \ - else cv2.countNonZero(mask) - - return 1 - (min_val / max_error) - - -def compare_phash(source: MatLike, capture: MatLike, mask: MatLike | None = None): - """ - Compares the Perceptual Hash of the two given images and returns the similarity between the two. - - @param source: Image of any given shape as a numpy array - @param capture: Image of any given shape as a numpy array - @param mask: An image matching the dimensions of the source, but 1 channel grayscale - @return: The similarity between the hashes of the image as a number 0 to 1. - """ - # Since imagehash doesn't have any masking itself, bitwise_and will allow us - # to apply the mask to the source and capture before calculating the pHash for - # each of the images. As a result of this, this function is not going to be very - # helpful for large masks as the images when shrinked down to 8x8 will mostly be - # the same - if is_valid_image(mask): - source = cv2.bitwise_and(source, source, mask=mask) - capture = cv2.bitwise_and(capture, capture, mask=mask) - - source_hash = imagehash.phash(Image.fromarray(source)) - capture_hash = imagehash.phash(Image.fromarray(capture)) - hash_diff = source_hash - capture_hash - return 1 - (hash_diff / 64.0) - - -def check_if_image_has_transparency(image: MatLike): - # Check if there's a transparency channel (4th channel) and if at least one pixel is transparent (< 255) - if image.shape[ImageShape.Channels] != BGRA_CHANNEL_COUNT: - return False - mean: float = image[:, :, ColorChannel.Alpha].mean() - if mean == 0: - # Non-transparent images code path is usually faster and simpler, so let's return that - return False - # TODO: error message if all pixels are transparent - # (the image appears as all black in windows, so it's not obvious for the user what they did wrong) - - return mean != MAXBYTE - - -COMPARE_METHODS_BY_INDEX = {0: compare_l2_norm, 1: compare_histograms, 2: compare_phash} +from 
math import sqrt + +import cv2 +import imagehash +from cv2.typing import MatLike +from PIL import Image + +from utils import BGRA_CHANNEL_COUNT, MAXBYTE, ColorChannel, ImageShape, is_valid_image + +MAXRANGE = MAXBYTE + 1 +CHANNELS = [ColorChannel.Red.value, ColorChannel.Green.value, ColorChannel.Blue.value] +HISTOGRAM_SIZE = [8, 8, 8] +RANGES = [0, MAXRANGE, 0, MAXRANGE, 0, MAXRANGE] +MASK_SIZE_MULTIPLIER = ColorChannel.Alpha * MAXBYTE * MAXBYTE + + +def compare_histograms(source: MatLike, capture: MatLike, mask: MatLike | None = None): + """ + Compares two images by calculating their histograms, normalizing + them, and then comparing them using Bhattacharyya distance. + + @param source: RGB or BGR image of any given width and height + @param capture: An image matching the shape, dimensions and format of the source + @param mask: An image matching the dimensions of the source, but 1 channel grayscale + @return: The similarity between the histograms as a number 0 to 1. + """ + source_hist = cv2.calcHist([source], CHANNELS, mask, HISTOGRAM_SIZE, RANGES) + capture_hist = cv2.calcHist([capture], CHANNELS, mask, HISTOGRAM_SIZE, RANGES) + + cv2.normalize(source_hist, source_hist) + cv2.normalize(capture_hist, capture_hist) + + return 1 - cv2.compareHist(source_hist, capture_hist, cv2.HISTCMP_BHATTACHARYYA) + + +def compare_l2_norm(source: MatLike, capture: MatLike, mask: MatLike | None = None): + """ + Compares two images by calculating the L2 Error (square-root of sum of squared error) + @param source: Image of any given shape + @param capture: Image matching the dimensions of the source + @param mask: An image matching the dimensions of the source, but 1 channel grayscale + @return: The similarity between the images as a number 0 to 1. 
+ """ + error = cv2.norm(source, capture, cv2.NORM_L2, mask) + + # The L2 Error is summed across all pixels, so this normalizes + max_error = sqrt(source.size) * MAXBYTE \ + if not is_valid_image(mask)\ + else sqrt(cv2.countNonZero(mask) * MASK_SIZE_MULTIPLIER) + + if not max_error: + return 0.0 + return 1 - (error / max_error) + + +def compare_template(source: MatLike, capture: MatLike, mask: MatLike | None = None): + """ + Checks if the source is located within the capture by using the sum of square differences. + The mask is used to search for non-rectangular images within the capture. + + @param source: The subsection being searched for within the capture + @param capture: Capture of an image larger than the source + @param mask: The mask of the source with the same dimensions + @return: The best similarity for a region found in the image. This is + represented as a number from 0 to 1. + """ + result = cv2.matchTemplate(capture, source, cv2.TM_SQDIFF, mask=mask) + min_val, *_ = cv2.minMaxLoc(result) + + # matchTemplate returns the sum of square differences, this is the max + # that the value can be. Used for normalizing from 0 to 1. + max_error = source.size * MAXBYTE * MAXBYTE \ + if not is_valid_image(mask) \ + else cv2.countNonZero(mask) + + return 1 - (min_val / max_error) + + +def compare_phash(source: MatLike, capture: MatLike, mask: MatLike | None = None): + """ + Compares the Perceptual Hash of the two given images and returns the similarity between the two. + + @param source: Image of any given shape as a numpy array + @param capture: Image of any given shape as a numpy array + @param mask: An image matching the dimensions of the source, but 1 channel grayscale + @return: The similarity between the hashes of the image as a number 0 to 1. + """ + # Since imagehash doesn't have any masking itself, bitwise_and will allow us + # to apply the mask to the source and capture before calculating the pHash for + # each of the images. 
As a result of this, this function is not going to be very + # helpful for large masks as the images when shrinked down to 8x8 will mostly be + # the same + if is_valid_image(mask): + source = cv2.bitwise_and(source, source, mask=mask) + capture = cv2.bitwise_and(capture, capture, mask=mask) + + source_hash = imagehash.phash(Image.fromarray(source)) + capture_hash = imagehash.phash(Image.fromarray(capture)) + hash_diff = source_hash - capture_hash + return 1 - (hash_diff / 64.0) + + +def check_if_image_has_transparency(image: MatLike): + # Check if there's a transparency channel (4th channel) and if at least one pixel is transparent (< 255) + if image.shape[ImageShape.Channels] != BGRA_CHANNEL_COUNT: + return False + mean: float = image[:, :, ColorChannel.Alpha].mean() + if mean == 0: + # Non-transparent images code path is usually faster and simpler, so let's return that + return False + # TODO: error message if all pixels are transparent + # (the image appears as all black in windows, so it's not obvious for the user what they did wrong) + + return mean != MAXBYTE + + +COMPARE_METHODS_BY_INDEX = {0: compare_l2_norm, 1: compare_histograms, 2: compare_phash} diff --git a/src/error_messages.py b/src/error_messages.py index 32917b09..da9e054d 100644 --- a/src/error_messages.py +++ b/src/error_messages.py @@ -1,197 +1,195 @@ -"""Error messages.""" -from __future__ import annotations - -import os -import signal -import sys -import traceback -from types import TracebackType -from typing import TYPE_CHECKING, NoReturn - -from PySide6 import QtCore, QtWidgets - -from utils import FROZEN, GITHUB_REPOSITORY - -if TYPE_CHECKING: - from AutoSplit import AutoSplit - - -def __exit_program(): - # stop main thread (which is probably blocked reading input) via an interrupt signal - os.kill(os.getpid(), signal.SIGINT) - sys.exit(1) - - -def set_text_message(message: str, details: str = "", kill_button: str = "", accept_button: str = ""): - message_box = QtWidgets.QMessageBox() - 
message_box.setWindowTitle("Error") - message_box.setTextFormat(QtCore.Qt.TextFormat.RichText) - message_box.setText(message) - # Button order is important for default focus - if accept_button: - message_box.addButton(accept_button, QtWidgets.QMessageBox.ButtonRole.AcceptRole) - if kill_button: - force_quit_button = message_box.addButton(kill_button, QtWidgets.QMessageBox.ButtonRole.ResetRole) - force_quit_button.clicked.connect(__exit_program) - if details: - message_box.setDetailedText(details) - # Preopen the details - for button in message_box.buttons(): - if message_box.buttonRole(button) == QtWidgets.QMessageBox.ButtonRole.ActionRole: - button.click() - break - message_box.exec() - - -def split_image_directory(): - set_text_message("No split image folder is selected.") - - -def split_image_directory_not_found(): - set_text_message("The Split Image Folder does not exist.") - - -def split_image_directory_empty(): - set_text_message("The Split Image Folder is empty.") - - -def image_type(image: str): - set_text_message( - f"{image!r} is not a valid image file, does not exist, " - + "or the full image file path contains a special character.", - ) - - -def region(): - set_text_message( - "No region is selected or the Capture Region window is not open. " - + "Select a region or load settings while the Capture Region window is open.", - ) - - -def split_hotkey(): - set_text_message("No split hotkey has been set.") - - -def pause_hotkey(): - set_text_message( - "Your split image folder contains an image filename with a pause flag {p}, but no pause hotkey is set.", - ) - - -def image_validity(image: str = "File"): - set_text_message(f"{image} not a valid image file") - - -def alignment_not_matched(): - set_text_message("No area in capture region matched reference image. 
Alignment failed.") - - -def no_keyword_image(keyword: str): - set_text_message(f"Your split image folder does not contain an image with the keyword {keyword!r}.") - - -def multiple_keyword_images(keyword: str): - set_text_message(f"Only one image with the keyword {keyword!r} is allowed.") - - -def reset_hotkey(): - set_text_message("Your split image folder contains a Reset Image, but no reset hotkey is set.") - - -def old_version_settings_file(): - set_text_message( - "Old version settings file detected. This version allows settings files in .toml format. Starting from v2.0.", - ) - - -def invalid_settings(): - set_text_message("Invalid settings file.") - - -def invalid_hotkey(hotkey_name: str): - set_text_message(f"Invalid hotkey {hotkey_name!r}") - - -def no_settings_file_on_open(): - set_text_message( - "No settings file found. One can be loaded on open if placed in the same folder as the AutoSplit executable.", - ) - - -def too_many_settings_files_on_open(): - set_text_message( - "Too many settings files found. " - + "Only one can be loaded on open if placed in the same folder as the AutoSplit executable.", - ) - - -def check_for_updates(): - set_text_message("An error occurred while attempting to check for updates. Please check your connection.") - - -def load_start_image(): - set_text_message( - "Start Image found, but cannot be loaded unless Start hotkey is set. " - + "Please set the hotkey, and then click the Reload Start Image button.", - ) - - -def stdin_lost(): - set_text_message("stdin not supported or lost, external control like LiveSplit integration will not work.") - - -def already_open(): - set_text_message( - "An instance of AutoSplit is already running.
Are you sure you want to open a another one?", - "", - "Don't open", - "Ignore", - ) - - -def exception_traceback(exception: BaseException, message: str = ""): - if not message: - message = ( - "AutoSplit encountered an unhandled exception and will try to recover, " - + f"however, there is no guarantee it will keep working properly. {CREATE_NEW_ISSUE_MESSAGE}" - ) - set_text_message( - message, - "\n".join(traceback.format_exception(None, exception, exception.__traceback__)), - "Close AutoSplit", - ) - - -CREATE_NEW_ISSUE_MESSAGE = ( - f"Please create a New Issue at " - + f"github.com/{GITHUB_REPOSITORY}/issues, describe what happened, " - + "and copy & paste the entire error message below" -) - - -def make_excepthook(autosplit: AutoSplit): - def excepthook(exception_type: type[BaseException], exception: BaseException, _traceback: TracebackType | None): - # Catch Keyboard Interrupts for a clean close - if exception_type is KeyboardInterrupt or isinstance(exception, KeyboardInterrupt): - sys.exit(0) - # HACK: Can happen when starting the region selector while capturing with WindowsGraphicsCapture - if ( - exception_type is SystemError - and str(exception) == " returned a result with an error set" - ): - return - # Whithin LiveSplit excepthook needs to use MainWindow's signals to show errors - autosplit.show_error_signal.emit(lambda: exception_traceback(exception)) - - return excepthook - - -def handle_top_level_exceptions(exception: Exception) -> NoReturn: - message = f"AutoSplit encountered an unrecoverable exception and will likely now close. 
{CREATE_NEW_ISSUE_MESSAGE}" - # Print error to console if not running in executable - if FROZEN: - exception_traceback(exception, message) - else: - traceback.print_exception(type(exception), exception, exception.__traceback__) - sys.exit(1) +"""Error messages.""" +import os +import signal +import sys +import traceback +from types import TracebackType +from typing import TYPE_CHECKING, NoReturn + +from PySide6 import QtCore, QtWidgets + +from utils import FROZEN, GITHUB_REPOSITORY + +if TYPE_CHECKING: + from AutoSplit import AutoSplit + + +def __exit_program(): + # stop main thread (which is probably blocked reading input) via an interrupt signal + os.kill(os.getpid(), signal.SIGINT) + sys.exit(1) + + +def set_text_message(message: str, details: str = "", kill_button: str = "", accept_button: str = ""): + message_box = QtWidgets.QMessageBox() + message_box.setWindowTitle("Error") + message_box.setTextFormat(QtCore.Qt.TextFormat.RichText) + message_box.setText(message) + # Button order is important for default focus + if accept_button: + message_box.addButton(accept_button, QtWidgets.QMessageBox.ButtonRole.AcceptRole) + if kill_button: + force_quit_button = message_box.addButton(kill_button, QtWidgets.QMessageBox.ButtonRole.ResetRole) + force_quit_button.clicked.connect(__exit_program) + if details: + message_box.setDetailedText(details) + # Preopen the details + for button in message_box.buttons(): + if message_box.buttonRole(button) == QtWidgets.QMessageBox.ButtonRole.ActionRole: + button.click() + break + message_box.exec() + + +def split_image_directory(): + set_text_message("No split image folder is selected.") + + +def split_image_directory_not_found(): + set_text_message("The Split Image Folder does not exist.") + + +def split_image_directory_empty(): + set_text_message("The Split Image Folder is empty.") + + +def image_type(image: str): + set_text_message( + f"{image!r} is not a valid image file, does not exist, " + + "or the full image file path contains a 
special character.", + ) + + +def region(): + set_text_message( + "No region is selected or the Capture Region window is not open. " + + "Select a region or load settings while the Capture Region window is open.", + ) + + +def split_hotkey(): + set_text_message("No split hotkey has been set.") + + +def pause_hotkey(): + set_text_message( + "Your split image folder contains an image filename with a pause flag {p}, but no pause hotkey is set.", + ) + + +def image_validity(image: str = "File"): + set_text_message(f"{image} not a valid image file") + + +def alignment_not_matched(): + set_text_message("No area in capture region matched reference image. Alignment failed.") + + +def no_keyword_image(keyword: str): + set_text_message(f"Your split image folder does not contain an image with the keyword {keyword!r}.") + + +def multiple_keyword_images(keyword: str): + set_text_message(f"Only one image with the keyword {keyword!r} is allowed.") + + +def reset_hotkey(): + set_text_message("Your split image folder contains a Reset Image, but no reset hotkey is set.") + + +def old_version_settings_file(): + set_text_message( + "Old version settings file detected. This version allows settings files in .toml format. Starting from v2.0.", + ) + + +def invalid_settings(): + set_text_message("Invalid settings file.") + + +def invalid_hotkey(hotkey_name: str): + set_text_message(f"Invalid hotkey {hotkey_name!r}") + + +def no_settings_file_on_open(): + set_text_message( + "No settings file found. One can be loaded on open if placed in the same folder as the AutoSplit executable.", + ) + + +def too_many_settings_files_on_open(): + set_text_message( + "Too many settings files found. " + + "Only one can be loaded on open if placed in the same folder as the AutoSplit executable.", + ) + + +def check_for_updates(): + set_text_message("An error occurred while attempting to check for updates. 
Please check your connection.") + + +def load_start_image(): + set_text_message( + "Start Image found, but cannot be loaded unless Start hotkey is set. " + + "Please set the hotkey, and then click the Reload Start Image button.", + ) + + +def stdin_lost(): + set_text_message("stdin not supported or lost, external control like LiveSplit integration will not work.") + + +def already_open(): + set_text_message( + "An instance of AutoSplit is already running.
Are you sure you want to open another one?",
+        "",
+        "Don't open",
+        "Ignore",
+    )
+
+
+def exception_traceback(exception: BaseException, message: str = ""):
+    if not message:
+        message = (
+            "AutoSplit encountered an unhandled exception and will try to recover, "
+            + f"however, there is no guarantee it will keep working properly. {CREATE_NEW_ISSUE_MESSAGE}"
+        )
+    set_text_message(
+        message,
+        "\n".join(traceback.format_exception(None, exception, exception.__traceback__)),
+        "Close AutoSplit",
+    )
+
+
+CREATE_NEW_ISSUE_MESSAGE = (
+    f"Please create a New Issue at "
+    + f"github.com/{GITHUB_REPOSITORY}/issues, describe what happened, "
+    + "and copy & paste the entire error message below"
+)
+
+
+def make_excepthook(autosplit: AutoSplit):
+    def excepthook(exception_type: type[BaseException], exception: BaseException, _traceback: TracebackType | None):
+        # Catch Keyboard Interrupts for a clean close
+        if exception_type is KeyboardInterrupt or isinstance(exception, KeyboardInterrupt):
+            sys.exit(0)
+        # HACK: Can happen when starting the region selector while capturing with WindowsGraphicsCapture
+        if (
+            exception_type is SystemError
+            and str(exception) == " returned a result with an error set"
+        ):
+            return
+        # Within LiveSplit, excepthook needs to use MainWindow's signals to show errors
+        autosplit.show_error_signal.emit(lambda: exception_traceback(exception))
+
+    return excepthook
+
+
+def handle_top_level_exceptions(exception: Exception) -> NoReturn:
+    message = f"AutoSplit encountered an unrecoverable exception and will likely now close. 
{CREATE_NEW_ISSUE_MESSAGE}" + # Print error to console if not running in executable + if FROZEN: + exception_traceback(exception, message) + else: + traceback.print_exception(type(exception), exception, exception.__traceback__) + sys.exit(1) diff --git a/src/hotkeys.py b/src/hotkeys.py index 78d2bc01..bc6a9584 100644 --- a/src/hotkeys.py +++ b/src/hotkeys.py @@ -1,313 +1,311 @@ -from __future__ import annotations - -from collections.abc import Callable -from typing import TYPE_CHECKING, Literal, cast - -import keyboard -import pyautogui -from PySide6 import QtWidgets - -import error_messages -from utils import fire_and_forget, is_digit - -if TYPE_CHECKING: - from AutoSplit import AutoSplit - -# While not usually recommended, we don't manipulate the mouse, and we don't want the extra delay -pyautogui.FAILSAFE = False - -SET_HOTKEY_TEXT = "Set Hotkey" -PRESS_A_KEY_TEXT = "Press a key..." - -Commands = Literal["split", "start", "pause", "reset", "skip", "undo"] -Hotkey = Literal["split", "reset", "skip_split", "undo_split", "pause", "screenshot", "toggle_auto_reset_image"] -HOTKEYS: list[Hotkey] = ["split", "reset", "skip_split", "undo_split", "pause", "screenshot", "toggle_auto_reset_image"] - - -def remove_all_hotkeys(): - keyboard.unhook_all() - - -def before_setting_hotkey(autosplit: AutoSplit): - """Do all of these after you click "Set Hotkey" but before you type the hotkey.""" - autosplit.start_auto_splitter_button.setEnabled(False) - if autosplit.SettingsWidget: - for hotkey in HOTKEYS: - getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(False) - - -def after_setting_hotkey(autosplit: AutoSplit): - """ - Do all of these things after you set a hotkey. - A signal connects to this because changing GUI stuff is only possible in the main thread. 
- """ - if not autosplit.is_running: - autosplit.start_auto_splitter_button.setEnabled(True) - if autosplit.SettingsWidget: - for hotkey in HOTKEYS: - getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setText(SET_HOTKEY_TEXT) - getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(True) - - -def send_command(autosplit: AutoSplit, command: Commands): - # Note: Rather than having the start image able to also reset the timer, - # having the reset image check be active at all time would be a better, more organic solution, - # but that is dependent on migrating to an observer pattern (#219) and being able to reload all images. - if autosplit.is_auto_controlled: - if command == "start" and autosplit.settings_dict["start_also_resets"]: - print("reset", flush=True) - print(command, flush=True) - elif command == "start": - if autosplit.settings_dict["start_also_resets"]: - _send_hotkey(autosplit.settings_dict["reset_hotkey"]) - _send_hotkey(autosplit.settings_dict["split_hotkey"]) - elif command == "split": - _send_hotkey(autosplit.settings_dict["split_hotkey"]) - elif command == "pause": - _send_hotkey(autosplit.settings_dict["pause_hotkey"]) - elif command == "reset": - _send_hotkey(autosplit.settings_dict["reset_hotkey"]) - elif command == "skip": - _send_hotkey(autosplit.settings_dict["skip_split_hotkey"]) - elif command == "undo": - _send_hotkey(autosplit.settings_dict["undo_split_hotkey"]) - - else: - raise KeyError(f"{command!r} is not a valid command") - - -def _unhook(hotkey_callback: Callable[[], None] | None): - try: - if hotkey_callback: - keyboard.unhook_key(hotkey_callback) - except (AttributeError, KeyError, ValueError): - pass - - -def _send_hotkey(hotkey_or_scan_code: int | str | None): - """Supports sending the appropriate scan code for all the special cases.""" - if not hotkey_or_scan_code: - return - - # Deal with regular inputs - # If an int or does not contain the following strings - if ( - 
isinstance(hotkey_or_scan_code, int) - or not any(key in hotkey_or_scan_code for key in ("num ", "decimal", "+")) - ): - keyboard.send(hotkey_or_scan_code) - return - - # FIXME: Localized keys won't work here - # Deal with problematic keys. Even by sending specific scan code "keyboard" still sends the default (wrong) key - # keyboard also has issues with capitalization modifier (shift+A) - # keyboard.send(keyboard.key_to_scan_codes(key_or_scan_code)[1]) - pyautogui.hotkey( - *[ - "+" if key == "plus" else key - for key - in hotkey_or_scan_code.replace(" ", "").split("+") - ], - ) - - -def __validate_keypad(expected_key: str, keyboard_event: keyboard.KeyboardEvent) -> bool: - """ - NOTE: This is a workaround very specific to numpads. - Windows reports different physical keys with the same scan code. - For example, "Home", "Num Home" and "Num 7" are all `71`. - See: https://github.com/boppreh/keyboard/issues/171#issuecomment-390437684 . - - Since we reuse the key string we set to send to LiveSplit, we can't use fake names like "num home". - We're also trying to achieve the same hotkey behaviour as LiveSplit has. - """ - # Prevent "(keypad)delete", "(keypad)./decimal" and "del" from triggering each other - # as well as "." 
and "(keypad)./decimal" - if keyboard_event.scan_code in {83, 52}: - # TODO: "del" won't work with "(keypad)delete" if localized in non-english (ie: "suppr" in french) - return expected_key == keyboard_event.name - # Prevent "action keys" from triggering "keypad keys" - if keyboard_event.name and is_digit(keyboard_event.name[-1]): - # Prevent "regular numbers" and "keypad numbers" from activating each other - return bool( - keyboard_event.is_keypad - if expected_key.startswith("num ") - else not keyboard_event.is_keypad, - ) - - # Prevent "keypad action keys" from triggering "regular numbers" and "keypad numbers" - # Still allow the same key that might be localized differently on keypad vs non-keypad - return not is_digit(expected_key[-1]) - - -def _hotkey_action(keyboard_event: keyboard.KeyboardEvent, key_name: str, action: Callable[[], None]): - """ - We're doing the check here instead of saving the key code because - the non-keypad shared keys are localized while the keypad ones aren't. - They also share scan codes on Windows. - """ - if keyboard_event.event_type == keyboard.KEY_DOWN and __validate_keypad(key_name, keyboard_event): - action() - - -def __get_key_name(keyboard_event: keyboard.KeyboardEvent): - """Ensures proper keypad name.""" - event_name = str(keyboard_event.name) - # Normally this is done by keyboard.get_hotkey_name. But our code won't always get there. - if event_name == "+": - return "plus" - return f"num {keyboard_event.name}" \ - if keyboard_event.is_keypad and is_digit(keyboard_event.name) \ - else event_name - - -def __get_hotkey_name(names: list[str]): - """ - Uses keyboard.get_hotkey_name but works with non-english modifiers and keypad - See: https://github.com/boppreh/keyboard/issues/516 . 
- """ - if not names: - return "" - - if len(names) == 1: - return names[0] - - def sorting_key(key: str): - return not keyboard.is_modifier(keyboard.key_to_scan_codes(key)[0]) - - clean_names = sorted(keyboard.get_hotkey_name(names).split("+"), key=sorting_key) - # Replace the last key in hotkey_name with what we actually got as a last key_name - # This ensures we keep proper keypad names - return "+".join(clean_names[:-1] + names[-1:]) - - -def __read_hotkey(): - """ - Blocks until a hotkey combination is read. - Returns the hotkey_name and last KeyboardEvent. - """ - names: list[str] = [] - while True: - keyboard_event = keyboard.read_event(True) - # LiveSplit supports modifier keys as the last key, so any keyup means end of hotkey - if keyboard_event.event_type == keyboard.KEY_UP: - # Unless keyup is also the very first event, - # which can happen from a very fast press at the same time we start reading - if not names: - continue - break - key_name = __get_key_name(keyboard_event) - # Ignore long presses - if names and names[-1] == key_name: - continue - names.append(__get_key_name(keyboard_event)) - # Stop at the first non-modifier to prevent registering a hotkey with multiple regular keys - if not keyboard.is_modifier(keyboard_event.scan_code): - break - return __get_hotkey_name(names) - - -def __remove_key_already_set(autosplit: AutoSplit, key_name: str): - for hotkey in HOTKEYS: - settings_key = f"{hotkey}_hotkey" - if autosplit.settings_dict.get(settings_key) == key_name: - _unhook(getattr(autosplit, f"{hotkey}_hotkey")) - autosplit.settings_dict[settings_key] = "" # pyright: ignore[reportGeneralTypeIssues] - if autosplit.SettingsWidget: - getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText("") - - -def __get_hotkey_action(autosplit: AutoSplit, hotkey: Hotkey): - if hotkey == "split": - return autosplit.start_auto_splitter - if hotkey == "skip_split": - return lambda: autosplit.skip_split(True) - if hotkey == "undo_split": - return lambda: 
autosplit.undo_split(True) - if hotkey == "toggle_auto_reset_image": - - def toggle_auto_reset_image(): - new_value = not autosplit.settings_dict["enable_auto_reset"] - autosplit.settings_dict["enable_auto_reset"] = new_value - if autosplit.SettingsWidget: - autosplit.SettingsWidget.enable_auto_reset_image_checkbox.setChecked(new_value) - - return toggle_auto_reset_image - return getattr(autosplit, f"{hotkey}_signal").emit - - -def is_valid_hotkey_name(hotkey_name: str): - return any( - key and not keyboard.is_modifier(keyboard.key_to_scan_codes(key)[0]) - for key - in hotkey_name.split("+") - ) - -# TODO: using getattr/setattr is NOT a good way to go about this. It was only temporarily done to -# reduce duplicated code. We should use a dictionary of hotkey class or something. - - -def set_hotkey(autosplit: AutoSplit, hotkey: Hotkey, preselected_hotkey_name: str = ""): - if autosplit.SettingsWidget: - # Unfocus all fields - cast(QtWidgets.QWidget, autosplit.SettingsWidget).setFocus() - getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setText(PRESS_A_KEY_TEXT) - - # Disable some buttons - before_setting_hotkey(autosplit) - - # New thread points to read_and_set_hotkey. this thread is needed or GUI will freeze - # while the program waits for user input on the hotkey - @fire_and_forget - def read_and_set_hotkey(): - try: - hotkey_name = preselected_hotkey_name or __read_hotkey() - - # Unset hotkey by pressing "Escape". 
This is the same behaviour as LiveSplit - if hotkey_name == "esc": - _unhook(getattr(autosplit, f"{hotkey}_hotkey")) - autosplit.settings_dict[f"{hotkey}_hotkey"] = "" # pyright: ignore[reportGeneralTypeIssues] - if autosplit.SettingsWidget: - getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText("") - return - - if not is_valid_hotkey_name(hotkey_name): - autosplit.show_error_signal.emit(lambda: error_messages.invalid_hotkey(hotkey_name)) - return - - # Try to remove the previously set hotkey if there is one - _unhook(getattr(autosplit, f"{hotkey}_hotkey")) - - # Remove any hotkey using the same key combination - __remove_key_already_set(autosplit, hotkey_name) - - action = __get_hotkey_action(autosplit, hotkey) - setattr( - autosplit, - f"{hotkey}_hotkey", - # keyboard.add_hotkey doesn't give the last keyboard event, so we can't __validate_keypad. - # This means "ctrl + num 5" and "ctrl + 5" will both be registered. - # For that reason, we still prefer keyboard.hook_key for single keys. - # keyboard module allows you to hit multiple keys for a hotkey. they are joined together by +. - keyboard.add_hotkey(hotkey_name, action) - if "+" in hotkey_name - # We need to inspect the event to know if it comes from numpad because of _canonial_names. 
- # See: https://github.com/boppreh/keyboard/issues/161#issuecomment-386825737 - # The best way to achieve this is make our own hotkey handling on top of hook - # See: https://github.com/boppreh/keyboard/issues/216#issuecomment-431999553 - else keyboard.hook_key( - hotkey_name, - lambda keyboard_event: _hotkey_action(keyboard_event, hotkey_name, action), - ), - ) - - if autosplit.SettingsWidget: - getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText(hotkey_name) - autosplit.settings_dict[f"{hotkey}_hotkey"] = hotkey_name # pyright: ignore[reportGeneralTypeIssues] - except Exception as exception: # noqa: BLE001 # We really want to catch everything here - error = exception - autosplit.show_error_signal.emit(lambda: error_messages.exception_traceback(error)) - finally: - autosplit.after_setting_hotkey_signal.emit() - - read_and_set_hotkey() +from collections.abc import Callable +from typing import TYPE_CHECKING, Literal, cast + +import keyboard +import pyautogui +from PySide6 import QtWidgets + +import error_messages +from utils import fire_and_forget, is_digit + +if TYPE_CHECKING: + from AutoSplit import AutoSplit + +# While not usually recommended, we don't manipulate the mouse, and we don't want the extra delay +pyautogui.FAILSAFE = False + +SET_HOTKEY_TEXT = "Set Hotkey" +PRESS_A_KEY_TEXT = "Press a key..." 
+ +Commands = Literal["split", "start", "pause", "reset", "skip", "undo"] +Hotkey = Literal["split", "reset", "skip_split", "undo_split", "pause", "screenshot", "toggle_auto_reset_image"] +HOTKEYS: list[Hotkey] = ["split", "reset", "skip_split", "undo_split", "pause", "screenshot", "toggle_auto_reset_image"] + + +def remove_all_hotkeys(): + keyboard.unhook_all() + + +def before_setting_hotkey(autosplit: AutoSplit): + """Do all of these after you click "Set Hotkey" but before you type the hotkey.""" + autosplit.start_auto_splitter_button.setEnabled(False) + if autosplit.SettingsWidget: + for hotkey in HOTKEYS: + getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(False) + + +def after_setting_hotkey(autosplit: AutoSplit): + """ + Do all of these things after you set a hotkey. + A signal connects to this because changing GUI stuff is only possible in the main thread. + """ + if not autosplit.is_running: + autosplit.start_auto_splitter_button.setEnabled(True) + if autosplit.SettingsWidget: + for hotkey in HOTKEYS: + getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setText(SET_HOTKEY_TEXT) + getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(True) + + +def send_command(autosplit: AutoSplit, command: Commands): + # Note: Rather than having the start image able to also reset the timer, + # having the reset image check be active at all time would be a better, more organic solution, + # but that is dependent on migrating to an observer pattern (#219) and being able to reload all images. 
+ if autosplit.is_auto_controlled: + if command == "start" and autosplit.settings_dict["start_also_resets"]: + print("reset", flush=True) + print(command, flush=True) + elif command == "start": + if autosplit.settings_dict["start_also_resets"]: + _send_hotkey(autosplit.settings_dict["reset_hotkey"]) + _send_hotkey(autosplit.settings_dict["split_hotkey"]) + elif command == "split": + _send_hotkey(autosplit.settings_dict["split_hotkey"]) + elif command == "pause": + _send_hotkey(autosplit.settings_dict["pause_hotkey"]) + elif command == "reset": + _send_hotkey(autosplit.settings_dict["reset_hotkey"]) + elif command == "skip": + _send_hotkey(autosplit.settings_dict["skip_split_hotkey"]) + elif command == "undo": + _send_hotkey(autosplit.settings_dict["undo_split_hotkey"]) + + else: + raise KeyError(f"{command!r} is not a valid command") + + +def _unhook(hotkey_callback: Callable[[], None] | None): + try: + if hotkey_callback: + keyboard.unhook_key(hotkey_callback) + except (AttributeError, KeyError, ValueError): + pass + + +def _send_hotkey(hotkey_or_scan_code: int | str | None): + """Supports sending the appropriate scan code for all the special cases.""" + if not hotkey_or_scan_code: + return + + # Deal with regular inputs + # If an int or does not contain the following strings + if ( + isinstance(hotkey_or_scan_code, int) + or not any(key in hotkey_or_scan_code for key in ("num ", "decimal", "+")) + ): + keyboard.send(hotkey_or_scan_code) + return + + # FIXME: Localized keys won't work here + # Deal with problematic keys. 
Even by sending specific scan code "keyboard" still sends the default (wrong) key + # keyboard also has issues with capitalization modifier (shift+A) + # keyboard.send(keyboard.key_to_scan_codes(key_or_scan_code)[1]) + pyautogui.hotkey( + *[ + "+" if key == "plus" else key + for key + in hotkey_or_scan_code.replace(" ", "").split("+") + ], + ) + + +def __validate_keypad(expected_key: str, keyboard_event: keyboard.KeyboardEvent) -> bool: + """ + NOTE: This is a workaround very specific to numpads. + Windows reports different physical keys with the same scan code. + For example, "Home", "Num Home" and "Num 7" are all `71`. + See: https://github.com/boppreh/keyboard/issues/171#issuecomment-390437684 . + + Since we reuse the key string we set to send to LiveSplit, we can't use fake names like "num home". + We're also trying to achieve the same hotkey behaviour as LiveSplit has. + """ + # Prevent "(keypad)delete", "(keypad)./decimal" and "del" from triggering each other + # as well as "." and "(keypad)./decimal" + if keyboard_event.scan_code in {83, 52}: + # TODO: "del" won't work with "(keypad)delete" if localized in non-english (ie: "suppr" in french) + return expected_key == keyboard_event.name + # Prevent "action keys" from triggering "keypad keys" + if keyboard_event.name and is_digit(keyboard_event.name[-1]): + # Prevent "regular numbers" and "keypad numbers" from activating each other + return bool( + keyboard_event.is_keypad + if expected_key.startswith("num ") + else not keyboard_event.is_keypad, + ) + + # Prevent "keypad action keys" from triggering "regular numbers" and "keypad numbers" + # Still allow the same key that might be localized differently on keypad vs non-keypad + return not is_digit(expected_key[-1]) + + +def _hotkey_action(keyboard_event: keyboard.KeyboardEvent, key_name: str, action: Callable[[], None]): + """ + We're doing the check here instead of saving the key code because + the non-keypad shared keys are localized while the keypad ones 
aren't. + They also share scan codes on Windows. + """ + if keyboard_event.event_type == keyboard.KEY_DOWN and __validate_keypad(key_name, keyboard_event): + action() + + +def __get_key_name(keyboard_event: keyboard.KeyboardEvent): + """Ensures proper keypad name.""" + event_name = str(keyboard_event.name) + # Normally this is done by keyboard.get_hotkey_name. But our code won't always get there. + if event_name == "+": + return "plus" + return f"num {keyboard_event.name}" \ + if keyboard_event.is_keypad and is_digit(keyboard_event.name) \ + else event_name + + +def __get_hotkey_name(names: list[str]): + """ + Uses keyboard.get_hotkey_name but works with non-english modifiers and keypad + See: https://github.com/boppreh/keyboard/issues/516 . + """ + if not names: + return "" + + if len(names) == 1: + return names[0] + + def sorting_key(key: str): + return not keyboard.is_modifier(keyboard.key_to_scan_codes(key)[0]) + + clean_names = sorted(keyboard.get_hotkey_name(names).split("+"), key=sorting_key) + # Replace the last key in hotkey_name with what we actually got as a last key_name + # This ensures we keep proper keypad names + return "+".join(clean_names[:-1] + names[-1:]) + + +def __read_hotkey(): + """ + Blocks until a hotkey combination is read. + Returns the hotkey_name and last KeyboardEvent. 
+ """ + names: list[str] = [] + while True: + keyboard_event = keyboard.read_event(True) + # LiveSplit supports modifier keys as the last key, so any keyup means end of hotkey + if keyboard_event.event_type == keyboard.KEY_UP: + # Unless keyup is also the very first event, + # which can happen from a very fast press at the same time we start reading + if not names: + continue + break + key_name = __get_key_name(keyboard_event) + # Ignore long presses + if names and names[-1] == key_name: + continue + names.append(__get_key_name(keyboard_event)) + # Stop at the first non-modifier to prevent registering a hotkey with multiple regular keys + if not keyboard.is_modifier(keyboard_event.scan_code): + break + return __get_hotkey_name(names) + + +def __remove_key_already_set(autosplit: AutoSplit, key_name: str): + for hotkey in HOTKEYS: + settings_key = f"{hotkey}_hotkey" + if autosplit.settings_dict.get(settings_key) == key_name: + _unhook(getattr(autosplit, f"{hotkey}_hotkey")) + autosplit.settings_dict[settings_key] = "" # pyright: ignore[reportGeneralTypeIssues] + if autosplit.SettingsWidget: + getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText("") + + +def __get_hotkey_action(autosplit: AutoSplit, hotkey: Hotkey): + if hotkey == "split": + return autosplit.start_auto_splitter + if hotkey == "skip_split": + return lambda: autosplit.skip_split(True) + if hotkey == "undo_split": + return lambda: autosplit.undo_split(True) + if hotkey == "toggle_auto_reset_image": + + def toggle_auto_reset_image(): + new_value = not autosplit.settings_dict["enable_auto_reset"] + autosplit.settings_dict["enable_auto_reset"] = new_value + if autosplit.SettingsWidget: + autosplit.SettingsWidget.enable_auto_reset_image_checkbox.setChecked(new_value) + + return toggle_auto_reset_image + return getattr(autosplit, f"{hotkey}_signal").emit + + +def is_valid_hotkey_name(hotkey_name: str): + return any( + key and not keyboard.is_modifier(keyboard.key_to_scan_codes(key)[0]) + for key + in 
hotkey_name.split("+") + ) + +# TODO: using getattr/setattr is NOT a good way to go about this. It was only temporarily done to +# reduce duplicated code. We should use a dictionary of hotkey class or something. + + +def set_hotkey(autosplit: AutoSplit, hotkey: Hotkey, preselected_hotkey_name: str = ""): + if autosplit.SettingsWidget: + # Unfocus all fields + cast(QtWidgets.QWidget, autosplit.SettingsWidget).setFocus() + getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setText(PRESS_A_KEY_TEXT) + + # Disable some buttons + before_setting_hotkey(autosplit) + + # New thread points to read_and_set_hotkey. this thread is needed or GUI will freeze + # while the program waits for user input on the hotkey + @fire_and_forget + def read_and_set_hotkey(): + try: + hotkey_name = preselected_hotkey_name or __read_hotkey() + + # Unset hotkey by pressing "Escape". This is the same behaviour as LiveSplit + if hotkey_name == "esc": + _unhook(getattr(autosplit, f"{hotkey}_hotkey")) + autosplit.settings_dict[f"{hotkey}_hotkey"] = "" # pyright: ignore[reportGeneralTypeIssues] + if autosplit.SettingsWidget: + getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText("") + return + + if not is_valid_hotkey_name(hotkey_name): + autosplit.show_error_signal.emit(lambda: error_messages.invalid_hotkey(hotkey_name)) + return + + # Try to remove the previously set hotkey if there is one + _unhook(getattr(autosplit, f"{hotkey}_hotkey")) + + # Remove any hotkey using the same key combination + __remove_key_already_set(autosplit, hotkey_name) + + action = __get_hotkey_action(autosplit, hotkey) + setattr( + autosplit, + f"{hotkey}_hotkey", + # keyboard.add_hotkey doesn't give the last keyboard event, so we can't __validate_keypad. + # This means "ctrl + num 5" and "ctrl + 5" will both be registered. + # For that reason, we still prefer keyboard.hook_key for single keys. + # keyboard module allows you to hit multiple keys for a hotkey. they are joined together by +. 
+ keyboard.add_hotkey(hotkey_name, action) + if "+" in hotkey_name + # We need to inspect the event to know if it comes from numpad because of _canonial_names. + # See: https://github.com/boppreh/keyboard/issues/161#issuecomment-386825737 + # The best way to achieve this is make our own hotkey handling on top of hook + # See: https://github.com/boppreh/keyboard/issues/216#issuecomment-431999553 + else keyboard.hook_key( + hotkey_name, + lambda keyboard_event: _hotkey_action(keyboard_event, hotkey_name, action), + ), + ) + + if autosplit.SettingsWidget: + getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText(hotkey_name) + autosplit.settings_dict[f"{hotkey}_hotkey"] = hotkey_name # pyright: ignore[reportGeneralTypeIssues] + except Exception as exception: # noqa: BLE001 # We really want to catch everything here + error = exception + autosplit.show_error_signal.emit(lambda: error_messages.exception_traceback(error)) + finally: + autosplit.after_setting_hotkey_signal.emit() + + read_and_set_hotkey() diff --git a/src/menu_bar.py b/src/menu_bar.py index becac6f8..71d9736f 100644 --- a/src/menu_bar.py +++ b/src/menu_bar.py @@ -1,402 +1,400 @@ -from __future__ import annotations - -import asyncio -import webbrowser -from typing import TYPE_CHECKING, Any, cast - -import requests -from packaging.version import parse as version_parse -from PySide6 import QtCore, QtWidgets -from PySide6.QtCore import Qt -from PySide6.QtGui import QBrush, QPalette -from PySide6.QtWidgets import QFileDialog -from requests.exceptions import RequestException -from typing_extensions import override - -import error_messages -import user_profile -from capture_method import ( - CAPTURE_METHODS, - CameraInfo, - CaptureMethodEnum, - change_capture_method, - get_all_video_capture_devices, -) -from gen import about, design, settings as settings_ui, update_checker -from hotkeys import HOTKEYS, Hotkey, set_hotkey -from utils import AUTOSPLIT_VERSION, GITHUB_REPOSITORY, decimal, fire_and_forget - -if 
TYPE_CHECKING: - from AutoSplit import AutoSplit - -HALF_BRIGHTNESS = 128 - - -class __AboutWidget(QtWidgets.QWidget, about.Ui_AboutAutoSplitWidget): # noqa: N801 # Private class - """About Window.""" - - def __init__(self): - super().__init__() - self.setupUi(self) - self.created_by_label.setOpenExternalLinks(True) - self.donate_button_label.setOpenExternalLinks(True) - self.version_label.setText(f"Version: {AUTOSPLIT_VERSION}") - self.show() - - -def open_about(autosplit: AutoSplit): - if not autosplit.AboutWidget or cast(QtWidgets.QWidget, autosplit.AboutWidget).isHidden(): - autosplit.AboutWidget = __AboutWidget() - - -class __UpdateCheckerWidget(QtWidgets.QWidget, update_checker.Ui_UpdateChecker): # noqa: N801 # Private class - def __init__(self, latest_version: str, design_window: design.Ui_MainWindow, check_on_open: bool = False): - super().__init__() - self.setupUi(self) - self.current_version_number_label.setText(AUTOSPLIT_VERSION) - self.latest_version_number_label.setText(latest_version) - self.left_button.clicked.connect(self.open_update) - self.do_not_ask_again_checkbox.stateChanged.connect(self.do_not_ask_me_again_state_changed) - self.design_window = design_window - if version_parse(latest_version) > version_parse(AUTOSPLIT_VERSION): - self.do_not_ask_again_checkbox.setVisible(check_on_open) - self.left_button.setFocus() - self.show() - elif not check_on_open: - self.update_status_label.setText("You are on the latest AutoSplit version.") - self.go_to_download_label.setVisible(False) - self.left_button.setVisible(False) - self.right_button.setText("OK") - self.do_not_ask_again_checkbox.setVisible(False) - self.show() - - def open_update(self): - webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}/releases/latest") - self.close() - - def do_not_ask_me_again_state_changed(self): - user_profile.set_check_for_updates_on_open( - self.design_window, - self.do_not_ask_again_checkbox.isChecked(), - ) - - -def open_update_checker(autosplit: AutoSplit, 
latest_version: str, check_on_open: bool): - if not autosplit.UpdateCheckerWidget or cast(QtWidgets.QWidget, autosplit.UpdateCheckerWidget).isHidden(): - autosplit.UpdateCheckerWidget = __UpdateCheckerWidget(latest_version, autosplit, check_on_open) - - -def view_help(): - webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}#tutorial") - - -class __CheckForUpdatesThread(QtCore.QThread): # noqa: N801 # Private class - def __init__(self, autosplit: AutoSplit, check_on_open: bool): - super().__init__() - self.autosplit = autosplit - self.check_on_open = check_on_open - - @override - def run(self): - try: - response = requests.get(f"https://api.github.com/repos/{GITHUB_REPOSITORY}/releases/latest", timeout=30) - latest_version = str(response.json()["name"]).split("v")[1] - self.autosplit.update_checker_widget_signal.emit(latest_version, self.check_on_open) - except (RequestException, KeyError): - if not self.check_on_open: - self.autosplit.show_error_signal.emit(error_messages.check_for_updates) - - -def about_qt(): - webbrowser.open("https://wiki.qt.io/About_Qt") - - -def about_qt_for_python(): - webbrowser.open("https://wiki.qt.io/Qt_for_Python") - - -def check_for_updates(autosplit: AutoSplit, check_on_open: bool = False): - autosplit.CheckForUpdatesThread = __CheckForUpdatesThread(autosplit, check_on_open) - autosplit.CheckForUpdatesThread.start() - - -class __SettingsWidget(QtWidgets.QWidget, settings_ui.Ui_SettingsWidget): # noqa: N801 # Private class - def __init__(self, autosplit: AutoSplit): - super().__init__() - self.__video_capture_devices: list[CameraInfo] = [] - """ - Used to temporarily store the existing cameras, - we don't want to call `get_all_video_capture_devices` agains and possibly have a different result - """ - - self.setupUi(self) - - # Fix Fusion Dark Theme's tabs content looking weird because it's using the button role - window_color = self.palette().color(QPalette.ColorRole.Window) - if window_color.red() < HALF_BRIGHTNESS: - brush = 
QBrush(window_color) - brush.setStyle(Qt.BrushStyle.SolidPattern) - palette = QPalette() - palette.setBrush(QPalette.ColorGroup.Active, QPalette.ColorRole.Button, brush) - palette.setBrush(QPalette.ColorGroup.Inactive, QPalette.ColorRole.Button, brush) - palette.setBrush(QPalette.ColorGroup.Disabled, QPalette.ColorRole.Button, brush) - self.settings_tabs.setPalette(palette) - - self.autosplit = autosplit - self.__set_readme_link() - # Don't autofocus any particular field - self.setFocus() - -# region Build the Capture method combobox - capture_method_values = CAPTURE_METHODS.values() - self.__set_all_capture_devices() - - # TODO: Word-wrapping works, but there's lots of extra padding to the right. Raise issue upstream - # list_view = QtWidgets.QListView() - # list_view.setWordWrap(True) - # list_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff) - # list_view.setFixedWidth(self.capture_method_combobox.width()) - # self.capture_method_combobox.setView(list_view) - - self.capture_method_combobox.addItems([ - f"- {method.name} ({method.short_description})" - for method in capture_method_values - ]) - self.capture_method_combobox.setToolTip( - "\n\n".join([ - f"{method.name} :\n{method.description}" - for method in capture_method_values - ]), - ) -# endregion - - self.__setup_bindings() - - self.show() - - def __update_default_threshold(self, value: Any): - self.__set_value("default_similarity_threshold", value) - self.autosplit.table_current_image_threshold_label.setText( - decimal(self.autosplit.split_image.get_similarity_threshold(self.autosplit)) - if self.autosplit.split_image - else "-", - ) - self.autosplit.table_reset_image_threshold_label.setText( - decimal(self.autosplit.reset_image.get_similarity_threshold(self.autosplit)) - if self.autosplit.reset_image - else "-", - ) - - def __set_value(self, key: str, value: Any): - self.autosplit.settings_dict[key] = value - - def get_capture_device_index(self, capture_device_id: int): - 
"""Returns 0 if the capture_device_id is invalid.""" - try: - return [device.device_id for device in self.__video_capture_devices].index(capture_device_id) - except ValueError: - return 0 - - def __enable_capture_device_if_its_selected_method( - self, - selected_capture_method: str | CaptureMethodEnum | None = None, - ): - if selected_capture_method is None: - selected_capture_method = self.autosplit.settings_dict["capture_method"] - is_video_capture_device = selected_capture_method == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE - self.capture_device_combobox.setEnabled(is_video_capture_device) - if is_video_capture_device: - self.capture_device_combobox.setCurrentIndex( - self.get_capture_device_index(self.autosplit.settings_dict["capture_device_id"]), - ) - else: - self.capture_device_combobox.setPlaceholderText('Select "Video Capture Device" above') - self.capture_device_combobox.setCurrentIndex(-1) - - def __capture_method_changed(self): - selected_capture_method = CAPTURE_METHODS.get_method_by_index(self.capture_method_combobox.currentIndex()) - self.__enable_capture_device_if_its_selected_method(selected_capture_method) - change_capture_method(selected_capture_method, self.autosplit) - return selected_capture_method - - def __capture_device_changed(self): - device_index = self.capture_device_combobox.currentIndex() - if device_index == -1: - return - capture_device = self.__video_capture_devices[device_index] - self.autosplit.settings_dict["capture_device_name"] = capture_device.name - self.autosplit.settings_dict["capture_device_id"] = capture_device.device_id - if self.autosplit.settings_dict["capture_method"] == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE: - # Re-initializes the VideoCaptureDeviceCaptureMethod - change_capture_method(CaptureMethodEnum.VIDEO_CAPTURE_DEVICE, self.autosplit) - - def __fps_limit_changed(self, value: int): - value = self.fps_limit_spinbox.value() - self.autosplit.settings_dict["fps_limit"] = value - 
self.autosplit.timer_live_image.setInterval(int(1000 / value)) - self.autosplit.timer_live_image.setInterval(int(1000 / value)) - - @fire_and_forget - def __set_all_capture_devices(self): - self.__video_capture_devices = asyncio.run(get_all_video_capture_devices()) - if len(self.__video_capture_devices) > 0: - for i in range(self.capture_device_combobox.count()): - self.capture_device_combobox.removeItem(i) - self.capture_device_combobox.addItems([ - f"* {device.name}" - + (f" [{device.backend}]" if device.backend else "") - + (" (occupied)" if device.occupied else "") - for device in self.__video_capture_devices - ]) - self.__enable_capture_device_if_its_selected_method() - else: - self.capture_device_combobox.setPlaceholderText("No device found.") - - def __set_readme_link(self): - self.custom_image_settings_info_label.setText( - self.custom_image_settings_info_label - .text() - .format(GITHUB_REPOSITORY=GITHUB_REPOSITORY), - ) - # HACK: This is a workaround because custom_image_settings_info_label - # simply will not open links with a left click no matter what we tried. 
- self.readme_link_button.clicked.connect( - lambda: webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}#readme"), - ) - self.readme_link_button.setStyleSheet("border: 0px; background-color:rgba(0,0,0,0%);") - - def __select_screenshot_directory(self): - self.autosplit.settings_dict["screenshot_directory"] = QFileDialog.getExistingDirectory( - self, - "Select Screenshots Directory", - self.autosplit.settings_dict["screenshot_directory"] - or self.autosplit.settings_dict["split_image_directory"], - ) - self.screenshot_directory_input.setText(self.autosplit.settings_dict["screenshot_directory"]) - - def __setup_bindings(self): - # Hotkey initial values and bindings - def hotkey_connect(hotkey: Hotkey): - return lambda: set_hotkey(self.autosplit, hotkey) - - for hotkey in HOTKEYS: - hotkey_input: QtWidgets.QLineEdit = getattr(self, f"{hotkey}_input") - set_hotkey_hotkey_button: QtWidgets.QPushButton = getattr(self, f"set_{hotkey}_hotkey_button") - hotkey_input.setText(self.autosplit.settings_dict.get(f"{hotkey}_hotkey", "")) - - set_hotkey_hotkey_button.clicked.connect(hotkey_connect(hotkey)) - # Make it very clear that hotkeys are not used when auto-controlled - if self.autosplit.is_auto_controlled and hotkey != "toggle_auto_reset_image": - set_hotkey_hotkey_button.setEnabled(False) - hotkey_input.setEnabled(False) - -# region Set initial values - # Capture Settings - self.fps_limit_spinbox.setValue(self.autosplit.settings_dict["fps_limit"]) - self.live_capture_region_checkbox.setChecked(self.autosplit.settings_dict["live_capture_region"]) - self.capture_method_combobox.setCurrentIndex( - CAPTURE_METHODS.get_index(self.autosplit.settings_dict["capture_method"]), - ) - # No self.capture_device_combobox.setCurrentIndex - # It'll set itself asynchronously in self.__set_all_capture_devices() - self.screenshot_directory_input.setText(self.autosplit.settings_dict["screenshot_directory"]) - 
self.open_screenshot_checkbox.setChecked(self.autosplit.settings_dict["open_screenshot"]) - - # Image Settings - self.default_comparison_method_combobox.setCurrentIndex( - self.autosplit.settings_dict["default_comparison_method"], - ) - self.default_similarity_threshold_spinbox.setValue(self.autosplit.settings_dict["default_similarity_threshold"]) - self.default_delay_time_spinbox.setValue(self.autosplit.settings_dict["default_delay_time"]) - self.default_pause_time_spinbox.setValue(self.autosplit.settings_dict["default_pause_time"]) - self.loop_splits_checkbox.setChecked(self.autosplit.settings_dict["loop_splits"]) - self.start_also_resets_checkbox.setChecked(self.autosplit.settings_dict["start_also_resets"]) - self.enable_auto_reset_image_checkbox.setChecked(self.autosplit.settings_dict["enable_auto_reset"]) -# endregion -# region Binding - # Capture Settings - self.fps_limit_spinbox.valueChanged.connect(self.__fps_limit_changed) - self.live_capture_region_checkbox.stateChanged.connect( - lambda: self.__set_value("live_capture_region", self.live_capture_region_checkbox.isChecked()), - ) - self.capture_method_combobox.currentIndexChanged.connect( - lambda: self.__set_value("capture_method", self.__capture_method_changed()), - ) - self.capture_device_combobox.currentIndexChanged.connect(self.__capture_device_changed) - self.screenshot_directory_browse_button.clicked.connect(self.__select_screenshot_directory) - self.open_screenshot_checkbox.stateChanged.connect( - lambda: self.__set_value("open_screenshot", self.open_screenshot_checkbox.isChecked()), - ) - - # Image Settings - self.default_comparison_method_combobox.currentIndexChanged.connect( - lambda: self.__set_value( - "default_comparison_method", - self.default_comparison_method_combobox.currentIndex(), - ), - ) - self.default_similarity_threshold_spinbox.valueChanged.connect( - lambda: self.__update_default_threshold(self.default_similarity_threshold_spinbox.value()), - ) - 
self.default_delay_time_spinbox.valueChanged.connect( - lambda: self.__set_value("default_delay_time", self.default_delay_time_spinbox.value()), - ) - self.default_pause_time_spinbox.valueChanged.connect( - lambda: self.__set_value("default_pause_time", self.default_pause_time_spinbox.value()), - ) - self.loop_splits_checkbox.stateChanged.connect( - lambda: self.__set_value("loop_splits", self.loop_splits_checkbox.isChecked()), - ) - self.start_also_resets_checkbox.stateChanged.connect( - lambda: self.__set_value("start_also_resets", self.start_also_resets_checkbox.isChecked()), - ) - self.enable_auto_reset_image_checkbox.stateChanged.connect( - lambda: self.__set_value("enable_auto_reset", self.enable_auto_reset_image_checkbox.isChecked()), - ) -# endregion - - -def open_settings(autosplit: AutoSplit): - if not autosplit.SettingsWidget or cast(QtWidgets.QWidget, autosplit.SettingsWidget).isHidden(): - autosplit.SettingsWidget = __SettingsWidget(autosplit) - - -def get_default_settings_from_ui(autosplit: AutoSplit): - temp_dialog = QtWidgets.QWidget() - default_settings_dialog = settings_ui.Ui_SettingsWidget() - default_settings_dialog.setupUi(temp_dialog) - default_settings: user_profile.UserProfileDict = { - "split_hotkey": default_settings_dialog.split_input.text(), - "reset_hotkey": default_settings_dialog.reset_input.text(), - "undo_split_hotkey": default_settings_dialog.undo_split_input.text(), - "skip_split_hotkey": default_settings_dialog.skip_split_input.text(), - "pause_hotkey": default_settings_dialog.pause_input.text(), - "screenshot_hotkey": default_settings_dialog.screenshot_input.text(), - "toggle_auto_reset_image_hotkey": default_settings_dialog.toggle_auto_reset_image_input.text(), - "fps_limit": default_settings_dialog.fps_limit_spinbox.value(), - "live_capture_region": default_settings_dialog.live_capture_region_checkbox.isChecked(), - "capture_method": CAPTURE_METHODS.get_method_by_index( - 
default_settings_dialog.capture_method_combobox.currentIndex(), - ), - "capture_device_id": default_settings_dialog.capture_device_combobox.currentIndex(), - "capture_device_name": "", - "default_comparison_method": default_settings_dialog.default_comparison_method_combobox.currentIndex(), - "default_similarity_threshold": default_settings_dialog.default_similarity_threshold_spinbox.value(), - "default_delay_time": default_settings_dialog.default_delay_time_spinbox.value(), - "default_pause_time": default_settings_dialog.default_pause_time_spinbox.value(), - "loop_splits": default_settings_dialog.loop_splits_checkbox.isChecked(), - "start_also_resets": default_settings_dialog.start_also_resets_checkbox.isChecked(), - "enable_auto_reset": default_settings_dialog.enable_auto_reset_image_checkbox.isChecked(), - "split_image_directory": autosplit.split_image_folder_input.text(), - "screenshot_directory": default_settings_dialog.screenshot_directory_input.text(), - "open_screenshot": default_settings_dialog.open_screenshot_checkbox.isChecked(), - "captured_window_title": "", - "capture_region": { - "x": autosplit.x_spinbox.value(), - "y": autosplit.y_spinbox.value(), - "width": autosplit.width_spinbox.value(), - "height": autosplit.height_spinbox.value(), - }, - } - del temp_dialog - return default_settings +import asyncio +import webbrowser +from typing import TYPE_CHECKING, Any, cast + +import requests +from packaging.version import parse as version_parse +from PySide6 import QtCore, QtWidgets +from PySide6.QtCore import Qt +from PySide6.QtGui import QBrush, QPalette +from PySide6.QtWidgets import QFileDialog +from requests.exceptions import RequestException +from typing_extensions import override + +import error_messages +import user_profile +from capture_method import ( + CAPTURE_METHODS, + CameraInfo, + CaptureMethodEnum, + change_capture_method, + get_all_video_capture_devices, +) +from gen import about, design, settings as settings_ui, update_checker +from 
hotkeys import HOTKEYS, Hotkey, set_hotkey +from utils import AUTOSPLIT_VERSION, GITHUB_REPOSITORY, decimal, fire_and_forget + +if TYPE_CHECKING: + from AutoSplit import AutoSplit + +HALF_BRIGHTNESS = 128 + + +class __AboutWidget(QtWidgets.QWidget, about.Ui_AboutAutoSplitWidget): # noqa: N801 # Private class + """About Window.""" + + def __init__(self): + super().__init__() + self.setupUi(self) + self.created_by_label.setOpenExternalLinks(True) + self.donate_button_label.setOpenExternalLinks(True) + self.version_label.setText(f"Version: {AUTOSPLIT_VERSION}") + self.show() + + +def open_about(autosplit: AutoSplit): + if not autosplit.AboutWidget or cast(QtWidgets.QWidget, autosplit.AboutWidget).isHidden(): + autosplit.AboutWidget = __AboutWidget() + + +class __UpdateCheckerWidget(QtWidgets.QWidget, update_checker.Ui_UpdateChecker): # noqa: N801 # Private class + def __init__(self, latest_version: str, design_window: design.Ui_MainWindow, check_on_open: bool = False): + super().__init__() + self.setupUi(self) + self.current_version_number_label.setText(AUTOSPLIT_VERSION) + self.latest_version_number_label.setText(latest_version) + self.left_button.clicked.connect(self.open_update) + self.do_not_ask_again_checkbox.stateChanged.connect(self.do_not_ask_me_again_state_changed) + self.design_window = design_window + if version_parse(latest_version) > version_parse(AUTOSPLIT_VERSION): + self.do_not_ask_again_checkbox.setVisible(check_on_open) + self.left_button.setFocus() + self.show() + elif not check_on_open: + self.update_status_label.setText("You are on the latest AutoSplit version.") + self.go_to_download_label.setVisible(False) + self.left_button.setVisible(False) + self.right_button.setText("OK") + self.do_not_ask_again_checkbox.setVisible(False) + self.show() + + def open_update(self): + webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}/releases/latest") + self.close() + + def do_not_ask_me_again_state_changed(self): + 
user_profile.set_check_for_updates_on_open( + self.design_window, + self.do_not_ask_again_checkbox.isChecked(), + ) + + +def open_update_checker(autosplit: AutoSplit, latest_version: str, check_on_open: bool): + if not autosplit.UpdateCheckerWidget or cast(QtWidgets.QWidget, autosplit.UpdateCheckerWidget).isHidden(): + autosplit.UpdateCheckerWidget = __UpdateCheckerWidget(latest_version, autosplit, check_on_open) + + +def view_help(): + webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}#tutorial") + + +class __CheckForUpdatesThread(QtCore.QThread): # noqa: N801 # Private class + def __init__(self, autosplit: AutoSplit, check_on_open: bool): + super().__init__() + self.autosplit = autosplit + self.check_on_open = check_on_open + + @override + def run(self): + try: + response = requests.get(f"https://api.github.com/repos/{GITHUB_REPOSITORY}/releases/latest", timeout=30) + latest_version = str(response.json()["name"]).split("v")[1] + self.autosplit.update_checker_widget_signal.emit(latest_version, self.check_on_open) + except (RequestException, KeyError): + if not self.check_on_open: + self.autosplit.show_error_signal.emit(error_messages.check_for_updates) + + +def about_qt(): + webbrowser.open("https://wiki.qt.io/About_Qt") + + +def about_qt_for_python(): + webbrowser.open("https://wiki.qt.io/Qt_for_Python") + + +def check_for_updates(autosplit: AutoSplit, check_on_open: bool = False): + autosplit.CheckForUpdatesThread = __CheckForUpdatesThread(autosplit, check_on_open) + autosplit.CheckForUpdatesThread.start() + + +class __SettingsWidget(QtWidgets.QWidget, settings_ui.Ui_SettingsWidget): # noqa: N801 # Private class + def __init__(self, autosplit: AutoSplit): + super().__init__() + self.__video_capture_devices: list[CameraInfo] = [] + """ + Used to temporarily store the existing cameras, + we don't want to call `get_all_video_capture_devices` agains and possibly have a different result + """ + + self.setupUi(self) + + # Fix Fusion Dark Theme's tabs content 
looking weird because it's using the button role + window_color = self.palette().color(QPalette.ColorRole.Window) + if window_color.red() < HALF_BRIGHTNESS: + brush = QBrush(window_color) + brush.setStyle(Qt.BrushStyle.SolidPattern) + palette = QPalette() + palette.setBrush(QPalette.ColorGroup.Active, QPalette.ColorRole.Button, brush) + palette.setBrush(QPalette.ColorGroup.Inactive, QPalette.ColorRole.Button, brush) + palette.setBrush(QPalette.ColorGroup.Disabled, QPalette.ColorRole.Button, brush) + self.settings_tabs.setPalette(palette) + + self.autosplit = autosplit + self.__set_readme_link() + # Don't autofocus any particular field + self.setFocus() + +# region Build the Capture method combobox + capture_method_values = CAPTURE_METHODS.values() + self.__set_all_capture_devices() + + # TODO: Word-wrapping works, but there's lots of extra padding to the right. Raise issue upstream + # list_view = QtWidgets.QListView() + # list_view.setWordWrap(True) + # list_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff) + # list_view.setFixedWidth(self.capture_method_combobox.width()) + # self.capture_method_combobox.setView(list_view) + + self.capture_method_combobox.addItems([ + f"- {method.name} ({method.short_description})" + for method in capture_method_values + ]) + self.capture_method_combobox.setToolTip( + "\n\n".join([ + f"{method.name} :\n{method.description}" + for method in capture_method_values + ]), + ) +# endregion + + self.__setup_bindings() + + self.show() + + def __update_default_threshold(self, value: Any): + self.__set_value("default_similarity_threshold", value) + self.autosplit.table_current_image_threshold_label.setText( + decimal(self.autosplit.split_image.get_similarity_threshold(self.autosplit)) + if self.autosplit.split_image + else "-", + ) + self.autosplit.table_reset_image_threshold_label.setText( + decimal(self.autosplit.reset_image.get_similarity_threshold(self.autosplit)) + if self.autosplit.reset_image + else "-", + 
) + + def __set_value(self, key: str, value: Any): + self.autosplit.settings_dict[key] = value + + def get_capture_device_index(self, capture_device_id: int): + """Returns 0 if the capture_device_id is invalid.""" + try: + return [device.device_id for device in self.__video_capture_devices].index(capture_device_id) + except ValueError: + return 0 + + def __enable_capture_device_if_its_selected_method( + self, + selected_capture_method: str | CaptureMethodEnum | None = None, + ): + if selected_capture_method is None: + selected_capture_method = self.autosplit.settings_dict["capture_method"] + is_video_capture_device = selected_capture_method == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE + self.capture_device_combobox.setEnabled(is_video_capture_device) + if is_video_capture_device: + self.capture_device_combobox.setCurrentIndex( + self.get_capture_device_index(self.autosplit.settings_dict["capture_device_id"]), + ) + else: + self.capture_device_combobox.setPlaceholderText('Select "Video Capture Device" above') + self.capture_device_combobox.setCurrentIndex(-1) + + def __capture_method_changed(self): + selected_capture_method = CAPTURE_METHODS.get_method_by_index(self.capture_method_combobox.currentIndex()) + self.__enable_capture_device_if_its_selected_method(selected_capture_method) + change_capture_method(selected_capture_method, self.autosplit) + return selected_capture_method + + def __capture_device_changed(self): + device_index = self.capture_device_combobox.currentIndex() + if device_index == -1: + return + capture_device = self.__video_capture_devices[device_index] + self.autosplit.settings_dict["capture_device_name"] = capture_device.name + self.autosplit.settings_dict["capture_device_id"] = capture_device.device_id + if self.autosplit.settings_dict["capture_method"] == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE: + # Re-initializes the VideoCaptureDeviceCaptureMethod + change_capture_method(CaptureMethodEnum.VIDEO_CAPTURE_DEVICE, self.autosplit) + + def 
__fps_limit_changed(self, value: int): + value = self.fps_limit_spinbox.value() + self.autosplit.settings_dict["fps_limit"] = value + self.autosplit.timer_live_image.setInterval(int(1000 / value)) + self.autosplit.timer_live_image.setInterval(int(1000 / value)) + + @fire_and_forget + def __set_all_capture_devices(self): + self.__video_capture_devices = asyncio.run(get_all_video_capture_devices()) + if len(self.__video_capture_devices) > 0: + for i in range(self.capture_device_combobox.count()): + self.capture_device_combobox.removeItem(i) + self.capture_device_combobox.addItems([ + f"* {device.name}" + + (f" [{device.backend}]" if device.backend else "") + + (" (occupied)" if device.occupied else "") + for device in self.__video_capture_devices + ]) + self.__enable_capture_device_if_its_selected_method() + else: + self.capture_device_combobox.setPlaceholderText("No device found.") + + def __set_readme_link(self): + self.custom_image_settings_info_label.setText( + self.custom_image_settings_info_label + .text() + .format(GITHUB_REPOSITORY=GITHUB_REPOSITORY), + ) + # HACK: This is a workaround because custom_image_settings_info_label + # simply will not open links with a left click no matter what we tried. 
+ self.readme_link_button.clicked.connect( + lambda: webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}#readme"), + ) + self.readme_link_button.setStyleSheet("border: 0px; background-color:rgba(0,0,0,0%);") + + def __select_screenshot_directory(self): + self.autosplit.settings_dict["screenshot_directory"] = QFileDialog.getExistingDirectory( + self, + "Select Screenshots Directory", + self.autosplit.settings_dict["screenshot_directory"] + or self.autosplit.settings_dict["split_image_directory"], + ) + self.screenshot_directory_input.setText(self.autosplit.settings_dict["screenshot_directory"]) + + def __setup_bindings(self): + # Hotkey initial values and bindings + def hotkey_connect(hotkey: Hotkey): + return lambda: set_hotkey(self.autosplit, hotkey) + + for hotkey in HOTKEYS: + hotkey_input: QtWidgets.QLineEdit = getattr(self, f"{hotkey}_input") + set_hotkey_hotkey_button: QtWidgets.QPushButton = getattr(self, f"set_{hotkey}_hotkey_button") + hotkey_input.setText(self.autosplit.settings_dict.get(f"{hotkey}_hotkey", "")) + + set_hotkey_hotkey_button.clicked.connect(hotkey_connect(hotkey)) + # Make it very clear that hotkeys are not used when auto-controlled + if self.autosplit.is_auto_controlled and hotkey != "toggle_auto_reset_image": + set_hotkey_hotkey_button.setEnabled(False) + hotkey_input.setEnabled(False) + +# region Set initial values + # Capture Settings + self.fps_limit_spinbox.setValue(self.autosplit.settings_dict["fps_limit"]) + self.live_capture_region_checkbox.setChecked(self.autosplit.settings_dict["live_capture_region"]) + self.capture_method_combobox.setCurrentIndex( + CAPTURE_METHODS.get_index(self.autosplit.settings_dict["capture_method"]), + ) + # No self.capture_device_combobox.setCurrentIndex + # It'll set itself asynchronously in self.__set_all_capture_devices() + self.screenshot_directory_input.setText(self.autosplit.settings_dict["screenshot_directory"]) + 
self.open_screenshot_checkbox.setChecked(self.autosplit.settings_dict["open_screenshot"]) + + # Image Settings + self.default_comparison_method_combobox.setCurrentIndex( + self.autosplit.settings_dict["default_comparison_method"], + ) + self.default_similarity_threshold_spinbox.setValue(self.autosplit.settings_dict["default_similarity_threshold"]) + self.default_delay_time_spinbox.setValue(self.autosplit.settings_dict["default_delay_time"]) + self.default_pause_time_spinbox.setValue(self.autosplit.settings_dict["default_pause_time"]) + self.loop_splits_checkbox.setChecked(self.autosplit.settings_dict["loop_splits"]) + self.start_also_resets_checkbox.setChecked(self.autosplit.settings_dict["start_also_resets"]) + self.enable_auto_reset_image_checkbox.setChecked(self.autosplit.settings_dict["enable_auto_reset"]) +# endregion +# region Binding + # Capture Settings + self.fps_limit_spinbox.valueChanged.connect(self.__fps_limit_changed) + self.live_capture_region_checkbox.stateChanged.connect( + lambda: self.__set_value("live_capture_region", self.live_capture_region_checkbox.isChecked()), + ) + self.capture_method_combobox.currentIndexChanged.connect( + lambda: self.__set_value("capture_method", self.__capture_method_changed()), + ) + self.capture_device_combobox.currentIndexChanged.connect(self.__capture_device_changed) + self.screenshot_directory_browse_button.clicked.connect(self.__select_screenshot_directory) + self.open_screenshot_checkbox.stateChanged.connect( + lambda: self.__set_value("open_screenshot", self.open_screenshot_checkbox.isChecked()), + ) + + # Image Settings + self.default_comparison_method_combobox.currentIndexChanged.connect( + lambda: self.__set_value( + "default_comparison_method", + self.default_comparison_method_combobox.currentIndex(), + ), + ) + self.default_similarity_threshold_spinbox.valueChanged.connect( + lambda: self.__update_default_threshold(self.default_similarity_threshold_spinbox.value()), + ) + 
self.default_delay_time_spinbox.valueChanged.connect( + lambda: self.__set_value("default_delay_time", self.default_delay_time_spinbox.value()), + ) + self.default_pause_time_spinbox.valueChanged.connect( + lambda: self.__set_value("default_pause_time", self.default_pause_time_spinbox.value()), + ) + self.loop_splits_checkbox.stateChanged.connect( + lambda: self.__set_value("loop_splits", self.loop_splits_checkbox.isChecked()), + ) + self.start_also_resets_checkbox.stateChanged.connect( + lambda: self.__set_value("start_also_resets", self.start_also_resets_checkbox.isChecked()), + ) + self.enable_auto_reset_image_checkbox.stateChanged.connect( + lambda: self.__set_value("enable_auto_reset", self.enable_auto_reset_image_checkbox.isChecked()), + ) +# endregion + + +def open_settings(autosplit: AutoSplit): + if not autosplit.SettingsWidget or cast(QtWidgets.QWidget, autosplit.SettingsWidget).isHidden(): + autosplit.SettingsWidget = __SettingsWidget(autosplit) + + +def get_default_settings_from_ui(autosplit: AutoSplit): + temp_dialog = QtWidgets.QWidget() + default_settings_dialog = settings_ui.Ui_SettingsWidget() + default_settings_dialog.setupUi(temp_dialog) + default_settings: user_profile.UserProfileDict = { + "split_hotkey": default_settings_dialog.split_input.text(), + "reset_hotkey": default_settings_dialog.reset_input.text(), + "undo_split_hotkey": default_settings_dialog.undo_split_input.text(), + "skip_split_hotkey": default_settings_dialog.skip_split_input.text(), + "pause_hotkey": default_settings_dialog.pause_input.text(), + "screenshot_hotkey": default_settings_dialog.screenshot_input.text(), + "toggle_auto_reset_image_hotkey": default_settings_dialog.toggle_auto_reset_image_input.text(), + "fps_limit": default_settings_dialog.fps_limit_spinbox.value(), + "live_capture_region": default_settings_dialog.live_capture_region_checkbox.isChecked(), + "capture_method": CAPTURE_METHODS.get_method_by_index( + 
default_settings_dialog.capture_method_combobox.currentIndex(), + ), + "capture_device_id": default_settings_dialog.capture_device_combobox.currentIndex(), + "capture_device_name": "", + "default_comparison_method": default_settings_dialog.default_comparison_method_combobox.currentIndex(), + "default_similarity_threshold": default_settings_dialog.default_similarity_threshold_spinbox.value(), + "default_delay_time": default_settings_dialog.default_delay_time_spinbox.value(), + "default_pause_time": default_settings_dialog.default_pause_time_spinbox.value(), + "loop_splits": default_settings_dialog.loop_splits_checkbox.isChecked(), + "start_also_resets": default_settings_dialog.start_also_resets_checkbox.isChecked(), + "enable_auto_reset": default_settings_dialog.enable_auto_reset_image_checkbox.isChecked(), + "split_image_directory": autosplit.split_image_folder_input.text(), + "screenshot_directory": default_settings_dialog.screenshot_directory_input.text(), + "open_screenshot": default_settings_dialog.open_screenshot_checkbox.isChecked(), + "captured_window_title": "", + "capture_region": { + "x": autosplit.x_spinbox.value(), + "y": autosplit.y_spinbox.value(), + "width": autosplit.width_spinbox.value(), + "height": autosplit.height_spinbox.value(), + }, + } + del temp_dialog + return default_settings diff --git a/src/region_selection.py b/src/region_selection.py index b6517761..c6d23dba 100644 --- a/src/region_selection.py +++ b/src/region_selection.py @@ -1,399 +1,397 @@ -from __future__ import annotations - -import ctypes -import ctypes.wintypes -import os -from math import ceil -from typing import TYPE_CHECKING - -import cv2 -import numpy as np -from cv2.typing import MatLike -from PySide6 import QtCore, QtGui, QtWidgets -from PySide6.QtTest import QTest -from pywinctl import getTopWindowAt -from typing_extensions import override -from win32 import win32gui -from win32con import SM_CXVIRTUALSCREEN, SM_CYVIRTUALSCREEN, SM_XVIRTUALSCREEN, SM_YVIRTUALSCREEN -from 
winsdk._winrt import initialize_with_window -from winsdk.windows.foundation import AsyncStatus, IAsyncOperation -from winsdk.windows.graphics.capture import GraphicsCaptureItem, GraphicsCapturePicker - -import error_messages -from utils import ( - BGR_CHANNEL_COUNT, - MAXBYTE, - ImageShape, - auto_split_directory, - get_window_bounds, - is_valid_hwnd, - is_valid_image, -) - -user32 = ctypes.windll.user32 - - -if TYPE_CHECKING: - from AutoSplit import AutoSplit - -ALIGN_REGION_THRESHOLD = 0.9 -BORDER_WIDTH = 2 -SUPPORTED_IMREAD_FORMATS = [ - ("Windows bitmaps", "*.bmp *.dib"), - ("JPEG files", "*.jpeg *.jpg *.jpe"), - ("JPEG 2000 files", "*.jp2"), - ("Portable Network Graphics", "*.png"), - ("WebP", "*.webp"), - ("AVIF", "*.avif"), - ("Portable image format", "*.pbm *.pgm *.ppm *.pxm *.pnm"), - ("PFM files", "*.pfm"), - ("Sun rasters", "*.sr *.ras"), - ("TIFF files", "*.tiff *.tif"), - ("OpenEXR Image files", "*.exr"), - ("Radiance HDR", "*.hdr *.pic"), -] -"""https://docs.opencv.org/4.8.0/d4/da8/group__imgcodecs.html#imread""" -IMREAD_EXT_FILTER = ( - "All Files (" - + " ".join([f"{extensions}" for _, extensions in SUPPORTED_IMREAD_FORMATS]) - + ");;" - + ";;".join([f"{imread_format} ({extensions})" for imread_format, extensions in SUPPORTED_IMREAD_FORMATS]) -) - - -# TODO: For later as a different picker option -def __select_graphics_item(autosplit: AutoSplit): # pyright: ignore [reportUnusedFunction] - """Uses the built-in GraphicsCapturePicker to select the Window.""" - - def callback(async_operation: IAsyncOperation[GraphicsCaptureItem], async_status: AsyncStatus): - try: - if async_status != AsyncStatus.COMPLETED: - return - except SystemError as exception: - # HACK: can happen when closing the GraphicsCapturePicker - if str(exception).endswith("returned a result with an error set"): - return - raise - item = async_operation.get_results() - if not item: - return - autosplit.settings_dict["captured_window_title"] = item.display_name - 
autosplit.capture_method.reinitialize(autosplit) - - picker = GraphicsCapturePicker() - initialize_with_window(picker, int(autosplit.effectiveWinId())) - async_operation = picker.pick_single_item_async() - # None if the selection is canceled - if async_operation: - async_operation.completed = callback - - -def select_region(autosplit: AutoSplit): - # Create a screen selector widget - selector = SelectRegionWidget() - - # Need to wait until the user has selected a region using the widget - # before moving on with selecting the window settings - while True: - width = selector.width() - height = selector.height() - x = selector.x() - y = selector.y() - if width > 0 and height > 0: - break - QTest.qWait(1) - del selector - - window = getTopWindowAt(x, y) - if not window: - error_messages.region() - return - hwnd = window.getHandle() - window_text = window.title - if not is_valid_hwnd(hwnd) or not window_text: - error_messages.region() - return - - autosplit.hwnd = hwnd - autosplit.settings_dict["captured_window_title"] = window_text - autosplit.capture_method.reinitialize(autosplit) - - left_bounds, top_bounds, *_ = get_window_bounds(hwnd) - window_x, window_y, *_ = win32gui.GetWindowRect(hwnd) - offset_x = window_x + left_bounds - offset_y = window_y + top_bounds - __set_region_values( - autosplit, - left=x - offset_x, - top=y - offset_y, - width=width, - height=height, - ) - - -def select_window(autosplit: AutoSplit): - # Create a screen selector widget - selector = SelectWindowWidget() - - # Need to wait until the user has selected a region using the widget before moving on with - # selecting the window settings - while True: - x = selector.x() - y = selector.y() - if x and y: - break - QTest.qWait(1) - del selector - - window = getTopWindowAt(x, y) - if not window: - error_messages.region() - return - hwnd = window.getHandle() - window_text = window.title - if not is_valid_hwnd(hwnd) or not window_text: - error_messages.region() - return - - autosplit.hwnd = hwnd - 
autosplit.settings_dict["captured_window_title"] = window_text - autosplit.capture_method.reinitialize(autosplit) - - # Exlude the borders and titlebar from the window selection. To only get the client area. - _, __, window_width, window_height = get_window_bounds(hwnd) - _, __, client_width, client_height = win32gui.GetClientRect(hwnd) - border_width = ceil((window_width - client_width) / 2) - titlebar_with_border_height = window_height - client_height - border_width - - __set_region_values( - autosplit, - left=border_width, - top=titlebar_with_border_height, - width=client_width, - height=client_height - border_width * 2, - ) - - -def align_region(autosplit: AutoSplit): - # Check to see if a region has been set - if not autosplit.capture_method.check_selected_region_exists(autosplit): - error_messages.region() - return - # This is the image used for aligning the capture region to the best fit for the user. - template_filename = QtWidgets.QFileDialog.getOpenFileName( - autosplit, - "Select Reference Image", - autosplit.settings_dict["split_image_directory"] or auto_split_directory, - IMREAD_EXT_FILTER, - )[0] - - # Return if the user presses cancel - if not template_filename: - return - - template = cv2.imread(template_filename, cv2.IMREAD_UNCHANGED) - # Add alpha channel to template if it's missing. - if template.shape[ImageShape.Channels] == BGR_CHANNEL_COUNT: - template = cv2.cvtColor(template, cv2.COLOR_BGR2BGRA) - - # Validate template is a valid image file - if not is_valid_image(template): - error_messages.image_validity() - return - - # Obtaining the capture of a region which contains the - # subregion being searched for to align the image. 
- capture, _ = autosplit.capture_method.get_frame(autosplit) - - if not is_valid_image(capture): - error_messages.region() - return - - best_match, best_height, best_width, best_loc = __test_alignment(capture, template) - - # Go ahead and check if this satisfies our requirement before setting the region - # We don't want a low similarity image to be aligned. - if best_match < ALIGN_REGION_THRESHOLD: - error_messages.alignment_not_matched() - return - - # The new region can be defined by using the min_loc point and the best_height and best_width of the template. - __set_region_values( - autosplit, - left=autosplit.settings_dict["capture_region"]["x"] + best_loc[0], - top=autosplit.settings_dict["capture_region"]["y"] + best_loc[1], - width=best_width, - height=best_height, - ) - - -def __set_region_values(autosplit: AutoSplit, left: int, top: int, width: int, height: int): - autosplit.settings_dict["capture_region"]["x"] = left - autosplit.settings_dict["capture_region"]["y"] = top - autosplit.settings_dict["capture_region"]["width"] = width - autosplit.settings_dict["capture_region"]["height"] = height - - autosplit.x_spinbox.setValue(left) - autosplit.y_spinbox.setValue(top) - autosplit.width_spinbox.setValue(width) - autosplit.height_spinbox.setValue(height) - - -def __test_alignment(capture: MatLike, template: MatLike): - """ - Obtain the best matching point for the template within the - capture. This assumes that the template is actually smaller - than the dimensions of the capture. Since we are using SQDIFF - the best match will be the min_val which is located at min_loc. - The best match found in the image, set everything to 0 by default - so that way the first match will overwrite these values. 
- """ - best_match = 0.0 - best_height = 0 - best_width = 0 - best_loc = (0, 0) - - # This tests 50 images scaled from 20% to 300% of the original template size - for scale in np.linspace(0.2, 3, num=56): - width = int(template.shape[ImageShape.X] * scale) - height = int(template.shape[ImageShape.Y] * scale) - - # The template can not be larger than the capture - if width > capture.shape[ImageShape.X] or height > capture.shape[ImageShape.Y]: - continue - - resized = cv2.resize(template, (width, height), interpolation=cv2.INTER_NEAREST) - - result = cv2.matchTemplate(capture, resized, cv2.TM_SQDIFF) - min_val, _, min_loc, *_ = cv2.minMaxLoc(result) - - # The maximum value for SQ_DIFF is dependent on the size of the template - # we need this value to normalize it from 0.0 to 1.0 - max_error = resized.size * MAXBYTE * MAXBYTE - similarity = 1 - (min_val / max_error) - - # Check if the similarity was good enough to get alignment - if similarity > best_match: - best_match = similarity - best_width = width - best_height = height - best_loc = min_loc - return best_match, best_height, best_width, best_loc - - -def validate_before_parsing(autosplit: AutoSplit, show_error: bool = True, check_empty_directory: bool = True): - error = None - if not autosplit.settings_dict["split_image_directory"]: - error = error_messages.split_image_directory - elif not os.path.isdir(autosplit.settings_dict["split_image_directory"]): - error = error_messages.split_image_directory_not_found - elif check_empty_directory and not os.listdir(autosplit.settings_dict["split_image_directory"]): - error = error_messages.split_image_directory_empty - elif not autosplit.capture_method.check_selected_region_exists(autosplit): - error = error_messages.region - if error and show_error: - error() - return not error - - -class BaseSelectWidget(QtWidgets.QWidget): - _x = 0 - _y = 0 - - @override - def x(self): - return self._x - - @override - def y(self): - return self._y - - def __init__(self): - 
super().__init__() - # We need to pull the monitor information to correctly draw the geometry covering all portions - # of the user's screen. These parameters create the bounding box with left, top, width, and height - self.setGeometry( - user32.GetSystemMetrics(SM_XVIRTUALSCREEN), - user32.GetSystemMetrics(SM_YVIRTUALSCREEN), - user32.GetSystemMetrics(SM_CXVIRTUALSCREEN), - user32.GetSystemMetrics(SM_CYVIRTUALSCREEN), - ) - self.setWindowTitle(" ") - self.setWindowOpacity(0.5) - self.setWindowFlags(QtCore.Qt.WindowType.FramelessWindowHint) - self.show() - - @override - def keyPressEvent(self, event: QtGui.QKeyEvent): - if event.key() == QtCore.Qt.Key.Key_Escape: - self.close() - - -class SelectWindowWidget(BaseSelectWidget): - """Widget to select a window and obtain its bounds.""" - - @override - def mouseReleaseEvent(self, event: QtGui.QMouseEvent): - self._x = int(event.position().x()) + self.geometry().x() - self._y = int(event.position().y()) + self.geometry().y() - self.close() - - -class SelectRegionWidget(BaseSelectWidget): - """ - Widget for dragging screen region - Originated from https://github.com/harupy/snipping-tool . 
- """ - - _right: int = 0 - _bottom: int = 0 - __begin = QtCore.QPoint() - __end = QtCore.QPoint() - - def __init__(self): - QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor)) - super().__init__() - - @override - def height(self): - return self._bottom - self._y - - @override - def width(self): - return self._right - self._x - - @override - def paintEvent(self, event: QtGui.QPaintEvent): - if self.__begin != self.__end: - qpainter = QtGui.QPainter(self) - qpainter.setPen(QtGui.QPen(QtGui.QColor("red"), BORDER_WIDTH)) - qpainter.setBrush(QtGui.QColor("opaque")) - qpainter.drawRect(QtCore.QRect(self.__begin, self.__end)) - - @override - def mousePressEvent(self, event: QtGui.QMouseEvent): - self.__begin = event.position().toPoint() - self.__end = self.__begin - self.update() - - @override - def mouseMoveEvent(self, event: QtGui.QMouseEvent): - self.__end = event.position().toPoint() - self.update() - - @override - def mouseReleaseEvent(self, event: QtGui.QMouseEvent): - if self.__begin != self.__end: - # The coordinates are pulled relative to the top left of the set geometry, - # so the added virtual screen offsets convert them back to the virtual screen coordinates - self._x = min(self.__begin.x(), self.__end.x()) + self.geometry().x() - self._y = min(self.__begin.y(), self.__end.y()) + self.geometry().y() - self._right = max(self.__begin.x(), self.__end.x()) + self.geometry().x() - self._bottom = max(self.__begin.y(), self.__end.y()) + self.geometry().y() - - self.close() - - @override - def close(self): - QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor)) - return super().close() +import ctypes +import ctypes.wintypes +import os +from math import ceil +from typing import TYPE_CHECKING + +import cv2 +import numpy as np +from cv2.typing import MatLike +from PySide6 import QtCore, QtGui, QtWidgets +from PySide6.QtTest import QTest +from pywinctl import getTopWindowAt +from 
typing_extensions import override +from win32 import win32gui +from win32con import SM_CXVIRTUALSCREEN, SM_CYVIRTUALSCREEN, SM_XVIRTUALSCREEN, SM_YVIRTUALSCREEN +from winsdk._winrt import initialize_with_window +from winsdk.windows.foundation import AsyncStatus, IAsyncOperation +from winsdk.windows.graphics.capture import GraphicsCaptureItem, GraphicsCapturePicker + +import error_messages +from utils import ( + BGR_CHANNEL_COUNT, + MAXBYTE, + ImageShape, + auto_split_directory, + get_window_bounds, + is_valid_hwnd, + is_valid_image, +) + +user32 = ctypes.windll.user32 + + +if TYPE_CHECKING: + from AutoSplit import AutoSplit + +ALIGN_REGION_THRESHOLD = 0.9 +BORDER_WIDTH = 2 +SUPPORTED_IMREAD_FORMATS = [ + ("Windows bitmaps", "*.bmp *.dib"), + ("JPEG files", "*.jpeg *.jpg *.jpe"), + ("JPEG 2000 files", "*.jp2"), + ("Portable Network Graphics", "*.png"), + ("WebP", "*.webp"), + ("AVIF", "*.avif"), + ("Portable image format", "*.pbm *.pgm *.ppm *.pxm *.pnm"), + ("PFM files", "*.pfm"), + ("Sun rasters", "*.sr *.ras"), + ("TIFF files", "*.tiff *.tif"), + ("OpenEXR Image files", "*.exr"), + ("Radiance HDR", "*.hdr *.pic"), +] +"""https://docs.opencv.org/4.8.0/d4/da8/group__imgcodecs.html#imread""" +IMREAD_EXT_FILTER = ( + "All Files (" + + " ".join([f"{extensions}" for _, extensions in SUPPORTED_IMREAD_FORMATS]) + + ");;" + + ";;".join([f"{imread_format} ({extensions})" for imread_format, extensions in SUPPORTED_IMREAD_FORMATS]) +) + + +# TODO: For later as a different picker option +def __select_graphics_item(autosplit: AutoSplit): # pyright: ignore [reportUnusedFunction] + """Uses the built-in GraphicsCapturePicker to select the Window.""" + + def callback(async_operation: IAsyncOperation[GraphicsCaptureItem], async_status: AsyncStatus): + try: + if async_status != AsyncStatus.COMPLETED: + return + except SystemError as exception: + # HACK: can happen when closing the GraphicsCapturePicker + if str(exception).endswith("returned a result with an error set"): + return + 
raise + item = async_operation.get_results() + if not item: + return + autosplit.settings_dict["captured_window_title"] = item.display_name + autosplit.capture_method.reinitialize(autosplit) + + picker = GraphicsCapturePicker() + initialize_with_window(picker, int(autosplit.effectiveWinId())) + async_operation = picker.pick_single_item_async() + # None if the selection is canceled + if async_operation: + async_operation.completed = callback + + +def select_region(autosplit: AutoSplit): + # Create a screen selector widget + selector = SelectRegionWidget() + + # Need to wait until the user has selected a region using the widget + # before moving on with selecting the window settings + while True: + width = selector.width() + height = selector.height() + x = selector.x() + y = selector.y() + if width > 0 and height > 0: + break + QTest.qWait(1) + del selector + + window = getTopWindowAt(x, y) + if not window: + error_messages.region() + return + hwnd = window.getHandle() + window_text = window.title + if not is_valid_hwnd(hwnd) or not window_text: + error_messages.region() + return + + autosplit.hwnd = hwnd + autosplit.settings_dict["captured_window_title"] = window_text + autosplit.capture_method.reinitialize(autosplit) + + left_bounds, top_bounds, *_ = get_window_bounds(hwnd) + window_x, window_y, *_ = win32gui.GetWindowRect(hwnd) + offset_x = window_x + left_bounds + offset_y = window_y + top_bounds + __set_region_values( + autosplit, + left=x - offset_x, + top=y - offset_y, + width=width, + height=height, + ) + + +def select_window(autosplit: AutoSplit): + # Create a screen selector widget + selector = SelectWindowWidget() + + # Need to wait until the user has selected a region using the widget before moving on with + # selecting the window settings + while True: + x = selector.x() + y = selector.y() + if x and y: + break + QTest.qWait(1) + del selector + + window = getTopWindowAt(x, y) + if not window: + error_messages.region() + return + hwnd = 
window.getHandle() + window_text = window.title + if not is_valid_hwnd(hwnd) or not window_text: + error_messages.region() + return + + autosplit.hwnd = hwnd + autosplit.settings_dict["captured_window_title"] = window_text + autosplit.capture_method.reinitialize(autosplit) + + # Exlude the borders and titlebar from the window selection. To only get the client area. + _, __, window_width, window_height = get_window_bounds(hwnd) + _, __, client_width, client_height = win32gui.GetClientRect(hwnd) + border_width = ceil((window_width - client_width) / 2) + titlebar_with_border_height = window_height - client_height - border_width + + __set_region_values( + autosplit, + left=border_width, + top=titlebar_with_border_height, + width=client_width, + height=client_height - border_width * 2, + ) + + +def align_region(autosplit: AutoSplit): + # Check to see if a region has been set + if not autosplit.capture_method.check_selected_region_exists(autosplit): + error_messages.region() + return + # This is the image used for aligning the capture region to the best fit for the user. + template_filename = QtWidgets.QFileDialog.getOpenFileName( + autosplit, + "Select Reference Image", + autosplit.settings_dict["split_image_directory"] or auto_split_directory, + IMREAD_EXT_FILTER, + )[0] + + # Return if the user presses cancel + if not template_filename: + return + + template = cv2.imread(template_filename, cv2.IMREAD_UNCHANGED) + # Add alpha channel to template if it's missing. + if template.shape[ImageShape.Channels] == BGR_CHANNEL_COUNT: + template = cv2.cvtColor(template, cv2.COLOR_BGR2BGRA) + + # Validate template is a valid image file + if not is_valid_image(template): + error_messages.image_validity() + return + + # Obtaining the capture of a region which contains the + # subregion being searched for to align the image. 
+ capture, _ = autosplit.capture_method.get_frame(autosplit) + + if not is_valid_image(capture): + error_messages.region() + return + + best_match, best_height, best_width, best_loc = __test_alignment(capture, template) + + # Go ahead and check if this satisfies our requirement before setting the region + # We don't want a low similarity image to be aligned. + if best_match < ALIGN_REGION_THRESHOLD: + error_messages.alignment_not_matched() + return + + # The new region can be defined by using the min_loc point and the best_height and best_width of the template. + __set_region_values( + autosplit, + left=autosplit.settings_dict["capture_region"]["x"] + best_loc[0], + top=autosplit.settings_dict["capture_region"]["y"] + best_loc[1], + width=best_width, + height=best_height, + ) + + +def __set_region_values(autosplit: AutoSplit, left: int, top: int, width: int, height: int): + autosplit.settings_dict["capture_region"]["x"] = left + autosplit.settings_dict["capture_region"]["y"] = top + autosplit.settings_dict["capture_region"]["width"] = width + autosplit.settings_dict["capture_region"]["height"] = height + + autosplit.x_spinbox.setValue(left) + autosplit.y_spinbox.setValue(top) + autosplit.width_spinbox.setValue(width) + autosplit.height_spinbox.setValue(height) + + +def __test_alignment(capture: MatLike, template: MatLike): + """ + Obtain the best matching point for the template within the + capture. This assumes that the template is actually smaller + than the dimensions of the capture. Since we are using SQDIFF + the best match will be the min_val which is located at min_loc. + The best match found in the image, set everything to 0 by default + so that way the first match will overwrite these values. 
+ """ + best_match = 0.0 + best_height = 0 + best_width = 0 + best_loc = (0, 0) + + # This tests 50 images scaled from 20% to 300% of the original template size + for scale in np.linspace(0.2, 3, num=56): + width = int(template.shape[ImageShape.X] * scale) + height = int(template.shape[ImageShape.Y] * scale) + + # The template can not be larger than the capture + if width > capture.shape[ImageShape.X] or height > capture.shape[ImageShape.Y]: + continue + + resized = cv2.resize(template, (width, height), interpolation=cv2.INTER_NEAREST) + + result = cv2.matchTemplate(capture, resized, cv2.TM_SQDIFF) + min_val, _, min_loc, *_ = cv2.minMaxLoc(result) + + # The maximum value for SQ_DIFF is dependent on the size of the template + # we need this value to normalize it from 0.0 to 1.0 + max_error = resized.size * MAXBYTE * MAXBYTE + similarity = 1 - (min_val / max_error) + + # Check if the similarity was good enough to get alignment + if similarity > best_match: + best_match = similarity + best_width = width + best_height = height + best_loc = min_loc + return best_match, best_height, best_width, best_loc + + +def validate_before_parsing(autosplit: AutoSplit, show_error: bool = True, check_empty_directory: bool = True): + error = None + if not autosplit.settings_dict["split_image_directory"]: + error = error_messages.split_image_directory + elif not os.path.isdir(autosplit.settings_dict["split_image_directory"]): + error = error_messages.split_image_directory_not_found + elif check_empty_directory and not os.listdir(autosplit.settings_dict["split_image_directory"]): + error = error_messages.split_image_directory_empty + elif not autosplit.capture_method.check_selected_region_exists(autosplit): + error = error_messages.region + if error and show_error: + error() + return not error + + +class BaseSelectWidget(QtWidgets.QWidget): + _x = 0 + _y = 0 + + @override + def x(self): + return self._x + + @override + def y(self): + return self._y + + def __init__(self): + 
super().__init__() + # We need to pull the monitor information to correctly draw the geometry covering all portions + # of the user's screen. These parameters create the bounding box with left, top, width, and height + self.setGeometry( + user32.GetSystemMetrics(SM_XVIRTUALSCREEN), + user32.GetSystemMetrics(SM_YVIRTUALSCREEN), + user32.GetSystemMetrics(SM_CXVIRTUALSCREEN), + user32.GetSystemMetrics(SM_CYVIRTUALSCREEN), + ) + self.setWindowTitle(" ") + self.setWindowOpacity(0.5) + self.setWindowFlags(QtCore.Qt.WindowType.FramelessWindowHint) + self.show() + + @override + def keyPressEvent(self, event: QtGui.QKeyEvent): + if event.key() == QtCore.Qt.Key.Key_Escape: + self.close() + + +class SelectWindowWidget(BaseSelectWidget): + """Widget to select a window and obtain its bounds.""" + + @override + def mouseReleaseEvent(self, event: QtGui.QMouseEvent): + self._x = int(event.position().x()) + self.geometry().x() + self._y = int(event.position().y()) + self.geometry().y() + self.close() + + +class SelectRegionWidget(BaseSelectWidget): + """ + Widget for dragging screen region + Originated from https://github.com/harupy/snipping-tool . 
+ """ + + _right: int = 0 + _bottom: int = 0 + __begin = QtCore.QPoint() + __end = QtCore.QPoint() + + def __init__(self): + QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor)) + super().__init__() + + @override + def height(self): + return self._bottom - self._y + + @override + def width(self): + return self._right - self._x + + @override + def paintEvent(self, event: QtGui.QPaintEvent): + if self.__begin != self.__end: + qpainter = QtGui.QPainter(self) + qpainter.setPen(QtGui.QPen(QtGui.QColor("red"), BORDER_WIDTH)) + qpainter.setBrush(QtGui.QColor("opaque")) + qpainter.drawRect(QtCore.QRect(self.__begin, self.__end)) + + @override + def mousePressEvent(self, event: QtGui.QMouseEvent): + self.__begin = event.position().toPoint() + self.__end = self.__begin + self.update() + + @override + def mouseMoveEvent(self, event: QtGui.QMouseEvent): + self.__end = event.position().toPoint() + self.update() + + @override + def mouseReleaseEvent(self, event: QtGui.QMouseEvent): + if self.__begin != self.__end: + # The coordinates are pulled relative to the top left of the set geometry, + # so the added virtual screen offsets convert them back to the virtual screen coordinates + self._x = min(self.__begin.x(), self.__end.x()) + self.geometry().x() + self._y = min(self.__begin.y(), self.__end.y()) + self.geometry().y() + self._right = max(self.__begin.x(), self.__end.x()) + self.geometry().x() + self._bottom = max(self.__begin.y(), self.__end.y()) + self.geometry().y() + + self.close() + + @override + def close(self): + QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor)) + return super().close() diff --git a/src/split_parser.py b/src/split_parser.py index 7afabf23..cd7b5e9b 100644 --- a/src/split_parser.py +++ b/src/split_parser.py @@ -1,251 +1,249 @@ -from __future__ import annotations - -import os -from collections.abc import Callable -from typing import TYPE_CHECKING, TypeVar - -import 
error_messages -from AutoSplitImage import RESET_KEYWORD, START_KEYWORD, AutoSplitImage, ImageType -from utils import is_valid_image - -if TYPE_CHECKING: - from AutoSplit import AutoSplit - -[ - DUMMY_FLAG, - BELOW_FLAG, - PAUSE_FLAG, - *_, -] = [1 << i for i in range(31)] # 32 bits of flags - -T = TypeVar("T", str, int, float) - -# Note, the following symbols cannot be used in a filename: -# / \ : * ? " < > | - - -def __value_from_filename( - filename: str, - delimiters: str, - default_value: T, -) -> T: - if len(delimiters) != 2: # noqa: PLR2004 - raise ValueError("delimiters parameter must contain exactly 2 characters") - try: - string_value = filename.split(delimiters[0], 1)[1].split(delimiters[1])[0] - value: T = type(default_value)(string_value) - except (IndexError, ValueError): - return default_value - else: - return value - - -def threshold_from_filename(filename: str): - """ - Retrieve the threshold from the filename, if there is no threshold or the threshold - doesn't meet the requirements of being [0, 1], then None is returned. - - @param filename: String containing the file's name - @return: A valid threshold, if not then None - """ - # Check to make sure there is a valid floating point number between - # parentheses of the filename - value = __value_from_filename(filename, "()", -1.0) - - # Check to make sure if it is a valid threshold - return value if 0 <= value <= 1 else None - - -def pause_from_filename(filename: str): - """ - Retrieve the pause time from the filename, if there is no pause time or the pause time - isn't a valid positive number or 0, then None is returned. 
- - @param filename: String containing the file's name - @return: A valid pause time, if not then None - """ - # Check to make sure there is a valid pause time between brackets - # of the filename - value = __value_from_filename(filename, "[]", -1.0) - - # Pause times should always be positive or zero - return value if value >= 0 else None - - -def delay_time_from_filename(filename: str): - """ - Retrieve the delay time from the filename, if there is no delay time or the delay time - isn't a valid positive number or 0 number, then None is returned. - - @param filename: String containing the file's name - @return: A valid delay time, if not then none - """ - # Check to make sure there is a valid delay time between brackets - # of the filename - value = __value_from_filename(filename, "##", -1) - - # Delay times should always be positive or zero - return value if value >= 0 else None - - -def loop_from_filename(filename: str): - """ - Retrieve the number of loops from filename, if there is no loop number or the loop number isn't valid, - then 1 is returned. - - @param filename: String containing the file's name - @return: A valid loop number, if not then 1 - """ - # Check to make sure there is a valid delay time between brackets - # of the filename - value = __value_from_filename(filename, "@@", 1) - - # Loop should always be positive - return value if value >= 1 else 1 - - -def comparison_method_from_filename(filename: str): - """ - Retrieve the comparison method index from filename, if there is no comparison method or the index isn't valid, - then None is returned. 
- - @param filename: String containing the file's name - @return: A valid comparison method index, if not then none - """ - # Check to make sure there is a valid delay time between brackets - # of the filename - value = __value_from_filename(filename, "^^", -1) - - # Comparison method should always be positive or zero - return value if value >= 0 else None - - -def flags_from_filename(filename: str): - """ - Retrieve the flags from the filename, if there are no flags then 0 is returned. - - @param filename: String containing the file's name - @return: The flags as an integer, if invalid flags are found it returns 0 - - list of flags: - "d" = dummy, do nothing when this split is found - "b" = below threshold, after threshold is met, split when it goes below the threhsold. - "p" = pause, hit pause key when this split is found - """ - # Check to make sure there are flags between curly braces - # of the filename - flags_str = __value_from_filename(filename, "{}", "") - - if not flags_str: - return 0 - - flags = 0x00 - - for flag_str in flags_str: - character = flag_str.upper() - if character == "D": - flags |= DUMMY_FLAG - elif character == "B": - flags |= BELOW_FLAG - elif character == "P": - flags |= PAUSE_FLAG - # Legacy flags - elif character == "M": - continue - else: - # An invalid flag was caught, this filename was written incorrectly - # return 0. 
We don't want to interpret any misleading filenames - return 0 - - # Check for any conflicting flags that were set - # For instance, we can't have a dummy split also pause - if (flags & DUMMY_FLAG == DUMMY_FLAG) and (flags & PAUSE_FLAG == PAUSE_FLAG): - return 0 - - return flags - - -def __pop_image_type(split_image: list[AutoSplitImage], image_type: ImageType): - for image in split_image: - if image.image_type == image_type: - split_image.remove(image) - return image - - return None - - -def parse_and_validate_images(autosplit: AutoSplit): - # Get split images - all_images = [ - AutoSplitImage(os.path.join(autosplit.settings_dict["split_image_directory"], image_name)) - for image_name - in os.listdir(autosplit.settings_dict["split_image_directory"]) - ] - - # Find non-split images and then remove them from the list - start_image = __pop_image_type(all_images, ImageType.START) - reset_image = __pop_image_type(all_images, ImageType.RESET) - split_images = all_images - - error_message: Callable[[], object] | None = None - - # If there is no start hotkey set but a Start Image is present, and is not auto controlled, throw an error. - if ( - start_image - and not autosplit.settings_dict["split_hotkey"] - and not autosplit.is_auto_controlled - ): - error_message = error_messages.load_start_image - - # If there is no reset hotkey set but a Reset Image is present, and is not auto controlled, throw an error. - elif ( - reset_image - and not autosplit.settings_dict["reset_hotkey"] - and not autosplit.is_auto_controlled - ): - error_message = error_messages.reset_hotkey - - # Make sure that each of the images follows the guidelines for correct format - # according to all of the settings selected by the user. 
- else: - for image in split_images: - # Test for image without transparency - if not is_valid_image(image.byte_array): - - def image_validity(filename: str): - return lambda: error_messages.image_validity(filename) - - error_message = image_validity(image.filename) - break - - # error out if there is a {p} flag but no pause hotkey set and is not auto controlled. - if ( - not autosplit.settings_dict["pause_hotkey"] - and image.check_flag(PAUSE_FLAG) - and not autosplit.is_auto_controlled - ): - error_message = error_messages.pause_hotkey - break - - # Check that there's only one Reset Image - if image.image_type == ImageType.RESET: - error_message = lambda: error_messages.multiple_keyword_images(RESET_KEYWORD) # noqa: E731 - break - - # Check that there's only one Start Image - if image.image_type == ImageType.START: - error_message = lambda: error_messages.multiple_keyword_images(START_KEYWORD) # noqa: E731 - break - - if error_message: - autosplit.start_image = None - autosplit.reset_image = None - autosplit.split_images = [] - autosplit.gui_changes_on_reset() - error_message() - return False - - autosplit.start_image = start_image - autosplit.reset_image = reset_image - autosplit.split_images = split_images - return True +import os +from collections.abc import Callable +from typing import TYPE_CHECKING, TypeVar + +import error_messages +from AutoSplitImage import RESET_KEYWORD, START_KEYWORD, AutoSplitImage, ImageType +from utils import is_valid_image + +if TYPE_CHECKING: + from AutoSplit import AutoSplit + +[ + DUMMY_FLAG, + BELOW_FLAG, + PAUSE_FLAG, + *_, +] = [1 << i for i in range(31)] # 32 bits of flags + +T = TypeVar("T", str, int, float) + +# Note, the following symbols cannot be used in a filename: +# / \ : * ? 
" < > | + + +def __value_from_filename( + filename: str, + delimiters: str, + default_value: T, +) -> T: + if len(delimiters) != 2: # noqa: PLR2004 + raise ValueError("delimiters parameter must contain exactly 2 characters") + try: + string_value = filename.split(delimiters[0], 1)[1].split(delimiters[1])[0] + value: T = type(default_value)(string_value) + except (IndexError, ValueError): + return default_value + else: + return value + + +def threshold_from_filename(filename: str): + """ + Retrieve the threshold from the filename, if there is no threshold or the threshold + doesn't meet the requirements of being [0, 1], then None is returned. + + @param filename: String containing the file's name + @return: A valid threshold, if not then None + """ + # Check to make sure there is a valid floating point number between + # parentheses of the filename + value = __value_from_filename(filename, "()", -1.0) + + # Check to make sure if it is a valid threshold + return value if 0 <= value <= 1 else None + + +def pause_from_filename(filename: str): + """ + Retrieve the pause time from the filename, if there is no pause time or the pause time + isn't a valid positive number or 0, then None is returned. + + @param filename: String containing the file's name + @return: A valid pause time, if not then None + """ + # Check to make sure there is a valid pause time between brackets + # of the filename + value = __value_from_filename(filename, "[]", -1.0) + + # Pause times should always be positive or zero + return value if value >= 0 else None + + +def delay_time_from_filename(filename: str): + """ + Retrieve the delay time from the filename, if there is no delay time or the delay time + isn't a valid positive number or 0 number, then None is returned. 
+ + @param filename: String containing the file's name + @return: A valid delay time, if not then none + """ + # Check to make sure there is a valid delay time between brackets + # of the filename + value = __value_from_filename(filename, "##", -1) + + # Delay times should always be positive or zero + return value if value >= 0 else None + + +def loop_from_filename(filename: str): + """ + Retrieve the number of loops from filename, if there is no loop number or the loop number isn't valid, + then 1 is returned. + + @param filename: String containing the file's name + @return: A valid loop number, if not then 1 + """ + # Check to make sure there is a valid delay time between brackets + # of the filename + value = __value_from_filename(filename, "@@", 1) + + # Loop should always be positive + return value if value >= 1 else 1 + + +def comparison_method_from_filename(filename: str): + """ + Retrieve the comparison method index from filename, if there is no comparison method or the index isn't valid, + then None is returned. + + @param filename: String containing the file's name + @return: A valid comparison method index, if not then none + """ + # Check to make sure there is a valid delay time between brackets + # of the filename + value = __value_from_filename(filename, "^^", -1) + + # Comparison method should always be positive or zero + return value if value >= 0 else None + + +def flags_from_filename(filename: str): + """ + Retrieve the flags from the filename, if there are no flags then 0 is returned. + + @param filename: String containing the file's name + @return: The flags as an integer, if invalid flags are found it returns 0 + + list of flags: + "d" = dummy, do nothing when this split is found + "b" = below threshold, after threshold is met, split when it goes below the threhsold. 
+ "p" = pause, hit pause key when this split is found + """ + # Check to make sure there are flags between curly braces + # of the filename + flags_str = __value_from_filename(filename, "{}", "") + + if not flags_str: + return 0 + + flags = 0x00 + + for flag_str in flags_str: + character = flag_str.upper() + if character == "D": + flags |= DUMMY_FLAG + elif character == "B": + flags |= BELOW_FLAG + elif character == "P": + flags |= PAUSE_FLAG + # Legacy flags + elif character == "M": + continue + else: + # An invalid flag was caught, this filename was written incorrectly + # return 0. We don't want to interpret any misleading filenames + return 0 + + # Check for any conflicting flags that were set + # For instance, we can't have a dummy split also pause + if (flags & DUMMY_FLAG == DUMMY_FLAG) and (flags & PAUSE_FLAG == PAUSE_FLAG): + return 0 + + return flags + + +def __pop_image_type(split_image: list[AutoSplitImage], image_type: ImageType): + for image in split_image: + if image.image_type == image_type: + split_image.remove(image) + return image + + return None + + +def parse_and_validate_images(autosplit: AutoSplit): + # Get split images + all_images = [ + AutoSplitImage(os.path.join(autosplit.settings_dict["split_image_directory"], image_name)) + for image_name + in os.listdir(autosplit.settings_dict["split_image_directory"]) + ] + + # Find non-split images and then remove them from the list + start_image = __pop_image_type(all_images, ImageType.START) + reset_image = __pop_image_type(all_images, ImageType.RESET) + split_images = all_images + + error_message: Callable[[], object] | None = None + + # If there is no start hotkey set but a Start Image is present, and is not auto controlled, throw an error. 
+ if ( + start_image + and not autosplit.settings_dict["split_hotkey"] + and not autosplit.is_auto_controlled + ): + error_message = error_messages.load_start_image + + # If there is no reset hotkey set but a Reset Image is present, and is not auto controlled, throw an error. + elif ( + reset_image + and not autosplit.settings_dict["reset_hotkey"] + and not autosplit.is_auto_controlled + ): + error_message = error_messages.reset_hotkey + + # Make sure that each of the images follows the guidelines for correct format + # according to all of the settings selected by the user. + else: + for image in split_images: + # Test for image without transparency + if not is_valid_image(image.byte_array): + + def image_validity(filename: str): + return lambda: error_messages.image_validity(filename) + + error_message = image_validity(image.filename) + break + + # error out if there is a {p} flag but no pause hotkey set and is not auto controlled. + if ( + not autosplit.settings_dict["pause_hotkey"] + and image.check_flag(PAUSE_FLAG) + and not autosplit.is_auto_controlled + ): + error_message = error_messages.pause_hotkey + break + + # Check that there's only one Reset Image + if image.image_type == ImageType.RESET: + error_message = lambda: error_messages.multiple_keyword_images(RESET_KEYWORD) # noqa: E731 + break + + # Check that there's only one Start Image + if image.image_type == ImageType.START: + error_message = lambda: error_messages.multiple_keyword_images(START_KEYWORD) # noqa: E731 + break + + if error_message: + autosplit.start_image = None + autosplit.reset_image = None + autosplit.split_images = [] + autosplit.gui_changes_on_reset() + error_message() + return False + + autosplit.start_image = start_image + autosplit.reset_image = reset_image + autosplit.split_images = split_images + return True diff --git a/src/user_profile.py b/src/user_profile.py index 26c21dad..ab329f68 100644 --- a/src/user_profile.py +++ b/src/user_profile.py @@ -1,5 +1,3 @@ -from __future__ 
import annotations - import os from typing import TYPE_CHECKING, TypedDict, cast @@ -162,15 +160,15 @@ def __load_settings_from_file(autosplit: AutoSplit, load_settings_file_path: str def load_settings(autosplit: AutoSplit, from_path: str = ""): - load_settings_file_path = ( - from_path - or QtWidgets.QFileDialog.getOpenFileName( - autosplit, - "Load Profile", - os.path.join(auto_split_directory, "settings.toml"), - "TOML (*.toml)", - )[0] - ) + load_settings_file_path = ( + from_path + or QtWidgets.QFileDialog.getOpenFileName( + autosplit, + "Load Profile", + os.path.join(auto_split_directory, "settings.toml"), + "TOML (*.toml)", + )[0] + ) if not (load_settings_file_path and __load_settings_from_file(autosplit, load_settings_file_path)): return diff --git a/src/utils.py b/src/utils.py index f913c059..4a854384 100644 --- a/src/utils.py +++ b/src/utils.py @@ -1,191 +1,189 @@ -from __future__ import annotations - -import asyncio -import ctypes -import ctypes.wintypes -import os -import sys -from collections.abc import Callable, Iterable -from enum import IntEnum -from itertools import chain -from platform import version -from threading import Thread -from typing import TYPE_CHECKING, Any, TypeGuard, TypeVar - -import win32ui -from cv2.typing import MatLike -from win32 import win32gui -from winsdk.windows.ai.machinelearning import LearningModelDevice, LearningModelDeviceKind -from winsdk.windows.media.capture import MediaCapture - -from gen.build_vars import AUTOSPLIT_BUILD_NUMBER, AUTOSPLIT_GITHUB_REPOSITORY - -if TYPE_CHECKING: - # Source does not exist, keep this under TYPE_CHECKING - from _win32typing import PyCDC # pyright: ignore[reportMissingModuleSource] - -_T = TypeVar("_T") - - -DWMWA_EXTENDED_FRAME_BOUNDS = 9 -MAXBYTE = 255 -BGR_CHANNEL_COUNT = 3 -"""How many channels in an RGB image""" -BGRA_CHANNEL_COUNT = 4 -"""How many channels in an RGBA image""" - - -class ImageShape(IntEnum): - Y = 0 - X = 1 - Channels = 2 - - -class ColorChannel(IntEnum): - Blue = 
0 - Green = 1 - Red = 2 - Alpha = 3 - - -def decimal(value: float): - # Using ljust instead of :2f because of python float rounding errors - return f"{int(value * 100) / 100}".ljust(4, "0") - - -def is_digit(value: str | int | None): - """Checks if `value` is a single-digit string from 0-9.""" - if value is None: - return False - try: - return 0 <= int(value) <= 9 # noqa: PLR2004 - except (ValueError, TypeError): - return False - - -def is_valid_image(image: MatLike | None) -> TypeGuard[MatLike]: - return image is not None and bool(image.size) - - -def is_valid_hwnd(hwnd: int) -> bool: - """Validate the hwnd points to a valid window and not the desktop or whatever window obtained with `""`.""" - if not hwnd: - return False - if sys.platform == "win32": - return bool(win32gui.IsWindow(hwnd) and win32gui.GetWindowText(hwnd)) - return True - - -T = TypeVar("T") - - -def first(iterable: Iterable[T]) -> T: - """@return: The first element of a collection. Dictionaries will return the first key.""" - return next(iter(iterable)) - - -def try_delete_dc(dc: PyCDC): - try: - dc.DeleteDC() - except win32ui.error: - pass - - -def get_window_bounds(hwnd: int) -> tuple[int, int, int, int]: - extended_frame_bounds = ctypes.wintypes.RECT() - ctypes.windll.dwmapi.DwmGetWindowAttribute( - hwnd, - DWMWA_EXTENDED_FRAME_BOUNDS, - ctypes.byref(extended_frame_bounds), - ctypes.sizeof(extended_frame_bounds), - ) - - window_rect = win32gui.GetWindowRect(hwnd) - window_left_bounds = extended_frame_bounds.left - window_rect[0] - window_top_bounds = extended_frame_bounds.top - window_rect[1] - window_width = extended_frame_bounds.right - extended_frame_bounds.left - window_height = extended_frame_bounds.bottom - extended_frame_bounds.top - return window_left_bounds, window_top_bounds, window_width, window_height - - -def open_file(file_path: str | bytes | os.PathLike[str] | os.PathLike[bytes]): - os.startfile(file_path) # noqa: S606 - - -def get_or_create_eventloop(): - try: - return 
asyncio.get_event_loop() - except RuntimeError: - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - return asyncio.get_event_loop() - - -def get_direct3d_device(): - # Note: Must create in the same thread (can't use a global) otherwise when ran from LiveSplit it will raise: - # OSError: The application called an interface that was marshalled for a different thread - media_capture = MediaCapture() - - async def init_mediacapture(): - await (media_capture.initialize_async() or asyncio.sleep(0)) - - asyncio.run(init_mediacapture()) - direct_3d_device = media_capture.media_capture_settings and media_capture.media_capture_settings.direct3_d11_device - if not direct_3d_device: - try: - # May be problematic? https://github.com/pywinrt/python-winsdk/issues/11#issuecomment-1315345318 - direct_3d_device = LearningModelDevice(LearningModelDeviceKind.DIRECT_X_HIGH_PERFORMANCE).direct3_d11_device - # TODO: Unknown potential error, I don't have an older Win10 machine to test. - except BaseException: # noqa: S110,BLE001 - pass - if not direct_3d_device: - raise OSError("Unable to initialize a Direct3D Device.") - return direct_3d_device - - -def try_get_direct3d_device(): - try: - return get_direct3d_device() - except OSError: - return None - - -def fire_and_forget(func: Callable[..., Any]): - """ - Runs synchronous function asynchronously without waiting for a response. - - Uses threads on Windows because ~~`RuntimeError: There is no current event loop in thread 'MainThread'.`~~ - Because maybe asyncio has issues. Unsure. 
See alpha.5 and https://github.com/Avasam/AutoSplit/issues/36 - - Uses asyncio on Linux because of a `Segmentation fault (core dumped)` - """ - - def wrapped(*args: Any, **kwargs: Any): - if sys.platform == "win32": - thread = Thread(target=func, args=args, kwargs=kwargs) - thread.start() - return thread - return get_or_create_eventloop().run_in_executor(None, func, *args, *kwargs) - - return wrapped - - -def flatten(nested_iterable: Iterable[Iterable[_T]]) -> chain[_T]: - return chain(*nested_iterable) - - -# Environment specifics -WINDOWS_BUILD_NUMBER = int(version().split(".")[-1]) if sys.platform == "win32" else -1 -FIRST_WIN_11_BUILD = 22000 -"""AutoSplit Version number""" -WGC_MIN_BUILD = 17134 -"""https://docs.microsoft.com/en-us/uwp/api/windows.graphics.capture.graphicscapturepicker#applies-to""" -FROZEN = hasattr(sys, "frozen") -"""Running from build made by PyInstaller""" -auto_split_directory = os.path.dirname(sys.executable if FROZEN else os.path.abspath(__file__)) -"""The directory of either the AutoSplit executable or AutoSplit.py""" - -# Shared strings -# Check `excludeBuildNumber` during workflow dispatch build generate a clean version number -AUTOSPLIT_VERSION = "2.2.0" + (f"-{AUTOSPLIT_BUILD_NUMBER}" if AUTOSPLIT_BUILD_NUMBER else "") -GITHUB_REPOSITORY = AUTOSPLIT_GITHUB_REPOSITORY +import asyncio +import ctypes +import ctypes.wintypes +import os +import sys +from collections.abc import Callable, Iterable +from enum import IntEnum +from itertools import chain +from platform import version +from threading import Thread +from typing import TYPE_CHECKING, Any, TypeGuard, TypeVar + +import win32ui +from cv2.typing import MatLike +from win32 import win32gui +from winsdk.windows.ai.machinelearning import LearningModelDevice, LearningModelDeviceKind +from winsdk.windows.media.capture import MediaCapture + +from gen.build_vars import AUTOSPLIT_BUILD_NUMBER, AUTOSPLIT_GITHUB_REPOSITORY + +if TYPE_CHECKING: + # Source does not exist, keep this under 
TYPE_CHECKING + from _win32typing import PyCDC # pyright: ignore[reportMissingModuleSource] + +_T = TypeVar("_T") + + +DWMWA_EXTENDED_FRAME_BOUNDS = 9 +MAXBYTE = 255 +BGR_CHANNEL_COUNT = 3 +"""How many channels in an RGB image""" +BGRA_CHANNEL_COUNT = 4 +"""How many channels in an RGBA image""" + + +class ImageShape(IntEnum): + Y = 0 + X = 1 + Channels = 2 + + +class ColorChannel(IntEnum): + Blue = 0 + Green = 1 + Red = 2 + Alpha = 3 + + +def decimal(value: float): + # Using ljust instead of :2f because of python float rounding errors + return f"{int(value * 100) / 100}".ljust(4, "0") + + +def is_digit(value: str | int | None): + """Checks if `value` is a single-digit string from 0-9.""" + if value is None: + return False + try: + return 0 <= int(value) <= 9 # noqa: PLR2004 + except (ValueError, TypeError): + return False + + +def is_valid_image(image: MatLike | None) -> TypeGuard[MatLike]: + return image is not None and bool(image.size) + + +def is_valid_hwnd(hwnd: int) -> bool: + """Validate the hwnd points to a valid window and not the desktop or whatever window obtained with `""`.""" + if not hwnd: + return False + if sys.platform == "win32": + return bool(win32gui.IsWindow(hwnd) and win32gui.GetWindowText(hwnd)) + return True + + +T = TypeVar("T") + + +def first(iterable: Iterable[T]) -> T: + """@return: The first element of a collection. 
Dictionaries will return the first key.""" + return next(iter(iterable)) + + +def try_delete_dc(dc: PyCDC): + try: + dc.DeleteDC() + except win32ui.error: + pass + + +def get_window_bounds(hwnd: int) -> tuple[int, int, int, int]: + extended_frame_bounds = ctypes.wintypes.RECT() + ctypes.windll.dwmapi.DwmGetWindowAttribute( + hwnd, + DWMWA_EXTENDED_FRAME_BOUNDS, + ctypes.byref(extended_frame_bounds), + ctypes.sizeof(extended_frame_bounds), + ) + + window_rect = win32gui.GetWindowRect(hwnd) + window_left_bounds = extended_frame_bounds.left - window_rect[0] + window_top_bounds = extended_frame_bounds.top - window_rect[1] + window_width = extended_frame_bounds.right - extended_frame_bounds.left + window_height = extended_frame_bounds.bottom - extended_frame_bounds.top + return window_left_bounds, window_top_bounds, window_width, window_height + + +def open_file(file_path: str | bytes | os.PathLike[str] | os.PathLike[bytes]): + os.startfile(file_path) # noqa: S606 + + +def get_or_create_eventloop(): + try: + return asyncio.get_event_loop() + except RuntimeError: + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + return asyncio.get_event_loop() + + +def get_direct3d_device(): + # Note: Must create in the same thread (can't use a global) otherwise when ran from LiveSplit it will raise: + # OSError: The application called an interface that was marshalled for a different thread + media_capture = MediaCapture() + + async def init_mediacapture(): + await (media_capture.initialize_async() or asyncio.sleep(0)) + + asyncio.run(init_mediacapture()) + direct_3d_device = media_capture.media_capture_settings and media_capture.media_capture_settings.direct3_d11_device + if not direct_3d_device: + try: + # May be problematic? 
https://github.com/pywinrt/python-winsdk/issues/11#issuecomment-1315345318 + direct_3d_device = LearningModelDevice(LearningModelDeviceKind.DIRECT_X_HIGH_PERFORMANCE).direct3_d11_device + # TODO: Unknown potential error, I don't have an older Win10 machine to test. + except BaseException: # noqa: S110,BLE001 + pass + if not direct_3d_device: + raise OSError("Unable to initialize a Direct3D Device.") + return direct_3d_device + + +def try_get_direct3d_device(): + try: + return get_direct3d_device() + except OSError: + return None + + +def fire_and_forget(func: Callable[..., Any]): + """ + Runs synchronous function asynchronously without waiting for a response. + + Uses threads on Windows because ~~`RuntimeError: There is no current event loop in thread 'MainThread'.`~~ + Because maybe asyncio has issues. Unsure. See alpha.5 and https://github.com/Avasam/AutoSplit/issues/36 + + Uses asyncio on Linux because of a `Segmentation fault (core dumped)` + """ + + def wrapped(*args: Any, **kwargs: Any): + if sys.platform == "win32": + thread = Thread(target=func, args=args, kwargs=kwargs) + thread.start() + return thread + return get_or_create_eventloop().run_in_executor(None, func, *args, *kwargs) + + return wrapped + + +def flatten(nested_iterable: Iterable[Iterable[_T]]) -> chain[_T]: + return chain(*nested_iterable) + + +# Environment specifics +WINDOWS_BUILD_NUMBER = int(version().split(".")[-1]) if sys.platform == "win32" else -1 +FIRST_WIN_11_BUILD = 22000 +"""AutoSplit Version number""" +WGC_MIN_BUILD = 17134 +"""https://docs.microsoft.com/en-us/uwp/api/windows.graphics.capture.graphicscapturepicker#applies-to""" +FROZEN = hasattr(sys, "frozen") +"""Running from build made by PyInstaller""" +auto_split_directory = os.path.dirname(sys.executable if FROZEN else os.path.abspath(__file__)) +"""The directory of either the AutoSplit executable or AutoSplit.py""" + +# Shared strings +# Check `excludeBuildNumber` during workflow dispatch build generate a clean version number 
+AUTOSPLIT_VERSION = "2.2.0" + (f"-{AUTOSPLIT_BUILD_NUMBER}" if AUTOSPLIT_BUILD_NUMBER else "") +GITHUB_REPOSITORY = AUTOSPLIT_GITHUB_REPOSITORY diff --git a/typings/multiprocessing/test_cases/check_pipe_connections.py b/typings/multiprocessing/test_cases/check_pipe_connections.py index 164dd74e..8310d68e 100644 --- a/typings/multiprocessing/test_cases/check_pipe_connections.py +++ b/typings/multiprocessing/test_cases/check_pipe_connections.py @@ -1,27 +1,25 @@ -from __future__ import annotations - -from multiprocessing.connection import Pipe, PipeConnection - -# Less type-safe, but no extra variable. User could mix up send and recv types. -# This should be improvable with PEP 695: Type Parameter Syntax in Python 3.12 -a: PipeConnection[str, int] -b: PipeConnection[int, str] -a, b = Pipe() - -# More type safe, but extra variable -connections_wrong: tuple[ - PipeConnection[str, int], - PipeConnection[str, int], -] = Pipe() # pyright: ignore[reportGeneralTypeIssues] -connections_ok: tuple[PipeConnection[str, int], PipeConnection[int, str]] = Pipe() -a, b = connections_ok - -a.send("test") -a.send(0) # pyright: ignore[reportGeneralTypeIssues] -test1: str = b.recv() -test2: int = b.recv() # pyright: ignore[reportGeneralTypeIssues] - -b.send("test") # pyright: ignore[reportGeneralTypeIssues] -b.send(0) -test3: str = a.recv() # pyright: ignore[reportGeneralTypeIssues] -test4: int = a.recv() +from multiprocessing.connection import Pipe, PipeConnection + +# Less type-safe, but no extra variable. User could mix up send and recv types. 
+# This should be improvable with PEP 695: Type Parameter Syntax in Python 3.12 +a: PipeConnection[str, int] +b: PipeConnection[int, str] +a, b = Pipe() + +# More type safe, but extra variable +connections_wrong: tuple[ + PipeConnection[str, int], + PipeConnection[str, int], +] = Pipe() # pyright: ignore[reportGeneralTypeIssues] +connections_ok: tuple[PipeConnection[str, int], PipeConnection[int, str]] = Pipe() +a, b = connections_ok + +a.send("test") +a.send(0) # pyright: ignore[reportGeneralTypeIssues] +test1: str = b.recv() +test2: int = b.recv() # pyright: ignore[reportGeneralTypeIssues] + +b.send("test") # pyright: ignore[reportGeneralTypeIssues] +b.send(0) +test3: str = a.recv() # pyright: ignore[reportGeneralTypeIssues] +test4: int = a.recv() From 0c406178bb5bf7030a515f507cceae9e6de75e51 Mon Sep 17 00:00:00 2001 From: Avasam Date: Fri, 20 Oct 2023 17:14:02 -0400 Subject: [PATCH 4/5] Manual updates, improved control flow (match-case), type ref fixes --- src/AutoControlledThread.py | 40 +++++---- src/AutoSplit.py | 48 +++++----- src/AutoSplitImage.py | 30 +++---- src/capture_method/BitBltCaptureMethod.py | 4 +- src/capture_method/CaptureMethodBase.py | 12 +-- .../DesktopDuplicationCaptureMethod.py | 4 +- .../VideoCaptureDeviceCaptureMethod.py | 10 +-- .../WindowsGraphicsCaptureMethod.py | 11 ++- src/capture_method/__init__.py | 13 ++- src/compare.py | 19 +++- src/error_messages.py | 2 +- src/hotkeys.py | 50 +++++------ src/menu_bar.py | 14 +-- src/region_selection.py | 12 +-- src/split_parser.py | 30 +++---- src/user_profile.py | 34 ++++---- src/utils.py | 2 +- typings/cv2/mat_wrapper/__init__.pyi | 29 ++++--- typings/multiprocessing/connection.pyi | 87 ++++++++++--------- 19 files changed, 227 insertions(+), 224 deletions(-) diff --git a/src/AutoControlledThread.py b/src/AutoControlledThread.py index 153049cd..45aa453f 100644 --- a/src/AutoControlledThread.py +++ b/src/AutoControlledThread.py @@ -10,7 +10,7 @@ class AutoControlledThread(QtCore.QThread): - def 
__init__(self, autosplit: AutoSplit): + def __init__(self, autosplit: "AutoSplit"): self.autosplit = autosplit super().__init__() @@ -24,21 +24,23 @@ def run(self): break except EOFError: continue - # This is for use in a Development environment - if line == "kill": - self.autosplit.closeEvent() - break - if line == "start": - self.autosplit.start_auto_splitter() - elif line in {"split", "skip"}: - self.autosplit.skip_split_signal.emit() - elif line == "undo": - self.autosplit.undo_split_signal.emit() - elif line == "reset": - self.autosplit.reset_signal.emit() - elif line.startswith("settings"): - # Allow for any split character between "settings" and the path - user_profile.load_settings(self.autosplit, line[9:]) - # TODO: Not yet implemented in AutoSplit Integration - # elif line == 'pause': - # self.pause_signal.emit() + match line: + # This is for use in a Development environment + case "kill": + self.autosplit.closeEvent() + break + case "start": + self.autosplit.start_auto_splitter() + case "split" | "skip": + self.autosplit.skip_split_signal.emit() + case "undo": + self.autosplit.undo_split_signal.emit() + case "reset": + self.autosplit.reset_signal.emit() + # TODO: Not yet implemented in AutoSplit Integration + # case 'pause': + # self.pause_signal.emit() + case line: + if line.startswith("settings"): + # Allow for any split character between "settings" and the path + user_profile.load_settings(self.autosplit, line[9:]) diff --git a/src/AutoSplit.py b/src/AutoSplit.py index ab588411..c5918ba9 100644 --- a/src/AutoSplit.py +++ b/src/AutoSplit.py @@ -876,40 +876,36 @@ def exit_program() -> NoReturn: os.kill(os.getpid(), signal.SIGINT) sys.exit() - # Simulates LiveSplit quitting without asking. See "TODO" at update_auto_control Worker + # `event is None` simulates LiveSplit quitting without asking. 
# This also more gracefully exits LiveSplit # Users can still manually save their settings - if event is None: + if event is None or not user_profile.have_settings_changed(self): exit_program() - if user_profile.have_settings_changed(self): - # Give a different warning if there was never a settings file that was loaded successfully, - # and "save as" instead of "save". - settings_file_name = ( - "Untitled" - if not self.last_successfully_loaded_settings_file_path - else os.path.basename(self.last_successfully_loaded_settings_file_path) - ) + # Give a different warning if there was never a settings file that was loaded successfully, + # and "save as" instead of "save". + settings_file_name = ( + "Untitled" + if not self.last_successfully_loaded_settings_file_path + else os.path.basename(self.last_successfully_loaded_settings_file_path) + ) - warning = QMessageBox.warning( - self, - "AutoSplit", - f"Do you want to save changes made to settings file {settings_file_name}?", - QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No | QMessageBox.StandardButton.Cancel, - ) + warning = QMessageBox.warning( + self, + "AutoSplit", + f"Do you want to save changes made to settings file {settings_file_name}?", + QMessageBox.StandardButton.Yes | QMessageBox.StandardButton.No | QMessageBox.StandardButton.Cancel, + ) - if warning is QMessageBox.StandardButton.Yes: - if user_profile.save_settings(self): - exit_program() - else: - event.ignore() - if warning is QMessageBox.StandardButton.No: - exit_program() - if warning is QMessageBox.StandardButton.Cancel: - event.ignore() - else: + if ( + (warning is QMessageBox.StandardButton.Yes and user_profile.save_settings(self)) + or warning is QMessageBox.StandardButton.No + ): exit_program() + # Fallthrough case: Prevent program from closing. 
+ event.ignore() + def set_preview_image(qlabel: QLabel, image: MatLike | None): if not is_valid_image(image): diff --git a/src/AutoSplitImage.py b/src/AutoSplitImage.py index ff7ef5a6..4b32888b 100644 --- a/src/AutoSplitImage.py +++ b/src/AutoSplitImage.py @@ -1,5 +1,5 @@ import os -from enum import IntEnum +from enum import IntEnum, auto from math import sqrt from typing import TYPE_CHECKING @@ -8,7 +8,7 @@ from cv2.typing import MatLike import error_messages -from compare import COMPARE_METHODS_BY_INDEX, check_if_image_has_transparency +from compare import check_if_image_has_transparency, get_comparison_method_by_index from utils import BGR_CHANNEL_COUNT, MAXBYTE, ColorChannel, ImageShape, is_valid_image if TYPE_CHECKING: @@ -27,9 +27,9 @@ class ImageType(IntEnum): - SPLIT = 0 - RESET = 1 - START = 2 + SPLIT = auto() + RESET = auto() + START = auto() class AutoSplitImage: @@ -48,7 +48,7 @@ class AutoSplitImage: __pause_time: float | None = None __similarity_threshold: float | None = None - def get_delay_time(self, default: AutoSplit | int): + def get_delay_time(self, default: "AutoSplit | int"): """Get image's delay time or fallback to the default value from spinbox.""" if self.__delay_time is not None: return self.__delay_time @@ -56,7 +56,7 @@ def get_delay_time(self, default: AutoSplit | int): return default return default.settings_dict["default_delay_time"] - def __get_comparison_method(self, default: AutoSplit | int): + def __get_comparison_method_index(self, default: "AutoSplit | int"): """Get image's comparison or fallback to the default value from combobox.""" if self.__comparison_method is not None: return self.__comparison_method @@ -64,7 +64,7 @@ def __get_comparison_method(self, default: AutoSplit | int): return default return default.settings_dict["default_comparison_method"] - def get_pause_time(self, default: AutoSplit | float): + def get_pause_time(self, default: "AutoSplit | float"): """Get image's pause time or fallback to the default value 
from spinbox.""" if self.__pause_time is not None: return self.__pause_time @@ -72,7 +72,7 @@ def get_pause_time(self, default: AutoSplit | float): return default return default.settings_dict["default_pause_time"] - def get_similarity_threshold(self, default: AutoSplit | float): + def get_similarity_threshold(self, default: "AutoSplit | float"): """Get image's similarity threshold or fallback to the default value from spinbox.""" if self.__similarity_threshold is not None: return self.__similarity_threshold @@ -137,18 +137,16 @@ def check_flag(self, flag: int): def compare_with_capture( self, - default: AutoSplit | int, + default: "AutoSplit | int", capture: MatLike | None, ): """Compare image with capture using image's comparison method. Falls back to combobox.""" if not is_valid_image(self.byte_array) or not is_valid_image(capture): return 0.0 resized_capture = cv2.resize(capture, self.byte_array.shape[1::-1]) - comparison_method = self.__get_comparison_method(default) - return COMPARE_METHODS_BY_INDEX.get( - comparison_method, - compare_dummy, + return get_comparison_method_by_index( + self.__get_comparison_method_index(default), )( self.byte_array, resized_capture, @@ -156,10 +154,6 @@ def compare_with_capture( ) -def compare_dummy(*_: object): - return 0.0 - - if True: from split_parser import ( comparison_method_from_filename, diff --git a/src/capture_method/BitBltCaptureMethod.py b/src/capture_method/BitBltCaptureMethod.py index 7bf10677..052d93e0 100644 --- a/src/capture_method/BitBltCaptureMethod.py +++ b/src/capture_method/BitBltCaptureMethod.py @@ -32,7 +32,7 @@ class BitBltCaptureMethod(CaptureMethodBase): _render_full_content = False @override - def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: + def get_frame(self, autosplit: "AutoSplit") -> tuple[MatLike | None, bool]: selection = autosplit.settings_dict["capture_region"] hwnd = autosplit.hwnd image: MatLike | None = None @@ -77,7 +77,7 @@ def get_frame(self, autosplit: 
AutoSplit) -> tuple[MatLike | None, bool]: return image, False @override - def recover_window(self, captured_window_title: str, autosplit: AutoSplit): + def recover_window(self, captured_window_title: str, autosplit: "AutoSplit"): hwnd = win32gui.FindWindow(None, captured_window_title) if not is_valid_hwnd(hwnd): return False diff --git a/src/capture_method/CaptureMethodBase.py b/src/capture_method/CaptureMethodBase.py index d470776c..4bea1a5c 100644 --- a/src/capture_method/CaptureMethodBase.py +++ b/src/capture_method/CaptureMethodBase.py @@ -13,19 +13,19 @@ class CaptureMethodBase: short_description = "" description = "" - def __init__(self, autosplit: AutoSplit | None): + def __init__(self, autosplit: "AutoSplit | None"): # Some capture methods don't need an initialization process pass - def reinitialize(self, autosplit: AutoSplit): + def reinitialize(self, autosplit: "AutoSplit"): self.close(autosplit) self.__init__(autosplit) # type: ignore[misc] - def close(self, autosplit: AutoSplit): + def close(self, autosplit: "AutoSplit"): # Some capture methods don't need an initialization process pass - def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: # noqa: PLR6301 + def get_frame(self, autosplit: "AutoSplit") -> tuple[MatLike | None, bool]: # noqa: PLR6301 """ Captures an image of the region for a window matching the given parameters of the bounding box. 
@@ -34,8 +34,8 @@ def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: # noq """ return None, False - def recover_window(self, captured_window_title: str, autosplit: AutoSplit) -> bool: # noqa: PLR6301 + def recover_window(self, captured_window_title: str, autosplit: "AutoSplit") -> bool: # noqa: PLR6301 return False - def check_selected_region_exists(self, autosplit: AutoSplit) -> bool: # noqa: PLR6301 + def check_selected_region_exists(self, autosplit: "AutoSplit") -> bool: # noqa: PLR6301 return is_valid_hwnd(autosplit.hwnd) diff --git a/src/capture_method/DesktopDuplicationCaptureMethod.py b/src/capture_method/DesktopDuplicationCaptureMethod.py index 1ee63eb4..4e4a2525 100644 --- a/src/capture_method/DesktopDuplicationCaptureMethod.py +++ b/src/capture_method/DesktopDuplicationCaptureMethod.py @@ -28,13 +28,13 @@ class DesktopDuplicationCaptureMethod(BitBltCaptureMethod): + f"\nhttps://www.github.com/{GITHUB_REPOSITORY}#capture-method " ) - def __init__(self, autosplit: AutoSplit | None): + def __init__(self, autosplit: "AutoSplit | None"): super().__init__(autosplit) # Must not set statically as some laptops will throw an error self.desktop_duplication = d3dshot.create(capture_output="numpy") @override - def get_frame(self, autosplit: AutoSplit): + def get_frame(self, autosplit: "AutoSplit"): selection = autosplit.settings_dict["capture_region"] hwnd = autosplit.hwnd hmonitor = ctypes.windll.user32.MonitorFromWindow(hwnd, win32con.MONITOR_DEFAULTTONEAREST) diff --git a/src/capture_method/VideoCaptureDeviceCaptureMethod.py b/src/capture_method/VideoCaptureDeviceCaptureMethod.py index 44988992..d7fa9a7d 100644 --- a/src/capture_method/VideoCaptureDeviceCaptureMethod.py +++ b/src/capture_method/VideoCaptureDeviceCaptureMethod.py @@ -45,7 +45,7 @@ class VideoCaptureDeviceCaptureMethod(CaptureMethodBase): last_captured_frame: MatLike | None = None is_old_image = False - def __read_loop(self, autosplit: AutoSplit): + def __read_loop(self, 
autosplit: "AutoSplit"): try: while not self.stop_thread.is_set(): try: @@ -85,7 +85,7 @@ def __read_loop(self, autosplit: AutoSplit): ), ) - def __init__(self, autosplit: AutoSplit): + def __init__(self, autosplit: "AutoSplit"): super().__init__(autosplit) self.capture_device = cv2.VideoCapture(autosplit.settings_dict["capture_device_id"]) self.capture_device.setExceptionMode(True) @@ -111,7 +111,7 @@ def __init__(self, autosplit: AutoSplit): self.capture_thread.start() @override - def close(self, autosplit: AutoSplit): + def close(self, autosplit: "AutoSplit"): self.stop_thread.set() if self.capture_thread: self.capture_thread.join() @@ -119,7 +119,7 @@ def close(self, autosplit: AutoSplit): self.capture_device.release() @override - def get_frame(self, autosplit: AutoSplit): + def get_frame(self, autosplit: "AutoSplit"): if not self.check_selected_region_exists(autosplit): return None, False @@ -140,5 +140,5 @@ def get_frame(self, autosplit: AutoSplit): return cv2.cvtColor(image, cv2.COLOR_BGR2BGRA), is_old_image @override - def check_selected_region_exists(self, autosplit: AutoSplit): + def check_selected_region_exists(self, autosplit: "AutoSplit"): return bool(self.capture_device.isOpened()) diff --git a/src/capture_method/WindowsGraphicsCaptureMethod.py b/src/capture_method/WindowsGraphicsCaptureMethod.py index 04fed3de..f338582b 100644 --- a/src/capture_method/WindowsGraphicsCaptureMethod.py +++ b/src/capture_method/WindowsGraphicsCaptureMethod.py @@ -15,7 +15,6 @@ from utils import BGRA_CHANNEL_COUNT, WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, get_direct3d_device, is_valid_hwnd if TYPE_CHECKING: - from AutoSplit import AutoSplit WGC_NO_BORDER_MIN_BUILD = 20348 @@ -41,7 +40,7 @@ class WindowsGraphicsCaptureMethod(CaptureMethodBase): """This is stored to prevent session from being garbage collected""" last_captured_frame: MatLike | None = None - def __init__(self, autosplit: AutoSplit): + def __init__(self, autosplit: "AutoSplit"): super().__init__(autosplit) if not 
is_valid_hwnd(autosplit.hwnd): return @@ -68,7 +67,7 @@ def __init__(self, autosplit: AutoSplit): self.frame_pool = frame_pool @override - def close(self, autosplit: AutoSplit): + def close(self, autosplit: "AutoSplit"): if self.frame_pool: self.frame_pool.close() self.frame_pool = None @@ -83,7 +82,7 @@ def close(self, autosplit: AutoSplit): self.session = None @override - def get_frame(self, autosplit: AutoSplit) -> tuple[MatLike | None, bool]: + def get_frame(self, autosplit: "AutoSplit") -> tuple[MatLike | None, bool]: selection = autosplit.settings_dict["capture_region"] # We still need to check the hwnd because WGC will return a blank black image if not ( @@ -131,7 +130,7 @@ async def coroutine(): return image, False @override - def recover_window(self, captured_window_title: str, autosplit: AutoSplit): + def recover_window(self, captured_window_title: str, autosplit: "AutoSplit"): hwnd = win32gui.FindWindow(None, captured_window_title) if not is_valid_hwnd(hwnd): return False @@ -146,7 +145,7 @@ def recover_window(self, captured_window_title: str, autosplit: AutoSplit): return self.check_selected_region_exists(autosplit) @override - def check_selected_region_exists(self, autosplit: AutoSplit): + def check_selected_region_exists(self, autosplit: "AutoSplit"): return bool( is_valid_hwnd(autosplit.hwnd) and self.frame_pool diff --git a/src/capture_method/__init__.py b/src/capture_method/__init__.py index 5e28b8e5..e5bf5087 100644 --- a/src/capture_method/__init__.py +++ b/src/capture_method/__init__.py @@ -3,11 +3,11 @@ from dataclasses import dataclass from enum import Enum, EnumMeta, auto, unique from itertools import starmap -from typing import TYPE_CHECKING, NoReturn, TypedDict, cast +from typing import TYPE_CHECKING, Never, NoReturn, TypedDict, cast from _ctypes import COMError from pygrabber.dshow_graph import FilterGraph -from typing_extensions import Never, override +from typing_extensions import override from capture_method.BitBltCaptureMethod import 
BitBltCaptureMethod from capture_method.CaptureMethodBase import CaptureMethodBase @@ -60,12 +60,9 @@ def __eq__(self, other: object): def __hash__(self): return self.value.__hash__() - # https://github.com/python/typeshed/issues/10428 @override - def _generate_next_value_( # type:ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] - name: "str | CaptureMethodEnum", # noqa: N805 - *_, - ): + @staticmethod + def _generate_next_value_(name: "str | CaptureMethodEnum", *_): return name NONE = "" @@ -136,7 +133,7 @@ def get(self, key: CaptureMethodEnum, __default: object = None): CAPTURE_METHODS[CaptureMethodEnum.VIDEO_CAPTURE_DEVICE] = VideoCaptureDeviceCaptureMethod -def change_capture_method(selected_capture_method: CaptureMethodEnum, autosplit: AutoSplit): +def change_capture_method(selected_capture_method: CaptureMethodEnum, autosplit: "AutoSplit"): autosplit.capture_method.close(autosplit) autosplit.capture_method = CAPTURE_METHODS.get(selected_capture_method)(autosplit) if selected_capture_method == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE: diff --git a/src/compare.py b/src/compare.py index 0668f9ee..c01ff336 100644 --- a/src/compare.py +++ b/src/compare.py @@ -100,6 +100,22 @@ def compare_phash(source: MatLike, capture: MatLike, mask: MatLike | None = None return 1 - (hash_diff / 64.0) +def get_comparison_method_by_index(comparison_method_index: int): + match comparison_method_index: + case 0: + return compare_l2_norm + case 1: + return compare_histograms + case 2: + return compare_phash + case _: + return __compare_dummy + + +def __compare_dummy(*_: object): + return 0.0 + + def check_if_image_has_transparency(image: MatLike): # Check if there's a transparency channel (4th channel) and if at least one pixel is transparent (< 255) if image.shape[ImageShape.Channels] != BGRA_CHANNEL_COUNT: @@ -112,6 +128,3 @@ def check_if_image_has_transparency(image: MatLike): # (the image appears as all black in windows, so it's not obvious for the user what they 
did wrong) return mean != MAXBYTE - - -COMPARE_METHODS_BY_INDEX = {0: compare_l2_norm, 1: compare_histograms, 2: compare_phash} diff --git a/src/error_messages.py b/src/error_messages.py index da9e054d..465d4dad 100644 --- a/src/error_messages.py +++ b/src/error_messages.py @@ -168,7 +168,7 @@ def exception_traceback(exception: BaseException, message: str = ""): ) -def make_excepthook(autosplit: AutoSplit): +def make_excepthook(autosplit: "AutoSplit"): def excepthook(exception_type: type[BaseException], exception: BaseException, _traceback: TracebackType | None): # Catch Keyboard Interrupts for a clean close if exception_type is KeyboardInterrupt or isinstance(exception, KeyboardInterrupt): diff --git a/src/hotkeys.py b/src/hotkeys.py index bc6a9584..1109cedb 100644 --- a/src/hotkeys.py +++ b/src/hotkeys.py @@ -26,7 +26,7 @@ def remove_all_hotkeys(): keyboard.unhook_all() -def before_setting_hotkey(autosplit: AutoSplit): +def before_setting_hotkey(autosplit: "AutoSplit"): """Do all of these after you click "Set Hotkey" but before you type the hotkey.""" autosplit.start_auto_splitter_button.setEnabled(False) if autosplit.SettingsWidget: @@ -34,7 +34,7 @@ def before_setting_hotkey(autosplit: AutoSplit): getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(False) -def after_setting_hotkey(autosplit: AutoSplit): +def after_setting_hotkey(autosplit: "AutoSplit"): """ Do all of these things after you set a hotkey. A signal connects to this because changing GUI stuff is only possible in the main thread. 
@@ -47,31 +47,29 @@ def after_setting_hotkey(autosplit: AutoSplit): getattr(autosplit.SettingsWidget, f"set_{hotkey}_hotkey_button").setEnabled(True) -def send_command(autosplit: AutoSplit, command: Commands): +def send_command(autosplit: "AutoSplit", command: Commands): # Note: Rather than having the start image able to also reset the timer, # having the reset image check be active at all time would be a better, more organic solution, # but that is dependent on migrating to an observer pattern (#219) and being able to reload all images. - if autosplit.is_auto_controlled: - if command == "start" and autosplit.settings_dict["start_also_resets"]: - print("reset", flush=True) - print(command, flush=True) - elif command == "start": - if autosplit.settings_dict["start_also_resets"]: + match command: + case _ if autosplit.settings_dict["start_also_resets"]: + if command == "start" and autosplit.settings_dict["start_also_resets"]: + print("reset", flush=True) + print(command, flush=True) + case "start" if autosplit.settings_dict["start_also_resets"]: _send_hotkey(autosplit.settings_dict["reset_hotkey"]) - _send_hotkey(autosplit.settings_dict["split_hotkey"]) - elif command == "split": - _send_hotkey(autosplit.settings_dict["split_hotkey"]) - elif command == "pause": - _send_hotkey(autosplit.settings_dict["pause_hotkey"]) - elif command == "reset": - _send_hotkey(autosplit.settings_dict["reset_hotkey"]) - elif command == "skip": - _send_hotkey(autosplit.settings_dict["skip_split_hotkey"]) - elif command == "undo": - _send_hotkey(autosplit.settings_dict["undo_split_hotkey"]) - - else: - raise KeyError(f"{command!r} is not a valid command") + case "reset": + _send_hotkey(autosplit.settings_dict["reset_hotkey"]) + case "start" | "split": + _send_hotkey(autosplit.settings_dict["split_hotkey"]) + case "pause": + _send_hotkey(autosplit.settings_dict["pause_hotkey"]) + case "skip": + _send_hotkey(autosplit.settings_dict["skip_split_hotkey"]) + case "undo": + 
_send_hotkey(autosplit.settings_dict["undo_split_hotkey"]) + case _: # pyright: ignore[reportUnnecessaryComparison] + raise KeyError(f"{command!r} is not a valid command") def _unhook(hotkey_callback: Callable[[], None] | None): @@ -205,7 +203,7 @@ def __read_hotkey(): return __get_hotkey_name(names) -def __remove_key_already_set(autosplit: AutoSplit, key_name: str): +def __remove_key_already_set(autosplit: "AutoSplit", key_name: str): for hotkey in HOTKEYS: settings_key = f"{hotkey}_hotkey" if autosplit.settings_dict.get(settings_key) == key_name: @@ -215,7 +213,7 @@ def __remove_key_already_set(autosplit: AutoSplit, key_name: str): getattr(autosplit.SettingsWidget, f"{hotkey}_input").setText("") -def __get_hotkey_action(autosplit: AutoSplit, hotkey: Hotkey): +def __get_hotkey_action(autosplit: "AutoSplit", hotkey: Hotkey): if hotkey == "split": return autosplit.start_auto_splitter if hotkey == "skip_split": @@ -245,7 +243,7 @@ def is_valid_hotkey_name(hotkey_name: str): # reduce duplicated code. We should use a dictionary of hotkey class or something. 
-def set_hotkey(autosplit: AutoSplit, hotkey: Hotkey, preselected_hotkey_name: str = ""): +def set_hotkey(autosplit: "AutoSplit", hotkey: Hotkey, preselected_hotkey_name: str = ""): if autosplit.SettingsWidget: # Unfocus all fields cast(QtWidgets.QWidget, autosplit.SettingsWidget).setFocus() diff --git a/src/menu_bar.py b/src/menu_bar.py index 71d9736f..45b79d76 100644 --- a/src/menu_bar.py +++ b/src/menu_bar.py @@ -42,7 +42,7 @@ def __init__(self): self.show() -def open_about(autosplit: AutoSplit): +def open_about(autosplit: "AutoSplit"): if not autosplit.AboutWidget or cast(QtWidgets.QWidget, autosplit.AboutWidget).isHidden(): autosplit.AboutWidget = __AboutWidget() @@ -79,7 +79,7 @@ def do_not_ask_me_again_state_changed(self): ) -def open_update_checker(autosplit: AutoSplit, latest_version: str, check_on_open: bool): +def open_update_checker(autosplit: "AutoSplit", latest_version: str, check_on_open: bool): if not autosplit.UpdateCheckerWidget or cast(QtWidgets.QWidget, autosplit.UpdateCheckerWidget).isHidden(): autosplit.UpdateCheckerWidget = __UpdateCheckerWidget(latest_version, autosplit, check_on_open) @@ -89,7 +89,7 @@ def view_help(): class __CheckForUpdatesThread(QtCore.QThread): # noqa: N801 # Private class - def __init__(self, autosplit: AutoSplit, check_on_open: bool): + def __init__(self, autosplit: "AutoSplit", check_on_open: bool): super().__init__() self.autosplit = autosplit self.check_on_open = check_on_open @@ -113,13 +113,13 @@ def about_qt_for_python(): webbrowser.open("https://wiki.qt.io/Qt_for_Python") -def check_for_updates(autosplit: AutoSplit, check_on_open: bool = False): +def check_for_updates(autosplit: "AutoSplit", check_on_open: bool = False): autosplit.CheckForUpdatesThread = __CheckForUpdatesThread(autosplit, check_on_open) autosplit.CheckForUpdatesThread.start() class __SettingsWidget(QtWidgets.QWidget, settings_ui.Ui_SettingsWidget): # noqa: N801 # Private class - def __init__(self, autosplit: AutoSplit): + def __init__(self, 
autosplit: "AutoSplit"): super().__init__() self.__video_capture_devices: list[CameraInfo] = [] """ @@ -354,12 +354,12 @@ def hotkey_connect(hotkey: Hotkey): # endregion -def open_settings(autosplit: AutoSplit): +def open_settings(autosplit: "AutoSplit"): if not autosplit.SettingsWidget or cast(QtWidgets.QWidget, autosplit.SettingsWidget).isHidden(): autosplit.SettingsWidget = __SettingsWidget(autosplit) -def get_default_settings_from_ui(autosplit: AutoSplit): +def get_default_settings_from_ui(autosplit: "AutoSplit"): temp_dialog = QtWidgets.QWidget() default_settings_dialog = settings_ui.Ui_SettingsWidget() default_settings_dialog.setupUi(temp_dialog) diff --git a/src/region_selection.py b/src/region_selection.py index c6d23dba..137e487a 100644 --- a/src/region_selection.py +++ b/src/region_selection.py @@ -60,7 +60,7 @@ # TODO: For later as a different picker option -def __select_graphics_item(autosplit: AutoSplit): # pyright: ignore [reportUnusedFunction] +def __select_graphics_item(autosplit: "AutoSplit"): # pyright: ignore [reportUnusedFunction] """Uses the built-in GraphicsCapturePicker to select the Window.""" def callback(async_operation: IAsyncOperation[GraphicsCaptureItem], async_status: AsyncStatus): @@ -86,7 +86,7 @@ def callback(async_operation: IAsyncOperation[GraphicsCaptureItem], async_status async_operation.completed = callback -def select_region(autosplit: AutoSplit): +def select_region(autosplit: "AutoSplit"): # Create a screen selector widget selector = SelectRegionWidget() @@ -129,7 +129,7 @@ def select_region(autosplit: AutoSplit): ) -def select_window(autosplit: AutoSplit): +def select_window(autosplit: "AutoSplit"): # Create a screen selector widget selector = SelectWindowWidget() @@ -172,7 +172,7 @@ def select_window(autosplit: AutoSplit): ) -def align_region(autosplit: AutoSplit): +def align_region(autosplit: "AutoSplit"): # Check to see if a region has been set if not autosplit.capture_method.check_selected_region_exists(autosplit): 
error_messages.region() @@ -225,7 +225,7 @@ def align_region(autosplit: AutoSplit): ) -def __set_region_values(autosplit: AutoSplit, left: int, top: int, width: int, height: int): +def __set_region_values(autosplit: "AutoSplit", left: int, top: int, width: int, height: int): autosplit.settings_dict["capture_region"]["x"] = left autosplit.settings_dict["capture_region"]["y"] = top autosplit.settings_dict["capture_region"]["width"] = width @@ -279,7 +279,7 @@ def __test_alignment(capture: MatLike, template: MatLike): return best_match, best_height, best_width, best_loc -def validate_before_parsing(autosplit: AutoSplit, show_error: bool = True, check_empty_directory: bool = True): +def validate_before_parsing(autosplit: "AutoSplit", show_error: bool = True, check_empty_directory: bool = True): error = None if not autosplit.settings_dict["split_image_directory"]: error = error_messages.split_image_directory diff --git a/src/split_parser.py b/src/split_parser.py index cd7b5e9b..6c1281db 100644 --- a/src/split_parser.py +++ b/src/split_parser.py @@ -140,20 +140,20 @@ def flags_from_filename(filename: str): flags = 0x00 for flag_str in flags_str: - character = flag_str.upper() - if character == "D": - flags |= DUMMY_FLAG - elif character == "B": - flags |= BELOW_FLAG - elif character == "P": - flags |= PAUSE_FLAG - # Legacy flags - elif character == "M": - continue - else: - # An invalid flag was caught, this filename was written incorrectly - # return 0. We don't want to interpret any misleading filenames - return 0 + match flag_str.upper(): + case "D": + flags |= DUMMY_FLAG + case "B": + flags |= BELOW_FLAG + case "P": + flags |= PAUSE_FLAG + # Legacy flags + case "M": + continue + # An invalid flag was caught, this filename was written incorrectly return 0. 
+ # We don't want to interpret any misleading filenames + case _: + return 0 # Check for any conflicting flags that were set # For instance, we can't have a dummy split also pause @@ -172,7 +172,7 @@ def __pop_image_type(split_image: list[AutoSplitImage], image_type: ImageType): return None -def parse_and_validate_images(autosplit: AutoSplit): +def parse_and_validate_images(autosplit: "AutoSplit"): # Get split images all_images = [ AutoSplitImage(os.path.join(autosplit.settings_dict["split_image_directory"], image_name)) diff --git a/src/user_profile.py b/src/user_profile.py index ab329f68..c29220a0 100644 --- a/src/user_profile.py +++ b/src/user_profile.py @@ -69,14 +69,14 @@ class UserProfileDict(TypedDict): ) -def have_settings_changed(autosplit: AutoSplit): +def have_settings_changed(autosplit: "AutoSplit"): return ( autosplit.settings_dict != autosplit.last_saved_settings or autosplit.settings_dict != autosplit.last_loaded_settings ) -def save_settings(autosplit: AutoSplit): +def save_settings(autosplit: "AutoSplit"): """@return: The save settings filepath. Or None if "Save Settings As" is cancelled.""" return ( __save_settings_to_file(autosplit, autosplit.last_successfully_loaded_settings_file_path) @@ -85,7 +85,7 @@ def save_settings(autosplit: AutoSplit): ) -def save_settings_as(autosplit: AutoSplit): +def save_settings_as(autosplit: "AutoSplit"): """@return: The save settings filepath selected. 
Empty if cancelled.""" # User picks save destination save_settings_file_path = QtWidgets.QFileDialog.getSaveFileName( @@ -102,7 +102,7 @@ def save_settings_as(autosplit: AutoSplit): return __save_settings_to_file(autosplit, save_settings_file_path) -def __save_settings_to_file(autosplit: AutoSplit, save_settings_file_path: str): +def __save_settings_to_file(autosplit: "AutoSplit", save_settings_file_path: str): autosplit.last_saved_settings = autosplit.settings_dict # Save settings to a .toml file with open(save_settings_file_path, "w", encoding="utf-8") as file: @@ -111,7 +111,7 @@ def __save_settings_to_file(autosplit: AutoSplit, save_settings_file_path: str): return save_settings_file_path -def __load_settings_from_file(autosplit: AutoSplit, load_settings_file_path: str): +def __load_settings_from_file(autosplit: "AutoSplit", load_settings_file_path: str): if load_settings_file_path.endswith(".pkl"): autosplit.show_error_signal.emit(error_messages.old_version_settings_file) return False @@ -126,15 +126,15 @@ def __load_settings_from_file(autosplit: AutoSplit, load_settings_file_path: str **toml.load(file), }, ) - # TODO: Data Validation / fallbacks ? - autosplit.settings_dict = UserProfileDict(**loaded_settings) - autosplit.last_loaded_settings = autosplit.settings_dict - - autosplit.x_spinbox.setValue(autosplit.settings_dict["capture_region"]["x"]) - autosplit.y_spinbox.setValue(autosplit.settings_dict["capture_region"]["y"]) - autosplit.width_spinbox.setValue(autosplit.settings_dict["capture_region"]["width"]) - autosplit.height_spinbox.setValue(autosplit.settings_dict["capture_region"]["height"]) - autosplit.split_image_folder_input.setText(autosplit.settings_dict["split_image_directory"]) + # TODO: Data Validation / fallbacks ? 
+ autosplit.settings_dict = UserProfileDict(**loaded_settings) + autosplit.last_loaded_settings = autosplit.settings_dict + + autosplit.x_spinbox.setValue(autosplit.settings_dict["capture_region"]["x"]) + autosplit.y_spinbox.setValue(autosplit.settings_dict["capture_region"]["y"]) + autosplit.width_spinbox.setValue(autosplit.settings_dict["capture_region"]["width"]) + autosplit.height_spinbox.setValue(autosplit.settings_dict["capture_region"]["height"]) + autosplit.split_image_folder_input.setText(autosplit.settings_dict["split_image_directory"]) except (FileNotFoundError, MemoryError, TypeError, toml.TomlDecodeError): autosplit.show_error_signal.emit(error_messages.invalid_settings) return False @@ -159,7 +159,7 @@ def __load_settings_from_file(autosplit: AutoSplit, load_settings_file_path: str return True -def load_settings(autosplit: AutoSplit, from_path: str = ""): +def load_settings(autosplit: "AutoSplit", from_path: str = ""): load_settings_file_path = ( from_path or QtWidgets.QFileDialog.getOpenFileName( @@ -178,7 +178,7 @@ def load_settings(autosplit: AutoSplit, from_path: str = ""): autosplit.load_start_image_signal.emit(False, True) -def load_settings_on_open(autosplit: AutoSplit): +def load_settings_on_open(autosplit: "AutoSplit"): settings_files = [ file for file in os.listdir(auto_split_directory) @@ -199,7 +199,7 @@ def load_settings_on_open(autosplit: AutoSplit): load_settings(autosplit, os.path.join(auto_split_directory, settings_files[0])) -def load_check_for_updates_on_open(autosplit: AutoSplit): +def load_check_for_updates_on_open(autosplit: "AutoSplit"): """ Retrieve the "Check For Updates On Open" QSettings and set the checkbox state These are only global settings values. They are not *toml settings values. 
diff --git a/src/utils.py b/src/utils.py index 4a854384..0e9338b4 100644 --- a/src/utils.py +++ b/src/utils.py @@ -82,7 +82,7 @@ def first(iterable: Iterable[T]) -> T: return next(iter(iterable)) -def try_delete_dc(dc: PyCDC): +def try_delete_dc(dc: "PyCDC"): try: dc.DeleteDC() except win32ui.error: diff --git a/typings/cv2/mat_wrapper/__init__.pyi b/typings/cv2/mat_wrapper/__init__.pyi index 0bfcd316..485f84e3 100644 --- a/typings/cv2/mat_wrapper/__init__.pyi +++ b/typings/cv2/mat_wrapper/__init__.pyi @@ -1,14 +1,15 @@ -from typing import TypeAlias - -import numpy as np -from _typeshed import Unused - -__all__: list[str] = [] -_NDArray: TypeAlias = np.ndarray[float, np.dtype[np.generic]] - -class Mat(_NDArray): - wrap_channels: bool | None - - def __new__(cls, arr: _NDArray, wrap_channels: bool = ..., **kwargs: Unused) -> _NDArray: ... - def __init__(self, arr: _NDArray, wrap_channels: bool = ...) -> None: ... - def __array_finalize__(self, obj: _NDArray | None) -> None: ... +from typing import TypeAlias + +import numpy as np +from _typeshed import Unused + +__all__: list[str] = [] +_NDArray: TypeAlias = np.ndarray[float, np.dtype[np.generic]] + + +class Mat(_NDArray): + wrap_channels: bool | None + + def __new__(cls, arr: _NDArray, wrap_channels: bool = ..., **kwargs: Unused) -> _NDArray: ... + def __init__(self, arr: _NDArray, wrap_channels: bool = ...) -> None: ... + def __array_finalize__(self, obj: _NDArray | None) -> None: ... 
diff --git a/typings/multiprocessing/connection.pyi b/typings/multiprocessing/connection.pyi index b4bc711d..033c2586 100644 --- a/typings/multiprocessing/connection.pyi +++ b/typings/multiprocessing/connection.pyi @@ -1,42 +1,45 @@ -# https://github.com/python/typeshed/blob/main/stdlib/multiprocessing/connection.pyi -import sys -from types import TracebackType -from typing import Any, Generic, SupportsIndex, TypeVar - -from _typeshed import ReadableBuffer -from typing_extensions import Self - -_T1 = TypeVar("_T1") -_T2 = TypeVar("_T2") - -class _ConnectionBase(Generic[_T1, _T2]): - def __init__(self, handle: SupportsIndex, readable: bool = True, writable: bool = True) -> None: ... - @property - def closed(self) -> bool: ... # undocumented - @property - def readable(self) -> bool: ... # undocumented - @property - def writable(self) -> bool: ... # undocumented - def fileno(self) -> int: ... - def close(self) -> None: ... - def send_bytes(self, buf: ReadableBuffer, offset: int = 0, size: int | None = None) -> None: ... - def send(self, obj: _T1) -> None: ... - def recv_bytes(self, maxlength: int | None = None) -> bytes: ... - def recv_bytes_into(self, buf: Any, offset: int = 0) -> int: ... - def recv(self) -> _T2: ... - def poll(self, timeout: float | None = 0.0) -> bool: ... - def __enter__(self) -> Self: ... - def __exit__( - self, - exc_type: type[BaseException] | None, - exc_value: BaseException | None, - exc_tb: TracebackType | None, - ) -> None: ... - -class Connection(_ConnectionBase[_T1, _T2]): ... - -if sys.platform == "win32": - class PipeConnection(_ConnectionBase[_T1, _T2]): ... - def Pipe(duplex=True) -> tuple[PipeConnection[_T1, _T2], PipeConnection[_T2, _T1]]: ... -else: - def Pipe(duplex: bool = True) -> tuple[Connection[_T1, _T2], Connection[_T2, _T1]]: ... 
+# https://github.com/python/typeshed/blob/main/stdlib/multiprocessing/connection.pyi +import sys +from types import TracebackType +from typing import Any, Generic, Self, SupportsIndex, TypeVar + +from _typeshed import ReadableBuffer + +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") + + +class _ConnectionBase(Generic[_T1, _T2]): + def __init__(self, handle: SupportsIndex, readable: bool = True, writable: bool = True) -> None: ... + @property + def closed(self) -> bool: ... # undocumented + @property + def readable(self) -> bool: ... # undocumented + @property + def writable(self) -> bool: ... # undocumented + def fileno(self) -> int: ... + def close(self) -> None: ... + def send_bytes(self, buf: ReadableBuffer, offset: int = 0, size: int | None = None) -> None: ... + def send(self, obj: _T1) -> None: ... + def recv_bytes(self, maxlength: int | None = None) -> bytes: ... + def recv_bytes_into(self, buf: Any, offset: int = 0) -> int: ... + def recv(self) -> _T2: ... + def poll(self, timeout: float | None = 0.0) -> bool: ... + def __enter__(self) -> Self: ... + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: ... + + +class Connection(_ConnectionBase[_T1, _T2]): ... + + +if sys.platform == "win32": + class PipeConnection(_ConnectionBase[_T1, _T2]): ... + def Pipe(duplex=True) -> tuple[PipeConnection[_T1, _T2], PipeConnection[_T2, _T1]]: ... +else: + def Pipe(duplex: bool = True) -> tuple[Connection[_T1, _T2], Connection[_T2, _T1]]: ... 
From d1e6cecad42b2c2e121e80101105739af9f7b6e5 Mon Sep 17 00:00:00 2001 From: Avasam Date: Fri, 20 Oct 2023 18:25:01 -0400 Subject: [PATCH 5/5] Fix pyright issue with versions --- .github/workflows/lint-and-build.yml | 1 + pyproject.toml | 1 + src/capture_method/__init__.py | 4 ++-- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint-and-build.yml b/.github/workflows/lint-and-build.yml index 25d02200..e5c52376 100644 --- a/.github/workflows/lint-and-build.yml +++ b/.github/workflows/lint-and-build.yml @@ -92,6 +92,7 @@ jobs: uses: jakebailey/pyright-action@v1 with: working-directory: src/ + python-version: ${{ matrix.python-version }} Build: runs-on: windows-latest strategy: diff --git a/pyproject.toml b/pyproject.toml index 330afa02..6f7ea6ed 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -142,6 +142,7 @@ ignore = [ # https://github.com/microsoft/pyright/blob/main/docs/configuration.md#sample-pyprojecttoml-file [tool.pyright] typeCheckingMode = "strict" +pythonVersion = "3.10" # Prefer `pyright: ignore` enableTypeIgnoreComments = false # Extra strict diff --git a/src/capture_method/__init__.py b/src/capture_method/__init__.py index 31a4e80a..b03b6582 100644 --- a/src/capture_method/__init__.py +++ b/src/capture_method/__init__.py @@ -3,11 +3,11 @@ from dataclasses import dataclass from enum import Enum, EnumMeta, auto, unique from itertools import starmap -from typing import TYPE_CHECKING, Never, NoReturn, TypedDict, cast +from typing import TYPE_CHECKING, NoReturn, TypedDict, cast from _ctypes import COMError from pygrabber.dshow_graph import FilterGraph -from typing_extensions import override +from typing_extensions import Never, override from capture_method.BitBltCaptureMethod import BitBltCaptureMethod from capture_method.CaptureMethodBase import CaptureMethodBase