
Commit

Merge branch 'dev' of https://github.com/Toufool/AutoSplit into refactor-async-image-capture
Avasam committed Oct 20, 2024
2 parents 6b6f96a + 9588834 commit 4926da2
Showing 19 changed files with 815 additions and 306 deletions.
1 change: 0 additions & 1 deletion .sonarcloud.properties

This file was deleted.

2 changes: 2 additions & 0 deletions src/AutoControlledThread.py
@@ -24,6 +24,8 @@ def run(self):
break
except EOFError:
continue
if line in self._autosplit_ref.settings_dict["screenshot_on"]:
self._autosplit_ref.screenshot_signal.emit()
match line:
# This is for use in a Development environment
case "kill":
69 changes: 48 additions & 21 deletions src/AutoSplit.py
@@ -1,7 +1,31 @@
#!/usr/bin/python3

import os
import signal
import sys

# Prevent PyAutoGUI and pywinctl from setting Process DPI Awareness,
# which Qt tries to do then throws warnings about it.
# The unittest workaround significantly increases
# build time, boot time and build size with PyInstaller.
# https://github.com/asweigart/pyautogui/issues/663#issuecomment-1296719464
# QT doesn't call those from Python/ctypes, meaning we can stop other programs from setting it.
if sys.platform == "win32":
import ctypes

# pyautogui._pyautogui_win.py
ctypes.windll.user32.SetProcessDPIAware = ( # pyright: ignore[reportAttributeAccessIssue]
lambda: None
)
# pymonctl._pymonctl_win.py
# pywinbox._pywinbox_win.py
ctypes.windll.shcore.SetProcessDpiAwareness = ( # pyright: ignore[reportAttributeAccessIssue]
lambda _: None # pyright: ignore[reportUnknownLambdaType]
)
if sys.platform == "linux":
# Fixes "undefined symbol: wl_proxy_marshal_flags": https://bugreports.qt.io/browse/QTBUG-114635
os.environ.setdefault("QT_QPA_PLATFORM", "xcb")

import signal
from collections.abc import Callable
from copy import deepcopy
from time import time
@@ -58,6 +82,7 @@
auto_split_directory,
decimal,
flatten,
imwrite,
is_valid_image,
open_file,
)
@@ -134,7 +159,7 @@ def _show_error_signal_slot(error_message_box: Callable[..., object]):
self.setupUi(self)
self.setWindowTitle(
f"AutoSplit v{AUTOSPLIT_VERSION}"
+ (" (externally controlled)" if self.is_auto_controlled else ""),
+ (" (externally controlled)" if self.is_auto_controlled else "")
)

# Hotkeys need to be initialized to be passed as thread arguments in hotkeys.py
@@ -190,7 +215,8 @@ def _show_error_signal_slot(error_message_box: Callable[..., object]):
)
self.action_check_for_updates_on_open.changed.connect(
lambda: user_profile.set_check_for_updates_on_open(
self, self.action_check_for_updates_on_open.isChecked()
self,
self.action_check_for_updates_on_open.isChecked(),
),
)

@@ -344,7 +370,7 @@ def __compare_capture_for_auto_start(self, capture: MatLike | None):
# TODO: Abstract with similar check in split image
below_flag = self.start_image.check_flag(BELOW_FLAG)

# Negative means belove threshold, positive means above
# Negative means below threshold, positive means above
similarity_diff = start_image_similarity - start_image_threshold
if below_flag and not self.split_below_threshold and similarity_diff >= 0:
self.split_below_threshold = True
@@ -369,7 +395,7 @@ def __compare_capture_for_auto_start(self, capture: MatLike | None):
while time_delta < start_delay:
delay_time_left = start_delay - time_delta
self.current_split_image.setText(
f"Delayed Before Starting:\n {seconds_remaining_text(delay_time_left)}",
f"Delayed Before Starting:\n {seconds_remaining_text(delay_time_left)}"
)
# Wait 0.1s. Doesn't need to be shorter as we only show 1 decimal
QTest.qWait(100)
@@ -414,7 +440,8 @@ def __take_screenshot(self):
screenshot_index = 1
while True:
screenshot_path = os.path.join(
screenshot_directory, f"{screenshot_index:03}_SplitImage.png"
screenshot_directory,
f"{screenshot_index:03}_SplitImage.png",
)
if not os.path.exists(screenshot_path):
break
@@ -427,7 +454,7 @@ def __take_screenshot(self):
return

# Save and open image
cv2.imwrite(screenshot_path, capture)
imwrite(screenshot_path, capture)
if self.settings_dict["open_screenshot"]:
open_file(screenshot_path)

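Note that cv2.imwrite is replaced here by a project-local imwrite helper imported from utils. The helper's body is not part of this diff; a common reason for such a wrapper is that cv2.imwrite cannot write to non-ASCII paths on Windows, in which case it might look roughly like the sketch below (the encode-then-tofile approach is an assumption, not the repository's actual implementation).

import os

import cv2
from cv2.typing import MatLike


def imwrite(path: str, image: MatLike):
    # Encode in memory, then let NumPy write the bytes, so that non-ASCII
    # paths (which cv2.imwrite mishandles on Windows) still work.
    success, buffer = cv2.imencode(os.path.splitext(path)[1], image)
    if not success:
        raise OSError(f"cv2 could not encode an image for {path}")
    buffer.tofile(path)
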
@@ -540,8 +567,8 @@ def reset(self):
def start_auto_splitter(self):
# If the auto splitter is already running or the button is disabled,
# don't emit the signal to start it.
if (
self.is_running # fmt: skip
if ( # fmt: skip
self.is_running
or (not self.start_auto_splitter_button.isEnabled() and not self.is_auto_controlled)
):
return
@@ -573,7 +600,7 @@ def __auto_splitter(self): # noqa: C901,PLR0912,PLR0915
flatten(
((split_image, i + 1) for i in range(split_image.loops))
for split_image in self.split_images
),
)
)

# Construct groups of splits
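As a worked illustration of the flatten(...) expression above (assuming utils.flatten flattens one level, like itertools.chain.from_iterable; the helper itself is not shown in this diff): two split images A with loops=2 and B with loops=1 expand into three (image, loop_number) pairs.

from itertools import chain

split_images = [("A", 2), ("B", 1)]  # (name, loops) stand-ins for AutoSplitImage objects
expanded = list(chain.from_iterable(
    ((name, i + 1) for i in range(loops)) for name, loops in split_images
))
# expanded == [("A", 1), ("A", 2), ("B", 1)]
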
@@ -675,7 +702,9 @@ def __auto_splitter(self): # noqa: C901,PLR0912,PLR0915
self.gui_changes_on_reset(safe_to_reload_start_image=True)

def __similarity_threshold_loop(
self, number_of_split_images: int, dummy_splits_array: list[bool]
self,
number_of_split_images: int,
dummy_splits_array: list[bool],
):
"""
Wait until the similarity threshold is met.
@@ -718,7 +747,7 @@ def __similarity_threshold_loop(
QApplication.processEvents()

# Limit the number of time the comparison runs to reduce cpu usage
frame_interval = 1 / self.settings_dict["fps_limit"]
frame_interval = 1 / self.split_image.get_fps_limit(self)
# Use a time delta to have a consistant check interval
wait_delta_ms = int((frame_interval - (time() - start) % frame_interval) * ONE_SECOND)

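The comparison rate is now limited by the active split image's own fps limit (get_fps_limit) rather than only the global fps_limit setting. A purely illustrative walk-through of the wait computation above, with invented numbers and assuming ONE_SECOND is the usual milliseconds-per-second constant:

ONE_SECOND = 1000       # ms per second, as assumed for wait_delta_ms
frame_interval = 1 / 10  # get_fps_limit() returned 10 -> 0.1 s between comparisons
elapsed = 0.034          # pretend 34 ms have passed since `start`
wait_delta_ms = int((frame_interval - elapsed % frame_interval) * ONE_SECOND)
# wait_delta_ms -> 66: wait out the remainder of the current 100 ms frame.
# The modulo keeps the cadence steady even if one comparison overruns a whole frame.
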
@@ -881,7 +910,11 @@ def __update_split_image(self, specific_image: AutoSplitImage | None = None):
specific_image # fmt: skip
or self.split_images_and_loop_number[0 + self.split_image_number][0]
)
if is_valid_image(self.split_image.byte_array):
if self.split_image.is_ocr:
# TODO: test if setText clears a set image
text = "\nor\n".join(self.split_image.texts)
self.current_split_image.setText(f"Looking for OCR text:\n{text}")
elif is_valid_image(self.split_image.byte_array):
set_preview_image(self.current_split_image, self.split_image.byte_array)

self.current_image_file_label.setText(self.split_image.filename)
@@ -960,19 +993,13 @@ def set_preview_image(qlabel: QLabel, image: MatLike | None):
image_format = QtGui.QImage.Format.Format_BGR888
capture = image

qimage = QtGui.QImage(
capture.data,
width,
height,
width * channels,
image_format,
)
qimage = QtGui.QImage(capture.data, width, height, width * channels, image_format)
qlabel.setPixmap(
QtGui.QPixmap(qimage).scaled(
qlabel.size(),
QtCore.Qt.AspectRatioMode.IgnoreAspectRatio,
QtCore.Qt.TransformationMode.SmoothTransformation,
),
)
)


109 changes: 90 additions & 19 deletions src/AutoSplitImage.py
@@ -1,4 +1,5 @@
import os
import tomllib
from enum import IntEnum, auto
from math import sqrt
from typing import TYPE_CHECKING
@@ -8,8 +9,20 @@
from cv2.typing import MatLike

import error_messages
from compare import check_if_image_has_transparency, get_comparison_method_by_index
from utils import BGR_CHANNEL_COUNT, MAXBYTE, ColorChannel, ImageShape, is_valid_image
from compare import (
check_if_image_has_transparency,
extract_and_compare_text,
get_comparison_method_by_index,
)
from utils import (
BGR_CHANNEL_COUNT,
MAXBYTE,
TESSERACT_PATH,
ColorChannel,
ImageShape,
imread,
is_valid_image,
)

if TYPE_CHECKING:
from AutoSplit import AutoSplit
@@ -20,8 +33,8 @@
COMPARISON_RESIZE_HEIGHT = 240
COMPARISON_RESIZE = (COMPARISON_RESIZE_WIDTH, COMPARISON_RESIZE_HEIGHT)
COMPARISON_RESIZE_AREA = COMPARISON_RESIZE_WIDTH * COMPARISON_RESIZE_HEIGHT
MASK_LOWER_BOUND = np.array([0, 0, 0, 1], dtype="uint8")
MASK_UPPER_BOUND = np.array([MAXBYTE, MAXBYTE, MAXBYTE, MAXBYTE], dtype="uint8")
MASK_LOWER_BOUND = np.array([0, 0, 0, 1], dtype=np.uint8)
MASK_UPPER_BOUND = np.array([MAXBYTE, MAXBYTE, MAXBYTE, MAXBYTE], dtype=np.uint8)
START_KEYWORD = "start_auto_splitter"
RESET_KEYWORD = "reset"

@@ -33,20 +46,26 @@ class ImageType(IntEnum):


class AutoSplitImage:
path: str
filename: str
flags: int
loops: int
image_type: ImageType
byte_array: MatLike | None = None
mask: MatLike | None = None
# This value is internal, check for mask instead
_has_transparency = False
# These values should be overriden by some Defaults if None. Use getters instead
# These values should be overridden by some Defaults if None. Use getters instead
__delay_time: float | None = None
__comparison_method: int | None = None
__pause_time: float | None = None
__similarity_threshold: float | None = None
__rect = (0, 0, 1, 1)
__fps_limit = 0

@property
def is_ocr(self):
"""
Whether a "split image" is actually for Optical Text Recognition
based on whether there's any text strings to search for.
"""
return bool(self.texts)

def get_delay_time(self, default: "AutoSplit | int"):
"""Get image's delay time or fallback to the default value from spinbox."""
@@ -80,6 +99,12 @@ def get_similarity_threshold(self, default: "AutoSplit | float"):
return default
return default.settings_dict["default_similarity_threshold"]

def get_fps_limit(self, default: "AutoSplit"):
"""Get image's fps limit or fallback to the default value from spinbox."""
if self.__fps_limit != 0:
return self.__fps_limit
return default.settings_dict["fps_limit"]

def __init__(self, path: str):
self.path = path
self.filename = os.path.split(path)[-1].lower()
@@ -89,7 +114,12 @@ def __init__(self, path: str):
self.__comparison_method = comparison_method_from_filename(self.filename)
self.__pause_time = pause_from_filename(self.filename)
self.__similarity_threshold = threshold_from_filename(self.filename)
self.__read_image_bytes(path)
self.texts: list[str] = []
self.__ocr_comparison_methods: list[int] = []
if path.endswith("txt"):
self.__parse_text_file(path)
else:
self.__read_image_bytes(path)

if START_KEYWORD in self.filename:
self.image_type = ImageType.START
@@ -98,8 +128,33 @@
else:
self.image_type = ImageType.SPLIT

def __parse_text_file(self, path: str):
if not TESSERACT_PATH:
error_messages.tesseract_missing(path)
return

with open(path, mode="rb") as f:
data = tomllib.load(f)

self.texts = [text.lower().strip() for text in data["texts"]]
self.__rect = (data["left"], data["right"], data["top"], data["bottom"])
self.__ocr_comparison_methods = data.get("methods", [0])
self.__fps_limit = data.get("fps_limit", 0)

if not self.__validate_ocr():
error_messages.wrong_ocr_values(path)
return

def __validate_ocr(self):
values = [*self.__rect, *self.__ocr_comparison_methods, self.__fps_limit]
return (
all(value >= 0 for value in values) # Check for invalid negative values
and self.__rect[1] > self.__rect[0]
and self.__rect[3] > self.__rect[2]
)

def __read_image_bytes(self, path: str):
image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
image = imread(path, cv2.IMREAD_UNCHANGED)
if not is_valid_image(image):
self.byte_array = None
error_messages.image_type(path)
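The new __parse_text_file / __validate_ocr pair above reads OCR split definitions from TOML-formatted text files. A minimal sketch of such a file, checked here with tomllib (the key names and defaults come from this diff; the texts and coordinates are invented placeholders):

import tomllib

sample_ocr_file = """
texts = ["complete!", "stage clear"]
left = 275
right = 540
top = 70
bottom = 95
methods = [0]    # optional, defaults to [0]
fps_limit = 1    # optional, 0 means the global fps limit is used
"""

data = tomllib.loads(sample_ocr_file)
# Mirrors part of __validate_ocr: the rectangle must have positive area.
assert data["right"] > data["left"] and data["bottom"] > data["top"]
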
@@ -138,15 +193,31 @@ def check_flag(self, flag: int):
def check_flag(self, flag: int):
return self.flags & flag == flag

def compare_with_capture(
self,
default: "AutoSplit | int",
capture: MatLike | None,
):
"""Compare image with capture using image's comparison method. Falls back to combobox."""
if not is_valid_image(self.byte_array) or not is_valid_image(capture):
def compare_with_capture(self, default: "AutoSplit | int", capture: MatLike | None):
"""
Compare image with capture using image's comparison method. Falls back to combobox.
For OCR text files:
extract image text from rectangle position and compare it with the expected string.
"""
if not is_valid_image(capture):
return 0.0

if self.is_ocr:
return extract_and_compare_text(
capture[
self.__rect[2] : self.__rect[3],
self.__rect[0] : self.__rect[1],
],
self.texts,
self.__ocr_comparison_methods,
)

if not is_valid_image(self.byte_array):
return 0.0
resized_capture = cv2.resize(capture, self.byte_array.shape[1::-1])
resized_capture = cv2.resize(
capture, self.byte_array.shape[1::-1], interpolation=cv2.INTER_NEAREST
)

return get_comparison_method_by_index(
self.__get_comparison_method_index(default),
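In the OCR branch of compare_with_capture above, the stored rectangle is ordered (left, right, top, bottom), while NumPy indexes an image as [rows, cols], that is [y, x], hence the [top:bottom, left:right] slice. A tiny sketch with placeholder values:

import numpy as np

capture = np.zeros((480, 854, 3), dtype=np.uint8)  # placeholder frame
left, right, top, bottom = 275, 540, 70, 95        # stands in for self.__rect
region = capture[top:bottom, left:right]           # rows (y) first, then columns (x)
# region.shape == (25, 265, 3)
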
9 changes: 4 additions & 5 deletions src/capture_method/BitBltCaptureMethod.py
@@ -30,11 +30,10 @@ def is_blank(image: MatLike):
class BitBltCaptureMethod(ThreadedLoopCaptureMethod):
name = "BitBlt"
short_description = "fastest, least compatible"
description = (
"\nThe best option when compatible. But it cannot properly record "
+ "\nOpenGL, Hardware Accelerated or Exclusive Fullscreen windows. "
+ "\nThe smaller the selected region, the more efficient it is. "
)
description = """
The best option when compatible. But it cannot properly record
OpenGL, Hardware Accelerated or Exclusive Fullscreen windows.
The smaller the selected region, the more efficient it is."""

@property
@override

