"""Minimal image processing pipeline adapted for the Qt frontend."""

from __future__ import annotations

import colorsys
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Iterable, Tuple

from PIL import Image, ImageDraw
from PySide6 import QtGui

from app.logic import PREVIEW_MAX_SIZE


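# Counters gathered while the overlay is rebuilt: the *_all pair covers every visible
# pixel, *_keep the pixels outside the exclusion shapes, *_excl the pixels inside them.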
@dataclass
class Stats:
    matches_all: int = 0
    total_all: int = 0
    matches_keep: int = 0
    total_keep: int = 0
    matches_excl: int = 0
    total_excl: int = 0

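    # ``translate`` receives a message key plus keyword placeholders; the percentages
    # are pre-computed here so the translation templates only have to format them.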
    def summary(self, translate) -> str:
        if self.total_all == 0:
            return translate("stats.placeholder")
        with_pct = (self.matches_keep / self.total_keep * 100) if self.total_keep else 0.0
        without_pct = (self.matches_all / self.total_all * 100) if self.total_all else 0.0
        excluded_pct = (self.total_excl / self.total_all * 100) if self.total_all else 0.0
        excluded_match_pct = (self.matches_excl / self.total_excl * 100) if self.total_excl else 0.0
        return translate(
            "stats.summary",
            with_pct=with_pct,
            without_pct=without_pct,
            excluded_pct=excluded_pct,
            excluded_match_pct=excluded_match_pct,
        )


class QtImageProcessor:
    """Process images and build overlays for the Qt UI."""

    def __init__(self) -> None:
        self.orig_img: Image.Image | None = None
        self.preview_img: Image.Image | None = None
        self.overlay_img: Image.Image | None = None
        self.preview_paths: list[Path] = []
        self.current_index: int = -1
        self.stats = Stats()

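        # Threshold defaults: hue in degrees (0-360), saturation/value in percent,
        # and overlay alpha as an 8-bit value (0-255).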
        self.defaults: Dict[str, int] = {
            "hue_min": 0,
            "hue_max": 360,
            "sat_min": 25,
            "val_min": 15,
            "val_max": 100,
            "alpha": 120,
        }
        self.hue_min = self.defaults["hue_min"]
        self.hue_max = self.defaults["hue_max"]
        self.sat_min = self.defaults["sat_min"]
        self.val_min = self.defaults["val_min"]
        self.val_max = self.defaults["val_max"]
        self.alpha = self.defaults["alpha"]

        self.exclude_shapes: list[dict[str, object]] = []
        self.reset_exclusions_on_switch: bool = False

    def set_defaults(self, defaults: dict) -> None:
        for key in self.defaults:
            if key in defaults:
                self.defaults[key] = int(defaults[key])
        for key, value in self.defaults.items():
            setattr(self, key, value)
        self._rebuild_overlay()

    # thresholds -------------------------------------------------------------

    def set_threshold(self, key: str, value: int) -> None:
        setattr(self, key, value)
        if self.preview_img is not None:
            self._rebuild_overlay()

    # image handling --------------------------------------------------------

    def load_single_image(self, path: Path, *, reset_collection: bool = True) -> Path:
        image = Image.open(path).convert("RGBA")
        self.orig_img = image
        if reset_collection:
            self.preview_paths = [path]
            self.current_index = 0
        self._build_preview()
        self._rebuild_overlay()
        return path

    def load_folder(self, paths: Iterable[Path], start_index: int = 0) -> Path:
        self.preview_paths = list(paths)
        if not self.preview_paths:
            raise ValueError("No images in folder.")
        self.current_index = max(0, min(start_index, len(self.preview_paths) - 1))
        return self._load_image_at_current()

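    # Navigation wraps around the collection in both directions (modulo indexing).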
    def next_image(self) -> Path | None:
        if not self.preview_paths:
            return None
        self.current_index = (self.current_index + 1) % len(self.preview_paths)
        return self._load_image_at_current()

    def previous_image(self) -> Path | None:
        if not self.preview_paths:
            return None
        self.current_index = (self.current_index - 1) % len(self.preview_paths)
        return self._load_image_at_current()

    def _load_image_at_current(self) -> Path:
        path = self.preview_paths[self.current_index]
        return self.load_single_image(path, reset_collection=False)

    # preview/overlay -------------------------------------------------------

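    # The preview is a scaled copy capped at PREVIEW_MAX_SIZE; overlays and statistics
    # are always computed against this copy, never the full-resolution original.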
    def _build_preview(self) -> None:
        if self.orig_img is None:
            self.preview_img = None
            return
        width, height = self.orig_img.size
        max_w, max_h = PREVIEW_MAX_SIZE
        scale = min(max_w / width, max_h / height)
        if scale <= 0:
            scale = 1.0
        size = (max(1, int(width * scale)), max(1, int(height * scale)))
        self.preview_img = self.orig_img.resize(size, Image.LANCZOS)

    def _rebuild_overlay(self) -> None:
        if self.preview_img is None:
            self.overlay_img = None
            self.stats = Stats()
            return
        base = self.preview_img.convert("RGBA")
        overlay = Image.new("RGBA", base.size, (0, 0, 0, 0))
        draw = ImageDraw.Draw(overlay)
        pixels = base.load()
        width, height = base.size
        highlight = (255, 0, 0, int(self.alpha))
        matches_all = total_all = 0
        matches_keep = total_keep = 0
        matches_excl = total_excl = 0

        mask = self._build_exclusion_mask(base.size)
        mask_px = mask.load() if mask else None

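        # Pure-Python per-pixel scan; fully transparent pixels are skipped, and the
        # cost stays manageable because the preview is capped at PREVIEW_MAX_SIZE.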
        for y in range(height):
            for x in range(width):
                r, g, b, a = pixels[x, y]
                if a == 0:
                    continue
                match = self._matches(r, g, b)
                excluded = bool(mask_px and mask_px[x, y])
                total_all += 1
                if excluded:
                    total_excl += 1
                    if match:
                        matches_excl += 1
                else:
                    total_keep += 1
                    if match:
                        draw.point((x, y), fill=highlight)
                        matches_keep += 1
                if match:
                    matches_all += 1

        self.overlay_img = overlay
        self.stats = Stats(
            matches_all=matches_all,
            total_all=total_all,
            matches_keep=matches_keep,
            total_keep=total_keep,
            matches_excl=matches_excl,
            total_excl=total_excl,
        )

    # helpers ----------------------------------------------------------------

    def _matches(self, r: int, g: int, b: int) -> bool:
        h, s, v = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
        hue = (h * 360.0) % 360.0
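        # The hue window may wrap past 0/360 degrees (e.g. a red range such as
        # 330-30), in which case hue_min is larger than hue_max.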
        if self.hue_min <= self.hue_max:
            hue_ok = self.hue_min <= hue <= self.hue_max
        else:
            hue_ok = hue >= self.hue_min or hue <= self.hue_max
        sat_ok = s * 100.0 >= self.sat_min
        val_ok = self.val_min <= v * 100.0 <= self.val_max
        return hue_ok and sat_ok and val_ok

    # exported data ----------------------------------------------------------

    def preview_pixmap(self) -> QtGui.QPixmap:
        return self._to_pixmap(self.preview_img)

    def overlay_pixmap(self) -> QtGui.QPixmap:
        if self.preview_img is None or self.overlay_img is None:
            return QtGui.QPixmap()
        merged = Image.alpha_composite(self.preview_img.convert("RGBA"), self.overlay_img)
        return self._to_pixmap(merged)

    @staticmethod
    def _to_pixmap(image: Image.Image | None) -> QtGui.QPixmap:
        if image is None:
            return QtGui.QPixmap()
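        # QImage does not take ownership of the raw RGBA buffer; converting to QPixmap
        # copies the pixels, so the buffer only needs to stay alive for this call.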
        buffer = image.tobytes("raw", "RGBA")
        qt_image = QtGui.QImage(buffer, image.width, image.height, QtGui.QImage.Format_RGBA8888)
        return QtGui.QPixmap.fromImage(qt_image)

    # exclusions -------------------------------------------------------------

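    # Shapes arrive as plain dicts ({"kind": "rect", "coords": ...} or
    # {"kind": "polygon", "points": ...}); they are copied and coerced to ints so
    # later edits by the caller cannot mutate the processor's state.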
    def set_exclusions(self, shapes: list[dict[str, object]]) -> None:
        copied: list[dict[str, object]] = []
        for shape in shapes:
            kind = shape.get("kind")
            if kind == "rect":
                coords = tuple(shape.get("coords", (0, 0, 0, 0)))  # type: ignore[assignment]
                copied.append({"kind": "rect", "coords": tuple(int(c) for c in coords)})
            elif kind == "polygon":
                pts = shape.get("points", [])
                copied.append({"kind": "polygon", "points": [(int(x), int(y)) for x, y in pts]})
        self.exclude_shapes = copied
        self._rebuild_overlay()

    def _build_exclusion_mask(self, size: Tuple[int, int]) -> Image.Image | None:
        if not self.exclude_shapes:
            return None
        mask = Image.new("L", size, 0)
        draw = ImageDraw.Draw(mask)
        for shape in self.exclude_shapes:
            kind = shape.get("kind")
            if kind == "rect":
                x0, y0, x1, y1 = shape["coords"]  # type: ignore[index]
                draw.rectangle([x0, y0, x1, y1], fill=255)
            elif kind == "polygon":
                points = shape.get("points", [])
                if len(points) >= 3:
                    draw.polygon(points, fill=255)
        return mask
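

# Illustrative usage sketch, not one of the application's entry points: it assumes a
# local "sample.png" exists and creates a QApplication because QPixmap needs one.
# Adjust the path and thresholds to taste.
if __name__ == "__main__":  # pragma: no cover
    import sys

    from PySide6 import QtWidgets

    app = QtWidgets.QApplication(sys.argv)
    processor = QtImageProcessor()
    processor.load_single_image(Path("sample.png"))
    processor.set_threshold("hue_min", 90)   # keep only greenish hues
    processor.set_threshold("hue_max", 150)
    print(processor.stats.summary(lambda key, **kw: f"{key}: {kw}"))
    print(processor.overlay_pixmap().size())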