ICRA/app/qt/image_processor.py

705 lines
26 KiB
Python

"""Minimal image processing pipeline adapted for the Qt frontend."""
from __future__ import annotations
import colorsys
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Iterable, Tuple
import numpy as np
from PIL import Image, ImageDraw
from PySide6 import QtGui
from app.logic import PREVIEW_MAX_SIZE
@dataclass
class Stats:
    """Aggregated per-image color-match statistics and scoring inputs."""

    matches_all: int = 0        # matching pixels among all visible pixels
    total_all: int = 0          # all visible (alpha >= 128, non-background) pixels
    matches_keep: int = 0       # matching pixels outside exclusion shapes
    total_keep: int = 0         # visible pixels outside exclusion shapes
    matches_excl: int = 0       # matching pixels inside exclusion shapes
    total_excl: int = 0         # visible pixels inside exclusion shapes
    brightness_score: float = 0.0
    grouping_score: float = 0.0
    prefer_dark: bool = False

    @property
    def effective_brightness(self) -> float:
        """Returns inverted brightness when prefer_dark is on."""
        if self.prefer_dark:
            return 100.0 - self.brightness_score
        return self.brightness_score

    def composite_score(self, weights: dict[str, int]) -> float:
        """Calculates weighted composite based on provided weights (0-100)."""
        def _pct(hits: int, total: int) -> float:
            return (hits / total * 100) if total else 0.0

        # weights keys: match_all, match_keep, brightness, grouping
        # (key, fallback weight, component value)
        components = (
            ("match_all", 30, _pct(self.matches_all, self.total_all)),
            ("match_keep", 50, _pct(self.matches_keep, self.total_keep)),
            ("brightness", 10, self.effective_brightness),
            ("grouping", 10, self.grouping_score),
        )
        return sum(
            weights.get(key, fallback) / 100.0 * value
            for key, fallback, value in components
        )

    def summary(self, translate, weights: dict[str, int]) -> str:
        """Render the translated one-line summary; placeholder when nothing was measured."""
        if self.total_all == 0:
            return translate("stats.placeholder")

        def _pct(hits: int, total: int) -> float:
            return (hits / total * 100) if total else 0.0

        label_key = "stats.darkness_label" if self.prefer_dark else "stats.brightness_label"
        return translate(
            "stats.summary",
            score=self.composite_score(weights),
            with_pct=_pct(self.matches_keep, self.total_keep),
            without_pct=_pct(self.matches_all, self.total_all),
            brightness_label=translate(label_key),
            brightness=self.effective_brightness,
            grouping=self.grouping_score,
            excluded_pct=_pct(self.total_excl, self.total_all),
        )
def _rgb_to_hsv_numpy(arr: np.ndarray) -> np.ndarray:
"""Vectorized RGB→HSV conversion. arr shape: (H, W, 3), dtype float32, range [0,1].
Returns array of same shape with channels [H(0-360), S(0-100), V(0-100)].
"""
r = arr[..., 0]
g = arr[..., 1]
b = arr[..., 2]
cmax = np.maximum(np.maximum(r, g), b)
cmin = np.minimum(np.minimum(r, g), b)
delta = cmax - cmin
# Value
v = cmax
# Saturation
s = np.zeros_like(r)
np.divide(delta, cmax, out=s, where=cmax > 0)
# Hue
h = np.zeros_like(r)
mask_r = (delta > 0) & (cmax == r)
mask_g = (delta > 0) & (cmax == g)
mask_b = (delta > 0) & (cmax == b)
h[mask_r] = (60.0 * ((g[mask_r] - b[mask_r]) / delta[mask_r])) % 360.0
h[mask_g] = (60.0 * ((b[mask_g] - r[mask_g]) / delta[mask_g]) + 120.0) % 360.0
h[mask_b] = (60.0 * ((r[mask_b] - g[mask_b]) / delta[mask_b]) + 240.0) % 360.0
return np.stack([h, s * 100.0, v * 100.0], axis=-1)
def _export_worker(args: tuple) -> tuple:
    """Standalone worker for ProcessPoolExecutor batch export.

    Receives ``(image_path, params)`` where *params* is the dict produced by
    ``QtImageProcessor.get_export_params()``. Opens the image, runs the
    full stats pipeline, and returns a plain results tuple. No processor
    instance is needed so nothing has to be pickled.

    Returns ``(name, pct_all, pct_keep, eff_brightness, grouping, composite)``
    on success, or ``(name, None, None, None, None, None)`` on any failure.
    """
    image_path, params = args
    # Resolve the path BEFORE the try block so the except handler can always
    # report a filename. Previously ``img_path`` was assigned inside the try,
    # so a failure before that point made the handler itself raise NameError.
    img_path = Path(image_path)
    try:
        hue_min = params["hue_min"]
        hue_max = params["hue_max"]
        sat_min = params["sat_min"]
        sat_max = params["sat_max"]
        val_min = params["val_min"]
        val_max = params["val_max"]
        exclude_bg = params["exclude_bg"]
        exclude_bg_rgb = tuple(params["exclude_bg_rgb"])
        exclude_bg_tolerance = params["exclude_bg_tolerance"]
        prefer_dark = params["prefer_dark"]
        exclude_shapes = params["exclude_shapes"]
        exclude_ref_size = params["exclude_ref_size"]
        # Context manager releases the file handle even if decoding raises
        # part-way through; convert() yields an independent in-memory copy.
        with Image.open(img_path) as src:
            img = src.convert("RGBA")
        arr = np.asarray(img, dtype=np.float32)
        rgb = arr[..., :3] / 255.0
        alpha_ch = arr[..., 3].copy()
        if exclude_bg:
            # Treat pixels within tolerance of the background color as transparent
            r_bg, g_bg, b_bg = exclude_bg_rgb
            tol = exclude_bg_tolerance
            bg_mask = (
                (np.abs(arr[..., 0] - r_bg) <= tol) &
                (np.abs(arr[..., 1] - g_bg) <= tol) &
                (np.abs(arr[..., 2] - b_bg) <= tol)
            )
            alpha_ch[bg_mask] = 0
        hsv = _rgb_to_hsv_numpy(rgb)
        hue = hsv[..., 0]
        sat = hsv[..., 1]
        val = hsv[..., 2]
        if hue_min <= hue_max:
            hue_ok = (hue >= hue_min) & (hue <= hue_max)
        else:
            # Wrap-around hue range (e.g. 350°-10° spans red)
            hue_ok = (hue >= hue_min) | (hue <= hue_max)
        match_mask = (
            hue_ok
            & (sat >= sat_min)
            & (sat <= sat_max)
            & (val >= val_min)
            & (val <= val_max)
            & (alpha_ch >= 128)
        )
        # Build exclusion mask in this image's full-resolution pixel space,
        # scaling shapes from the reference (preview) coordinate space.
        w, h = img.size
        if not exclude_shapes:
            excl_mask = np.zeros((h, w), dtype=bool)
        else:
            target_w, target_h = w, h
            ref_w, ref_h = exclude_ref_size or (w, h)
            sx = target_w / ref_w if ref_w > 0 else 1.0
            sy = target_h / ref_h if ref_h > 0 else 1.0
            mask_img = Image.new("L", (w, h), 0)
            draw = ImageDraw.Draw(mask_img)
            for shape in exclude_shapes:
                kind = shape.get("kind")
                if kind == "rect":
                    x0, y0, x1, y1 = shape["coords"]
                    draw.rectangle([x0 * sx, y0 * sy, x1 * sx, y1 * sy], fill=255)
                elif kind == "polygon":
                    points = shape.get("points", [])
                    if len(points) >= 3:
                        scaled_pts = [(int(px * sx), int(py * sy)) for px, py in points]
                        draw.polygon(scaled_pts, fill=255)
            excl_mask = np.asarray(mask_img, dtype=bool)
        keep_match = match_mask & ~excl_mask
        visible = alpha_ch >= 128
        keep_visible = visible & ~excl_mask
        # Brightness: mean Value (0-100) of visible, non-excluded pixels
        brightness = float(val[keep_visible].mean()) if keep_visible.any() else 0.0
        # Grouping score (duplicated inline for worker isolation; mirrors
        # QtImageProcessor._calculate_grouping_score)
        if not keep_match.any():
            grouping = 0.0
        else:
            mh, mw = keep_match.shape
            padded = np.pad(keep_match, 5, mode='constant', constant_values=0)
            cumsum = padded.astype(np.int32).cumsum(axis=0).cumsum(axis=1)
            y2, x2 = np.arange(9, 9 + mh)[:, None], np.arange(9, 9 + mw)
            y1_1, x1_1 = np.arange(0, mh)[:, None], np.arange(0, mw)
            window_sums = cumsum[y2, x2] - cumsum[y1_1, x2] - cumsum[y2, x1_1] + cumsum[y1_1, x1_1]
            neighbors = (window_sums - keep_match.astype(np.int32)).clip(min=0)
            match_neighbors = neighbors[keep_match]
            grouping = float(((match_neighbors / 80.0) ** 2).mean() * 100.0)
        matches_all = int(match_mask[visible].sum())
        total_all = int(visible.sum())
        matches_keep = int(keep_match[visible].sum())
        total_keep = int(keep_visible.sum())
        eff_brightness = (100.0 - brightness) if prefer_dark else brightness
        pct_all = (matches_all / total_all * 100) if total_all else 0.0
        pct_keep = (matches_keep / total_keep * 100) if total_keep else 0.0
        weights = params["weights"]
        w_all = weights.get("match_all", 30) / 100.0
        w_keep = weights.get("match_keep", 50) / 100.0
        w_bright = weights.get("brightness", 10) / 100.0
        w_group = weights.get("grouping", 10) / 100.0
        composite = w_all * pct_all + w_keep * pct_keep + w_bright * eff_brightness + w_group * grouping
        return (img_path.name, pct_all, pct_keep, eff_brightness, grouping, composite)
    except Exception:
        # Any failure (unreadable file, malformed params, ...) is reported as
        # a row of Nones so the batch keeps going.
        return (img_path.name, None, None, None, None, None)
class QtImageProcessor:
"""Process images and build overlays for the Qt UI."""
def __init__(self) -> None:
    """Initialise image state, thresholds, overlay colour and caches."""
    # Current image state
    self.orig_img: Image.Image | None = None
    self.preview_img: Image.Image | None = None
    self.overlay_img: Image.Image | None = None
    self.preview_paths: list[Path] = []
    self.current_index: int = -1
    self.stats = Stats()
    # Overlay tint color (red by default)
    self.overlay_r, self.overlay_g, self.overlay_b = 255, 0, 0
    # Default HSV thresholds plus overlay alpha; applied as attributes below
    self.defaults: Dict[str, int] = {
        "hue_min": 0,
        "hue_max": 360,
        "sat_min": 25,
        "sat_max": 100,
        "val_min": 15,
        "val_max": 100,
        "alpha": 120,
    }
    for name, value in self.defaults.items():
        setattr(self, name, value)
    # Exclusion shapes (in preview coordinate space)
    self.exclude_shapes: list[dict[str, object]] = []
    self.reset_exclusions_on_switch: bool = False
    # Rasterized-mask cache (keyed on target size)
    self._cached_mask: np.ndarray | None = None
    self._cached_mask_size: Tuple[int, int] | None = None
    self.exclude_ref_size: Tuple[int, int] | None = None
    # Scoring / background-exclusion options
    self.prefer_dark: bool = False
    self.exclude_bg: bool = True
    self.exclude_bg_rgb: Tuple[int, int, int] = (31, 41, 55)
    self.exclude_bg_tolerance: int = 5
    self.weights: Dict[str, int] = {
        "match_all": 30,
        "match_keep": 50,
        "brightness": 10,
        "grouping": 10,
    }
def set_defaults(self, defaults: dict) -> None:
    """Merge known keys from *defaults* into the stored defaults, apply them
    as threshold attributes and refresh the overlay."""
    for key, value in defaults.items():
        if key in self.defaults:
            self.defaults[key] = int(value)
    for key, value in self.defaults.items():
        setattr(self, key, value)
    self._rebuild_overlay()
# thresholds -------------------------------------------------------------
def set_threshold(self, key: str, value: int) -> None:
setattr(self, key, value)
if self.preview_img is not None:
self._rebuild_overlay()
# image handling --------------------------------------------------------
def load_single_image(self, path: Path, *, reset_collection: bool = True) -> Path:
    """Open *path* as RGBA, make it current, and rebuild preview and overlay.

    With ``reset_collection`` the collection is replaced by this single path;
    otherwise the existing collection/index is kept (folder navigation).
    """
    self.orig_img = Image.open(path).convert("RGBA")
    if reset_collection:
        self.preview_paths = [path]
        self.current_index = 0
    self._build_preview()
    self._rebuild_overlay()
    return path
def load_folder(self, paths: Iterable[Path], start_index: int = 0) -> Path:
self.preview_paths = list(paths)
if not self.preview_paths:
raise ValueError("No images in folder.")
self.current_index = max(0, min(start_index, len(self.preview_paths) - 1))
return self._load_image_at_current()
def next_image(self) -> Path | None:
if not self.preview_paths:
return None
self.current_index = (self.current_index + 1) % len(self.preview_paths)
return self._load_image_at_current()
def previous_image(self) -> Path | None:
if not self.preview_paths:
return None
self.current_index = (self.current_index - 1) % len(self.preview_paths)
return self._load_image_at_current()
def _load_image_at_current(self) -> Path:
    """Open the image at the current index without resetting the collection."""
    return self.load_single_image(
        self.preview_paths[self.current_index], reset_collection=False
    )
# preview/overlay -------------------------------------------------------
def _build_preview(self) -> None:
if self.orig_img is None:
self.preview_img = None
return
img_to_process = self.orig_img.convert("RGBA")
if self.exclude_bg:
# Mask the background color with tolerance on the original image before resizing
# this prevents interpolation artifacts from leaving a background 'halo'
arr = np.array(img_to_process)
r_bg, g_bg, b_bg = self.exclude_bg_rgb
tol = self.exclude_bg_tolerance
bg_mask = (
(np.abs(arr[..., 0].astype(np.int16) - r_bg) <= tol) &
(np.abs(arr[..., 1].astype(np.int16) - g_bg) <= tol) &
(np.abs(arr[..., 2].astype(np.int16) - b_bg) <= tol)
)
arr[bg_mask, 3] = 0
img_to_process = Image.fromarray(arr, "RGBA")
width, height = img_to_process.size
max_w, max_h = PREVIEW_MAX_SIZE
scale = min(max_w / width, max_h / height)
if scale <= 0:
scale = 1.0
size = (max(1, int(width * scale)), max(1, int(height * scale)))
self.preview_img = img_to_process.resize(size, Image.LANCZOS)
def _rebuild_overlay(self) -> None:
    """Build color-match overlay using vectorized NumPy operations.

    Recomputes ``self.overlay_img`` (an RGBA tint layer over matching
    pixels) and ``self.stats`` from the current preview, thresholds,
    background exclusion and exclusion shapes. Clears both when no
    preview image is loaded.
    """
    if self.preview_img is None:
        self.overlay_img = None
        self.stats = Stats()
        return
    base = self.preview_img.convert("RGBA")
    arr = np.asarray(base, dtype=np.float32)  # (H, W, 4)
    rgb = arr[..., :3] / 255.0
    alpha_ch = arr[..., 3].copy()  # alpha channel of the image
    if self.exclude_bg:
        # Exclude specific background color: pixels within tolerance of the
        # configured RGB are treated as fully transparent for everything below
        r_bg, g_bg, b_bg = self.exclude_bg_rgb
        tol = self.exclude_bg_tolerance
        bg_mask = (
            (np.abs(arr[..., 0] - r_bg) <= tol) &
            (np.abs(arr[..., 1] - g_bg) <= tol) &
            (np.abs(arr[..., 2] - b_bg) <= tol)
        )
        alpha_ch[bg_mask] = 0
    hsv = _rgb_to_hsv_numpy(rgb)  # (H, W, 3): H°, S%, V%
    hue = hsv[..., 0]
    sat = hsv[..., 1]
    val = hsv[..., 2]
    hue_min = float(self.hue_min)
    hue_max = float(self.hue_max)
    if hue_min <= hue_max:
        hue_ok = (hue >= hue_min) & (hue <= hue_max)
    else:
        # Wrap-around hue range (e.g. 350°-10° spans red)
        hue_ok = (hue >= hue_min) | (hue <= hue_max)
    match_mask = (
        hue_ok
        & (sat >= float(self.sat_min))
        & (sat <= float(self.sat_max))
        & (val >= float(self.val_min))
        & (val <= float(self.val_max))
        & (alpha_ch >= 128)
    )
    # Exclusion mask (same pixel space as preview)
    excl_mask = self._build_exclusion_mask_numpy(base.size)  # bool (H,W)
    keep_match = match_mask & ~excl_mask
    excl_match = match_mask & excl_mask
    visible = alpha_ch >= 128
    # Pixel tallies feeding the Stats percentages
    matches_all = int(match_mask[visible].sum())
    total_all = int(visible.sum())
    matches_keep = int(keep_match[visible].sum())
    total_keep = int((visible & ~excl_mask).sum())
    matches_excl = int(excl_match[visible].sum())
    total_excl = int((visible & excl_mask).sum())
    # Brightness: mean Value (0-100) of ALL non-excluded visible pixels
    keep_visible = visible & ~excl_mask
    brightness = float(val[keep_visible].mean()) if keep_visible.any() else 0.0
    # Grouping: measure clustering of match_mask
    grouping = self._calculate_grouping_score(keep_match)
    # Build overlay image: tint matching pixels with the configured color/alpha
    overlay_arr = np.zeros((base.height, base.width, 4), dtype=np.uint8)
    overlay_arr[keep_match, 0] = self.overlay_r
    overlay_arr[keep_match, 1] = self.overlay_g
    overlay_arr[keep_match, 2] = self.overlay_b
    overlay_arr[keep_match, 3] = int(self.alpha)
    self.overlay_img = Image.fromarray(overlay_arr, "RGBA")
    self.stats = Stats(
        matches_all=matches_all,
        total_all=total_all,
        matches_keep=matches_keep,
        total_keep=total_keep,
        matches_excl=matches_excl,
        total_excl=total_excl,
        brightness_score=brightness,
        grouping_score=grouping,
        prefer_dark=self.prefer_dark,
    )
def get_stats_headless(self, image: Image.Image) -> Stats:
    """Calculate color-match statistics natively without building UI elements or scaling.

    Runs the same masking pipeline as ``_rebuild_overlay`` on *image* at its
    native resolution and returns the resulting Stats. Does not touch
    ``self.preview_img``/``self.overlay_img``; it does share the exclusion
    mask cache, which is keyed by image size.
    """
    base = image.convert("RGBA")
    arr = np.asarray(base, dtype=np.float32)
    rgb = arr[..., :3] / 255.0
    alpha_ch = arr[..., 3].copy()
    if self.exclude_bg:
        # Exclude background color with tolerance (pixels become transparent)
        r_bg, g_bg, b_bg = self.exclude_bg_rgb
        tol = self.exclude_bg_tolerance
        bg_mask = (
            (np.abs(arr[..., 0] - r_bg) <= tol) &
            (np.abs(arr[..., 1] - g_bg) <= tol) &
            (np.abs(arr[..., 2] - b_bg) <= tol)
        )
        alpha_ch[bg_mask] = 0
    hsv = _rgb_to_hsv_numpy(rgb)
    hue = hsv[..., 0]
    sat = hsv[..., 1]
    val = hsv[..., 2]
    hue_min = float(self.hue_min)
    hue_max = float(self.hue_max)
    if hue_min <= hue_max:
        hue_ok = (hue >= hue_min) & (hue <= hue_max)
    else:
        # Wrap-around hue range (e.g. 350°-10° spans red)
        hue_ok = (hue >= hue_min) | (hue <= hue_max)
    match_mask = (
        hue_ok
        & (sat >= float(self.sat_min))
        & (sat <= float(self.sat_max))
        & (val >= float(self.val_min))
        & (val <= float(self.val_max))
        & (alpha_ch >= 128)
    )
    # Exclusion shapes rasterized at this image's size (cached per size)
    excl_mask = self._build_exclusion_mask_numpy(base.size)
    keep_match = match_mask & ~excl_mask
    excl_match = match_mask & excl_mask
    visible = alpha_ch >= 128
    matches_keep_count = int(keep_match[visible].sum())
    # Brightness: mean Value (0-100) of all visible, non-excluded pixels
    keep_visible = visible & ~excl_mask
    brightness = float(val[keep_visible].mean()) if keep_visible.any() else 0.0
    grouping = self._calculate_grouping_score(keep_match)
    return Stats(
        matches_all=int(match_mask[visible].sum()),
        total_all=int(visible.sum()),
        matches_keep=matches_keep_count,
        total_keep=int((visible & ~excl_mask).sum()),
        matches_excl=int(excl_match[visible].sum()),
        total_excl=int((visible & excl_mask).sum()),
        brightness_score=brightness,
        grouping_score=grouping,
        prefer_dark=self.prefer_dark,
    )
def _calculate_grouping_score(self, mask: np.ndarray) -> float:
"""Measure clustering: average density in a 9x9 neighborhood (0-100)."""
if not mask.any():
return 0.0
h, w = mask.shape
# Use cumulative sums for O(1) box sum calculation
padded = np.pad(mask, 5, mode='constant', constant_values=0)
cumsum = padded.astype(np.int32).cumsum(axis=0).cumsum(axis=1)
# Indices for 9x9 windows centered at each mask pixel
y2, x2 = np.arange(9, 9 + h)[:, None], np.arange(9, 9 + w)
y1_1, x1_1 = np.arange(0, h)[:, None], np.arange(0, w)
# Box sum formula: S(window) = S(x2,y2) - S(x1-1,y2) - S(x2,y1-1) + S(x1-1,y1-1)
window_sums = cumsum[y2, x2] - cumsum[y1_1, x2] - cumsum[y2, x1_1] + cumsum[y1_1, x1_1]
# Max neighbors in 9x9 is 80 (excluding the center pixel itself)
neighbors = (window_sums - mask.astype(np.int32)).clip(min=0)
match_neighbors = neighbors[mask]
# Square the density to heavily penalize thin bridges and frayed edges
score = ( (match_neighbors / 80.0) ** 2 ).mean() * 100.0
return float(score)
# helpers ----------------------------------------------------------------
def _matches(self, r: int, g: int, b: int) -> bool:
"""Single-pixel match — kept for compatibility / eyedropper use."""
h, s, v = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
hue = (h * 360.0) % 360.0
if self.hue_min <= self.hue_max:
hue_ok = self.hue_min <= hue <= self.hue_max
else:
hue_ok = hue >= self.hue_min or hue <= self.hue_max
sat_ok = self.sat_min <= s * 100.0 <= self.sat_max
val_ok = self.val_min <= v * 100.0 <= self.val_max
return hue_ok and sat_ok and val_ok
def pick_color(self, x: int, y: int) -> Tuple[float, float, float] | None:
"""Return (hue°, sat%, val%) of the preview pixel at (x, y), or None."""
if self.preview_img is None:
return None
img = self.preview_img.convert("RGBA")
try:
r, g, b, a = img.getpixel((x, y))
except IndexError:
return None
if a == 0:
return None
h, s, v = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0)
return (h * 360.0) % 360.0, s * 100.0, v * 100.0
# exported data ----------------------------------------------------------
def preview_pixmap(self) -> QtGui.QPixmap:
    """Return the current preview as a QPixmap (empty pixmap when unset)."""
    image = self.preview_img
    return self._to_pixmap(image)
def overlay_pixmap(self) -> QtGui.QPixmap:
    """Return the preview with the match overlay composited on top."""
    if self.preview_img is None:
        return QtGui.QPixmap()
    if self.overlay_img is None:
        # No overlay built yet: fall back to the plain preview
        return self.preview_pixmap()
    composite = Image.alpha_composite(self.preview_img.convert("RGBA"), self.overlay_img)
    return self._to_pixmap(composite)
@staticmethod
def _to_pixmap(image: Image.Image | None) -> QtGui.QPixmap:
    """Convert a PIL RGBA image to a QPixmap; an empty pixmap for None."""
    if image is None:
        return QtGui.QPixmap()
    raw = image.tobytes("raw", "RGBA")
    qt_img = QtGui.QImage(raw, image.width, image.height, QtGui.QImage.Format_RGBA8888)
    # QPixmap.fromImage copies the pixel data, so `raw` need not outlive this call
    return QtGui.QPixmap.fromImage(qt_img)
# exclusions -------------------------------------------------------------
def set_exclusions(self, shapes: list[dict[str, object]], ref_size: Tuple[int, int] | None = None) -> None:
    """Store a normalized copy of the exclusion shapes and refresh the overlay.

    Coordinates are coerced to int; shapes with an unknown kind are dropped.
    The reference size records the coordinate space the shapes were drawn in.
    """
    normalized: list[dict[str, object]] = []
    for shape in shapes:
        kind = shape.get("kind")
        if kind == "rect":
            raw = tuple(shape.get("coords", (0, 0, 0, 0)))  # type: ignore[assignment]
            normalized.append({"kind": "rect", "coords": tuple(int(c) for c in raw)})
        elif kind == "polygon":
            raw_pts = shape.get("points", [])
            normalized.append({"kind": "polygon", "points": [(int(px), int(py)) for px, py in raw_pts]})
    self.exclude_shapes = normalized
    if ref_size:
        self.exclude_ref_size = ref_size
    elif self.preview_img:
        self.exclude_ref_size = self.preview_img.size
    else:
        self.exclude_ref_size = None
    # Shapes changed, so the rasterized mask cache is stale
    self._cached_mask = None
    self._cached_mask_size = None
    self._rebuild_overlay()
def _build_exclusion_mask(self, size: Tuple[int, int]) -> Image.Image | None:
if not self.exclude_shapes:
return None
target_w, target_h = size
ref_w, ref_h = self.exclude_ref_size or size
sx = target_w / ref_w if ref_w > 0 else 1.0
sy = target_h / ref_h if ref_h > 0 else 1.0
mask = Image.new("L", size, 0)
draw = ImageDraw.Draw(mask)
for shape in self.exclude_shapes:
kind = shape.get("kind")
if kind == "rect":
x0, y0, x1, y1 = shape["coords"] # type: ignore[index]
draw.rectangle([x0 * sx, y0 * sy, x1 * sx, y1 * sy], fill=255)
elif kind == "polygon":
points = shape.get("points", [])
if len(points) >= 3:
scaled_pts = [(int(x * sx), int(y * sy)) for x, y in points]
draw.polygon(scaled_pts, fill=255)
return mask
def set_overlay_color(self, hex_code: str) -> None:
"""Set the RGB channels for the match overlay from a hex string."""
if not hex_code.startswith("#") or len(hex_code) not in (7, 9):
return
try:
self.overlay_r = int(hex_code[1:3], 16)
self.overlay_g = int(hex_code[3:5], 16)
self.overlay_b = int(hex_code[5:7], 16)
if self.preview_img:
self._rebuild_overlay()
except ValueError:
pass
def _build_exclusion_mask_numpy(self, size: Tuple[int, int]) -> np.ndarray:
"""Return a boolean (H, W) mask — True where pixels are excluded."""
if self._cached_mask is not None and self._cached_mask_size == size:
return self._cached_mask
w, h = size
if not self.exclude_shapes:
mask = np.zeros((h, w), dtype=bool)
else:
pil_mask = self._build_exclusion_mask(size)
if pil_mask is None:
mask = np.zeros((h, w), dtype=bool)
else:
mask = np.asarray(pil_mask, dtype=bool)
self._cached_mask = mask
self._cached_mask_size = size
return mask
def set_exclude_bg_color(self, hex_code: str, tolerance: int = 5) -> None:
"""Set the RGB channels for background exclusion from a hex string."""
self.exclude_bg_tolerance = tolerance
if not hex_code.startswith("#") or len(hex_code) not in (7, 9):
return
try:
r = int(hex_code[1:3], 16)
g = int(hex_code[3:5], 16)
b = int(hex_code[5:7], 16)
self.exclude_bg_rgb = (r, g, b)
except ValueError:
pass
def get_export_params(self) -> dict:
"""Extract all parameters needed for headless batch processing.
Called once before a batch export so that each worker receives
a plain dict instead of re-reading instance attributes.
"""
return {
"hue_min": float(self.hue_min),
"hue_max": float(self.hue_max),
"sat_min": float(self.sat_min),
"sat_max": float(self.sat_max),
"val_min": float(self.val_min),
"val_max": float(self.val_max),
"exclude_bg": self.exclude_bg,
"exclude_bg_rgb": self.exclude_bg_rgb,
"exclude_bg_tolerance": self.exclude_bg_tolerance,
"prefer_dark": self.prefer_dark,
"exclude_shapes": self.exclude_shapes,
"exclude_ref_size": self.exclude_ref_size,
"weights": self.weights,
}
@property
def exclude_bg_color_hex(self) -> str:
r, g, b = self.exclude_bg_rgb
return f"#{r:02x}{g:02x}{b:02x}"
@property
def overlay_color_hex(self) -> str:
return f"#{self.overlay_r:02x}{self.overlay_g:02x}{self.overlay_b:02x}"