Refine texture metrics and update default weights for premium pattern detection

This commit is contained in:
lukas 2026-03-31 01:44:45 +02:00
parent e13bef7f52
commit 3e42b24110
10 changed files with 382 additions and 52 deletions

View File

@ -40,13 +40,14 @@ def _load_translations(lang: str) -> Dict[str, str]:
data = tomllib.load(handle)
except (OSError, AttributeError, ValueError, TypeError): # type: ignore[arg-type]
return {}
translations = data.get("translations")
if not isinstance(translations, dict):
return {}
# Merge all dictionaries found in the TOML (e.g. [translations] and [tooltip])
out: Dict[str, str] = {}
for key, value in translations.items():
if isinstance(key, str) and isinstance(value, str):
out[key] = value
for section_name, section_data in data.items():
if isinstance(section_data, dict):
for key, value in section_data.items():
if isinstance(key, str) and isinstance(value, str):
out[key] = value
return out

View File

@ -46,10 +46,12 @@
"sliders.val_max" = "Helligkeit Max (%)"
"sliders.alpha" = "Overlay Alpha"
"stats.placeholder" = "Markierungen (mit Ausschlüssen): —"
"stats.summary" = "Wertung: {score:.2f}% | Treffer (mit Exkl.): {with_pct:.2f}% | Treffer: {without_pct:.2f}% | {brightness_label}: {brightness:.1f}% | Gruppierung: {grouping:.1f}% | Ausgeschlossen: {excluded_pct:.2f}%"
"stats.summary" = "Gesamtwertung: {score:.2f}% | Treffer (m. Ausschl.): {with_pct:.2f}% | Treffer: {without_pct:.2f}% | {brightness_label}: {brightness:.1f}% | Gruppierung: {grouping:.1f}% | Kontinuität: {continuity:.1f}% | Rand: {border:.1f}%"
"stats.brightness_label" = "Helligkeit"
"stats.darkness_label" = "Dunkelheit"
"stats.grouping_label" = "Gruppierung"
"stats.continuity_label" = "Kontinuität"
"stats.border_label" = "Rand"
"menu.copy" = "Kopieren"
"dialog.info_title" = "Info"
"dialog.error_title" = "Fehler"
@ -101,6 +103,8 @@
"dialog.weight_match_keep" = "Treffer (Behalten) %"
"dialog.weight_brightness" = "Helligkeit/Dunkelheit %"
"dialog.weight_grouping" = "Gruppierung %"
"dialog.weight_continuity" = "Kontinuität %"
"dialog.weight_border" = "Rand Sauberkeit %"
"dialog.total_weight" = "Gesamt:"
"dialog.weight_error" = "Gewichtungen müssen exakt 100% ergeben (aktuell {total}%)."

View File

@ -46,10 +46,12 @@
"sliders.val_max" = "Value max (%)"
"sliders.alpha" = "Overlay alpha"
"stats.placeholder" = "Matches (with exclusions): —"
"stats.summary" = "Score: {score:.2f}% | Matches (w/ excl.): {with_pct:.2f}% | Matches: {without_pct:.2f}% | {brightness_label}: {brightness:.1f}% | Grouping: {grouping:.1f}% | Excluded: {excluded_pct:.2f}%"
"stats.summary" = "Composite Score: {score:.2f}% | Matches (w/ excl.): {with_pct:.2f}% | Matches: {without_pct:.2f}% | {brightness_label}: {brightness:.1f}% | Grouping: {grouping:.1f}% | Continuity: {continuity:.1f}% | Border: {border:.1f}%"
"stats.brightness_label" = "Brightness"
"stats.darkness_label" = "Darkness"
"stats.grouping_label" = "Grouping"
"stats.continuity_label" = "Continuity"
"stats.border_label" = "Border"
"menu.copy" = "Copy"
"dialog.info_title" = "Info"
"dialog.error_title" = "Error"
@ -101,6 +103,8 @@
"dialog.weight_match_keep" = "Match (Keep) %"
"dialog.weight_brightness" = "Brightness/Darkness %"
"dialog.weight_grouping" = "Grouping %"
"dialog.weight_continuity" = "Continuity %"
"dialog.weight_border" = "Border Cleanliness %"
"dialog.total_weight" = "Total:"
"dialog.weight_error" = "Weights must sum exactly to 100% (currently {total}%)."

View File

@ -8,6 +8,7 @@ from .constants import (
OVERLAY_COLOR,
EXCLUDE_BG_COLOR,
EXCLUDE_BG_TOLERANCE,
WEIGHTS,
PREVIEW_MAX_SIZE,
RESET_EXCLUSIONS_ON_IMAGE_CHANGE,
SUPPORTED_IMAGE_EXTENSIONS,
@ -20,7 +21,7 @@ __all__ = [
"LANGUAGE",
"OVERLAY_COLOR",
"EXCLUDE_BG_COLOR",
"EXCLUDE_BG_TOLERANCE",
"WEIGHTS",
"PREVIEW_MAX_SIZE",
"RESET_EXCLUSIONS_ON_IMAGE_CHANGE",
"SUPPORTED_IMAGE_EXTENSIONS",

View File

@ -101,6 +101,15 @@ _OPTION_DEFAULTS = {
"exclude_bg_tolerance": 5,
}
# Fallback composite-score weights (percent). Each key may be overridden via
# the [weights] section of config.toml; values are expected to sum to 100.
_WEIGHT_DEFAULTS = {
"match_all": 20,
"match_keep": 20,
"brightness": 10,
"grouping": 10,
"continuity": 20,
"border": 20,
}
def _extract_options(data: dict[str, Any]) -> dict[str, Any]:
section = data.get("options")
@ -122,6 +131,18 @@ def _extract_options(data: dict[str, Any]) -> dict[str, Any]:
return result
def _extract_weights(data: dict[str, Any]) -> dict[str, int]:
    """Read composite-score weight overrides from the ``[weights]`` config section.

    Returns a dict containing only keys known to ``_WEIGHT_DEFAULTS`` whose
    configured value is an integer, clamped to the 0-100 range. Returns an
    empty dict when the section is missing or malformed.
    """
    section = data.get("weights")
    if not isinstance(section, dict):
        return {}
    result: dict[str, int] = {}
    for key in _WEIGHT_DEFAULTS:
        value = section.get(key)
        # Reject bools explicitly: bool is a subclass of int, so a stray
        # `true` in the TOML would otherwise be accepted as weight 1.
        if isinstance(value, int) and not isinstance(value, bool):
            result[key] = max(0, min(100, value))
    return result
DEFAULTS = {**_DEFAULTS_BASE, **_extract_default_overrides(_CONFIG_DATA)}
LANGUAGE = _extract_language(_CONFIG_DATA)
OPTIONS = {**_OPTION_DEFAULTS, **_extract_options(_CONFIG_DATA)}
@ -129,3 +150,4 @@ RESET_EXCLUSIONS_ON_IMAGE_CHANGE = OPTIONS["reset_exclusions_on_image_change"]
OVERLAY_COLOR = OPTIONS["overlay_color"]
EXCLUDE_BG_COLOR = OPTIONS["exclude_bg_color"]
EXCLUDE_BG_TOLERANCE = OPTIONS["exclude_bg_tolerance"]
WEIGHTS = {**_WEIGHT_DEFAULTS, **_extract_weights(_CONFIG_DATA)}

View File

@ -46,11 +46,12 @@ def create_application() -> QtWidgets.QApplication:
def run() -> int:
"""Run the PySide6 GUI."""
app = create_application()
from app.logic import OVERLAY_COLOR, EXCLUDE_BG_COLOR, EXCLUDE_BG_TOLERANCE
from app.logic import OVERLAY_COLOR, EXCLUDE_BG_COLOR, EXCLUDE_BG_TOLERANCE, WEIGHTS
window = MainWindow(
language=LANGUAGE,
defaults=DEFAULTS.copy(),
reset_exclusions=RESET_EXCLUSIONS_ON_IMAGE_CHANGE,
weights=WEIGHTS.copy(),
overlay_color=OVERLAY_COLOR,
exclude_bg_color=EXCLUDE_BG_COLOR,
exclude_bg_tolerance=EXCLUDE_BG_TOLERANCE,

View File

@ -24,6 +24,8 @@ class Stats:
total_excl: int = 0
brightness_score: float = 0.0
grouping_score: float = 0.0
continuity_score: float = 0.0
border_score: float = 0.0
prefer_dark: bool = False
@property
@ -36,16 +38,20 @@ class Stats:
pct_all = (self.matches_all / self.total_all * 100) if self.total_all else 0.0
pct_keep = (self.matches_keep / self.total_keep * 100) if self.total_keep else 0.0
# weights keys: match_all, match_keep, brightness, grouping
# weights keys: match_all, match_keep, brightness, grouping, continuity, border
w_all = weights.get("match_all", 30) / 100.0
w_keep = weights.get("match_keep", 50) / 100.0
w_keep = weights.get("match_keep", 30) / 100.0
w_bright = weights.get("brightness", 10) / 100.0
w_group = weights.get("grouping", 10) / 100.0
w_cont = weights.get("continuity", 10) / 100.0
w_bord = weights.get("border", 10) / 100.0
return (w_all * pct_all +
w_keep * pct_keep +
w_bright * self.effective_brightness +
w_group * self.grouping_score)
w_group * self.grouping_score +
w_cont * self.continuity_score +
w_bord * self.border_score)
def summary(self, translate, weights: dict[str, int]) -> str:
if self.total_all == 0:
@ -63,6 +69,8 @@ class Stats:
brightness_label=brightness_label,
brightness=self.effective_brightness,
grouping=self.grouping_score,
continuity=self.continuity_score,
border=self.border_score,
excluded_pct=excluded_pct,
)
@ -98,6 +106,57 @@ def _rgb_to_hsv_numpy(arr: np.ndarray) -> np.ndarray:
return np.stack([h, s * 100.0, v * 100.0], axis=-1)
def _calculate_border_score(mask: np.ndarray, val: np.ndarray, alpha_ch: np.ndarray, prefer_dark: bool, excl_mask: np.ndarray | None = None) -> float:
"""Measure border cleanliness: penalizes extremely dark (or bright) pixels along the match perimeter.
Uses Top-10% percentile to ensure local artifacts (halos) aren't diluted by clean edges.
"""
if not mask.any():
return 100.0
dilated = mask.copy()
# Manual morphological 1-pixel dilation
dilated[:-1, :] |= mask[1:, :]
dilated[1:, :] |= mask[:-1, :]
dilated[:, :-1] |= mask[:, 1:]
dilated[:, 1:] |= mask[:, :-1]
dil2 = dilated.copy()
dil2[:-1, :] |= dilated[1:, :]
dil2[1:, :] |= dilated[:-1, :]
dil2[:, :-1] |= dilated[:, 1:]
dil2[:, 1:] |= dilated[:, :-1]
# Target exterior pixels that aren't transparent and NOT excluded
outer = dil2 & ~mask & (alpha_ch >= 128)
if excl_mask is not None:
outer &= ~excl_mask
if not outer.any():
return 100.0
border_vals = val[outer]
if prefer_dark:
# Penalize super bright edges (white/silver > 60)
penalties = np.clip(border_vals - 60.0, 0, None)
else:
# Penalize super dark edges (black/heavy shadows < 40)
penalties = np.clip(40.0 - border_vals, 0, None)
# Hammer down harsh cuts: focus on the 'worst' parts of the border
if not penalties.any():
return 100.0
# Using 4th power penalty for 'catastrophic' edge detection.
# A single pitch-black line (high diff) is now exponentially worse than a gray transition.
total_penalty = np.sum(penalties ** 4)
# Collector's Grade: only 20 pixels at full intensity (40^4)
# are required for a 1% drop in the Border Score.
max_penalty_sum = 20.0 * (40.0 ** 4)
score = 100.0 * (1.0 - (total_penalty / max_penalty_sum))
return max(0.0, float(score))
def _export_worker(args: tuple) -> tuple:
"""Standalone worker for ProcessPoolExecutor batch export.
@ -185,7 +244,17 @@ def _export_worker(args: tuple) -> tuple:
keep_match = match_mask & ~excl_mask
visible = alpha_ch >= 128
keep_visible = visible & ~excl_mask
brightness = float(val[keep_visible].mean()) if keep_visible.any() else 0.0
if keep_visible.any():
v_vals = val[keep_visible]
mean_v = float(v_vals.mean())
std_v = float(v_vals.std())
# Collector's Purity: multiply mean by a factor derived from variance
# A perfectly uniform pattern (std=0) gets 100% of its mean.
# Blotchy patterns (std > 10) get a significant reduction.
purity_factor = max(0.0, 1.0 - (std_v / 20.0))
brightness = mean_v * purity_factor
else:
brightness = 0.0
# Grouping score (inline for worker isolation)
if not keep_match.any():
@ -206,22 +275,50 @@ def _export_worker(args: tuple) -> tuple:
matches_keep = int(keep_match[visible].sum())
total_keep = int(keep_visible.sum())
# Continuity score (inline for worker isolation)
continuity = 0.0
if keep_match.any():
area = keep_match.sum()
y_idx, x_idx = np.nonzero(keep_match)
unvisited = set(zip(y_idx, x_idx))
max_cc_area = 0
while unvisited:
start_node = unvisited.pop()
queue = [start_node]
cc_area = 0
while queue:
cy, cx = queue.pop()
cc_area += 1
for ny, nx in ((cy-1, cx), (cy+1, cx), (cy, cx-1), (cy, cx+1)):
if (ny, nx) in unvisited:
unvisited.remove((ny, nx))
queue.append((ny, nx))
if cc_area > max_cc_area:
max_cc_area = cc_area
continuity = float(max_cc_area / area * 100.0) if area > 0 else 0.0
eff_brightness = (100.0 - brightness) if prefer_dark else brightness
# Border Cleanliness score calculation using standalone util
border = _calculate_border_score(keep_match, val, alpha_ch, prefer_dark, excl_mask)
pct_all = (matches_all / total_all * 100) if total_all else 0.0
pct_keep = (matches_keep / total_keep * 100) if total_keep else 0.0
weights = params["weights"]
w_all = weights.get("match_all", 30) / 100.0
w_keep = weights.get("match_keep", 50) / 100.0
w_keep = weights.get("match_keep", 30) / 100.0
w_bright = weights.get("brightness", 10) / 100.0
w_group = weights.get("grouping", 10) / 100.0
composite = w_all * pct_all + w_keep * pct_keep + w_bright * eff_brightness + w_group * grouping
w_cont = weights.get("continuity", 10) / 100.0
w_bord = weights.get("border", 10) / 100.0
composite = (w_all * pct_all + w_keep * pct_keep + w_bright * eff_brightness +
w_group * grouping + w_cont * continuity + w_bord * border)
img.close()
return (img_path.name, pct_all, pct_keep, eff_brightness, grouping, composite)
return (img_path.name, pct_all, pct_keep, eff_brightness, grouping, continuity, border, composite)
except Exception:
return (img_path.name, None, None, None, None, None)
return (img_path.name, None, None, None, None, None, None, None)
class QtImageProcessor:
@ -269,10 +366,12 @@ class QtImageProcessor:
self.exclude_bg_rgb: Tuple[int, int, int] = (31, 41, 55)
self.exclude_bg_tolerance: int = 5
self.weights: Dict[str, int] = {
"match_all": 30,
"match_keep": 50,
"match_all": 20,
"match_keep": 20,
"brightness": 10,
"grouping": 10
"grouping": 10,
"continuity": 20,
"border": 20
}
def set_defaults(self, defaults: dict) -> None:
@ -416,11 +515,24 @@ class QtImageProcessor:
# Brightness: mean Value (0-100) of ALL non-excluded visible pixels
keep_visible = visible & ~excl_mask
brightness = float(val[keep_visible].mean()) if keep_visible.any() else 0.0
if keep_visible.any():
v_vals = val[keep_visible]
mean_v = float(v_vals.mean())
std_v = float(v_vals.std())
# Purity factor: subtract deviation from mean to punish blotchy patterns
brightness = max(0.0, mean_v - (std_v * 1.5))
else:
brightness = 0.0
# Grouping: measure clustering of match_mask
grouping = self._calculate_grouping_score(keep_match)
# Continuity: Measure connectivity of matched area
continuity = self._calculate_continuity_score(keep_match)
# Border Cleanliness: Calculate hard edges based on preference
border = _calculate_border_score(keep_match, val, alpha_ch, self.prefer_dark, excl_mask)
# Build overlay image
overlay_arr = np.zeros((base.height, base.width, 4), dtype=np.uint8)
overlay_arr[keep_match, 0] = self.overlay_r
@ -438,6 +550,8 @@ class QtImageProcessor:
total_excl=total_excl,
brightness_score=brightness,
grouping_score=grouping,
continuity_score=continuity,
border_score=border,
prefer_dark=self.prefer_dark,
)
@ -490,8 +604,20 @@ class QtImageProcessor:
visible = alpha_ch >= 128
matches_keep_count = int(keep_match[visible].sum())
keep_visible = visible & ~excl_mask
brightness = float(val[keep_visible].mean()) if keep_visible.any() else 0.0
if keep_visible.any():
v_vals = val[keep_visible]
mean_v = float(v_vals.mean())
std_v = float(v_vals.std())
# Collector's Purity: multiply mean by a factor derived from variance
# A perfectly uniform pattern (std=0) gets 100% of its mean.
# Blotchy patterns (std > 10) get a significant reduction.
purity_factor = max(0.0, 1.0 - (std_v / 20.0))
brightness = mean_v * purity_factor
else:
brightness = 0.0
grouping = self._calculate_grouping_score(keep_match)
continuity = self._calculate_continuity_score(keep_match)
border = _calculate_border_score(keep_match, val, alpha_ch, self.prefer_dark, excl_mask)
return Stats(
matches_all=int(match_mask[visible].sum()),
@ -502,6 +628,8 @@ class QtImageProcessor:
total_excl=int((visible & excl_mask).sum()),
brightness_score=brightness,
grouping_score=grouping,
continuity_score=continuity,
border_score=border,
prefer_dark=self.prefer_dark,
)
@ -530,6 +658,79 @@ class QtImageProcessor:
score = ( (match_neighbors / 80.0) ** 2 ).mean() * 100.0
return float(score)
def _calculate_continuity_score(self, mask: np.ndarray) -> float:
"""Measure continuity: largest connected component ratio and surface smoothness (0-100).
Penalizes jaggedness and 'perforated' patterns with many internal holes.
"""
if not mask.any():
return 0.0
area = mask.sum()
# 1. Connectivity Ratio
y_idx, x_idx = np.nonzero(mask)
unvisited = set(zip(y_idx, x_idx))
max_cc_area = 0
while unvisited:
start_node = unvisited.pop()
queue = [start_node]
cc_area = 0
while queue:
cy, cx = queue.pop()
cc_area += 1
for ny, nx in ((cy-1, cx), (cy+1, cx), (cy, cx-1), (cy, cx+1)):
if (ny, nx) in unvisited:
unvisited.remove((ny, nx))
queue.append((ny, nx))
if cc_area > max_cc_area:
max_cc_area = cc_area
connectivity = max_cc_area / area
# 2. Smoothness / Jaggedness (Perimeter-to-Area)
# Theoretically perfect smoothness (circle) has perimeter 2*sqrt(pi*area)
# We penalize departure from 'ideal' shape density
eroded = mask.copy()
eroded[:-1, :] &= mask[1:, :]
eroded[1:, :] &= mask[:-1, :]
eroded[:, :-1] &= mask[:, 1:]
eroded[:, 1:] &= mask[:, :-1]
perimeter = np.count_nonzero(mask ^ eroded)
# min_perim for a circle
min_perim = 2.0 * np.sqrt(np.pi * area)
# Jaggedness factor (0 is perfect, higher is messier)
# We normalize by the expected complexity of the item (e.g. 15 for Karambit)
# but here we use a general sensitivity factor
jaggedness = max(0.0, (perimeter / min_perim) - 1.0)
# Penalty increases as jaggedness goes up.
# For Urban Masked, we are more lenient (factor of 40 instead of 20)
smoothness_factor = 1.0 / (1.0 + (jaggedness / 40.0))
# 3. Island Count Penalty
# Premium patterns should be unified. Each separate piece (island)
# adds a small deduction to the continuity score.
y, x = np.nonzero(mask)
unvisited = set(zip(y, x))
islands = 0
while unvisited:
islands += 1
node = unvisited.pop()
q = [node]
while q:
cy, cx = q.pop()
for ny, nx in ((cy-1, cx), (cy+1, cx), (cy, cx-1), (cy, cx+1)):
if (ny, nx) in unvisited:
unvisited.remove((ny, nx))
q.append((ny, nx))
# Collector's factor: 2000 is now the baseline for Karambits.
island_factor = max(0.0, 1.0 - (islands / 2000.0))
score = connectivity * smoothness_factor * island_factor * 100.0
return float(score)
# helpers ----------------------------------------------------------------
def _matches(self, r: int, g: int, b: int) -> bool:

View File

@ -552,6 +552,8 @@ class WeightingDialog(QtWidgets.QDialog):
("match_keep", "dialog.weight_match_keep"),
("brightness", "dialog.weight_brightness"),
("grouping", "dialog.weight_grouping"),
("continuity", "dialog.weight_continuity"),
("border", "dialog.weight_border"),
]
for i, (key, label_key) in enumerate(specs):
@ -608,7 +610,7 @@ class WeightingDialog(QtWidgets.QDialog):
class MainWindow(QtWidgets.QMainWindow, I18nMixin):
"""Main application window containing all controls."""
def __init__(self, language: str, defaults: dict, reset_exclusions: bool, overlay_color: str | None = None, exclude_bg_color: str | None = None, exclude_bg_tolerance: int = 5) -> None:
def __init__(self, language: str, defaults: dict, reset_exclusions: bool, weights: dict[str, int], overlay_color: str | None = None, exclude_bg_color: str | None = None, exclude_bg_tolerance: int = 5) -> None:
super().__init__()
self.init_i18n(language)
self.setWindowTitle(self._t("app.title"))
@ -628,6 +630,7 @@ class MainWindow(QtWidgets.QMainWindow, I18nMixin):
self.content = QtWidgets.QWidget()
self.processor = QtImageProcessor()
self.processor.weights = weights.copy()
self.processor.set_defaults(defaults)
self.processor.reset_exclusions_on_switch = reset_exclusions
# Always use red for the overlay regardless of the target color
@ -1203,18 +1206,22 @@ class MainWindow(QtWidgets.QMainWindow, I18nMixin):
decimal = ","
# Weights mapping
w_all = self.processor.weights.get("match_all", 30)
w_keep = self.processor.weights.get("match_keep", 50)
w_all = self.processor.weights.get("match_all", 20)
w_keep = self.processor.weights.get("match_keep", 30)
w_bright = self.processor.weights.get("brightness", 10)
w_group = self.processor.weights.get("grouping", 10)
w_cont = self.processor.weights.get("continuity", 15)
w_bord = self.processor.weights.get("border", 15)
brightness_col = self._t("stats.darkness_label") if self.processor.prefer_dark else self._t("stats.brightness_label")
headers = [
"Filename",
f"Matching Pixels ({w_all}%)", # Was the non-exclusion match percentage
f"Matching Pixels ({w_all}%)",
f"Matching Pixels w/ Exclusions ({w_keep}%)",
f"{brightness_col} ({w_bright}%)",
f"{self._t('stats.grouping_label')} ({w_group}%)",
f"{self._t('stats.continuity_label')} ({w_cont}%)",
f"{self._t('stats.border_label')} ({w_bord}%)",
"Composite Score"
]
@ -1233,25 +1240,21 @@ class MainWindow(QtWidgets.QMainWindow, I18nMixin):
for future in concurrent.futures.as_completed(future_to_idx):
idx = future_to_idx[future]
res = future.result()
name, pct_all, pct_keep, eff_brightness, grouping, composite_score = res
name, pct_all, pct_keep, eff_brightness, grouping, continuity, border, composite_score = res
if pct_keep is None:
# Error parsing image
results[idx] = [name, "Error", "Error", "Error", "Error", "Error"]
results[idx] = [name, "Error", "Error", "Error", "Error", "Error", "Error", -1.0]
else:
pct_all_str = f"{pct_all:.2f}".replace(".", decimal)
pct_keep_str = f"{pct_keep:.2f}".replace(".", decimal)
brightness_str = f"{eff_brightness:.2f}".replace(".", decimal)
grouping_str = f"{grouping:.2f}".replace(".", decimal)
composite_str = f"{composite_score:.2f}".replace(".", decimal)
results[idx] = [
name,
pct_all_str,
pct_keep_str,
brightness_str,
grouping_str,
composite_str
pct_all,
pct_keep,
eff_brightness,
grouping,
continuity,
border,
composite_score
]
done_count += 1
@ -1259,7 +1262,21 @@ class MainWindow(QtWidgets.QMainWindow, I18nMixin):
self.set_status(self._t("status.exporting", current=str(done_count), total=str(total)))
QtWidgets.QApplication.processEvents()
rows.extend(results)
# Sort results by composite_score (last element) descending
results.sort(key=lambda x: x[-1] if isinstance(x[-1], (int, float)) else -1.0, reverse=True)
# Convert numbers to strings with custom decimal separator for CSV
final_rows = []
for r in results:
str_row = []
for item in r:
if isinstance(item, (int, float)):
str_row.append(f"{item:.2f}".replace(".", decimal))
else:
str_row.append(str(item))
final_rows.append(str_row)
rows.extend(final_rows)
# Compute max width per column for alignment, plus extra space so it's not cramped
col_widths = [max(len(str(item)) for item in col) + 4 for col in zip(*rows)]

View File

@ -21,14 +21,20 @@ exclude_bg_color = "#1f2937"
exclude_bg_tolerance = 5
[defaults]
# Override any of the following keys to tweak the initial slider values whenever
# the application starts.
# hue_min, hue_max, sat_min, val_min, val_max accept floating point numbers.
# alpha accepts an integer between 0 and 255.
hue_min = 250.0
hue_max = 310.0
sat_min = 15.0
sat_max = 100.0
val_min = 15.0
val_max = 100.0
alpha = 150
# Override any of the following to tweak the initial slider values upon application start.
hue_min = 250.0 # (0-360) Starting Hue for the target color range
hue_max = 310.0 # (0-360) Ending Hue for the target color range
sat_min = 15.0 # (0-100) Minimum Saturation percentage
sat_max = 100.0 # (0-100) Maximum Saturation percentage
val_min = 15.0 # (0-100) Minimum Value/Brightness percentage
val_max = 100.0 # (0-100) Maximum Value/Brightness percentage
alpha = 150 # (0-255) Opacity of the red overlay in the UI preview
[weights]
# Contribution of each measurement to the final Composite Score (0-100%).
match_all = 20 # % of the total visible image that matches
match_keep = 30 # % of the non-excluded area that matches (the most important area)
brightness = 10 # % Importance of Vibrance (or Darkness if "Prefer Darkness" is on)
grouping = 10 # % Importance of pixel clustering (rewarding solid color blocks)
continuity = 15 # % Quality of the largest connected surface area
border = 15 # % Quality of the transition edges (penalizing dark/hard outlines)

73
tests/test_i18n.py Normal file
View File

@ -0,0 +1,73 @@
import pytest
from pathlib import Path
import re
# Base directory for language files
LANG_DIR = Path(__file__).resolve().parent.parent / "app" / "lang"
def get_structure(file_path: Path):
    """Return a list of (line_number, token) describing a TOML file's layout.

    Each line is classified as a key name, a ``[section]`` header, an empty
    line, or other content (comments etc.). Values are deliberately ignored
    so that translation files in different languages compare structurally
    equal.
    """
    key_re = re.compile(r'^\s*"?([^"\s=]+)"?\s*=')
    header_re = re.compile(r'^\s*\[([^\]]+)\]')

    entries = []
    with open(file_path, "r", encoding="utf-8") as fh:
        for lineno, raw in enumerate(fh, 1):
            text = raw.strip()
            if not text:
                entries.append((lineno, "<empty>"))
            elif (m := header_re.match(text)) is not None:
                entries.append((lineno, f"[{m.group(1)}]"))
            elif (m := key_re.match(text)) is not None:
                entries.append((lineno, m.group(1)))
            else:
                entries.append((lineno, "<other/comment>"))
    return entries
def test_i18n_files_exist():
    """The language directory and the English baseline file must be present."""
    assert LANG_DIR.exists(), f"Language directory {LANG_DIR} not found"
    baseline = LANG_DIR / "en.toml"
    assert baseline.exists(), "English language file (en.toml) must exist as baseline"
def test_i18n_synchronization():
    """
    Ensures all language files have the same keys/headers on the same lines
    as the baseline en.toml.
    """
    baseline_path = LANG_DIR / "en.toml"
    baseline = get_structure(baseline_path)

    # Compare every other translation file against the English structure.
    candidates = [p for p in LANG_DIR.glob("*.toml") if p != baseline_path]
    for candidate in candidates:
        lang_name = candidate.name
        observed = get_structure(candidate)

        # Line counts must match before a positional comparison makes sense.
        assert len(observed) == len(baseline), \
            f"{lang_name} length mismatch: expected {len(baseline)} lines, got {len(observed)}"

        # Positional key/header sync against the baseline.
        for (en_line, en_key), (lang_line, lang_key) in zip(baseline, observed):
            assert en_key == lang_key, \
                f"Sync error at {lang_name}:{lang_line}. Expected '{en_key}', found '{lang_key}'"
if __name__ == "__main__":
    # Allow running directly as a script; propagate pytest's exit status so
    # CI/shell callers see failures (the return value was previously dropped).
    raise SystemExit(pytest.main([__file__]))