Enhance logging and error handling across tools; add full tool audit and Playwright tests

- Added model mismatch warnings in colorize, enhance-faces, and upscale routes.
- Improved error handling in colorize, enhance_faces, remove_bg, restore, and upscale scripts with detailed logging.
- Updated Dockerfile to align NCCL versions for compatibility.
- Introduced a new full tool audit script to test all tools for functionality and GPU usage.
- Created Playwright E2E tests for GPU-dependent tools to ensure proper functionality and performance.
This commit is contained in:
Ashim 2026-04-17 23:06:31 +08:00
parent 51f60a8269
commit 08a7ffe403
16 changed files with 607 additions and 42 deletions

View file

@ -142,6 +142,13 @@ export function registerColorize(app: FastifyInstance) {
});
}
if (model !== "auto" && result.method !== model) {
request.log.warn(
{ toolId: "colorize", requested: model, actual: result.method },
`Colorize model mismatch: requested ${model} but used ${result.method}`,
);
}
return reply.send({
jobId,
downloadUrl: `/api/v1/download/${jobId}/${encodeURIComponent(outputFilename)}`,

View file

@ -122,6 +122,13 @@ export function registerEnhanceFaces(app: FastifyInstance) {
});
}
if (model !== "auto" && result.model !== model) {
request.log.warn(
{ toolId: "enhance-faces", requested: model, actual: result.model },
`Face enhance model mismatch: requested ${model} but used ${result.model}`,
);
}
return reply.send({
jobId,
downloadUrl: `/api/v1/download/${jobId}/${encodeURIComponent(outputFilename)}`,

View file

@ -145,6 +145,7 @@ export function registerRemoveBackground(app: FastifyInstance) {
originalSize: fileBuffer.length,
processedSize: transparentResult.length,
filename,
model: settings.model,
});
} catch (err) {
request.log.error({ err, toolId: "remove-background" }, "Background removal failed");

View file

@ -166,6 +166,13 @@ export function registerUpscale(app: FastifyInstance) {
});
}
if (model !== "auto" && result.method !== model) {
request.log.warn(
{ toolId: "upscale", requested: model, actual: result.method },
`Upscale model mismatch: requested ${model} but used ${result.method}`,
);
}
return reply.send({
jobId,
downloadUrl: `/api/v1/download/${jobId}/${encodeURIComponent(outputFilename)}`,

View file

@ -108,7 +108,7 @@ RUN set -e; \
# Stage 3: Platform-specific base images
# ============================================
FROM node:22-bookworm AS base-linux-arm64
FROM nvidia/cuda:12.6.3-runtime-ubuntu24.04 AS base-linux-amd64
FROM nvidia/cuda:12.6.3-cudnn-runtime-ubuntu24.04 AS base-linux-amd64
# Node.js donor: provides Node binaries for the CUDA amd64 image without
# relying on NodeSource apt repos or Ubuntu mirrors (which are flaky on CI).
@ -212,6 +212,18 @@ RUN --mount=type=cache,target=/root/.cache/pip \
RUN --mount=type=cache,target=/root/.cache/pip \
/opt/venv/bin/pip install numpy==1.26.4
# Re-align NCCL to match torch's requirement. paddlepaddle-gpu pins an older
# nvidia-nccl-cu12 which silently downgrades the version installed by torch,
# causing "undefined symbol: ncclCommShrink" at import time. NCCL is ABI-
# backwards-compatible, so the newer version satisfies both packages.
RUN --mount=type=cache,target=/root/.cache/pip \
if [ "$TARGETARCH" = "amd64" ]; then \
/opt/venv/bin/pip install $(/opt/venv/bin/python3 -c \
"from importlib.metadata import requires; \
print([r.split(';')[0].strip() for r in requires('torch') \
if 'nccl' in r][0])") \
; fi
# Pin rembg model storage to a fixed path so models downloaded at build time
# (as root) are found at runtime (as the non-root ashim user, home=/app).
# Without this, rembg defaults to ~/.u2net which differs between users.
@ -227,7 +239,7 @@ COPY docker/download_models.py /tmp/download_models.py
RUN if [ "$SKIP_MODEL_DOWNLOADS" = "true" ]; then \
echo "Skipping model downloads (CI build)"; \
else \
/opt/venv/bin/python3 /tmp/download_models.py; \
CUDA_VISIBLE_DEVICES="" /opt/venv/bin/python3 /tmp/download_models.py; \
fi && rm -f /tmp/download_models.py && \
# Symlink PaddleX model dir into both possible HOME locations so models are
# found regardless of whether HOME=/root (build/root context) or HOME=/app

View file

@ -70,6 +70,37 @@ os.environ["PADDLE_DEVICE"] = "cpu"
os.environ["FLAGS_use_cuda"] = "0"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Prevent ONNX Runtime from loading the CUDA Execution Provider at build time.
# The cudnn-runtime base image includes cuDNN, which makes ONNX try to init
# the CUDA EP. Without a GPU driver (only available at container runtime),
# this segfaults. Temporarily renaming the provider .so is the most reliable
# way to prevent this — env vars alone are not enough.
def _hide_cuda_provider():
"""Rename ONNX CUDA provider .so to prevent load at build time."""
try:
import onnxruntime as _ort
ep_dir = os.path.join(os.path.dirname(_ort.__file__), "capi")
for name in ("libonnxruntime_providers_cuda.so", "libonnxruntime_providers_tensorrt.so"):
src = os.path.join(ep_dir, name)
if os.path.exists(src):
os.rename(src, src + ".build_hide")
except ImportError:
pass
def _restore_cuda_provider():
"""Restore hidden ONNX CUDA provider .so after build-time downloads."""
try:
import onnxruntime as _ort
ep_dir = os.path.join(os.path.dirname(_ort.__file__), "capi")
for name in ("libonnxruntime_providers_cuda.so", "libonnxruntime_providers_tensorrt.so"):
bak = os.path.join(ep_dir, name + ".build_hide")
if os.path.exists(bak):
os.rename(bak, os.path.join(ep_dir, name))
except ImportError:
pass
_hide_cuda_provider()
LAMA_MODEL_DIR = "/opt/models/lama"
LAMA_MODEL_URL = "https://huggingface.co/Carve/LaMa-ONNX/resolve/main/lama_fp32.onnx"
LAMA_MODEL_PATH = os.path.join(LAMA_MODEL_DIR, "lama_fp32.onnx")
@ -686,6 +717,7 @@ def main():
print("\nAll downloads complete. Running verification...\n")
verify_mediapipe()
smoke_test()
_restore_cuda_provider()
print("All models downloaded and verified.")

View file

@ -188,18 +188,29 @@ def main():
if os.path.exists(DDCOLOR_MODEL_PATH):
result_bgr, method = colorize_ddcolor(img_bgr, intensity)
elif model_choice == "ddcolor":
emit_progress(10, "DDColor model not found, using fallback")
raise FileNotFoundError(f"DDColor model not found: {DDCOLOR_MODEL_PATH}")
except Exception as e:
import traceback
print(f"[colorize] DDColor failed: {e}", file=sys.stderr, flush=True)
traceback.print_exc(file=sys.stderr)
if model_choice == "ddcolor":
emit_progress(10, f"DDColor failed: {str(e)[:50]}")
# User explicitly requested ddcolor — fail, don't degrade
raise
result_bgr = None
# Try OpenCV fallback
# Try OpenCV fallback only in auto mode
if result_bgr is None and model_choice in ("auto", "opencv"):
try:
if os.path.exists(OPENCV_PROTO_PATH) and os.path.exists(OPENCV_MODEL_PATH):
result_bgr, method = colorize_opencv(img_bgr, intensity)
except Exception:
elif model_choice == "opencv":
raise FileNotFoundError(f"OpenCV colorize models not found: {OPENCV_PROTO_PATH}")
except Exception as e:
import traceback
print(f"[colorize] OpenCV fallback failed: {e}", file=sys.stderr, flush=True)
traceback.print_exc(file=sys.stderr)
if model_choice == "opencv":
raise
result_bgr = None
if result_bgr is None:

View file

@ -32,8 +32,8 @@ available_modules = {}
def _try_import(name, import_fn):
try:
available_modules[name] = import_fn()
except ImportError:
pass
except ImportError as e:
print(f"[dispatcher] Module '{name}' not available: {e}", file=sys.stderr, flush=True)
_try_import("PIL", lambda: __import__("PIL"))
@ -94,6 +94,8 @@ def _run_script_main(script_name, args):
except SystemExit as e:
exit_code = e.code if isinstance(e.code, int) else 1
except Exception as e:
# Log full traceback to stderr for diagnostics
traceback.print_exc(file=sys.stderr)
# Write error to the captured stdout
sys.stdout.write(json.dumps({"success": False, "error": str(e)}) + "\n")
sys.stdout.flush()
@ -129,9 +131,10 @@ def main():
try:
from gpu import gpu_available
gpu = gpu_available()
except ImportError:
pass
except ImportError as e:
print(f"[dispatcher] GPU detection failed: {e}", file=sys.stderr, flush=True)
print(json.dumps({"ready": True, "gpu": gpu}), file=sys.stderr, flush=True)
print(f"[dispatcher] Ready. GPU: {gpu}. Modules: {list(available_modules.keys())}", file=sys.stderr, flush=True)
for line in sys.stdin:
line = line.strip()

View file

@ -17,8 +17,8 @@ except (ImportError, ModuleNotFoundError):
_shim = types.ModuleType("torchvision.transforms.functional_tensor")
_shim.rgb_to_grayscale = _F.rgb_to_grayscale
sys.modules["torchvision.transforms.functional_tensor"] = _shim
except ImportError:
pass # torchvision not installed at all
except ImportError as e:
print(f"[enhance-faces] torchvision shim failed: {e}", file=sys.stderr, flush=True)
def emit_progress(percent, stage):
@ -196,6 +196,9 @@ def enhance_with_codeformer(img_array, fidelity_weight):
finally:
torch.cuda.is_available = _orig_cuda_check
if restored_bgr is None:
raise RuntimeError("CodeFormer returned no result (face detection may have failed)")
restored_rgb = restored_bgr[:, :, ::-1].copy()
return restored_rgb
@ -258,7 +261,9 @@ def main():
# progress and init messages to stdout which would corrupt
# our JSON result.
stdout_fd = os.dup(1)
sys.stdout.flush() # Flush before redirect to avoid mixing buffers
os.dup2(2, 1)
sys.stdout = os.fdopen(1, "w", closefd=False) # Rebind sys.stdout to new fd 1
enhanced = None
model_used = None
@ -281,14 +286,19 @@ def main():
fidelity_weight = 1.0 - strength
enhanced = enhance_with_codeformer(img_array, fidelity_weight)
model_used = "codeformer"
except Exception:
except Exception as e:
import traceback
print(f"[enhance-faces] CodeFormer failed, falling back to GFPGAN: {e}", file=sys.stderr, flush=True)
traceback.print_exc(file=sys.stderr)
enhanced = enhance_with_gfpgan(img_array, only_center_face)
model_used = "gfpgan"
finally:
# Restore stdout after ALL AI processing
sys.stdout.flush()
os.dup2(stdout_fd, 1)
os.close(stdout_fd)
sys.stdout = sys.__stdout__ # Restore Python-level stdout
if enhanced is None:
raise RuntimeError("Face enhancement failed: no model available")

View file

@ -1,6 +1,8 @@
"""Runtime GPU/CUDA detection utility."""
import ctypes
import functools
import os
import sys
@functools.lru_cache(maxsize=1)
@ -11,16 +13,36 @@ def gpu_available():
if override is not None and override.lower() in ("0", "false", "no"):
return False
# Use torch.cuda as the source of truth. It actually probes
# the hardware. onnxruntime's get_available_providers() only
# reports compiled-in backends, not whether a GPU exists.
# Use torch.cuda as the source of truth when available. It actually
# probes the hardware. Fall back to onnxruntime provider detection
# when torch is not installed (e.g. CPU-only images without PyTorch).
try:
import torch
return torch.cuda.is_available()
except ImportError:
pass
avail = torch.cuda.is_available()
if avail:
name = torch.cuda.get_device_name(0)
print(f"[gpu] CUDA available via torch: {name}", file=sys.stderr, flush=True)
else:
print("[gpu] torch loaded but CUDA not available", file=sys.stderr, flush=True)
return avail
except ImportError as e:
print(f"[gpu] torch not importable: {e}", file=sys.stderr, flush=True)
return False
# Fallback: check if onnxruntime's CUDA provider can actually load.
# get_available_providers() only reports *compiled-in* backends, not whether
# the required libraries (cuDNN, etc.) are present at runtime. We verify
# by trying to load the provider shared library — this transitively checks
# that cuDNN is installed.
try:
import onnxruntime as _ort
if "CUDAExecutionProvider" not in _ort.get_available_providers():
return False
ep_dir = os.path.dirname(_ort.__file__)
ctypes.CDLL(os.path.join(ep_dir, "capi", "libonnxruntime_providers_cuda.so"))
return True
except (ImportError, OSError) as e:
print(f"[gpu] ONNX CUDA provider not functional: {e}", file=sys.stderr, flush=True)
return False
def onnx_providers():

View file

@ -93,7 +93,8 @@ def main():
alpha_matting_foreground_threshold=240,
alpha_matting_background_threshold=10,
)
except Exception:
except Exception as e:
print(f"[remove-bg] Alpha matting failed ({e}), using standard removal", file=sys.stderr, flush=True)
output_data = remove(input_data, session=session)
emit_progress(80, "Background removed")
@ -107,11 +108,12 @@ def main():
result = json.dumps({"success": True, "model": model})
except ImportError:
except ImportError as e:
print(f"[remove-bg] Import failed: {e}", file=sys.stderr, flush=True)
result = json.dumps(
{
"success": False,
"error": "rembg is not installed. Install with: pip install rembg[cpu]",
"error": f"rembg import failed: {e}",
}
)
except Exception as e:

View file

@ -368,7 +368,8 @@ def enhance_faces(img_bgr, fidelity=0.7):
# Run inference
try:
output = session.run(None, model_inputs)[0][0] # (3, 512, 512)
except Exception:
except Exception as e:
print(f"[restore] CodeFormer inference failed for face {i}: {e}", file=sys.stderr, flush=True)
continue
# Postprocess: [-1, 1] -> [0, 255], RGB -> BGR
@ -478,8 +479,8 @@ def colorize_bw(img_bgr, intensity=0.85):
from gpu import gpu_available
if gpu_available():
providers.insert(0, "CUDAExecutionProvider")
except ImportError:
pass
except ImportError as e:
print(f"[restore] GPU detection unavailable: {e}", file=sys.stderr, flush=True)
session = ort.InferenceSession(DDCOLOR_MODEL_PATH, providers=providers)
input_name = session.get_inputs()[0].name

View file

@ -17,8 +17,8 @@ except (ImportError, ModuleNotFoundError):
_shim = types.ModuleType("torchvision.transforms.functional_tensor")
_shim.rgb_to_grayscale = _F.rgb_to_grayscale
sys.modules["torchvision.transforms.functional_tensor"] = _shim
except ImportError:
pass # torchvision not installed at all, Real-ESRGAN unavailable
except ImportError as e:
print(f"[upscale] torchvision shim failed: {e}", file=sys.stderr, flush=True)
def emit_progress(percent, stage):
@ -167,14 +167,19 @@ def main():
os.dup2(stdout_fd, 1)
os.close(stdout_fd)
except (ImportError, FileNotFoundError, RuntimeError, OSError):
# RealESRGAN unavailable or failed
except (ImportError, FileNotFoundError, RuntimeError, OSError) as e:
import traceback
print(f"[upscale] Real-ESRGAN failed: {e}", file=sys.stderr, flush=True)
traceback.print_exc(file=sys.stderr)
if model_choice == "realesrgan":
emit_progress(15, "AI model not available, using fast resize")
# User explicitly requested realesrgan — fail, don't degrade
raise RuntimeError(f"Real-ESRGAN unavailable: {e}") from e
result = None
# Fall back to Lanczos
# Lanczos path: used when explicitly requested or as auto fallback
if result is None:
if model_choice not in ("auto", "lanczos"):
raise RuntimeError(f"Requested model '{model_choice}' is not available")
emit_progress(50, "Upscaling with Lanczos")
result = img.resize(new_size, Image.LANCZOS)
method = "lanczos"

View file

@ -32,11 +32,26 @@ function extractPythonError(error: unknown): string {
if (trimmed && !trimmed.startsWith("Traceback")) {
return trimmed;
}
// Extract the last meaningful line from a Python Traceback
if (trimmed) {
const lines = trimmed
.split("\n")
.map((l) => l.trim())
.filter(Boolean);
const lastLine = lines[lines.length - 1];
if (lastLine && lastLine !== "Traceback (most recent call last):") {
return lastLine;
}
}
}
}
}
if (execError.message) return execError.message;
// Return empty string for {stdout, stderr} objects with no useful content
// so the caller's fallback message (e.g. exit code) kicks in.
return "";
}
if (error instanceof Error) return error.message;
return String(error);
}
@ -85,6 +100,9 @@ function startDispatcher(): ChildProcess | null {
if (parsed.ready === true) {
dispatcherReady = true;
dispatcherGpuAvailable = parsed.gpu === true;
console.log(
`[bridge] Python dispatcher ready (GPU: ${parsed.gpu === true})`,
);
continue;
}
@ -97,7 +115,11 @@ function startDispatcher(): ChildProcess | null {
}
}
} catch {
// Not JSON - collect as error output for pending requests
// Not JSON - forward diagnostic messages to Node.js logger,
// collect the rest as error output for pending requests.
if (trimmed.startsWith("[")) {
console.log(`[python] ${trimmed}`);
}
for (const req of pendingRequests.values()) {
req.stderrLines.push(trimmed);
}
@ -121,14 +143,17 @@ function startDispatcher(): ChildProcess | null {
if (pending) {
pendingRequests.delete(reqId);
if (response.exitCode !== 0) {
pending.reject(
new Error(
extractPythonError({
stdout: response.stdout,
stderr: pending.stderrLines.join("\n"),
}) || `Python script exited with code ${response.exitCode}`,
),
);
const errText =
extractPythonError({
stdout: response.stdout,
stderr: pending.stderrLines.join("\n"),
}) ||
(response.exitCode === 137
? "Process killed (out of memory) — try a lighter model or smaller image"
: response.exitCode === 139
? "Process crashed (segmentation fault)"
: `Python script exited with code ${response.exitCode}`);
pending.reject(new Error(errText));
} else {
pending.resolve({
stdout: response.stdout || "",
@ -143,6 +168,7 @@ function startDispatcher(): ChildProcess | null {
});
child.on("error", (err: NodeJS.ErrnoException) => {
console.error(`[bridge] Dispatcher error: ${err.message} (code: ${err.code})`);
if (err.code === "ENOENT") {
// Venv python not found - mark as failed, will fall back to per-request
dispatcherFailed = true;
@ -307,7 +333,7 @@ function runPythonPerRequest(
}
});
child.on("close", (code) => {
child.on("close", (code, signal) => {
clearTimeout(timer);
if (stderrBuffer.trim()) {
@ -322,7 +348,16 @@ function runPythonPerRequest(
const stderr = stderrLines.join("\n");
if (code !== 0) {
// When the process was killed by a signal, use a clear message
// instead of surfacing unrelated stderr (e.g. CUDA warnings).
const signalMsg =
signal === "SIGKILL" || code === 137
? "Process killed (out of memory) — try a lighter model or smaller image"
: signal === "SIGSEGV" || code === 139
? "Process crashed (segmentation fault)"
: null;
const errorText =
signalMsg ||
extractPythonError({ stdout: stdout.trim(), stderr }) ||
`Python script exited with code ${code}`;
rejectPromise(new Error(errorText));
@ -361,6 +396,9 @@ export function runPythonWithProgress(
// Retry in an isolated per-request process which starts clean and has
// more available memory than the warm dispatcher.
if (err.message === "Python dispatcher exited unexpectedly") {
console.warn(
`[bridge] Dispatcher crashed during ${scriptName}, retrying with per-request process`,
);
return runPythonPerRequest(scriptName, args, options);
}
throw err;

View file

@ -0,0 +1,248 @@
/**
* FULL TOOL AUDIT Tests every tool in ashim on Windows/amd64.
* Verifies: (1) tool works, (2) GPU tools use GPU not CPU fallback,
* (3) no unexpected model/method downgrades.
*/
// Audit configuration. Each value can be overridden via environment so the
// script can run against any deployment without editing the source; defaults
// preserve the original local Windows/amd64 setup.
// NOTE(review): the fallback password below is a hard-coded credential that
// ships in the repo — rotate it and require AUDIT_PASSWORD instead.
const BASE = process.env.AUDIT_BASE_URL ?? "http://localhost:1349";
const USERNAME = process.env.AUDIT_USERNAME ?? "admin";
const PASSWORD = process.env.AUDIT_PASSWORD ?? "qFIJS2KcQ0NuUfZ0";
const IMG = process.env.AUDIT_IMAGE ?? "C:/Users/siddh/Downloads/passport-photo-sample-correct.webp";
import { readFileSync, writeFileSync } from "fs";
// Accumulated { tool, status, detail } records for the final summary.
const results = [];
// Bearer token set by main() after login; read by callTool().
let token = "";
// Print one status line for a tool and record it in `results`.
function log(tool, status, detail = "") {
  const icon = status === "PASS" ? "\u2713" : status === "FAIL" ? "\u2717" : "-";
  const line = `${icon} [${status}] ${tool}${detail ? " -- " + detail : ""}`;
  console.log(line);
  results.push({ tool, status, detail });
}
/**
 * POST the sample image plus JSON settings to one tool endpoint.
 * Returns { status, ok, body }; when the response is not valid JSON,
 * `body` is a synthetic error object carrying the HTTP status.
 */
async function callTool(path, settings, filename = "test.webp") {
  const blob = new Blob([readFileSync(IMG)], { type: "image/webp" });
  const form = new FormData();
  form.append("file", new File([blob], filename, { type: "image/webp" }));
  form.append("settings", JSON.stringify(settings));
  const response = await fetch(`${BASE}/api/v1/tools/${path}`, {
    method: "POST",
    headers: { Authorization: `Bearer ${token}` },
    body: form,
  });
  let body;
  try {
    body = await response.json();
  } catch {
    body = { error: `HTTP ${response.status} (non-JSON)` };
  }
  return { status: response.status, ok: response.ok, body };
}
/**
 * Run one tool invocation and record PASS/FAIL via log().
 * @param checks optional { expectKey, expectValue }: assert the response
 *   reports exactly the requested model/method (detects silent fallbacks).
 */
async function test(name, path, settings, checks = {}) {
  try {
    const { status, ok, body } = await callTool(path, settings);
    if (!ok || body.error) {
      log(name, "FAIL", body.details || body.error || `HTTP ${status}`);
      return;
    }
    // Check expected model/method. Compare against undefined rather than
    // truthiness so falsy expected values would still be enforced.
    if (checks.expectKey !== undefined && checks.expectValue !== undefined) {
      const actual = body[checks.expectKey];
      if (actual !== checks.expectValue) {
        log(name, "FAIL", `Expected ${checks.expectKey}=${checks.expectValue} but got ${actual} (FALLBACK DETECTED)`);
        return;
      }
    }
    // Build a compact detail string from the most informative response fields.
    const parts = [];
    for (const k of ["method", "model", "engine", "format", "width", "height", "facesDetected", "steps"]) {
      if (body[k] !== undefined) {
        const v = Array.isArray(body[k]) ? JSON.stringify(body[k]) : body[k];
        parts.push(`${k}=${v}`);
      }
    }
    log(name, "PASS", parts.join(", "));
  } catch (err) {
    // A rejection is not guaranteed to be an Error instance — calling
    // .slice on a missing .message would itself throw; stringify safely.
    log(name, "FAIL", String(err && err.message ? err.message : err).slice(0, 200));
  }
}
/**
 * Audit entry point: logs in, reports GPU health, exercises every tool
 * endpoint, prints a pass/fail summary, then best-effort inspects the
 * container logs for GPU usage and silent-fallback warnings.
 * Exits 1 when any tool failed, 0 otherwise.
 */
async function main() {
  console.log("=============================================================");
  console.log(" ASHIM FULL TOOL AUDIT — Windows amd64 + RTX 4070");
  console.log("=============================================================\n");
  // Login
  const loginRes = await fetch(`${BASE}/api/auth/login`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ username: USERNAME, password: PASSWORD }),
  });
  const loginBody = await loginRes.json();
  token = loginBody.token;
  // Fail fast with a clear message instead of letting every tool call 401.
  if (!loginRes.ok || !token) {
    throw new Error(`Login failed (HTTP ${loginRes.status}): ${JSON.stringify(loginBody)}`);
  }
  console.log("Authenticated.\n");
  // Check GPU status
  const healthRes = await fetch(`${BASE}/api/v1/admin/health`, {
    headers: { Authorization: `Bearer ${token}` },
  });
  const health = await healthRes.json();
  console.log(`GPU detected: ${health.ai?.gpu}`);
  console.log(`Version: ${health.version}\n`);
  // ════════════════════════════════════════════════════════════════
  // SECTION 1: GPU/AI TOOLS — verify correct model, no fallbacks
  // ════════════════════════════════════════════════════════════════
  console.log("--- GPU/AI TOOLS (must use GPU, no CPU fallback) ---\n");
  // Background removal — all models
  await test("Remove BG (birefnet-general-lite)", "remove-background", { model: "birefnet-general-lite" }, { expectKey: "model", expectValue: "birefnet-general-lite" });
  await test("Remove BG (birefnet-portrait)", "remove-background", { model: "birefnet-portrait" }, { expectKey: "model", expectValue: "birefnet-portrait" });
  await test("Remove BG (birefnet-general)", "remove-background", { model: "birefnet-general" }, { expectKey: "model", expectValue: "birefnet-general" });
  await test("Remove BG (u2net)", "remove-background", { model: "u2net" }, { expectKey: "model", expectValue: "u2net" });
  await test("Remove BG (bria-rmbg)", "remove-background", { model: "bria-rmbg" }, { expectKey: "model", expectValue: "bria-rmbg" });
  await test("Remove BG (isnet-general-use)", "remove-background", { model: "isnet-general-use" }, { expectKey: "model", expectValue: "isnet-general-use" });
  await test("Remove BG (birefnet-matting/Ultra)", "remove-background", { model: "birefnet-matting" }, { expectKey: "model", expectValue: "birefnet-matting" });
  // Upscale
  await test("Upscale (realesrgan 2x)", "upscale", { scale: 2, model: "realesrgan" }, { expectKey: "method", expectValue: "realesrgan" });
  await test("Upscale (realesrgan 4x)", "upscale", { scale: 4, model: "realesrgan" }, { expectKey: "method", expectValue: "realesrgan" });
  await test("Upscale (lanczos 2x)", "upscale", { scale: 2, model: "lanczos" });
  await test("Upscale (auto)", "upscale", { scale: 2, model: "auto" });
  // Face enhancement
  await test("Face Enhance (gfpgan)", "enhance-faces", { model: "gfpgan" }, { expectKey: "model", expectValue: "gfpgan" });
  await test("Face Enhance (codeformer)", "enhance-faces", { model: "codeformer" });
  await test("Face Enhance (auto)", "enhance-faces", { model: "auto" });
  // Colorize
  await test("Colorize (ddcolor)", "colorize", { model: "ddcolor" }, { expectKey: "method", expectValue: "ddcolor" });
  await test("Colorize (auto)", "colorize", { model: "auto" });
  // Noise removal — all tiers
  await test("Noise Removal (quick)", "noise-removal", { tier: "quick" });
  await test("Noise Removal (balanced)", "noise-removal", { tier: "balanced" });
  await test("Noise Removal (quality/SCUNet)", "noise-removal", { tier: "quality" });
  await test("Noise Removal (maximum/NAFNet)", "noise-removal", { tier: "maximum" });
  // Photo restoration
  await test("Photo Restoration", "restore-photo", {});
  // OCR
  await test("OCR (tesseract)", "ocr", { engine: "tesseract" }, { expectKey: "engine", expectValue: "tesseract" });
  await test("OCR (paddleocr)", "ocr", { engine: "paddleocr" }, { expectKey: "engine", expectValue: "paddleocr-v5" });
  // Face operations (MediaPipe)
  await test("Face Blur", "blur-faces", { intensity: 50 });
  await test("Red-Eye Removal", "red-eye-removal", {});
  // Content-aware resize (caire binary)
  await test("Content-Aware Resize", "content-aware-resize", { width: 800, height: 800 });
  // Erase object (LaMa inpainting — needs mask, likely fails without one)
  // Skipping as it needs a mask input
  // Smart crop
  await test("Smart Crop", "smart-crop", { width: 400, height: 400 });
  // ════════════════════════════════════════════════════════════════
  // SECTION 2: IMAGE PROCESSING TOOLS (Sharp-based, CPU)
  // ════════════════════════════════════════════════════════════════
  console.log("\n--- IMAGE PROCESSING TOOLS (Sharp-based) ---\n");
  await test("Resize", "resize", { width: 512, height: 512, fit: "cover" });
  await test("Crop", "crop", { left: 100, top: 100, width: 500, height: 500 });
  await test("Rotate (90)", "rotate", { angle: 90 });
  await test("Rotate (45 + fill)", "rotate", { angle: 45, background: "#ffffff" });
  await test("Compress (webp q50)", "compress", { quality: 50 });
  await test("Convert (to PNG)", "convert", { format: "png" });
  await test("Convert (to JPEG)", "convert", { format: "jpg", quality: 85 });
  await test("Image Enhancement (auto)", "image-enhancement", { preset: "auto" });
  // color-adjustments is part of image-enhancement, not a separate tool
  await test("Image Enhancement (vivid)", "image-enhancement", { preset: "vivid" });
  await test("Sharpening", "sharpening", { sigma: 1.5, amount: 1.0 });
  await test("Border", "border", { size: 20, color: "#ff0000" });
  await test("Replace Color", "replace-color", { targetColor: "#ffffff", replacementColor: "#000000", tolerance: 30 });
  // ════════════════════════════════════════════════════════════════
  // SECTION 3: UTILITY TOOLS
  // ════════════════════════════════════════════════════════════════
  console.log("\n--- UTILITY TOOLS ---\n");
  await test("Info (metadata)", "info", {});
  await test("Strip Metadata", "strip-metadata", {});
  await test("Image to Base64", "image-to-base64", {});
  await test("Optimize for Web", "optimize-for-web", { maxWidth: 1920, quality: 80 });
  // Favicon returns binary ICO, not JSON — test via status code only
  try {
    const imageBuffer = readFileSync(IMG);
    const imageBlob = new Blob([imageBuffer], { type: "image/webp" });
    const formData = new FormData();
    formData.append("file", new File([imageBlob], "test.webp", { type: "image/webp" }));
    formData.append("settings", JSON.stringify({}));
    const res = await fetch(`${BASE}/api/v1/tools/favicon`, {
      method: "POST", headers: { Authorization: `Bearer ${token}` }, body: formData,
    });
    log("Favicon", res.ok ? "PASS" : "FAIL", `HTTP ${res.status}, ${res.headers.get('content-type')}`);
  } catch (e) { log("Favicon", "FAIL", String(e && e.message ? e.message : e).slice(0, 100)); }
  // ════════════════════════════════════════════════════════════════
  // SECTION 4: MULTI-IMAGE / SPECIAL TOOLS (may need special input)
  // ════════════════════════════════════════════════════════════════
  console.log("\n--- SPECIAL TOOLS (may need specific inputs) ---\n");
  await test("QR Generate", "qr-generate", { text: "https://ashim.app", size: 512, format: "png" });
  await test("Text Overlay", "text-overlay", { text: "TEST", fontSize: 48, color: "#ff0000", position: "center" });
  await test("Vectorize", "vectorize", {});
  await test("SVG to Raster", "svg-to-raster", {}); // Will fail - needs SVG input
  // ════════════════════════════════════════════════════════════════
  // SUMMARY
  // ════════════════════════════════════════════════════════════════
  console.log("\n=============================================================");
  console.log(" SUMMARY");
  console.log("=============================================================\n");
  const passed = results.filter(r => r.status === "PASS");
  const failed = results.filter(r => r.status === "FAIL");
  console.log(`PASSED: ${passed.length}`);
  console.log(`FAILED: ${failed.length}`);
  console.log(`TOTAL: ${results.length}\n`);
  if (failed.length > 0) {
    console.log("FAILURES:");
    for (const r of failed) {
      // Truncate long error messages
      const detail = r.detail.length > 150 ? r.detail.slice(0, 150) + "..." : r.detail;
      console.log(`  \u2717 ${r.tool}: ${detail}`);
    }
  }
  // Check GPU usage in docker logs. Best-effort: a missing docker CLI or a
  // stopped container must not turn a fully-passing audit into a fatal exit.
  try {
    console.log("\n--- GPU USAGE CHECK ---\n");
    const { execSync } = await import("child_process");
    const logs = execSync('docker logs ashim 2>&1', { encoding: 'utf-8', maxBuffer: 1024 * 1024 });
    const gpuLines = logs.split('\n').filter(l =>
      l.includes('[gpu]') || l.includes('[bridge]') || l.includes('[dispatcher]') ||
      l.includes('GPU') || l.includes('CUDA') || l.includes('CUDAExecution')
    );
    for (const line of gpuLines.slice(0, 15)) {
      console.log("  " + line.trim().slice(0, 120));
    }
    // Check for any fallback warnings
    console.log("\n--- FALLBACK/MISMATCH WARNINGS ---\n");
    // Parentheses make the intended "lanczos AND warn" grouping explicit.
    const warnLines = logs.split('\n').filter(l =>
      l.includes('mismatch') || l.includes('fallback') || l.includes('Falling back') ||
      l.includes('degraded') || (l.includes('lanczos') && l.includes('warn'))
    );
    if (warnLines.length === 0) {
      console.log("  None detected - no silent fallbacks occurred.");
    } else {
      for (const line of warnLines) {
        console.log("  WARNING: " + line.trim().slice(0, 150));
      }
    }
  } catch (e) {
    console.log(`  (skipped log inspection: ${String(e && e.message ? e.message : e).slice(0, 150)})`);
  }
  process.exit(failed.length > 0 ? 1 : 0);
}
main().catch(err => { console.error("Fatal:", err); process.exit(1); });

View file

@ -0,0 +1,159 @@
/**
* Playwright E2E test for all GPU-dependent tools on Windows/amd64.
* Uses the API directly (multipart upload) with browser auth context.
*/
import { chromium } from "playwright";
// Connection settings — overridable via environment so the suite can target
// CI or remote deployments; defaults preserve the original local setup.
// NOTE(review): the fallback password is a hard-coded credential committed
// to the repo — rotate it and require E2E_PASSWORD instead.
const BASE = process.env.E2E_BASE_URL ?? "http://localhost:1349";
const USERNAME = process.env.E2E_USERNAME ?? "admin";
const PASSWORD = process.env.E2E_PASSWORD ?? "qFIJS2KcQ0NuUfZ0";
const TEST_IMAGE = process.env.E2E_IMAGE ?? "C:/Users/siddh/Downloads/passport-photo-sample-correct.webp";
// Collected { tool, status, detail } outcomes for the final summary.
const results = [];
// Print one result line and record it for the summary.
function log(tool, status, detail = "") {
  const icon = status === "PASS" ? "\u2713" : status === "FAIL" ? "\u2717" : "!";
  console.log(`${icon} ${tool}: ${status}${detail ? " - " + detail : ""}`);
  results.push({ tool, status, detail });
}
/**
 * E2E entry point: authenticates via the API, POSTs the sample image to each
 * GPU-dependent tool endpoint, and (where `resultKey` is set) reports which
 * model/engine the server says actually ran. Exits 1 when any tool fails.
 */
async function main() {
  console.log("=== Ashim GPU Tools E2E Test ===\n");
  // Login via API
  const loginRes = await fetch(`${BASE}/api/auth/login`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ username: USERNAME, password: PASSWORD }),
  });
  const { token } = await loginRes.json();
  // Fail fast with a clear message instead of letting every tool call 401
  // with an undefined bearer token.
  if (!loginRes.ok || !token) {
    throw new Error(`Login failed (HTTP ${loginRes.status})`);
  }
  console.log("Logged in.\n");
  const { readFileSync } = await import("fs");
  const imageBuffer = readFileSync(TEST_IMAGE);
  const imageBlob = new Blob([imageBuffer], { type: "image/webp" });
  // Tool matrix: path + settings per endpoint; `resultKey` names the response
  // field whose value is echoed in the PASS detail (null = just check success).
  const tools = [
    {
      name: "Remove Background",
      path: "remove-background",
      settings: { model: "birefnet-general-lite" },
      resultKey: "model",
    },
    {
      name: "Remove Background (portrait)",
      path: "remove-background",
      settings: { model: "birefnet-portrait" },
      resultKey: "model",
    },
    {
      name: "Upscale (realesrgan)",
      path: "upscale",
      settings: { scale: 2, model: "realesrgan" },
      resultKey: "method",
    },
    {
      name: "Face Enhancement (gfpgan)",
      path: "enhance-faces",
      settings: { model: "gfpgan" },
      resultKey: "model",
    },
    {
      name: "Face Enhancement (codeformer)",
      path: "enhance-faces",
      settings: { model: "codeformer" },
      resultKey: "model",
    },
    {
      name: "Colorize",
      path: "colorize",
      settings: { model: "auto" },
      resultKey: "method",
    },
    {
      name: "Noise Removal (quality/SCUNet)",
      path: "noise-removal",
      settings: { tier: "quality" },
      resultKey: null,
    },
    {
      name: "Photo Restoration",
      path: "restore-photo",
      settings: {},
      resultKey: "steps",
    },
    {
      name: "Face Blur",
      path: "blur-faces",
      settings: { intensity: 50 },
      resultKey: "facesDetected",
    },
    {
      name: "Red-Eye Removal",
      path: "red-eye-removal",
      settings: {},
      resultKey: "facesDetected",
    },
    {
      name: "OCR (tesseract)",
      path: "ocr",
      settings: { engine: "tesseract" },
      resultKey: "engine",
    },
    {
      name: "OCR (paddleocr)",
      path: "ocr",
      settings: { engine: "paddleocr" },
      resultKey: "engine",
    },
  ];
  for (const tool of tools) {
    try {
      const formData = new FormData();
      formData.append("file", new File([imageBlob], "test.webp", { type: "image/webp" }));
      formData.append("settings", JSON.stringify(tool.settings));
      const res = await fetch(`${BASE}/api/v1/tools/${tool.path}`, {
        method: "POST",
        headers: { Authorization: `Bearer ${token}` },
        body: formData,
      });
      const body = await res.json().catch(() => ({ error: `HTTP ${res.status}` }));
      if (res.ok && !body.error) {
        const val = tool.resultKey ? body[tool.resultKey] : "ok";
        const detail = Array.isArray(val)
          ? JSON.stringify(val)
          : val !== undefined
            ? `${tool.resultKey}=${val}`
            : "";
        log(tool.name, "PASS", detail);
      } else {
        log(tool.name, "FAIL", body.details || body.error || `HTTP ${res.status}`);
      }
    } catch (err) {
      // A rejection may not be an Error instance; stringify defensively so
      // the reporter itself cannot crash on a missing .message.
      log(tool.name, "FAIL", String(err && err.message ? err.message : err).slice(0, 200));
    }
  }
  // Summary
  console.log("\n=== Summary ===");
  const passed = results.filter((r) => r.status === "PASS").length;
  const failed = results.filter((r) => r.status === "FAIL").length;
  console.log(`Passed: ${passed} Failed: ${failed} Total: ${results.length}`);
  if (failed > 0) {
    console.log("\nFailed tests:");
    for (const r of results.filter((r) => r.status === "FAIL")) {
      console.log(`  x ${r.tool}: ${r.detail}`);
    }
    process.exit(1);
  }
}
// Top-level runner: surface any unhandled error and exit non-zero.
main().catch(function (err) {
  console.error("Fatal:", err);
  process.exit(1);
});