mirror of
https://github.com/ashim-hq/ashim
synced 2026-04-21 13:37:52 +00:00
Complete rebrand from Stirling-Image to ashim following the project move to https://github.com/ashim-hq/ashim. Changes across 117 files: - Package scope: @stirling-image/* → @ashim/* - GitHub URLs: stirling-image/stirling-image → ashim-hq/ashim - Docker Hub: stirlingimage/stirling-image → ashimhq/ashim - GitHub Pages: stirling-image.github.io → ashim-hq.github.io - All branding text: "Stirling Image" → "ashim" - Docker service/volumes/user: stirling → ashim - Database: stirling.db → ashim.db - localStorage keys: stirling-token → ashim-token - Environment variables: STIRLING_GPU → ASHIM_GPU - Python cache dirs: .cache/stirling-image → .cache/ashim - SVG filter IDs, test prefixes, and all other references
30 lines
929 B
Python
30 lines
929 B
Python
"""Runtime GPU/CUDA detection utility."""
|
|
import functools
|
|
import os
|
|
|
|
|
|
@functools.lru_cache(maxsize=1)
def gpu_available():
    """Return True if a usable CUDA GPU is present at runtime.

    The result is cached for the process lifetime, so changing the
    environment after the first call has no effect.
    """
    # Operators may force CPU-only mode by setting ASHIM_GPU to a
    # recognized "off" value ("0", "false", or "no", case-insensitive).
    disable_flag = os.environ.get("ASHIM_GPU")
    if disable_flag is not None and disable_flag.lower() in ("0", "false", "no"):
        return False

    # torch.cuda is the source of truth: it actually probes the hardware.
    # onnxruntime's get_available_providers() only reports which backends
    # were compiled in, not whether a GPU is really present.
    try:
        import torch
        return torch.cuda.is_available()
    except ImportError:
        pass

    # No torch installed -> assume CPU-only.
    return False
def onnx_providers():
    """Return ONNX Runtime execution providers in priority order.

    CUDA is listed first when a usable GPU is detected; the CPU
    provider is always present as a fallback.
    """
    providers = ["CPUExecutionProvider"]
    if gpu_available():
        providers.insert(0, "CUDAExecutionProvider")
    return providers