# crowncode-backend / local_demo.py
# feat: add wav2vec2 model display in build_models_md function (commit a12879f)
"""
AURIS Local Demo - AI Music Detection
Calistir:
python local_demo.py
"""
from __future__ import annotations
import asyncio
import argparse
import csv
import json
import pickle
import socket
import sys
import time
import warnings
from collections import Counter
from dataclasses import dataclass
from pathlib import Path
from typing import Any
# On Windows, install the selector event loop policy: the default proactor
# loop is less reliable for Gradio/Uvicorn socket handling. The policy class
# is deprecated in newer Python releases, so both the lookup and the install
# suppress DeprecationWarning.
WINDOWS_SELECTOR_POLICY = None
if sys.platform == "win32":
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=DeprecationWarning)
        # getattr: the policy class may be removed in future Python versions.
        WINDOWS_SELECTOR_POLICY = getattr(asyncio, "WindowsSelectorEventLoopPolicy", None)
    if WINDOWS_SELECTOR_POLICY is not None:
        # Gradio/Uvicorn is more stable with the selector loop on Windows.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=DeprecationWarning)
            asyncio.set_event_loop_policy(WINDOWS_SELECTOR_POLICY())
# Fail fast with an actionable install hint (message intentionally in Turkish)
# when Gradio is missing from this interpreter; unrelated import errors are
# re-raised untouched.
try:
    import gradio as gr
except ModuleNotFoundError as exc:
    if exc.name == "gradio":
        raise SystemExit(
            "Gradio bu Python kurulumunda yok.\n"
            f"Kullandigin yorumlayici: {sys.executable}\n"
            "Kurulum komutu:\n"
            f' "{sys.executable}" -m pip install -r requirements.txt'
        ) from exc
    raise
import numpy as np
from app.training.extract_features_batch import extract_sample_features
from app.training.train_deep_classifiers import TorchSklearnWrapper
from app.training.wav2vec2_classifier import Wav2Vec2Config, Wav2Vec2MusicClassifier
class _AurisUnpickler(pickle.Unpickler):
    """Unpickler that redirects classes pickled under ``__main__``.

    Models trained from a script pickle ``TorchSklearnWrapper`` with module
    ``__main__``; this maps that reference back to the importable class.
    """

    # (module, qualified name) -> replacement class
    _REMAP = {("__main__", "TorchSklearnWrapper"): TorchSklearnWrapper}

    def find_class(self, module: str, name: str) -> Any:
        """Resolve (module, name), preferring the remap table."""
        try:
            return self._REMAP[(module, name)]
        except KeyError:
            return super().find_class(module, name)
# Filesystem anchors, resolved relative to this file so the demo works
# regardless of the current working directory.
BASE_DIR = Path(__file__).resolve().parent
PROJECT_ROOT = BASE_DIR.parent
MODELS_DIR = BASE_DIR / "models"  # trained model / scaler artifacts
FIGURES_DIR = PROJECT_ROOT / "docs" / "academic" / "figures"  # gallery images
DATASET_DIR = PROJECT_ROOT / "DataSet"  # manifest.csv + features
TEST_AUDIO_DIR = BASE_DIR / "test_audio"  # bundled example clips
@dataclass(frozen=True)
class DemoArtifacts:
    """Immutable bundle of everything the demo loads once at startup."""

    feature_cols: list[str]  # feature schema; column order matters
    training_results: dict[str, Any]  # per-model metrics plus "_"-prefixed metadata
    scaler: Any  # fitted feature scaler for classic ML models
    loaded_models: dict[str, Any]  # display name -> fitted estimator
    best_model_name: str
    best_model_label: str  # dropdown label corresponding to the best model
    model_labels: list[str]  # dropdown labels, sorted by CV AUC
    label_to_name: dict[str, str]  # dropdown label -> display name
    feature_importance: dict[str, float]
    feature_stats: dict[str, Any]  # global / per-class feature statistics
    dataset_summary: dict[str, Any]  # manifest.csv roll-up
    wav2vec2_model: Any  # Wav2Vec2MusicClassifier | None
def _safe_model_name(name: str) -> str:
return (
name.lower()
.replace(" ", "_")
.replace("(", "")
.replace(")", "")
.replace("/", "_")
.replace("-", "_")
)
def _load_pickle(path: Path) -> Any:
    """Deserialize *path* through the __main__-remapping unpickler."""
    with path.open("rb") as handle:
        return _AurisUnpickler(handle).load()
def _load_json(path: Path) -> dict[str, Any]:
with open(path, "r", encoding="utf-8") as f:
return json.load(f)
def _require_file(path: Path) -> None:
if not path.exists():
raise FileNotFoundError(f"Missing required artifact: {path}")
def _load_feature_stats() -> dict[str, Any]:
    """Return per-feature training statistics, or {} when the file is absent."""
    stats_path = MODELS_DIR / "feature_stats_v1.json"
    return _load_json(stats_path) if stats_path.exists() else {}
def _load_dataset_summary() -> dict[str, Any]:
    """Summarize ``DataSet/manifest.csv``: totals, class balance, sources.

    Returns {} when the manifest is missing.
    """
    manifest_path = DATASET_DIR / "manifest.csv"
    if not manifest_path.exists():
        return {}
    labels: Counter[str] = Counter()
    generators: Counter[str] = Counter()
    row_count = 0
    with open(manifest_path, "r", encoding="utf-8") as handle:
        for record in csv.DictReader(handle):
            row_count += 1
            # Fall back to the numeric label column when the text label is blank.
            labels[record.get("label", "").strip() or str(record.get("label_int", ""))] += 1
            generators[record.get("generator", "").strip() or "unknown"] += 1
    return {
        "manifest_path": str(manifest_path),
        "total": row_count,
        # Labels may be stored as text ("ai"/"human") or as ints ("1"/"0").
        "ai": labels.get("ai", 0) + labels.get("1", 0),
        "human": labels.get("human", 0) + labels.get("0", 0),
        "generators": generators.most_common(8),
    }
def _find_matching_name(raw_name: str, training_results: dict[str, Any]) -> str:
    """Map a model file stem back to its display name in training_results.

    DL model files are saved as "model_dl_<name>", so the name is also tried
    without the "dl_" infix. Falls back to a title-cased guess when no
    training entry matches.
    """
    candidates = {raw_name}
    if raw_name.startswith("dl_"):
        candidates.add(raw_name[3:])  # "dl_deep_mlp_..." -> "deep_mlp_..."
    for display_name in training_results:
        if display_name.startswith("_"):
            continue  # skip metadata keys such as _best_model
        if _safe_model_name(display_name) in candidates:
            return display_name
    return raw_name.replace("_", " ").title()
def _is_model_compatible(model: Any, n_features: int) -> bool:
expected = getattr(model, "n_features_in_", None)
return expected in (None, n_features)
def _load_wav2vec2() -> Any:
    """Load the fine-tuned wav2vec2 checkpoint; return None when unavailable."""
    import torch

    checkpoint = MODELS_DIR / "wav2vec2_auris_v1.pt"
    if not checkpoint.exists():
        return None
    try:
        classifier = Wav2Vec2MusicClassifier(Wav2Vec2Config())
        # weights_only avoids executing arbitrary pickled code from the checkpoint.
        classifier.load_state_dict(
            torch.load(str(checkpoint), map_location="cpu", weights_only=True)
        )
        classifier.eval()
        print(f"wav2vec2 loaded: {checkpoint.name}")
        return classifier
    except Exception as exc:  # noqa: BLE001
        # Best-effort: the demo still runs with the classic models only.
        print(f"wav2vec2 skipped ({exc})")
        return None
def _wav2vec2_predict(model: Any, audio_path: str) -> float | None:
    """Score a raw audio file with wav2vec2; return P(AI) or None on failure."""
    import torch

    try:
        import librosa

        config: Wav2Vec2Config = model.config
        samples, _ = librosa.load(audio_path, sr=config.sample_rate, mono=True)
        target_len = int(config.max_audio_sec * config.sample_rate)
        if len(samples) > target_len:
            samples = samples[:target_len]  # truncate long clips
        elif len(samples) < target_len:
            import numpy as _np

            # Zero-pad short clips up to the fixed window length.
            samples = _np.pad(samples, (0, target_len - len(samples)))
        batch = torch.tensor(samples, dtype=torch.float32).unsqueeze(0)  # (1, samples)
        with torch.no_grad():
            return float(model.predict_proba(batch)[0])
    except Exception as exc:  # noqa: BLE001
        print(f"wav2vec2 inference failed: {exc}")
        return None
def _load_artifacts() -> DemoArtifacts:
    """Load every artifact the demo needs and fail fast on missing files.

    Reads the scaler, feature schema, training metrics (classic + deep
    learning) and all compatible pickled models, then precomputes dropdown
    labels sorted by CV AUC.

    Raises:
        FileNotFoundError: when a required artifact file is absent.
        RuntimeError: when no compatible model could be loaded.
    """
    scaler_path = MODELS_DIR / "feature_scaler_v1.pkl"
    columns_path = MODELS_DIR / "feature_columns_v1.json"
    results_path = MODELS_DIR / "training_results.json"
    best_model_path = MODELS_DIR / "auris_classifier_v1.pkl"
    for required in (scaler_path, columns_path, results_path, best_model_path):
        _require_file(required)
    scaler = _load_pickle(scaler_path)
    feature_cols = _load_json(columns_path)
    training_results = _load_json(results_path)
    # Merge DL metrics into the unified training_results dict
    # (classic-ML entries win on name collisions).
    dl_results_path = MODELS_DIR / "deep_learning_results.json"
    if dl_results_path.exists():
        dl_results = _load_json(dl_results_path)
        for name, metrics in dl_results.items():
            if name not in training_results:
                training_results[name] = metrics
    feature_importance = training_results.get("_feature_importance", {})
    best_model_name = training_results.get("_best_model", "Gradient Boosting")
    loaded_models: dict[str, Any] = {}
    for model_path in sorted(MODELS_DIR.glob("model_*.pkl")):
        try:
            model = _load_pickle(model_path)
        except Exception as exc:  # noqa: BLE001
            # Best-effort: one corrupt pickle must not kill the whole demo.
            print(f"Skipping model file {model_path.name}: {exc}")
            continue
        raw_name = model_path.stem.replace("model_", "")
        model_name = _find_matching_name(raw_name, training_results)
        # Reject models trained against a different feature schema.
        if not _is_model_compatible(model, len(feature_cols)):
            print(
                f"Skipping incompatible model {model_path.name}: "
                f"expected {len(feature_cols)} features"
            )
            continue
        loaded_models[model_name] = model
    # Guarantee the advertised best model is present even when it has no
    # model_*.pkl file of its own.
    if best_model_name not in loaded_models:
        best_model = _load_pickle(best_model_path)
        if _is_model_compatible(best_model, len(feature_cols)):
            loaded_models[best_model_name] = best_model
    if not loaded_models:
        raise RuntimeError("No compatible models were found in the models directory.")
    # Dropdown ordering: best CV AUC first.
    sorted_names = sorted(
        loaded_models,
        key=lambda name: training_results.get(name, {}).get("roc_auc", 0.0),
        reverse=True,
    )
    label_to_name: dict[str, str] = {}
    model_labels: list[str] = []
    for name in sorted_names:
        result = training_results.get(name, {})
        auc = result.get("roc_auc", 0.0)
        acc = result.get("accuracy", 0.0)
        badge = " [EN IYI]" if name == best_model_name else ""
        label = f"{name}{badge} | AUC {auc:.3f} | Acc {acc:.1%}"
        label_to_name[label] = name
        model_labels.append(label)
    # Safe: best_model_name is guaranteed to be in loaded_models at this point.
    best_model_label = next(
        label for label, name in label_to_name.items() if name == best_model_name
    )
    return DemoArtifacts(
        feature_cols=feature_cols,
        training_results=training_results,
        scaler=scaler,
        loaded_models=loaded_models,
        best_model_name=best_model_name,
        best_model_label=best_model_label,
        model_labels=model_labels,
        label_to_name=label_to_name,
        feature_importance=feature_importance,
        feature_stats=_load_feature_stats(),
        dataset_summary=_load_dataset_summary(),
        wav2vec2_model=_load_wav2vec2(),
    )
# Loaded once at import time; every handler reads from this shared bundle.
ARTIFACTS = _load_artifacts()
def _example_audio_paths(limit: int = 6) -> list[list[str]]:
    """Collect up to *limit* bundled demo clips as gr.Examples rows."""
    if not TEST_AUDIO_DIR.exists():
        return []
    allowed_suffixes = {".wav", ".mp3", ".flac"}
    audio_files = [
        entry
        for entry in TEST_AUDIO_DIR.iterdir()
        if entry.is_file() and entry.suffix.lower() in allowed_suffixes
    ]
    audio_files.sort()
    return [[str(entry)] for entry in audio_files[:limit]]
def _normalize_score(value: float, cap: float = 1.0) -> float:
return max(0.0, min(float(value), cap))
def _extract_demo_features(audio_path: str) -> tuple[dict[str, float], float]:
    """Extract the training feature schema from one audio file.

    Returns (features keyed by column, duration in seconds). Raises
    RuntimeError when the extractor returns nothing.
    """
    row = extract_sample_features(audio_path)
    if row is None:
        raise RuntimeError("Ozellik cikarimi basarisiz oldu.")
    feature_map: dict[str, float] = {}
    for column in ARTIFACTS.feature_cols:
        # Missing columns default to 0.0 so the vector shape stays fixed.
        feature_map[column] = float(row.get(column, 0.0))
    return feature_map, float(row.get("duration_sec", 0.0))
def _build_feature_vector(features: dict[str, float]) -> np.ndarray:
    """Order features into a (1, n) float32 matrix, sanitizing non-finite values."""
    ordered = [features.get(column, 0.0) for column in ARTIFACTS.feature_cols]
    raw = np.array([ordered], dtype=np.float32)
    # NaN -> 0, +inf -> 1, -inf -> -1 so downstream scalers never see non-finite input.
    return np.nan_to_num(raw, nan=0.0, posinf=1.0, neginf=-1.0)
def _format_verdict(ai_prob: float) -> tuple[str, str, str]:
if ai_prob >= 0.75:
return "ai-high", "Yuksek AI ihtimali", "AI kaynakli izler baskin"
if ai_prob >= 0.55:
return "ai-mid", "AI olasiligi yuksek", "Model sentetik duzene yakin buldu"
if ai_prob >= 0.40:
return "human-mid", "Sinirda sonuc", "Insan ve AI sinyalleri birbirine yakin"
return "human-high", "Insan yapimiya yakin", "Dogal varyasyon daha guclu"
def _build_result_html(
    ai_prob: float,
    duration: float,
    elapsed: float,
    selected_model_name: str,
) -> str:
    """Render the hero verdict card for a finished analysis run."""
    css_class, headline, subline = _format_verdict(ai_prob)
    ai_pct = ai_prob * 100
    human_pct = (1.0 - ai_prob) * 100
    return f"""
    <section class="hero-card {css_class}">
        <div class="hero-card__eyebrow">Canli analiz sonucu</div>
        <div class="hero-card__score">%{ai_pct:.1f}</div>
        <div class="hero-card__title">{headline}</div>
        <div class="hero-card__subtitle">{subline}</div>
        <div class="hero-card__meta">
            <span>Model: {selected_model_name}</span>
            <span>Sure: {duration:.1f}s</span>
            <span>Islem: {elapsed:.2f}s</span>
            <span>Insan olasiligi: %{human_pct:.1f}</span>
        </div>
    </section>
    """
def _build_signal_html(features: dict[str, float]) -> str:
    """Render the six-meter signal dashboard panel."""
    meters = (
        ("Spektral duzen", "spectral_regularity"),
        ("Zamansal kalip", "temporal_patterns"),
        ("Harmonik yapi", "harmonic_structure"),
        ("Vokal AI izi", "vocal_ai_score"),
        ("Vokal guveni", "vocal_confidence"),
        ("Pitch stabilitesi", "pitch_stability_score"),
    )
    chunks = ['<section class="panel-card"><div class="panel-card__title">Sinyal panosu</div>']
    for label, feature_key in meters:
        pct = _normalize_score(features.get(feature_key, 0.0)) * 100
        # Warm bar for strong signals, cool for weak, mid otherwise.
        if pct >= 60:
            bar_class = "bar-warm"
        elif pct <= 35:
            bar_class = "bar-cool"
        else:
            bar_class = "bar-mid"
        chunks.append(
            f"""
            <div class="meter-row">
                <div class="meter-row__label">{label}</div>
                <div class="meter-row__track">
                    <div class="meter-row__fill {bar_class}" style="width:{pct:.0f}%"></div>
                </div>
                <div class="meter-row__value">%{pct:.0f}</div>
            </div>
            """
        )
    chunks.append("</section>")
    return "".join(chunks)
def _is_dl_wrapper(model: Any) -> bool:
"""True for TorchSklearnWrapper — it has its own internal scaler."""
return type(model).__name__ == "TorchSklearnWrapper"
def _build_model_table_html(
    selected_model_name: str,
    feature_vector: np.ndarray,
    audio_path: str | None = None,
) -> str:
    """Render the live model-comparison table, including a wav2vec2 row."""
    scaled = ARTIFACTS.scaler.transform(feature_vector)
    live_scores: list[tuple[str, float]] = []
    for model_name, model in ARTIFACTS.loaded_models.items():
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=UserWarning)
                # DL wrappers scale internally — pass the raw vector to avoid
                # double-scaling; classic estimators get the pre-scaled one.
                vector = feature_vector if _is_dl_wrapper(model) else scaled
                live_scores.append((model_name, float(model.predict_proba(vector)[0][1])))
        except Exception:  # noqa: BLE001
            continue  # one failing estimator must not break the table
    # Optional wav2vec2 row, scored from the raw waveform rather than features.
    if ARTIFACTS.wav2vec2_model is not None and audio_path is not None:
        w2v_score = _wav2vec2_predict(ARTIFACTS.wav2vec2_model, audio_path)
        if w2v_score is not None:
            live_scores.append(("wav2vec2", w2v_score))
    live_scores.sort(key=lambda pair: pair[1], reverse=True)
    html = [
        '<section class="panel-card"><div class="panel-card__title">Model karsilastirmasi</div>',
        "<table class='model-table'><thead><tr><th>Model</th><th>Canli AI %</th><th>CV AUC</th><th>Acc</th></tr></thead><tbody>",
    ]
    for model_name, live_prob in live_scores:
        metrics = ARTIFACTS.training_results.get(model_name, {})
        row_class = "is-selected" if model_name == selected_model_name else ""
        badges = ""
        if model_name == ARTIFACTS.best_model_name:
            badges += " <span class='badge'>en iyi</span>"
        if model_name == "wav2vec2":
            badges += " <span class='badge'>wav2vec2</span>"
        # Truthiness check on purpose: a missing or 0.0 metric renders as "—".
        auc_cell = f"{metrics['roc_auc']:.4f}" if metrics.get("roc_auc") else "—"
        acc_cell = f"{metrics['accuracy']:.4f}" if metrics.get("accuracy") else "—"
        html.append(
            f"""
            <tr class="{row_class}">
                <td>{model_name}{badges}</td>
                <td>%{live_prob * 100:.1f}</td>
                <td>{auc_cell}</td>
                <td>{acc_cell}</td>
            </tr>
            """
        )
    html.append("</tbody></table></section>")
    return "".join(html)
def _build_feature_details_md(features: dict[str, float], duration: float) -> str:
    """Build the markdown feature report: summary, deviations, full table."""
    lines: list[str] = [
        "## Ses ozet",
        "",
        "| Metrik | Deger |",
        "|--------|-------|",
        f"| Sure | {duration:.1f}s |",
        f"| Tempo | {features.get('tempo_bpm', 0.0):.1f} BPM |",
        f"| RMS energy | {features.get('rms_energy', 0.0):.6f} |",
        f"| Harmonic ratio | {features.get('harmonic_ratio', 0.0):.4f} |",
        f"| Spectral centroid | {features.get('spectral_centroid_mean', 0.0):.1f} Hz |",
        f"| Vocal confidence | {features.get('vocal_confidence', 0.0):.3f} |",
        "",
    ]
    # Optional deviation section (only when training stats are available).
    insights = _build_feature_insights_md(features)
    if insights:
        lines.append(insights)
        lines.append("")
    lines += [
        "## Tum ozellikler",
        "",
        "| Ozellik | Deger | Global onem |",
        "|---------|-------|-------------|",
    ]
    weights = ARTIFACTS.feature_importance
    for column in ARTIFACTS.feature_cols:
        lines.append(
            f"| {column} | {features.get(column, 0.0):.6f} | {weights.get(column, 0.0):.4f} |"
        )
    return "\n".join(lines)
def _build_feature_insights_md(features: dict[str, float]) -> str:
    """List the 10 features deviating most (by |z-score|) from training stats.

    Returns "" when no statistics are available.
    """
    stats = ARTIFACTS.feature_stats
    if not stats:
        return ""
    by_class = stats.get("_by_class", {})
    deviations: list[tuple[float, str, float, float, float, float]] = []
    for column in ARTIFACTS.feature_cols:
        column_stats = stats.get(column)
        if not column_stats:
            continue
        # `or 1.0` guards against a zero std so the z-score stays finite.
        std = float(column_stats.get("std", 1.0)) or 1.0
        value = float(features.get(column, 0.0))
        z_score = (value - float(column_stats.get("mean", 0.0))) / std
        ai_mean = float(by_class.get("ai", {}).get(column, {}).get("mean", 0.0))
        human_mean = float(by_class.get("human", {}).get(column, {}).get("mean", 0.0))
        deviations.append((abs(z_score), column, value, z_score, ai_mean, human_mean))
    if not deviations:
        return ""
    deviations.sort(reverse=True)  # largest |z| first
    report = [
        "## Dikkat ceken sapmalar",
        "",
        "| Ozellik | Deger | Z-score | AI ort. | Human ort. |",
        "|---------|-------|---------|---------|------------|",
    ]
    for _, column, value, z_score, ai_mean, human_mean in deviations[:10]:
        report.append(
            f"| {column} | {value:.6f} | {z_score:+.2f} | {ai_mean:.6f} | {human_mean:.6f} |"
        )
    return "\n".join(report)
def analyze_audio(audio_file: Any, selected_model_label: str) -> tuple[str, str, str, str]:
    """Gradio click handler: score one upload and render all four output panes.

    Returns (result card HTML, signal panel HTML, model table HTML,
    feature details markdown); the last three are empty on early exit.
    """
    if not audio_file:
        placeholder = '<section class="hero-card neutral"><div class="hero-card__title">Ses dosyasi bekleniyor</div><div class="hero-card__subtitle">Analiz icin bir .wav, .mp3 veya .flac yukleyin.</div></section>'
        return placeholder, "", "", ""
    # gr.Audio may deliver a bare path or a (path, ...) tuple.
    audio_path = str(audio_file[0] if isinstance(audio_file, tuple) else audio_file)
    model_name = ARTIFACTS.label_to_name.get(selected_model_label, ARTIFACTS.best_model_name)
    started = time.time()
    try:
        features, duration = _extract_demo_features(audio_path)
    except Exception as exc:  # noqa: BLE001
        failure = f'<section class="hero-card neutral"><div class="hero-card__title">Analiz basarisiz</div><div class="hero-card__subtitle">{exc}</div></section>'
        return failure, "", "", ""
    feature_vector = _build_feature_vector(features)
    model = ARTIFACTS.loaded_models[model_name]
    # DL wrappers scale internally; classic ML models expect pre-scaled input.
    input_vector = (
        feature_vector if _is_dl_wrapper(model) else ARTIFACTS.scaler.transform(feature_vector)
    )
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        ai_prob = float(model.predict_proba(input_vector)[0][1])
    elapsed = time.time() - started
    return (
        _build_result_html(ai_prob, duration, elapsed, model_name),
        _build_signal_html(features),
        _build_model_table_html(model_name, feature_vector, audio_path),
        _build_feature_details_md(features, duration),
    )
def build_models_md() -> str:
    """Render the 'Modeller' tab: metrics table, chosen params, importances."""
    results = ARTIFACTS.training_results
    md = [
        "## Egitilmis modeller",
        "",
        f"- En iyi model: **{ARTIFACTS.best_model_name}**",
        f"- Ornek sayisi: **{results.get('_n_samples', '?')}**",
        f"- Ozellik sayisi: **{results.get('_n_features', len(ARTIFACTS.feature_cols))}**",
        f"- CV kat sayisi: **{results.get('_n_folds', '?')}**",
        "",
        "| Model | Tip | CV AUC | Holdout AUC | Acc | F1 |",
        "|------|-----|--------|-------------|-----|----|",
    ]
    # Real model entries only: skip "_" metadata keys, order by CV AUC.
    ranked = sorted(
        (name for name in results if not name.startswith("_") and isinstance(results[name], dict)),
        key=lambda name: results[name].get("roc_auc", 0.0),
        reverse=True,
    )
    for name in ranked:
        metrics = results[name]
        shown = f"**{name}**" if name == ARTIFACTS.best_model_name else name
        model_type = "DL" if metrics.get("type") == "deep_learning" else "ML"
        md.append(
            f"| {shown} | {model_type} | {metrics.get('roc_auc', 0.0):.4f} | "
            f"{metrics.get('validation_auc', 0.0):.4f} | "
            f"{metrics.get('accuracy', 0.0):.4f} | {metrics.get('f1', 0.0):.4f} |"
        )
    # wav2vec2 has no tabular-CV metrics; show a placeholder row when loaded.
    if ARTIFACTS.wav2vec2_model is not None:
        md.append("| wav2vec2 (fine-tuned) | DL | — | — | — | — |")
    md += ["", "## Secilen parametreler", ""]
    for name in ranked:
        params = results[name].get("selected_params", {})
        md.append(f"- **{name}**: `{json.dumps(params, ensure_ascii=True)}`")
    top_features = sorted(
        ARTIFACTS.feature_importance.items(),
        key=lambda item: item[1],
        reverse=True,
    )[:15]
    if top_features:
        md += ["", "## Ilk 15 ozellik onemi", "", "| Ozellik | Onem |", "|---------|------|"]
        for feature_name, weight in top_features:
            md.append(f"| {feature_name} | {weight:.4f} |")
    return "\n".join(md)
def build_dataset_md() -> str:
    """Render the 'Veri Seti' tab from the manifest summary."""
    summary = ARTIFACTS.dataset_summary
    if not summary:
        return "Veri seti ozeti bulunamadi."
    md = [
        "## Egitim veri seti",
        "",
        "| Metrik | Deger |",
        "|--------|-------|",
        f"| Manifest | `{summary.get('manifest_path', '-')}` |",
        f"| Toplam ornek | {summary.get('total', 0)} |",
        f"| AI | {summary.get('ai', 0)} |",
        f"| Human | {summary.get('human', 0)} |",
        f"| Ozellik | {len(ARTIFACTS.feature_cols)} |",
        "",
        "## Kaynak dagilimi",
        "",
        "| Kaynak | Adet |",
        "|--------|------|",
    ]
    md.extend(f"| {generator} | {count} |" for generator, count in summary.get("generators", []))
    return "\n".join(md)
AURIS_CSS = """
:root {
--bg: #120f0b;
--panel: rgba(31, 24, 17, 0.92);
--panel-strong: rgba(42, 31, 22, 0.98);
--line: rgba(215, 182, 122, 0.18);
--text: #f5ead8;
--muted: #c8af8a;
--gold: #dfb56f;
--gold-soft: #f1d4a2;
--danger: #d66a55;
--danger-soft: #5d2218;
--safe: #7fbb7c;
--safe-soft: #1f3b2d;
}
body {
background:
radial-gradient(circle at top left, rgba(223, 181, 111, 0.12), transparent 28%),
radial-gradient(circle at bottom right, rgba(88, 43, 23, 0.24), transparent 26%),
linear-gradient(135deg, #0d0a07 0%, #18120d 45%, #120f0b 100%);
}
.gradio-container {
max-width: 1360px !important;
margin: 0 auto !important;
background: transparent !important;
color: var(--text) !important;
font-family: "Segoe UI", sans-serif !important;
}
.app-shell {
padding: 24px 0 10px;
}
.app-hero {
display: grid;
grid-template-columns: 1.4fr 1fr;
gap: 18px;
align-items: stretch;
margin-bottom: 18px;
}
.app-brand,
.app-meta {
background: linear-gradient(160deg, rgba(35, 26, 18, 0.95), rgba(19, 14, 10, 0.96));
border: 1px solid var(--line);
border-radius: 22px;
padding: 22px 24px;
box-shadow: 0 24px 70px rgba(0, 0, 0, 0.28);
}
.app-brand__eyebrow {
text-transform: uppercase;
letter-spacing: 0.24em;
font-size: 0.78rem;
color: var(--gold);
margin-bottom: 12px;
}
.app-brand__title {
font-size: 3rem;
font-weight: 800;
line-height: 0.98;
margin: 0;
color: #fff6e6;
}
.app-brand__subtitle {
margin: 14px 0 0;
color: var(--muted);
line-height: 1.6;
max-width: 46rem;
}
.app-meta__grid {
display: grid;
grid-template-columns: repeat(2, minmax(0, 1fr));
gap: 12px;
}
.meta-chip {
background: rgba(255, 255, 255, 0.03);
border: 1px solid rgba(255, 255, 255, 0.06);
border-radius: 16px;
padding: 14px 16px;
}
.meta-chip__label {
display: block;
color: var(--muted);
font-size: 0.78rem;
margin-bottom: 6px;
}
.meta-chip__value {
display: block;
color: #fff2db;
font-size: 1.1rem;
font-weight: 700;
}
.hero-card,
.panel-card {
background: linear-gradient(180deg, rgba(34, 26, 19, 0.96), rgba(19, 14, 10, 0.96));
border: 1px solid var(--line);
border-radius: 20px;
padding: 20px;
box-shadow: 0 22px 60px rgba(0, 0, 0, 0.24);
}
.hero-card__eyebrow,
.panel-card__title {
font-size: 0.82rem;
letter-spacing: 0.12em;
text-transform: uppercase;
color: var(--gold);
margin-bottom: 10px;
}
.hero-card__score {
font-size: clamp(2.8rem, 7vw, 4.8rem);
line-height: 0.95;
font-weight: 900;
color: #fff6e7;
}
.hero-card__title {
margin-top: 8px;
font-size: 1.4rem;
font-weight: 800;
color: #fff6e7;
}
.hero-card__subtitle {
margin-top: 8px;
color: var(--muted);
line-height: 1.6;
}
.hero-card__meta {
display: flex;
flex-wrap: wrap;
gap: 10px;
margin-top: 16px;
}
.hero-card__meta span {
padding: 7px 12px;
border-radius: 999px;
background: rgba(255, 255, 255, 0.04);
border: 1px solid rgba(255, 255, 255, 0.08);
color: #f8ead4;
font-size: 0.88rem;
}
.hero-card.ai-high,
.hero-card.ai-mid {
background: linear-gradient(150deg, rgba(84, 28, 20, 0.96), rgba(33, 15, 12, 0.97));
}
.hero-card.human-high,
.hero-card.human-mid {
background: linear-gradient(150deg, rgba(23, 49, 34, 0.96), rgba(12, 23, 18, 0.97));
}
.hero-card.neutral {
background: linear-gradient(150deg, rgba(36, 29, 22, 0.96), rgba(17, 14, 11, 0.97));
}
.meter-row {
display: grid;
grid-template-columns: 170px minmax(0, 1fr) 54px;
gap: 12px;
align-items: center;
margin-top: 12px;
}
.meter-row__label {
color: #f7ecd8;
font-size: 0.92rem;
}
.meter-row__track {
position: relative;
height: 14px;
background: rgba(255, 255, 255, 0.06);
border-radius: 999px;
overflow: hidden;
border: 1px solid rgba(255, 255, 255, 0.08);
}
.meter-row__fill {
height: 100%;
border-radius: 999px;
}
.bar-warm {
background: linear-gradient(90deg, var(--gold), var(--danger));
}
.bar-mid {
background: linear-gradient(90deg, var(--gold), var(--gold-soft));
}
.bar-cool {
background: linear-gradient(90deg, #76c490, #5ca39b);
}
.meter-row__value {
color: var(--gold-soft);
font-weight: 700;
text-align: right;
}
.model-table {
width: 100%;
border-collapse: collapse;
}
.model-table th,
.model-table td {
padding: 10px 12px;
text-align: left;
border-bottom: 1px solid rgba(255, 255, 255, 0.08);
}
.model-table th {
color: var(--gold);
font-size: 0.82rem;
text-transform: uppercase;
letter-spacing: 0.06em;
}
.model-table td {
color: #f6ead6;
}
.model-table tr.is-selected td {
background: rgba(223, 181, 111, 0.08);
}
.badge {
display: inline-block;
margin-left: 8px;
padding: 3px 8px;
border-radius: 999px;
font-size: 0.72rem;
color: #fff2db;
background: rgba(223, 181, 111, 0.18);
border: 1px solid rgba(223, 181, 111, 0.25);
}
.block {
border: 1px solid var(--line) !important;
border-radius: 18px !important;
background: var(--panel) !important;
}
.gr-button-primary {
background: linear-gradient(135deg, #d4a85c, #b46d3f) !important;
border: 0 !important;
color: #20150d !important;
font-weight: 800 !important;
}
.prose,
.prose * {
color: var(--text) !important;
}
.prose table {
border-collapse: collapse;
width: 100%;
}
.prose th,
.prose td {
padding: 8px 10px;
border: 1px solid rgba(255, 255, 255, 0.08);
}
.prose th {
background: rgba(255, 255, 255, 0.04);
color: var(--gold) !important;
}
footer {
display: none !important;
}
@media (max-width: 920px) {
.app-hero {
grid-template-columns: 1fr;
}
.app-meta__grid {
grid-template-columns: 1fr;
}
.meter-row {
grid-template-columns: 1fr;
}
}
"""
def _build_header_html() -> str:
    """Render the static hero header with artifact metadata chips."""
    summary = ARTIFACTS.dataset_summary
    results = ARTIFACTS.training_results
    # Most common generator from the manifest, or "-" when no summary exists.
    top_generator = summary.get("generators", [["-", 0]])[0][0] if summary else "-"
    sample_count = results.get('_n_samples', '?')
    return f"""
    <section class="app-shell">
        <div class="app-hero">
            <div class="app-brand">
                <div class="app-brand__eyebrow">AURIS local demo</div>
                <h1 class="app-brand__title">AI Muzik Tespiti<br />Canli Analiz</h1>
                <p class="app-brand__subtitle">
                    Demo arayuzu artik egitim artefact'lari ile ayni ozellik semasini kullaniyor.
                    Yani yukledigin ses, `DataSet/features.csv` ile egitilen modellerle birebir uyumlu
                    sekilde analiz ediliyor.
                </p>
            </div>
            <div class="app-meta">
                <div class="app-meta__grid">
                    <div class="meta-chip">
                        <span class="meta-chip__label">En iyi model</span>
                        <span class="meta-chip__value">{ARTIFACTS.best_model_name}</span>
                    </div>
                    <div class="meta-chip">
                        <span class="meta-chip__label">Model sayisi</span>
                        <span class="meta-chip__value">{len(ARTIFACTS.loaded_models)}</span>
                    </div>
                    <div class="meta-chip">
                        <span class="meta-chip__label">Veri seti</span>
                        <span class="meta-chip__value">{sample_count} ornek</span>
                    </div>
                    <div class="meta-chip">
                        <span class="meta-chip__label">Baskin kaynak</span>
                        <span class="meta-chip__value">{top_generator}</span>
                    </div>
                </div>
            </div>
        </div>
    </section>
    """
# ---- UI definition -------------------------------------------------------
# NOTE(review): custom CSS is applied via demo.launch(css=AURIS_CSS) at the
# bottom of the file; current Gradio documents `css` on gr.Blocks(...) —
# confirm the pinned version accepts it on launch().
with gr.Blocks(title="AURIS Local Demo") as demo:
    gr.HTML(_build_header_html())
    with gr.Tabs():
        # Tab 1: live analysis — upload, model pick, rendered verdict panes.
        with gr.Tab("Analiz"):
            with gr.Row(equal_height=False):
                with gr.Column(scale=1, min_width=320):
                    audio_input = gr.Audio(
                        label="Ses dosyasi yukle",
                        type="filepath",
                    )
                    model_dropdown = gr.Dropdown(
                        choices=ARTIFACTS.model_labels,
                        value=ARTIFACTS.best_model_label,
                        label="Calistirilacak model",
                        interactive=True,
                    )
                    analyze_button = gr.Button("Analizi baslat", variant="primary", size="lg")
                    # Only show the examples widget when bundled clips exist.
                    if _example_audio_paths():
                        gr.Examples(
                            examples=_example_audio_paths(),
                            inputs=[audio_input],
                            label="Hazir ornekler",
                        )
                with gr.Column(scale=2, min_width=520):
                    result_html = gr.HTML()
                    with gr.Row(equal_height=False):
                        signal_html = gr.HTML()
                        model_table_html = gr.HTML()
                    details_md = gr.Markdown()
            analyze_button.click(
                fn=analyze_audio,
                inputs=[audio_input, model_dropdown],
                outputs=[result_html, signal_html, model_table_html, details_md],
            )
        # Tab 2: static training metrics (rendered once at startup).
        with gr.Tab("Modeller"):
            gr.Markdown(build_models_md())
        # Tab 3: dataset roll-up from manifest.csv.
        with gr.Tab("Veri Seti"):
            gr.Markdown(build_dataset_md())
        # Tab 4: academic figure gallery, shown only when PNGs exist.
        with gr.Tab("Gorseller"):
            figure_paths = sorted(str(path) for path in FIGURES_DIR.glob("*.png")) if FIGURES_DIR.exists() else []
            if figure_paths:
                gr.Gallery(
                    value=figure_paths,
                    label="Akademik ciktılar",
                    columns=3,
                    height="auto",
                    object_fit="contain",
                )
            else:
                gr.Markdown("Gorsel bulunamadi.")
def _pick_available_port(preferred_port: int) -> int:
for port in range(preferred_port, preferred_port + 25):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(0.2)
if sock.connect_ex(("127.0.0.1", port)) != 0:
return port
raise RuntimeError("Bos bir port bulunamadi.")
def _parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Run the AURIS local Gradio demo")
parser.add_argument("--host", default="127.0.0.1", help="Bind address")
parser.add_argument("--port", type=int, default=7864, help="Preferred port")
parser.add_argument(
"--no-browser",
action="store_true",
help="Do not open the browser automatically",
)
return parser.parse_args()
if __name__ == "__main__":
args = _parse_args()
port = _pick_available_port(args.port)
local_host = "127.0.0.1" if args.host == "0.0.0.0" else args.host
print("AURIS local demo")
print(f"Host: {args.host}")
print(f"Port: {port}")
print(f"Open: http://{local_host}:{port}")
demo.launch(
server_name=args.host,
server_port=port,
share=False,
inbrowser=not args.no_browser,
show_error=True,
css=AURIS_CSS,
)