Spaces:
Running
Running
File size: 3,656 Bytes
833aed9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 |
"""
FastAPI backend for LionGuard moderation
"""
from fastapi import FastAPI, HTTPException
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from fastapi.middleware.cors import CORSMiddleware
import os
from typing import List
from models import (
ModerateRequest,
ModerateResponse,
FeedbackRequest,
FeedbackResponse,
ChatRequest,
ChatResponse,
CategoryScore,
ChatHistories,
)
from services import (
analyze_text,
submit_feedback,
process_chat_message,
)
# Application object; the metadata below feeds the auto-generated OpenAPI docs.
app = FastAPI(
    title="LionGuard API",
    description="Multilingual moderation and guardrail comparison",
    version="2.0.0"
)
# Enable CORS for development
# NOTE(review): wildcard origins combined with allow_credentials=True is a
# dev-only setting — browsers reject credentialed requests against a literal
# "*" origin, and it is unsafe for production; pin allow_origins before deploy.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Get the path to frontend directory.
# Resolved relative to this file so the app works from any working directory.
FRONTEND_DIR = os.path.join(os.path.dirname(__file__), "../frontend")
@app.post("/moderate", response_model=ModerateResponse)
async def moderate_text(request: ModerateRequest):
"""
Analyze text for moderation risks using LionGuard models
"""
try:
result = analyze_text(request.text, request.model)
return ModerateResponse(**result)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Error analyzing text: {str(e)}")
@app.post("/send_feedback", response_model=FeedbackResponse)
async def send_feedback(request: FeedbackRequest):
"""
Submit user feedback on moderation result
"""
try:
result = submit_feedback(request.text_id, request.agree)
return FeedbackResponse(**result)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Error submitting feedback: {str(e)}")
@app.post("/chat", response_model=ChatResponse)
async def chat_comparison(request: ChatRequest):
"""
Compare guardrails across three approaches:
- No moderation
- OpenAI moderation
- LionGuard moderation
"""
try:
# Convert request histories to list of dicts
history_no_mod = [msg.dict() for msg in request.histories.no_moderation]
history_openai = [msg.dict() for msg in request.histories.openai_moderation]
history_lg = [msg.dict() for msg in request.histories.lionguard]
# Process message
updated_no_mod, updated_openai, updated_lg, lg_score = await process_chat_message(
request.message,
request.model,
history_no_mod,
history_openai,
history_lg
)
# Convert back to response format
return ChatResponse(
histories=ChatHistories(
no_moderation=updated_no_mod,
openai_moderation=updated_openai,
lionguard=updated_lg
),
lionguard_score=lg_score
)
except Exception as e:
raise HTTPException(status_code=500, detail=f"Error processing chat: {str(e)}")
# Serve static frontend files: expose the raw frontend directory under /static
# so the page can load its CSS/JS assets.
app.mount("/static", StaticFiles(directory=FRONTEND_DIR), name="static")
@app.get("/")
async def serve_frontend():
"""
Serve the main HTML page
"""
index_path = os.path.join(FRONTEND_DIR, "index.html")
if os.path.exists(index_path):
return FileResponse(index_path)
raise HTTPException(status_code=404, detail="Frontend not found")
@app.get("/health")
async def health_check():
"""
Health check endpoint
"""
return {"status": "healthy", "service": "lionguard-api"}
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
|