import gradio as gr
import openai
import requests
import subprocess
import uuid
import os
# Load OpenRouter key from Hugging Face secret
openai.api_key = os.environ.get("OPENROUTER_API_KEY")
openai.api_base = "https://openrouter.ai/api/v1"

def enhance_prompt(user_prompt):
    try:
        completion = openai.ChatCompletion.create(
            model="google/gemma-3n-e2b-it:free",
            messages=[
                {"role": "system", "content": "You are an expert in video storytelling. Expand the user prompt into a rich, visual scene description."},
                {"role": "user", "content": user_prompt},
            ],
        )
        return completion.choices[0].message.content
    except Exception:
        return user_prompt  # fall back to the raw prompt if the OpenRouter call fails
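
# Hedged sketch, not used by the app: the module-level openai.api_key / openai.api_base
# configuration above only works with the legacy openai<1.0 package. If the Space pins
# openai>=1.0 instead, the same OpenRouter request would look roughly like this (model
# name and prompts reused from enhance_prompt; the function name is illustrative).
def enhance_prompt_openai_v1(user_prompt):
    from openai import OpenAI  # requires openai>=1.0

    client = OpenAI(
        api_key=os.environ.get("OPENROUTER_API_KEY"),
        base_url="https://openrouter.ai/api/v1",
    )
    try:
        completion = client.chat.completions.create(
            model="google/gemma-3n-e2b-it:free",
            messages=[
                {"role": "system", "content": "You are an expert in video storytelling. Expand the user prompt into a rich, visual scene description."},
                {"role": "user", "content": user_prompt},
            ],
        )
        return completion.choices[0].message.content
    except Exception:
        return user_prompt  # same fallback behaviour as enhance_prompt()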


def generate_video(text, audio_file):
    uid = str(uuid.uuid4())
    enhanced_prompt = enhance_prompt(text)  # not yet fed to a video model; kept for the real generation call

    # Step 1: Generate the AI video from a Hugging Face free model (like AnimateDiff Lightning).
    # For now this points at a static sample clip hosted in the Space repo.
    video_url = "https://huggingface.co/spaces/FFusion/AnimateDiff-Lightning/resolve/main/videos/sample.mp4"
    raw_video = f"{uid}_ai.mp4"
    final_video = f"{uid}_merged.mp4"

    try:
        # Download the sample video (replace with a real model call later)
        r = requests.get(video_url, timeout=60)
        r.raise_for_status()
        with open(raw_video, "wb") as f:
            f.write(r.content)
    except Exception as e:
        return f"❌ Error downloading AI video: {str(e)}"

    try:
        # Step 2: Merge the uploaded voiceover with the video using FFmpeg.
        # List arguments avoid shell-quoting issues with temp file paths;
        # check=True makes a non-zero FFmpeg exit raise into the except block.
        command = ["ffmpeg", "-y", "-i", raw_video, "-i", audio_file,
                   "-map", "0:v", "-map", "1:a", "-shortest",
                   "-c:v", "copy", "-c:a", "aac", final_video]
        subprocess.run(command, check=True, capture_output=True)
    except Exception as e:
        return f"❌ FFmpeg error: {str(e)}"

    return final_video
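

# Hedged sketch, not wired into generate_video(): replacing the sample-clip download with a
# real call to a hosted AnimateDiff-Lightning Space via gradio_client. The Space id is taken
# from the URL above, but the api_name and argument list are assumptions — check the Space's
# "Use via API" panel for the actual endpoint before using this.
def generate_ai_clip(prompt, out_path):
    import shutil
    from gradio_client import Client  # pip install gradio_client

    client = Client("FFusion/AnimateDiff-Lightning")
    result = client.predict(prompt, api_name="/generate")  # assumed endpoint and signature
    # result is assumed to be the local path of the generated clip; copy it next to our files
    shutil.copy(result, out_path)
    return out_path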


demo = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Textbox(label="🎬 Enter Scene Prompt"),
        gr.Audio(type="filepath", label="🎙️ Upload Voiceover"),
    ],
    outputs=gr.Video(label="📽️ Final AI Video"),
    title="AI Video Generator (OpenRouter + Hugging Face)",
    description="Uses OpenRouter to enhance your prompt, then generates a video and merges it with your voiceover.",
)

demo.launch(share=True)
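
# Deployment assumptions (inferred from the code above, worth double-checking):
# - requirements.txt should pin openai<1.0, since openai.ChatCompletion and openai.api_base
#   were removed in openai 1.x; gradio and requests are also required.
# - The ffmpeg binary must be available in the Space image (e.g. list it in packages.txt).
# - OPENROUTER_API_KEY must be set as a Space secret for prompt enhancement to work.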