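"""AI Video Generator Space.

Enhances the user's scene prompt with an OpenRouter-hosted model, pairs a
placeholder AI-generated clip with the uploaded voiceover via FFmpeg, and
serves everything through a Gradio interface.
"""
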
import gradio as gr
from openai import OpenAI
import requests
import subprocess
import uuid
import os

# Load the OpenRouter key from a Hugging Face Space secret and point the
# OpenAI-compatible client at OpenRouter's API.
client = OpenAI(
    api_key=os.environ.get("OPENROUTER_API_KEY"),
    base_url="https://openrouter.ai/api/v1",
)

def enhance_prompt(user_prompt):
    """Expand a short user prompt into a richer scene description via OpenRouter."""
    try:
        completion = client.chat.completions.create(
            model="google/gemma-3n-e2b-it:free",
            messages=[
                {"role": "system", "content": "You are an expert in video storytelling. Expand the user prompt into a rich, visual scene description."},
                {"role": "user", "content": user_prompt}
            ]
        )
        return completion.choices[0].message.content
    except Exception:
        return user_prompt  # Fall back to the original prompt if the API call fails

def generate_video(text, audio_file):
    uid = str(uuid.uuid4())
    enhanced_prompt = enhance_prompt(text)

    # Step 1: Fetch an AI-generated video from a free Hugging Face model (e.g. AnimateDiff Lightning).
    # This currently downloads a static sample clip, so the enhanced prompt is not used yet.
    video_url = "https://huggingface.co/spaces/FFusion/AnimateDiff-Lightning/resolve/main/videos/sample.mp4"

    raw_video = f"{uid}_ai.mp4"
    final_video = f"{uid}_merged.mp4"

    try:
        # Download the sample video (replace with a real model call later)
        r = requests.get(video_url, timeout=60)
        r.raise_for_status()
        with open(raw_video, "wb") as f:
            f.write(r.content)
    except Exception as e:
        raise gr.Error(f"❌ Error downloading AI video: {e}")

    try:
        # Step 2: Merge the voiceover with the video using FFmpeg.
        # Arguments are passed as a list so file paths with spaces are handled safely.
        command = [
            "ffmpeg", "-y",
            "-i", raw_video,
            "-i", audio_file,
            "-map", "0:v", "-map", "1:a",
            "-shortest",
            "-c:v", "copy", "-c:a", "aac",
            final_video,
        ]
        subprocess.run(command, check=True)
    except Exception as e:
        raise gr.Error(f"❌ FFmpeg error: {e}")

    return final_video

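# Gradio UI: a scene prompt plus an uploaded voiceover in, the merged MP4 out.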
demo = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Textbox(label="🎬 Enter Scene Prompt"),
        gr.Audio(type="filepath", label="🎙️ Upload Voiceover")
    ],
    outputs=gr.Video(label="📽️ Final AI Video"),
    title="AI Video Generator (OpenRouter + Hugging Face)",
    description="Uses OpenRouter to enhance your prompt, then generates a video and merges it with your voiceover."
)

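# share=True only matters when running locally (it creates a temporary public link);
# a Hugging Face Space is already publicly hosted.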
demo.launch(share=True)