diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -1,2020 +1,2409 @@
-"""
-AI Assistant - Gradio Application
-Powered by HuggingFace Hub
-"""
-import gradio as gr
-import os
-import json
-from datetime import datetime
-from hf_api import HuggingFaceAPI
-from utils import load_settings, save_settings
-
-# Optional import for Google Translate Gemma
-try:
- from google_translate import GoogleTranslateGemma
- GOOGLE_TRANSLATE_AVAILABLE = True
-except ImportError as e:
- print(f"Warning: Google Translate Gemma not available: {str(e)}")
- GOOGLE_TRANSLATE_AVAILABLE = False
- GoogleTranslateGemma = None
-
-# Settings paths
-SETTINGS_DIR = os.path.join(os.path.dirname(__file__), 'settings')
-MODELS_SETTINGS_FILE = os.path.join(SETTINGS_DIR, 'models.json')
-FIREBASE_SETTINGS_FILE = os.path.join(SETTINGS_DIR, 'firebase.json')
-APP_SETTINGS_FILE = os.path.join(SETTINGS_DIR, 'app.json')
-
-
-# Load initial settings
-model_settings = load_settings(MODELS_SETTINGS_FILE)
-HF_TOKEN = model_settings.get('huggingfaceToken', '')
-hf_api = HuggingFaceAPI(token=HF_TOKEN) if HF_TOKEN else None
-
-
-def reinit_api(token: str):
- """Reinitialize HuggingFace API with new token"""
- global hf_api
- hf_api = HuggingFaceAPI(token=token)
-
-
-def get_saved_models():
- """Get list of saved models"""
- settings = load_settings(MODELS_SETTINGS_FILE)
- models = settings.get('models', [])
- return [(m.get('name', m.get('modelId', 'Unknown')), m.get('modelId', '')) for m in models if m.get('enabled', True)]
-
-
-def get_model_choices():
- """Get model choices for dropdown"""
- models = get_saved_models()
- if not models:
- return ["meta-llama/Llama-3.2-3B-Instruct"]
- return [m[1] for m in models]
-
-
-# ============ Chat Functions ============
-
-def chat_response(message: str, history: list, model_id: str, temperature: float, max_tokens: int, system_prompt: str):
- """Generate chat response"""
- if not hf_api:
- return "Please set your HuggingFace token in Settings first."
-
- if not message.strip():
- return ""
-
- try:
- # Build messages with system prompt
- messages = []
- if system_prompt.strip():
- messages.append({"role": "system", "content": system_prompt})
-
- # Add history from message format
- for msg in history:
- if isinstance(msg, dict):
- messages.append(msg)
- elif isinstance(msg, tuple) and len(msg) == 2:
- # Handle legacy tuple format
- user_msg, assistant_msg = msg
- if user_msg:
- messages.append({"role": "user", "content": user_msg})
- if assistant_msg:
- messages.append({"role": "assistant", "content": assistant_msg})
-
- # Add current message
- messages.append({"role": "user", "content": message})
-
- response = hf_api.chat_completion(
- model=model_id,
- messages=messages,
- max_tokens=max_tokens,
- temperature=temperature
- )
-
- return response["choices"][0]["message"]["content"]
-
- except Exception as e:
- error_str = str(e)
- # Check if it's a model not supported error
- if "model_not_supported" in error_str or "not supported by any provider" in error_str:
- # Try to get fallback models
- try:
- fallback_models = hf_api._find_fallback_models(model_id)
- if fallback_models:
- fallback_list = "\n".join([f"- {m['id']}" for m in fallback_models[:3]])
- return f"Error: Model {model_id} is not supported. Try one of these models instead:\n{fallback_list}\n\nOriginal error: {error_str}"
- else:
- return f"Error: Model {model_id} is not supported and no fallback models are available.\n\nOriginal error: {error_str}"
- except:
- return f"Error: Model {model_id} is not supported. Please try a different model.\n\nOriginal error: {error_str}"
- else:
- return f"Error: {error_str}"
-
-
-def text_generation(prompt: str, model_id: str, temperature: float, max_tokens: int, top_p: float):
- """Generate text from prompt"""
- if not hf_api:
- return "Please set your HuggingFace token in Settings first."
-
- if not prompt.strip():
- return ""
-
- try:
- # Check model settings for task support
- model_settings = load_settings(MODELS_SETTINGS_FILE)
- models = model_settings.get('models', [])
-
- # Find the model in settings
- model_info = None
- for m in models:
- if m.get('modelId') == model_id:
- model_info = m
- break
-
- # Check if model recommends chat_completion
- if model_info and model_info.get('recommendedMethod') == 'chat_completion':
- # Use chat completion for conversational models
- messages = [{"role": "user", "content": prompt}]
- response = hf_api.chat_completion(
- model=model_id,
- messages=messages,
- max_tokens=max_tokens,
- temperature=temperature
- )
- return response["choices"][0]["message"]["content"]
- else:
- # Use text generation for other models
- response = hf_api.text_generation(
- model=model_id,
- prompt=prompt,
- max_new_tokens=max_tokens,
- temperature=temperature,
- top_p=top_p
- )
- return response.get("generated_text", "")
- except Exception as e:
- error_str = str(e)
- # Check if it's a model not supported error
- if "model_not_supported" in error_str or "not supported by any provider" in error_str:
- # Try to get fallback models
- try:
- fallback_models = hf_api._find_fallback_models(model_id)
- if fallback_models:
- fallback_list = "\n".join([f"- {m['id']}" for m in fallback_models[:3]])
- return f"Error: Model {model_id} is not supported. Try one of these models instead:\n{fallback_list}\n\nOriginal error: {error_str}"
- else:
- return f"Error: Model {model_id} is not supported and no fallback models are available.\n\nOriginal error: {error_str}"
- except:
- return f"Error: Model {model_id} is not supported. Please try a different model.\n\nOriginal error: {error_str}"
- else:
- return f"Error: {error_str}"
-
-
-def summarize_text(text: str, model_id: str, max_length: int, min_length: int):
- """Summarize text"""
- if not hf_api:
- return "Please set your HuggingFace token in Settings first."
-
- if not text.strip():
- return ""
-
- try:
- response = hf_api.summarization(
- model=model_id,
- text=text,
- max_length=max_length,
- min_length=min_length
- )
- if isinstance(response, list) and len(response) > 0:
- return response[0].get('summary_text', '')
- elif isinstance(response, dict):
- return response.get('summary_text', str(response))
- return str(response)
- except Exception as e:
- return f"Error: {str(e)}"
-
-
-def translate_text(text: str, model_id: str, target_language: str = "", source_language: str = "Auto-detect"):
- """Translate text"""
- if not hf_api:
- return "Please set your HuggingFace token in Settings first."
-
- if not text.strip():
- return ""
-
- try:
- # Use Google Translate Gemma module for Google TranslateGemma model
- if "translategemma" in model_id.lower():
- if not GOOGLE_TRANSLATE_AVAILABLE:
- # If Google Translate is not available, fall back to chat completion
- print("Google Translate Gemma not available, falling back to chat completion")
-
- source_info = f" from {source_language}" if source_language != "Auto-detect" else ""
- system_prompt = f"You are a professional translator specializing in translating{source_info} to {target_language}. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations."
- prompt = f"Translate the following text{source_info} to {target_language}: {text}"
-
- messages = [
- {"role": "system", "content": system_prompt},
- {"role": "user", "content": prompt}
- ]
-
- response = hf_api.chat_completion(
- model=model_id,
- messages=messages,
- max_tokens=1024,
- temperature=0.3
- )
-
- return response["choices"][0]["message"]["content"].strip()
-
- try:
- # Initialize the translator
- translator = GoogleTranslateGemma()
-
- # Map language names to language codes
- lang_code_map = {
- "English": "en",
- "Spanish": "es",
- "French": "fr",
- "German": "de-DE",
- "Chinese (Simplified)": "zh-CN",
- "Chinese (Traditional)": "zh-TW",
- "Japanese": "ja",
- "Korean": "ko",
- "Italian": "it",
- "Portuguese": "pt",
- "Russian": "ru",
- "Arabic": "ar",
- "Hindi": "hi",
- "Dutch": "nl",
- "Turkish": "tr",
- "Polish": "pl",
- "Vietnamese": "vi",
- "Thai": "th",
- "Indonesian": "id",
- "Greek": "el",
- "Hebrew": "he",
- "Czech": "cs",
- "Swedish": "sv",
- "Danish": "da",
- "Norwegian": "no",
- "Finnish": "fi"
- }
-
- # Get source language code
- source_lang = "en" # Default to English
- if source_language != "Auto-detect" and source_language in lang_code_map:
- source_lang = lang_code_map[source_language]
-
- # Get target language code
- target_lang = "en" # Default to English
- if target_language in lang_code_map:
- target_lang = lang_code_map[target_language]
-
- # Perform translation
- translated = translator.translate_text(
- text=text,
- source_lang=source_lang,
- target_lang=target_lang
- )
-
- return translated
-
- except Exception as gemma_e:
- # If Google Translate Gemma fails, fall back to chat completion
- print(f"Google Translate Gemma failed, falling back to chat completion: {str(gemma_e)}")
-
- # Use chat completion as fallback
- source_info = f" from {source_language}" if source_language != "Auto-detect" else ""
- system_prompt = f"You are a professional translator specializing in translating{source_info} to {target_language}. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations."
- prompt = f"Translate the following text{source_info} to {target_language}: {text}"
-
- messages = [
- {"role": "system", "content": system_prompt},
- {"role": "user", "content": prompt}
- ]
-
- response = hf_api.chat_completion(
- model=model_id,
- messages=messages,
- max_tokens=1024,
- temperature=0.3
- )
-
- return response["choices"][0]["message"]["content"].strip()
-
- # For models that support chat completion (like Llama and Mistral)
- elif "llama" in model_id.lower() or "mistral" in model_id.lower():
- # Use chat completion for translation
- # Dynamic system prompt based on target and source language
- if target_language:
- source_info = f" from {source_language}" if source_language != "Auto-detect" else ""
- system_prompt = f"You are a professional translator specializing in translating{source_info} to {target_language}. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations."
- prompt = f"Translate the following text{source_info} to {target_language}: {text}"
- else:
- system_prompt = "You are a professional translator. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations."
- prompt = f"Translate this text: {text}"
-
- messages = [
- {
- "role": "system",
- "content": system_prompt
- },
- {
- "role": "user",
- "content": prompt
- }
- ]
-
- response = hf_api.chat_completion(
- model=model_id,
- messages=messages,
- max_tokens=1024,
- temperature=0.3
- )
-
- return response["choices"][0]["message"]["content"].strip()
- else:
- # Use the standard translation endpoint for other models
- response = hf_api.translation(
- model=model_id,
- text=text
- )
- if isinstance(response, list) and len(response) > 0:
- return response[0].get('translation_text', '')
- elif isinstance(response, dict):
- return response.get('translation_text', str(response))
- return str(response)
- except Exception as e:
- # Handle specific model errors with better fallback options
- error_str = str(e).lower()
- if "model_not_supported" in error_str or "not supported by any provider" in error_str or "inference api enabled" in error_str:
- # Try fallback models for translation
- fallback_models = [
- "Helsinki-NLP/opus-mt-en-es", # English to Spanish
- "Helsinki-NLP/opus-mt-en-fr", # English to French
- "Helsinki-NLP/opus-mt-en-de", # English to German
- "Helsinki-NLP/opus-mt-en-zh", # English to Chinese
- "Helsinki-NLP/opus-mt-en-ja", # English to Japanese
- "meta-llama/Llama-3.2-3B-Instruct" # Llama as general fallback
- ]
-
- # Try each fallback model
- for fallback_model in fallback_models:
- try:
- # For Llama models, use chat completion
- if "llama" in fallback_model.lower():
- system_prompt = "You are a professional translator. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations."
-
- # If target language is specified, include it in the instruction
- if target_language:
- prompt = f"Translate the following text to {target_language}: {text}"
- else:
- prompt = f"Translate this text: {text}"
-
- messages = [
- {
- "role": "system",
- "content": system_prompt
- },
- {
- "role": "user",
- "content": prompt
- }
- ]
-
- response = hf_api.chat_completion(
- model=fallback_model,
- messages=messages,
- max_tokens=1024,
- temperature=0.3
- )
-
- return f"[Translated with fallback model: {fallback_model}] {response['choices'][0]['message']['content'].strip()}"
-
- # For Helsinki models, use standard translation
- else:
- response = hf_api.translation(
- model=fallback_model,
- text=text
- )
- if isinstance(response, list) and len(response) > 0:
- return f"[Translated with fallback model: {fallback_model}] {response[0].get('translation_text', '')}"
- elif isinstance(response, dict):
- return f"[Translated with fallback model: {fallback_model}] {response.get('translation_text', str(response))}"
-
- except Exception as fallback_e:
- continue # Try next fallback model
-
- # If all fallbacks fail, return original error with suggestions
- return f"Error: {str(e)}. Tried fallback models but none worked. Please try a different model or check your HuggingFace token."
- else:
- return f"Error: {str(e)}"
-
-
-def translate_image(image_path: str, model_id: str, target_language: str = "", source_language: str = "Auto-detect"):
- """Translate text from image"""
- if not image_path:
- return "Please upload an image first."
-
- # Only Google TranslateGemma supports image translation
- if "translategemma" not in model_id.lower():
- return "Image translation is only supported with Google TranslateGemma model. Please select 'google/translategemma-12b-it' from the model dropdown."
-
- if not GOOGLE_TRANSLATE_AVAILABLE:
- return "Google Translate Gemma is not available. Please check your installation of transformers, torch, and torchvision."
-
- try:
- # Initialize the translator
- translator = GoogleTranslateGemma()
-
- # Map language names to language codes
- lang_code_map = {
- "English": "en",
- "Spanish": "es",
- "French": "fr",
- "German": "de-DE",
- "Chinese (Simplified)": "zh-CN",
- "Chinese (Traditional)": "zh-TW",
- "Japanese": "ja",
- "Korean": "ko",
- "Italian": "it",
- "Portuguese": "pt",
- "Russian": "ru",
- "Arabic": "ar",
- "Hindi": "hi",
- "Dutch": "nl",
- "Turkish": "tr",
- "Polish": "pl",
- "Vietnamese": "vi",
- "Thai": "th",
- "Indonesian": "id",
- "Greek": "el",
- "Hebrew": "he",
- "Czech": "cs",
- "Swedish": "sv",
- "Danish": "da",
- "Norwegian": "no",
- "Finnish": "fi"
- }
-
- # Get source language code
- source_lang = "en" # Default to English
- if source_language != "Auto-detect" and source_language in lang_code_map:
- source_lang = lang_code_map[source_language]
-
- # Get target language code
- target_lang = "en" # Default to English
- if target_language in lang_code_map:
- target_lang = lang_code_map[target_language]
-
- # Translate from image (now supports local files)
- translated = translator.translate_image(
- image_input=image_path,
- source_lang=source_lang,
- target_lang=target_lang
- )
-
- return translated
- except Exception as e:
- return f"Error: {str(e)}"
-
-def answer_question(question: str, context: str, model_id: str):
- """Answer question based on context"""
- if not hf_api:
- return "Please set your HuggingFace token in Settings first.", 0.0
-
- if not question.strip() or not context.strip():
- return "", 0.0
-
- try:
- response = hf_api.question_answering(
- model=model_id,
- question=question,
- context=context
- )
- answer = response.get('answer', '')
- score = response.get('score', 0.0)
- return answer, round(score, 4)
- except Exception as e:
- return f"Error: {str(e)}", 0.0
-
-
-def generate_image(prompt: str, model_id: str, negative_prompt: str, num_steps: int):
- """Generate image from prompt"""
- if not hf_api:
- return None
-
- if not prompt.strip():
- return None
-
- try:
- image_bytes = hf_api.image_generation(
- model=model_id,
- prompt=prompt,
- negative_prompt=negative_prompt if negative_prompt.strip() else None,
- num_inference_steps=num_steps
- )
-
- # Save to temp file and return path
- import tempfile
- temp_path = os.path.join(tempfile.gettempdir(), "generated_image.png")
- with open(temp_path, "wb") as f:
- f.write(image_bytes)
- return temp_path
- except Exception as e:
- gr.Warning(f"Image generation error: {str(e)}")
- return None
-
-
-# ============ Model Management Functions ============
-
-def search_hf_models(query: str, task: str, limit: int):
- """Search HuggingFace models"""
- if not hf_api:
- return []
-
- if not query.strip():
- return []
-
- try:
- models = list(hf_api.list_models(
- search=query,
- pipeline_tag=task,
- sort="downloads",
- direction=-1,
- limit=limit
- ))
-
- results = []
- for model in models:
- downloads = model.downloads or 0
- likes = model.likes or 0
- downloads_str = f"{downloads/1000000:.1f}M" if downloads >= 1000000 else f"{downloads/1000:.1f}K" if downloads >= 1000 else str(downloads)
- results.append([
- model.id,
- model.author or '',
- model.pipeline_tag or '',
- downloads_str,
- likes
- ])
- return results
- except Exception as e:
- gr.Warning(f"Search error: {str(e)}")
- return []
-
-
-def get_model_info(model_id: str):
- """Get detailed model information"""
- if not hf_api or not model_id.strip():
- return "No model ID provided"
-
- try:
- info = hf_api.model_info(model_id)
-
- downloads = info.downloads or 0
- likes = info.likes or 0
-
- result = f"""### {model_id}
-
-**Author:** {info.author or 'Unknown'}
-**Pipeline:** {info.pipeline_tag or 'N/A'}
-**Library:** {info.library_name or 'N/A'}
-**Downloads:** {downloads:,}
-**Likes:** {likes:,}
-
-**Tags:** {', '.join(info.tags[:15]) if info.tags else 'None'}
-
-**Created:** {str(info.created_at)[:10] if info.created_at else 'Unknown'}
-"""
- return result
- except Exception as e:
- return f"Error fetching model info: {str(e)}"
-
-
-def add_model_to_settings(model_id: str, name: str, role: str, temperature: float, max_tokens: int, system_prompt: str):
- """Add a model to settings"""
- if not model_id.strip():
- return "Model ID is required", get_models_table()
-
- settings = load_settings(MODELS_SETTINGS_FILE)
- if 'models' not in settings:
- settings['models'] = []
-
- # Generate unique ID
- unique_id = f"model-{int(datetime.now().timestamp() * 1000)}"
-
- model_data = {
- "id": unique_id,
- "name": name or model_id.split('/')[-1],
- "modelId": model_id,
- "role": role,
- "temperature": temperature,
- "maxTokens": max_tokens,
- "systemPrompt": system_prompt,
- "keywords": [],
- "enabled": True,
- "createdAt": int(datetime.now().timestamp() * 1000),
- "updatedAt": int(datetime.now().timestamp() * 1000)
- }
-
- settings['models'].append(model_data)
- save_settings(MODELS_SETTINGS_FILE, settings)
-
- return f"Model '{name or model_id}' added successfully!", get_models_table()
-
-
-def get_models_table():
- """Get models as table data"""
- settings = load_settings(MODELS_SETTINGS_FILE)
- models = settings.get('models', [])
-
- table_data = []
- for m in models:
- table_data.append([
- m.get('id', ''),
- m.get('name', ''),
- m.get('modelId', ''),
- m.get('role', ''),
- m.get('temperature', 0.3),
- m.get('maxTokens', 500),
- "โ" if m.get('enabled', True) else "โ"
- ])
- return table_data
-
-
-def delete_model(model_id: str):
- """Delete a model from settings"""
- if not model_id.strip():
- return "No model selected", get_models_table()
-
- settings = load_settings(MODELS_SETTINGS_FILE)
- if 'models' in settings:
- settings['models'] = [m for m in settings['models'] if m['id'] != model_id]
- save_settings(MODELS_SETTINGS_FILE, settings)
- return f"Model deleted", get_models_table()
- return "Model not found", get_models_table()
-
-
-def toggle_model(model_id: str, enabled: bool):
- """Toggle model enabled state"""
- settings = load_settings(MODELS_SETTINGS_FILE)
- if 'models' in settings:
- for m in settings['models']:
- if m['id'] == model_id:
- m['enabled'] = enabled
- m['updatedAt'] = int(datetime.now().timestamp() * 1000)
- break
- save_settings(MODELS_SETTINGS_FILE, settings)
- return get_models_table()
-
-
-# ============ Settings Functions ============
-
-def save_hf_token(token: str):
- """Save HuggingFace token"""
- settings = load_settings(MODELS_SETTINGS_FILE)
- settings['huggingfaceToken'] = token
- save_settings(MODELS_SETTINGS_FILE, settings)
- reinit_api(token)
- return "Token saved successfully!"
-
-
-def get_hf_token():
- """Get current HuggingFace token"""
- settings = load_settings(MODELS_SETTINGS_FILE)
- return settings.get('huggingfaceToken', '')
-
-
-def get_account_info():
- """Get HuggingFace account info"""
- if not hf_api:
- return "No API token configured"
-
- try:
- info = hf_api.hf_api.whoami()
- return f"""### Account Info
-
-**Username:** {info.get('name', 'Unknown')}
-**Email:** {info.get('email', 'Not available')}
-**Organizations:** {len(info.get('orgs', []))}
-"""
- except Exception as e:
- return f"Error: {str(e)}"
-
-
- # ============ Gradio Interface ============
-
-# Custom CSS for professional dark theme
-custom_css = """
-/* Dark theme with modern design */
-.gradio-container {
- background: linear-gradient(135deg, #0f0f23 0%, #1a1a2e 100%) !important;
- color: #e0e0e0 !important;
- font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
-}
-
-/* Header styling */
-.main-header {
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
- padding: 2rem !important;
- border-radius: 16px !important;
- margin-bottom: 2rem !important;
- box-shadow: 0 10px 30px rgba(102, 126, 234, 0.3) !important;
- border: 1px solid rgba(255, 255, 255, 0.1) !important;
-}
-
-.main-header h1 {
- color: white !important;
- margin: 0 !important;
- font-size: 2.5rem !important;
- font-weight: 700 !important;
- text-shadow: 0 2px 4px rgba(0,0,0,0.3) !important;
-}
-
-.main-header p {
- color: rgba(255,255,255,0.9) !important;
- margin: 0.5rem 0 0 0 !important;
- font-size: 1.1rem !important;
-}
-
-/* Tab styling */
-.tabs {
- background: transparent !important;
- border-radius: 12px !important;
- overflow: hidden !important;
-}
-
-.tab-nav {
- background: rgba(255, 255, 255, 0.05) !important;
- border: 1px solid rgba(255, 255, 255, 0.1) !important;
- border-radius: 12px !important;
- padding: 0.5rem !important;
- margin-bottom: 1.5rem !important;
-}
-
-.tab-nav button {
- background: transparent !important;
- color: #a0a0a0 !important;
- border: none !important;
- padding: 0.75rem 1.5rem !important;
- margin: 0 0.25rem !important;
- border-radius: 8px !important;
- transition: all 0.3s ease !important;
- font-weight: 500 !important;
-}
-
-.tab-nav button:hover {
- background: rgba(255, 255, 255, 0.1) !important;
- color: #ffffff !important;
-}
-
-.tab-nav button.selected {
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
- color: white !important;
- box-shadow: 0 4px 12px rgba(102, 126, 234, 0.3) !important;
-}
-
-/* Card styling */
-.gradio-box {
- background: rgba(255, 255, 255, 0.05) !important;
- border: 1px solid rgba(255, 255, 255, 0.1) !important;
- border-radius: 12px !important;
- padding: 1.5rem !important;
- backdrop-filter: blur(10px) !important;
- transition: all 0.3s ease !important;
-}
-
-.gradio-box:hover {
- background: rgba(255, 255, 255, 0.08) !important;
- border-color: rgba(255, 255, 255, 0.15) !important;
- transform: translateY(-2px) !important;
- box-shadow: 0 8px 24px rgba(0, 0, 0, 0.2) !important;
-}
-
-/* Button styling */
-.gradio-button {
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
- color: white !important;
- border: none !important;
- padding: 0.75rem 1.5rem !important;
- border-radius: 8px !important;
- font-weight: 600 !important;
- transition: all 0.3s ease !important;
- box-shadow: 0 4px 12px rgba(102, 126, 234, 0.3) !important;
-}
-
-.gradio-button:hover {
- transform: translateY(-2px) !important;
- box-shadow: 0 6px 20px rgba(102, 126, 234, 0.4) !important;
-}
-
-.gradio-button.secondary {
- background: rgba(255, 255, 255, 0.1) !important;
- color: #e0e0e0 !important;
- border: 1px solid rgba(255, 255, 255, 0.2) !important;
-}
-
-/* Input styling */
-.gradio-textbox, .gradio-dropdown {
- background: rgba(255, 255, 255, 0.05) !important;
- border: 1px solid rgba(255, 255, 255, 0.2) !important;
- border-radius: 8px !important;
- color: #e0e0e0 !important;
- transition: all 0.3s ease !important;
-}
-
-.gradio-textbox:focus, .gradio-dropdown:focus {
- border-color: #667eea !important;
- box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.2) !important;
- background: rgba(255, 255, 255, 0.08) !important;
-}
-
-.gradio-textbox::placeholder {
- color: rgba(255, 255, 255, 0.5) !important;
-}
-
-/* Slider styling */
-.gradio-slider {
- background: rgba(255, 255, 255, 0.1) !important;
-}
-
-.gradio-slider .slider-track {
- background: linear-gradient(90deg, #667eea 0%, #764ba2 100%) !important;
-}
-
-/* Chatbot styling */
-.gradio-chatbot {
- background: rgba(255, 255, 255, 0.03) !important;
- border: 1px solid rgba(255, 255, 255, 0.1) !important;
- border-radius: 12px !important;
-}
-
-.gradio-chatbot .message {
- background: rgba(255, 255, 255, 0.05) !important;
- border-radius: 8px !important;
- margin: 0.5rem !important;
- padding: 1rem !important;
-}
-
-.gradio-chatbot .message.user {
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
- color: white !important;
-}
-
-/* Dataframe styling */
-.gradio-dataframe {
- background: rgba(255, 255, 255, 0.05) !important;
- border: 1px solid rgba(255, 255, 255, 0.1) !important;
- border-radius: 8px !important;
-}
-
-.gradio-dataframe table {
- color: #e0e0e0 !important;
-}
-
-.gradio-dataframe th {
- background: rgba(255, 255, 255, 0.1) !important;
- border-bottom: 1px solid rgba(255, 255, 255, 0.2) !important;
-}
-
-.gradio-dataframe td {
- border-bottom: 1px solid rgba(255, 255, 255, 0.05) !important;
-}
-
-/* Markdown styling */
-.gradio-markdown {
- color: #e0e0e0 !important;
-}
-
-.gradio-markdown h1, .gradio-markdown h2, .gradio-markdown h3 {
- color: #ffffff !important;
- margin-top: 1.5rem !important;
-}
-
-.gradio-markdown a {
- color: #667eea !important;
-}
-
-/* Footer styling */
-footer {
- background: rgba(255, 255, 255, 0.03) !important;
- border-top: 1px solid rgba(255, 255, 255, 0.1) !important;
- padding: 1.5rem !important;
- text-align: center !important;
- color: rgba(255, 255, 255, 0.6) !important;
-}
-
-/* Loading animation */
-.loading {
- display: inline-block;
- width: 20px;
- height: 20px;
- border: 3px solid rgba(255, 255, 255, 0.3);
- border-radius: 50%;
- border-top-color: #667eea;
- animation: spin 1s ease-in-out infinite;
-}
-
-@keyframes spin {
- to { transform: rotate(360deg); }
-}
-
-/* Responsive design */
-@media (max-width: 768px) {
- .main-header h1 {
- font-size: 2rem !important;
- }
-
- .gradio-box {
- padding: 1rem !important;
- }
-
- .tab-nav button {
- padding: 0.5rem 1rem !important;
- font-size: 0.9rem !important;
- }
-}
-
-/* Custom component styles */
-.chatbot-container {
- background: rgba(255, 255, 255, 0.03) !important;
- border: 1px solid rgba(255, 255, 255, 0.1) !important;
- border-radius: 12px !important;
- overflow: hidden !important;
-}
-
-.chat-input textarea {
- background: rgba(255, 255, 255, 0.05) !important;
- border: 1px solid rgba(255, 255, 255, 0.2) !important;
- border-radius: 8px !important;
- resize: none !important;
- transition: all 0.3s ease !important;
-}
-
-.chat-input textarea:focus {
- border-color: #667eea !important;
- box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.2) !important;
-}
-
-.send-button {
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
- border: none !important;
- height: 100% !important;
- min-height: 40px !important;
-}
-
-.settings-group {
- background: rgba(255, 255, 255, 0.05) !important;
- border: 1px solid rgba(255, 255, 255, 0.1) !important;
- border-radius: 12px !important;
- padding: 1.5rem !important;
-}
-
-.input-group, .output-group {
- background: rgba(255, 255, 255, 0.05) !important;
- border: 1px solid rgba(255, 255, 255, 0.1) !important;
- border-radius: 12px !important;
- padding: 1.5rem !important;
- height: 100% !important;
-}
-
-.output-textarea {
- background: rgba(255, 255, 255, 0.03) !important;
- border: 1px solid rgba(255, 255, 255, 0.1) !important;
- border-radius: 8px !important;
- font-family: 'Inter', monospace !important;
- line-height: 1.6 !important;
-}
-
-.translation-input, .translation-output {
- background: rgba(255, 255, 255, 0.05) !important;
- border: 1px solid rgba(255, 255, 255, 0.1) !important;
- border-radius: 12px !important;
- padding: 1.5rem !important;
-}
-
-.translation-result {
- background: rgba(102, 126, 234, 0.1) !important;
- border: 1px solid rgba(102, 126, 234, 0.3) !important;
- border-radius: 8px !important;
- font-weight: 500 !important;
-}
-
-.image-controls, .image-output {
- background: rgba(255, 255, 255, 0.05) !important;
- border: 1px solid rgba(255, 255, 255, 0.1) !important;
- border-radius: 12px !important;
- padding: 1.5rem !important;
-}
-
-.generated-image {
- border-radius: 8px !important;
- overflow: hidden !important;
- box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2) !important;
-}
-
-/* Animation classes */
-.fade-in {
- animation: fadeIn 0.5s ease-in-out;
-}
-
-@keyframes fadeIn {
- from { opacity: 0; transform: translateY(10px); }
- to { opacity: 1; transform: translateY(0); }
-}
-
-.slide-up {
- animation: slideUp 0.3s ease-out;
-}
-
-@keyframes slideUp {
- from { transform: translateY(20px); opacity: 0; }
- to { transform: translateY(0); opacity: 1; }
-}
-
-/* Custom scrollbar */
-::-webkit-scrollbar {
- width: 8px;
- height: 8px;
-}
-
-::-webkit-scrollbar-track {
- background: rgba(255, 255, 255, 0.05);
- border-radius: 4px;
-}
-
-::-webkit-scrollbar-thumb {
- background: rgba(102, 126, 234, 0.5);
- border-radius: 4px;
-}
-
-::-webkit-scrollbar-thumb:hover {
- background: rgba(102, 126, 234, 0.7);
-}
-"""
-
-# Build the Gradio app
-with gr.Blocks(
- title="AI Assistant - HuggingFace",
- theme=gr.themes.Soft(
- primary_hue="purple",
- secondary_hue="blue",
- neutral_hue="slate",
- font=["Inter", "system-ui", "sans-serif"]
- ),
- css=custom_css
-) as app:
-
- # Header with modern design
- gr.HTML("""
-
-
-
-
- 🤖
- AI Assistant
-
-
- Powered by HuggingFace Hub • Advanced AI Models
-
-
-
- ✓ System Online
-
-
-
-
- """)
-
- with gr.Tabs() as tabs:
-
- # ============ Chat Tab ============
- with gr.Tab("๐ฌ Chat", id="chat"):
- with gr.Row(equal_height=True):
- with gr.Column(scale=3):
- # Chat header
- gr.HTML("""
-
-
- 💬 Conversation
-
-
- Interactive chat with AI models
-
-
- """)
-
- chatbot = gr.Chatbot(
- label="",
- height=500,
- placeholder="Start a conversation... Type your message below",
- value=[],
- show_label=False,
- container=True,
- elem_classes=["chatbot-container"]
- )
-
- with gr.Row():
- with gr.Column(scale=4):
- chat_input = gr.Textbox(
- placeholder="Type your message here... (Press Enter to send)",
- label="",
- show_label=False,
- lines=1,
- max_lines=5,
- elem_classes=["chat-input"]
- )
- with gr.Column(scale=1, min_width=100):
- send_btn = gr.Button(
- "Send",
- variant="primary",
- size="lg",
- elem_classes=["send-button"]
- )
-
- with gr.Row():
- clear_btn = gr.Button(
- "๐๏ธ Clear Chat",
- size="sm",
- variant="secondary",
- elem_classes=["action-button"]
- )
- with gr.Column(scale=1):
- gr.HTML("""
-
- Tip: Press Shift+Enter for new line
-
- """)
-
- with gr.Column(scale=1, min_width=300):
- # Settings panel
- gr.HTML("""
-
-
- โ๏ธ Chat Settings
-
-
- """)
-
- with gr.Group(elem_classes=["settings-group"]):
- chat_model = gr.Dropdown(
- choices=get_model_choices(),
- value=get_model_choices()[0] if get_model_choices() else None,
- label="๐ค Model",
- interactive=True,
- info="Select AI model for conversation"
- )
-
- chat_temp = gr.Slider(
- minimum=0.1,
- maximum=1.0,
- value=0.7,
- step=0.1,
- label="๐ก๏ธ Temperature",
- info="Controls randomness (0.1 = focused, 1.0 = creative)"
- )
-
- chat_max_tokens = gr.Slider(
- minimum=50,
- maximum=4096,
- value=500,
- step=50,
- label="๐ Max Tokens",
- info="Maximum response length"
- )
-
- chat_system = gr.Textbox(
- label="๐ฏ System Prompt",
- placeholder="You are a helpful assistant...",
- lines=3,
- info="Define AI behavior and personality"
- )
-
- refresh_models_btn = gr.Button(
- "๐ Refresh Models",
- size="sm",
- variant="secondary",
- elem_classes=["refresh-button"]
- )
-
- def respond(message, history, model, temp, max_tok, system):
- if not message.strip():
- return history, ""
- response = chat_response(message, history, model, temp, max_tok, system)
- # Use tuple format for default chatbot
- history.append((message, response))
- return history, ""
-
- send_btn.click(
- respond,
- inputs=[chat_input, chatbot, chat_model, chat_temp, chat_max_tokens, chat_system],
- outputs=[chatbot, chat_input]
- )
-
- chat_input.submit(
- respond,
- inputs=[chat_input, chatbot, chat_model, chat_temp, chat_max_tokens, chat_system],
- outputs=[chatbot, chat_input]
- )
-
- clear_btn.click(lambda: [], outputs=[chatbot])
-
- refresh_models_btn.click(
- lambda: gr.update(choices=get_model_choices()),
- outputs=[chat_model]
- )
-
- # ============ Text Generation Tab ============
- with gr.Tab("๐ Text Generation", id="text-gen"):
- gr.HTML("""
-
-
- ๐ Text Generation
-
-
- Generate creative text, stories, articles, and more with AI
-
-
- """)
-
- with gr.Row(equal_height=True):
- with gr.Column(scale=1):
- with gr.Group(elem_classes=["input-group"]):
- gen_prompt = gr.Textbox(
- label="โ๏ธ Prompt",
- placeholder="Enter your prompt for text generation... Be creative and specific!",
- lines=6,
- info="The more detailed your prompt, the better the result"
- )
-
- with gr.Row():
- gen_model = gr.Dropdown(
- choices=get_model_choices(),
- value=get_model_choices()[0] if get_model_choices() else None,
- label="๐ค Model",
- info="Choose model for generation"
- )
- gen_temp = gr.Slider(
- 0.1, 1.0, 0.7, step=0.1,
- label="๐ก๏ธ Temperature",
- info="Creativity level"
- )
-
- with gr.Row():
- gen_max_tokens = gr.Slider(
- 50, 2048, 250, step=50,
- label="๐ Max Tokens",
- info="Response length"
- )
- gen_top_p = gr.Slider(
- 0.1, 1.0, 0.95, step=0.05,
- label="๐ฏ Top P",
- info="Nucleus sampling"
- )
-
- gen_btn = gr.Button(
- "โจ Generate",
- variant="primary",
- size="lg",
- elem_classes=["generate-button"]
- )
-
- with gr.Column(scale=1):
- with gr.Group(elem_classes=["output-group"]):
- gen_output = gr.Textbox(
- label="๐ Generated Text",
- lines=18,
- show_label=True,
- info="AI-generated content will appear here",
- elem_classes=["output-textarea"]
- )
-
- with gr.Row():
- copy_gen_btn = gr.Button(
- "๐ Copy",
- size="sm",
- variant="secondary",
- elem_classes=["copy-button"]
- )
- regenerate_btn = gr.Button(
- "๐ Regenerate",
- size="sm",
- variant="secondary"
- )
-
- gen_btn.click(
- text_generation,
- inputs=[gen_prompt, gen_model, gen_temp, gen_max_tokens, gen_top_p],
- outputs=[gen_output]
- )
-
- copy_gen_btn.click(
- fn=None,
- js="(text) => { navigator.clipboard.writeText(text); alert('Text copied to clipboard!'); }",
- inputs=[gen_output]
- )
-
- # ============ Summarization Tab ============
- with gr.Tab("๐ Summarization", id="summarize"):
- with gr.Row():
- with gr.Column():
- sum_text = gr.Textbox(
- label="Text to Summarize",
- placeholder="Paste the text you want to summarize...",
- lines=10
- )
-
- sum_model = gr.Dropdown(
- choices=["facebook/bart-large-cnn", "sshleifer/distilbart-cnn-12-6", "google/pegasus-xsum"],
- value="facebook/bart-large-cnn",
- label="Model"
- )
-
- with gr.Row():
- sum_max_len = gr.Slider(50, 500, 150, step=10, label="Max Length")
- sum_min_len = gr.Slider(10, 100, 30, step=5, label="Min Length")
-
- sum_btn = gr.Button("Summarize", variant="primary")
-
- with gr.Column():
- sum_output = gr.Textbox(
- label="Summary",
- lines=8
- )
-
- sum_btn.click(
- summarize_text,
- inputs=[sum_text, sum_model, sum_max_len, sum_min_len],
- outputs=[sum_output]
- )
-
- # ============ Translation Tab ============
- with gr.Tab("๐ Translation", id="translate"):
- gr.HTML("""
-
-
- ๐ Translation
-
-
- Translate text between multiple languages with advanced AI models
-
-
- """)
-
- with gr.Row(equal_height=True):
- with gr.Column(scale=1):
- with gr.Group(elem_classes=["translation-input"]):
- gr.HTML("""
-
-
- ๐ Translation Models
-
-
- - Google TranslateGemma - Advanced multilingual translation
- - Llama 3.2 - Multilingual with dynamic prompts
- - MADLAD-400 - 400+ languages support
- - Helsinki-NLP - Specialized language pairs
-
-
- โจ New: Target language selection now works with all models!
-
-
- Note: If a model is not available, the system will automatically try fallback models.
-
-
- """)
-
- trans_text = gr.Textbox(
- label="๐ Text to Translate",
- placeholder="Enter text to translate...",
- lines=6,
- info="Supports multiple languages and formats"
- )
-
- trans_model = gr.Dropdown(
- choices=[
- "google/translategemma-12b-it",
- "meta-llama/Llama-3.2-3B-Instruct",
- "google/madlad400-3b-mt",
- "Helsinki-NLP/opus-mt-en-de",
- "Helsinki-NLP/opus-mt-en-fr",
- "Helsinki-NLP/opus-mt-en-es",
- "Helsinki-NLP/opus-mt-en-zh",
- "Helsinki-NLP/opus-mt-en-ja",
- "Helsinki-NLP/opus-mt-de-en",
- "Helsinki-NLP/opus-mt-fr-en",
- "Helsinki-NLP/opus-mt-es-en"
- ],
- value="google/translategemma-12b-it",
- label="๐ค Translation Model",
- info="Choose model based on your language needs"
- )
-
- # Target language selection (works with all models)
- target_language = gr.Dropdown(
- choices=[
- "English", "Spanish", "French", "German", "Chinese (Simplified)", "Chinese (Traditional)",
- "Japanese", "Korean", "Italian", "Portuguese", "Russian", "Arabic", "Hindi",
- "Dutch", "Turkish", "Polish", "Vietnamese", "Thai", "Indonesian",
- "Greek", "Hebrew", "Czech", "Swedish", "Danish", "Norwegian", "Finnish"
- ],
- value="English",
- label="๐ฏ Target Language",
- info="Select target language for translation (works with all models)",
- visible=True
- )
-
- # Source language selection (optional)
- source_language = gr.Dropdown(
- choices=[
- "Auto-detect", "English", "Spanish", "French", "German", "Chinese (Simplified)", "Chinese (Traditional)",
- "Japanese", "Korean", "Italian", "Portuguese", "Russian", "Arabic", "Hindi",
- "Dutch", "Turkish", "Polish", "Vietnamese", "Thai", "Indonesian"
- ],
- value="Auto-detect",
- label="๐ค Source Language",
- info="Select source language or leave as Auto-detect",
- visible=True
- )
-
- trans_btn = gr.Button(
- "๐ Translate",
- variant="primary",
- size="lg",
- elem_classes=["translate-button"]
- )
-
- # Image translation section
- gr.HTML("""
-
-
- ๐ผ๏ธ Image Translation (Google TranslateGemma only)
-
-
- Upload an image containing text to extract and translate it
-
-
- """)
-
- trans_image = gr.Image(
- label="๐ผ๏ธ Upload Image",
- type="filepath",
- visible=True # Initially visible since default model is TranslateGemma
- )
-
- trans_image_btn = gr.Button(
- "๐ผ๏ธ Translate Image",
- variant="secondary",
- size="lg",
- elem_classes=["translate-button"],
- visible=True # Initially visible since default model is TranslateGemma
- )
-
- with gr.Column(scale=1):
- with gr.Group(elem_classes=["translation-output"]):
- trans_output = gr.Textbox(
- label="โ
Translation Result",
- lines=6,
- info="Translated text will appear here",
- elem_classes=["translation-result"]
- )
-
- with gr.Row():
- copy_trans_btn = gr.Button(
- "๐ Copy",
- size="sm",
- variant="secondary"
- )
- swap_lang_btn = gr.Button(
- "๐ Swap Languages",
- size="sm",
- variant="secondary"
- )
-
- # Function to show/hide target language dropdown based on model selection
- def update_target_language_visibility(model):
- show = "translategemma" in model.lower()
- return gr.update(visible=show)
-
- # Function to show/hide image elements based on model selection
- def update_image_visibility(model):
- show = "translategemma" in model.lower()
- return gr.update(visible=show)
-
- # Update visibility when model changes
- trans_model.change(
- update_target_language_visibility,
- inputs=[trans_model],
- outputs=[target_language]
- )
-
- trans_model.change(
- update_image_visibility,
- inputs=[trans_model],
- outputs=[trans_image]
- )
-
- trans_model.change(
- update_image_visibility,
- inputs=[trans_model],
- outputs=[trans_image_btn]
- )
-
- trans_btn.click(
- translate_text,
- inputs=[trans_text, trans_model, target_language, source_language],
- outputs=[trans_output]
- )
-
- trans_image_btn.click(
- translate_image,
- inputs=[trans_image, trans_model, target_language, source_language],
- outputs=[trans_output]
- )
-
- # Swap languages functionality
- def swap_languages(src_lang, tgt_lang, text):
- # Swap source and target languages
- # For simplicity, we'll just swap the dropdown values
- # In a more complex implementation, you might want to translate the text as well
- return tgt_lang, src_lang, ""
-
- swap_lang_btn.click(
- swap_languages,
- inputs=[source_language, target_language, trans_text],
- outputs=[source_language, target_language, trans_text]
- )
-
- # Copy translation functionality
- def copy_translation(text):
- # This will be handled by JavaScript in the frontend
- return text
-
- copy_trans_btn.click(
- copy_translation,
- inputs=[trans_output],
- js="(text) => { navigator.clipboard.writeText(text); alert('Translation copied to clipboard!'); }"
- )
-
- # ============ Question Answering Tab ============
- with gr.Tab("โ Q&A", id="qa"):
- with gr.Row():
- with gr.Column():
- qa_context = gr.Textbox(
- label="Context",
- placeholder="Paste the context/document here...",
- lines=8
- )
-
- qa_question = gr.Textbox(
- label="Question",
- placeholder="What would you like to know?"
- )
-
- qa_model = gr.Dropdown(
- choices=[
- "deepset/roberta-base-squad2",
- "distilbert-base-cased-distilled-squad",
- "bert-large-uncased-whole-word-masking-finetuned-squad"
- ],
- value="deepset/roberta-base-squad2",
- label="Model"
- )
-
- qa_btn = gr.Button("Get Answer", variant="primary")
-
- with gr.Column():
- qa_answer = gr.Textbox(label="Answer", lines=3)
- qa_score = gr.Number(label="Confidence Score")
-
- qa_btn.click(
- answer_question,
- inputs=[qa_question, qa_context, qa_model],
- outputs=[qa_answer, qa_score]
- )
-
- # ============ Image Generation Tab ============
- with gr.Tab("๐จ Image Generation", id="image-gen"):
- gr.HTML("""
-
-
- ๐จ Image Generation
-
-
- Create stunning images from text descriptions using advanced AI models
-
-
- """)
-
- with gr.Row(equal_height=True):
- with gr.Column(scale=1):
- with gr.Group(elem_classes=["image-controls"]):
- img_prompt = gr.Textbox(
- label="โจ Prompt",
- placeholder="Describe the image you want to generate... Be detailed and creative!",
- lines=4,
- info="Example: 'A beautiful sunset over mountains, digital art, highly detailed'"
- )
-
- img_negative = gr.Textbox(
- label="๐ซ Negative Prompt",
- placeholder="What to avoid in the image... (optional)",
- lines=2,
- info="Example: 'blurry, low quality, distorted'"
- )
-
- with gr.Row():
- img_model = gr.Dropdown(
- choices=[
- "stabilityai/stable-diffusion-xl-base-1.0",
- "runwayml/stable-diffusion-v1-5",
- "CompVis/stable-diffusion-v1-4"
- ],
- value="stabilityai/stable-diffusion-xl-base-1.0",
- label="๐ญ Model",
- info="Choose image generation model"
- )
- img_steps = gr.Slider(
- 10, 100, 30, step=5,
- label="โ๏ธ Steps",
- info="More steps = better quality (slower)"
- )
-
- img_btn = gr.Button(
- "๐จ Generate Image",
- variant="primary",
- size="lg",
- elem_classes=["generate-image-button"]
- )
-
- gr.HTML("""
-
-
- ๐ก Pro Tips
-
-
- - Be specific in your descriptions
- - Use art styles: 'digital art', 'oil painting', 'photo'
- - Add quality terms: 'highly detailed', '4K', 'sharp focus'
-
-
- """)
-
- with gr.Column(scale=1):
- with gr.Group(elem_classes=["image-output"]):
- img_output = gr.Image(
- label="๐ผ๏ธ Generated Image",
- type="filepath",
- elem_classes=["generated-image"]
- )
-
- with gr.Row():
- download_btn = gr.Button(
- "๐พ Download",
- size="sm",
- variant="secondary"
- )
- share_img_btn = gr.Button(
- "๐ Share",
- size="sm",
- variant="secondary"
- )
-
- img_btn.click(
- generate_image,
- inputs=[img_prompt, img_model, img_negative, img_steps],
- outputs=[img_output]
- )
-
- # ============ Models Tab ============
- with gr.Tab("๐ง Models", id="models"):
- with gr.Row():
- # Left: Search & Browse
- with gr.Column(scale=2):
- gr.Markdown("### ๐ Search HuggingFace Models")
-
- with gr.Row():
- search_query = gr.Textbox(
- placeholder="Search for models...",
- show_label=False,
- scale=3
- )
- search_task = gr.Dropdown(
- choices=[
- "text-generation",
- "text-classification",
- "summarization",
- "translation",
- "question-answering",
- "image-classification",
- "text-to-image"
- ],
- value="text-generation",
- show_label=False,
- scale=2
- )
- search_limit = gr.Slider(5, 50, 10, step=5, label="Limit", scale=1)
-
- search_btn = gr.Button("Search", variant="primary")
-
- search_results = gr.Dataframe(
- headers=["Model ID", "Author", "Task", "Downloads", "Likes"],
- label="Search Results",
- interactive=False,
- wrap=True
- )
-
- gr.Markdown("### ๐ Model Info")
- model_info_input = gr.Textbox(
- placeholder="Enter model ID to get info...",
- label="Model ID"
- )
- get_info_btn = gr.Button("Get Info")
- model_info_output = gr.Markdown(label="Model Information")
-
- # Right: My Models
- with gr.Column(scale=2):
- gr.Markdown("### ๐ฆ My Models")
-
- my_models_table = gr.Dataframe(
- headers=["ID", "Name", "Model ID", "Role", "Temp", "Max Tokens", "Enabled"],
- value=get_models_table(),
- label="Saved Models",
- interactive=False
- )
-
- refresh_table_btn = gr.Button("๐ Refresh")
-
- gr.Markdown("### โ Add New Model")
-
- add_model_id = gr.Textbox(label="Model ID", placeholder="e.g., meta-llama/Llama-3.2-3B-Instruct")
- add_model_name = gr.Textbox(label="Display Name", placeholder="My Model")
- add_model_role = gr.Dropdown(
- choices=["assistant", "creative", "coder", "analyst", "custom"],
- value="assistant",
- label="Role"
- )
-
- with gr.Row():
- add_model_temp = gr.Slider(0.1, 1.0, 0.3, step=0.1, label="Temperature")
- add_model_tokens = gr.Slider(50, 4096, 500, step=50, label="Max Tokens")
-
- add_model_system = gr.Textbox(
- label="System Prompt",
- placeholder="Optional system prompt...",
- lines=2
- )
-
- add_model_btn = gr.Button("Add Model", variant="primary")
- add_model_status = gr.Textbox(label="Status", interactive=False)
-
- gr.Markdown("### ๐๏ธ Delete Model")
- delete_model_id = gr.Textbox(label="Model ID to Delete", placeholder="Enter model ID")
- delete_model_btn = gr.Button("Delete", variant="stop")
-
- search_btn.click(
- search_hf_models,
- inputs=[search_query, search_task, search_limit],
- outputs=[search_results]
- )
-
- get_info_btn.click(
- get_model_info,
- inputs=[model_info_input],
- outputs=[model_info_output]
- )
-
- add_model_btn.click(
- add_model_to_settings,
- inputs=[add_model_id, add_model_name, add_model_role, add_model_temp, add_model_tokens, add_model_system],
- outputs=[add_model_status, my_models_table]
- )
-
- refresh_table_btn.click(
- get_models_table,
- outputs=[my_models_table]
- )
-
- delete_model_btn.click(
- delete_model,
- inputs=[delete_model_id],
- outputs=[add_model_status, my_models_table]
- )
-
- # ============ Settings Tab ============
- with gr.Tab("โ๏ธ Settings", id="settings"):
- with gr.Row():
- with gr.Column():
- gr.Markdown("### ๐ HuggingFace API Token")
-
- token_input = gr.Textbox(
- label="API Token",
- value=get_hf_token(),
- type="password",
- placeholder="hf_..."
- )
-
- save_token_btn = gr.Button("Save Token", variant="primary")
- token_status = gr.Textbox(label="Status", interactive=False)
-
- gr.Markdown("""
- ---
- **How to get your token:**
- 1. Go to [HuggingFace Settings](https://huggingface.co/settings/tokens)
- 2. Create a new token with read access
- 3. Paste it above and save
- """)
-
- with gr.Column():
- gr.Markdown("### ๐ค Account Info")
- account_info = gr.Markdown(value="Click refresh to load account info")
- refresh_account_btn = gr.Button("๐ Refresh Account Info")
-
- save_token_btn.click(
- save_hf_token,
- inputs=[token_input],
- outputs=[token_status]
- )
-
- refresh_account_btn.click(
- get_account_info,
- outputs=[account_info]
- )
-
- # Footer with modern design
- gr.HTML("""
-
- """)
-
- # Add custom JavaScript for enhanced interactions
- app.load(
- fn=None,
- js="""
- function() {
- // Add smooth scrolling
- document.querySelectorAll('a[href^="#"]').forEach(anchor => {
- anchor.addEventListener('click', function (e) {
- e.preventDefault();
- document.querySelector(this.getAttribute('href')).scrollIntoView({
- behavior: 'smooth'
- });
- });
- });
-
- // Add auto-resizing for chat input
- const chatInput = document.querySelector('.chat-input textarea');
- if (chatInput) {
- chatInput.addEventListener('input', function() {
- this.style.height = 'auto';
- this.style.height = Math.min(this.scrollHeight, 200) + 'px';
- });
-
- // Add shift+enter for new line
- chatInput.addEventListener('keydown', function(e) {
- if (e.key === 'Enter' && !e.shiftKey) {
- e.preventDefault();
- const sendBtn = document.querySelector('.send-button');
- if (sendBtn) sendBtn.click();
- }
- });
- }
-
- // Add hover effects to all interactive elements
- document.querySelectorAll('.gradio-button, .gradio-dropdown, .gradio-slider').forEach(element => {
- element.addEventListener('mouseenter', function() {
- this.style.transform = 'translateY(-2px)';
- this.style.transition = 'all 0.3s ease';
- });
- element.addEventListener('mouseleave', function() {
- this.style.transform = 'translateY(0)';
- });
- });
-
- // Add card hover effects
- document.querySelectorAll('.gradio-box, .input-group, .output-group').forEach(card => {
- card.addEventListener('mouseenter', function() {
- this.style.boxShadow = '0 8px 24px rgba(102, 126, 234, 0.3)';
- this.style.transform = 'translateY(-4px)';
- });
- card.addEventListener('mouseleave', function() {
- this.style.boxShadow = '';
- this.style.transform = 'translateY(0)';
- });
- });
-
- // Add loading animations
- window.addEventListener('load', function() {
- document.body.style.opacity = '0';
- setTimeout(() => {
- document.body.style.transition = 'opacity 0.5s ease-in-out';
- document.body.style.opacity = '1';
- }, 100);
- });
-
- // Add visual feedback for button clicks
- document.querySelectorAll('.gradio-button').forEach(button => {
- button.addEventListener('click', function() {
- this.style.transform = 'scale(0.95)';
- setTimeout(() => {
- this.style.transform = '';
- }, 100);
- });
- });
-
- // Add copy functionality with visual feedback
- document.querySelectorAll('[id*=\"copy\"], [id*=\"Copy\"]').forEach(button => {
- button.addEventListener('click', function() {
- const originalText = this.textContent;
- this.textContent = 'โ Copied!';
- this.style.background = 'linear-gradient(135deg, #4ade80, #22c55e)';
- this.style.color = 'white';
- setTimeout(() => {
- this.textContent = originalText;
- this.style.background = '';
- this.style.color = '';
- }, 2000);
- });
- });
-
- // Add keyboard shortcuts
- document.addEventListener('keydown', function(e) {
- // Ctrl/Cmd + K to focus chat input
- if ((e.ctrlKey || e.metaKey) && e.key === 'k') {
- e.preventDefault();
- const chatInput = document.querySelector('.chat-input textarea');
- if (chatInput) {
- chatInput.focus();
- chatInput.scrollIntoView({ behavior: 'smooth', block: 'center' });
- }
- }
- // Ctrl/Cmd + / to show shortcuts help
- if ((e.ctrlKey || e.metaKey) && e.key === '/') {
- e.preventDefault();
- alert('Keyboard Shortcuts:\\nโข Ctrl+K: Focus chat input\\nโข Escape: Clear chat\\nโข Shift+Enter: New line in chat');
- }
- // Escape to clear chat
- if (e.key === 'Escape') {
- const clearButton = document.querySelector('[id*=\"clear\"], [id*=\"Clear\"]');
- if (clearButton) clearButton.click();
- }
- });
-
- // Add focus effects to inputs
- document.querySelectorAll('input, textarea').forEach(input => {
- input.addEventListener('focus', function() {
- this.style.boxShadow = '0 0 0 3px rgba(102, 126, 234, 0.3)';
- });
- input.addEventListener('blur', function() {
- this.style.boxShadow = '';
- });
- });
-
- console.log('โจ AI Assistant UI Enhanced with Interactive Features');
- }
- """
- )
-
-
-
-
-# Launch the app
-if __name__ == "__main__":
- app.launch(
- server_name="127.0.0.1",
- server_port=7866,
- share=False,
- show_error=True,
- inbrowser=True
- )
+"""
+AI Assistant - Gradio Application
+Powered by HuggingFace Hub
+"""
+import gradio as gr
+import os
+import json
+from datetime import datetime
+from hf_api import HuggingFaceAPI
+from utils import load_settings, save_settings
+
+# Optional import for Google Translate Gemma
+try:
+ from google_translate import GoogleTranslateGemma
+ GOOGLE_TRANSLATE_AVAILABLE = True
+except ImportError as e:
+ print(f"Warning: Google Translate Gemma not available: {str(e)}")
+ GOOGLE_TRANSLATE_AVAILABLE = False
+ GoogleTranslateGemma = None
+
+# Translation testing functions
def test_translategemma(text, source_lang, target_lang):
    """Smoke-test the Google Translate Gemma model directly.

    Falls back to the chat-completion translation path when the optional
    ``GoogleTranslateGemma`` module is unavailable or raises.

    Args:
        text: Text to translate.
        source_lang: Source language code passed through to the model.
        target_lang: Target language code.

    Returns:
        The translated string, or None when no translation was produced.
    """
    if not GOOGLE_TRANSLATE_AVAILABLE:
        print("โ Google Translate Gemma not available. Using chat completion fallback.")
        return test_chat_completion_translation(text, source_lang, target_lang)

    print(f"๐งช Testing Google Translate Gemma")
    print(f" Text: {text}")
    print(f" Source: {source_lang}")
    print(f" Target: {target_lang}")
    print("-" * 50)

    try:
        # Initialize the model wrapper and run a single translation.
        translator = GoogleTranslateGemma()
        translation = translator.translate(text, source_lang, target_lang)

        if translation:
            # NOTE(review): these status literals were split across source lines
            # (mojibake of a multi-byte emoji); rejoined into valid f-strings.
            print(f"โ Translation: {translation}")
            print(" โ Google Translate Gemma working correctly!")
            return translation
        else:
            print("โ No translation returned")
            return None

    except Exception as e:
        print(f"โ Error: {str(e)}")
        print(" โ ๏ธ Falling back to chat completion translation...")
        return test_chat_completion_translation(text, source_lang, target_lang)
+
def test_chat_completion_translation(text, source_lang, target_lang):
    """Test translation via chat completion, trying several models in order.

    Iterates over a preference-ordered model list and returns the first
    successful translation, printing progress to stdout as it goes.

    Args:
        text: Text to translate.
        source_lang: Source language name, or "Auto-detect".
        target_lang: Target language name.

    Returns:
        The translated string, or None when every model failed or no API
        client is configured.
    """
    if not hf_api:
        print("โ No HuggingFace API available. Please set your token first.")
        return None

    # Test models in order of preference.
    models_to_test = [
        "google/translategemma-12b-it",
        "meta-llama/Llama-3.2-3B-Instruct",
        "microsoft/Phi-3-mini-4k-instruct",
        "google/gemma-2-2b-it"
    ]

    print(f"๐งช Testing translation with chat completion")
    print(f" Text: {text}")
    print(f" Source: {source_lang}")
    print(f" Target: {target_lang}")
    print("-" * 50)

    for model_id in models_to_test:
        print(f"\n๐ Testing with model: {model_id}")

        try:
            # TranslateGemma requires the optional module; skip it when absent.
            if "translategemma" in model_id.lower() and not GOOGLE_TRANSLATE_AVAILABLE:
                print(" โ ๏ธ Google Translate Gemma not available, skipping...")
                continue

            # Dynamic system prompt based on target and source language.
            source_info = f" from {source_lang}" if source_lang != "Auto-detect" else ""
            system_prompt = f"You are a professional translator specializing in translating{source_info} to {target_lang}. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations."
            prompt = f"Translate the following text{source_info} to {target_lang}: {text}"

            messages = [
                {
                    "role": "system",
                    "content": system_prompt
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ]

            response = hf_api.chat_completion(
                model=model_id,
                messages=messages,
                max_tokens=1024,
                temperature=0.3
            )

            translation = response["choices"][0]["message"]["content"].strip()
            # NOTE(review): emoji literals rejoined from line-split mojibake in
            # the original source.
            print(f"โ Translation: {translation}")
            print(f" โ Success with {model_id}!")
            return translation  # Return first successful translation

        except Exception as e:
            print(f" โ Error with {model_id}: {str(e)}")
            continue

    print("\nโ All models failed. Please check your token and model availability.")
    return None
+
def run_multiple_translation_tests():
    """Run a fixed suite of translation test scenarios.

    Returns:
        A ``(results, summary)`` tuple: ``results`` is a list of per-case
        dicts with keys 'case', 'original', 'translation', 'success';
        ``summary`` is a human-readable report string.
    """
    test_cases = [
        {
            "text": "Hello, how are you today?",
            "source": "English",
            "target": "Spanish",
            "description": "English to Spanish"
        },
        {
            "text": "V nejhorลกรญm pลรญpadฤ i k prasknutรญ ฤoฤky.",
            "source": "Czech",
            "target": "German",
            "description": "Czech to German"
        },
        {
            "text": "Bonjour, comment allez-vous?",
            "source": "French",
            "target": "English",
            "description": "French to English"
        },
        {
            "text": "่ฟๆฏไธไธชๆต่ฏใ",
            "source": "Chinese (Simplified)",
            "target": "English",
            "description": "Chinese to English"
        },
        {
            "text": "ยกHola! ยฟCรณmo estรกs?",
            "source": "Spanish",
            "target": "Japanese",
            "description": "Spanish to Japanese"
        }
    ]

    # Map human-readable language names to the codes the model expects.
    # Hoisted out of the loop: it is loop-invariant and was being rebuilt
    # from scratch on every iteration.
    lang_code_map = {
        "English": "en",
        "Spanish": "es",
        "French": "fr",
        "German": "de-DE",
        "Chinese (Simplified)": "zh-CN",
        "Chinese (Traditional)": "zh-TW",
        "Japanese": "ja",
        "Korean": "ko",
        "Italian": "it",
        "Portuguese": "pt",
        "Russian": "ru",
        "Arabic": "ar",
        "Hindi": "hi",
        "Dutch": "nl",
        "Turkish": "tr",
        "Polish": "pl",
        "Vietnamese": "vi",
        "Thai": "th",
        "Indonesian": "id",
        "Greek": "el",
        "Hebrew": "he",
        "Czech": "cs",
        "Swedish": "sv",
        "Danish": "da",
        "Norwegian": "no",
        "Finnish": "fi"
    }

    results = []

    for i, case in enumerate(test_cases, 1):
        print(f"\n๐ Test {i}: {case['description']}")
        print(f" Source ({case['source']}): {case['text']}")

        # Unknown language names fall back to English ('en').
        source_code = lang_code_map.get(case['source'], 'en')
        target_code = lang_code_map.get(case['target'], 'en')

        translation = test_translategemma(
            text=case['text'],
            source_lang=source_code,
            target_lang=target_code
        )

        if translation:
            print(f" Target ({case['target']}): {translation}")
            results.append({
                'case': case['description'],
                'original': case['text'],
                'translation': translation,
                'success': True
            })
        else:
            results.append({
                'case': case['description'],
                'original': case['text'],
                'translation': None,
                'success': False
            })

    # Summary
    successful = sum(1 for r in results if r['success'])
    total = len(results)

    summary = f"""
๐ Test Summary
{"=" * 60}
Total tests: {total}
Successful: {successful}
Failed: {total - successful}
Success rate: {successful/total*100:.1f}%

"""

    if successful < total:
        summary += "โ Some tests failed. Check your HuggingFace token and model availability."
    else:
        # NOTE(review): literal rejoined from a line-split emoji in the source.
        summary += "โ All tests passed successfully!"

    return results, summary
+
# Settings paths
# All settings JSON files live in a 'settings' directory next to this module.
SETTINGS_DIR = os.path.join(os.path.dirname(__file__), 'settings')
MODELS_SETTINGS_FILE = os.path.join(SETTINGS_DIR, 'models.json')
FIREBASE_SETTINGS_FILE = os.path.join(SETTINGS_DIR, 'firebase.json')
APP_SETTINGS_FILE = os.path.join(SETTINGS_DIR, 'app.json')


# Load initial settings
# hf_api stays None until a HuggingFace token is configured; every handler
# below checks for this and returns an instructive message instead of failing.
model_settings = load_settings(MODELS_SETTINGS_FILE)
HF_TOKEN = model_settings.get('huggingfaceToken', '')
hf_api = HuggingFaceAPI(token=HF_TOKEN) if HF_TOKEN else None
+
+
def reinit_api(token: str):
    """Reinitialize HuggingFace API with new token.

    Replaces the module-level ``hf_api`` client so that all subsequent
    handler calls use the new credentials.
    """
    global hf_api
    hf_api = HuggingFaceAPI(token=token)
+
+
def get_saved_models():
    """Return (display name, model id) pairs for every enabled saved model."""
    stored = load_settings(MODELS_SETTINGS_FILE).get('models', [])
    pairs = []
    for entry in stored:
        # Models default to enabled when the flag is missing.
        if not entry.get('enabled', True):
            continue
        label = entry.get('name', entry.get('modelId', 'Unknown'))
        pairs.append((label, entry.get('modelId', '')))
    return pairs
+
+
def get_model_choices():
    """Return model ids for dropdowns, with a default when none are saved."""
    saved = get_saved_models()
    if saved:
        return [model_id for _label, model_id in saved]
    # Fall back to a known-good instruct model so dropdowns are never empty.
    return ["meta-llama/Llama-3.2-3B-Instruct"]
+
+
+# ============ Chat Functions ============
+
def chat_response(message: str, history: list, model_id: str, temperature: float, max_tokens: int, system_prompt: str):
    """Generate chat response.

    Builds an OpenAI-style message list from the optional system prompt,
    the chat history (accepting both message-dict entries and the legacy
    ``(user, assistant)`` tuple format) and the new user message, then
    calls the chat-completion endpoint.

    Returns:
        The assistant reply, or a human-readable error string. This
        function never raises; the UI displays whatever it returns.
    """
    if not hf_api:
        return "Please set your HuggingFace token in Settings first."

    if not message.strip():
        return ""

    try:
        # Build messages with system prompt
        messages = []
        if system_prompt.strip():
            messages.append({"role": "system", "content": system_prompt})

        # Add history, normalizing the legacy tuple format to role dicts.
        for msg in history:
            if isinstance(msg, dict):
                messages.append(msg)
            elif isinstance(msg, tuple) and len(msg) == 2:
                user_msg, assistant_msg = msg
                if user_msg:
                    messages.append({"role": "user", "content": user_msg})
                if assistant_msg:
                    messages.append({"role": "assistant", "content": assistant_msg})

        # Add current message
        messages.append({"role": "user", "content": message})

        response = hf_api.chat_completion(
            model=model_id,
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature
        )

        return response["choices"][0]["message"]["content"]

    except Exception as e:
        error_str = str(e)
        # Model-not-supported errors get a list of fallback suggestions.
        if "model_not_supported" in error_str or "not supported by any provider" in error_str:
            try:
                fallback_models = hf_api._find_fallback_models(model_id)
                if fallback_models:
                    fallback_list = "\n".join([f"- {m['id']}" for m in fallback_models[:3]])
                    return f"Error: Model {model_id} is not supported. Try one of these models instead:\n{fallback_list}\n\nOriginal error: {error_str}"
                else:
                    return f"Error: Model {model_id} is not supported and no fallback models are available.\n\nOriginal error: {error_str}"
            except Exception:
                # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
                # still propagate instead of being swallowed.
                return f"Error: Model {model_id} is not supported. Please try a different model.\n\nOriginal error: {error_str}"
        else:
            return f"Error: {error_str}"
+
+
def text_generation(prompt: str, model_id: str, temperature: float, max_tokens: int, top_p: float):
    """Generate text from prompt.

    Models whose saved settings recommend ``chat_completion`` are routed
    through the chat endpoint; everything else uses raw text generation.

    Returns:
        The generated text, or a human-readable error string. This
        function never raises; the UI displays whatever it returns.
    """
    if not hf_api:
        return "Please set your HuggingFace token in Settings first."

    if not prompt.strip():
        return ""

    try:
        # Look up this model's saved settings to decide which endpoint to use.
        # Local name `settings` avoids shadowing the module-level
        # `model_settings` loaded at import time.
        settings = load_settings(MODELS_SETTINGS_FILE)
        model_info = next(
            (m for m in settings.get('models', []) if m.get('modelId') == model_id),
            None
        )

        if model_info and model_info.get('recommendedMethod') == 'chat_completion':
            # Use chat completion for conversational models
            response = hf_api.chat_completion(
                model=model_id,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=max_tokens,
                temperature=temperature
            )
            return response["choices"][0]["message"]["content"]

        # Use text generation for other models
        response = hf_api.text_generation(
            model=model_id,
            prompt=prompt,
            max_new_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p
        )
        return response.get("generated_text", "")
    except Exception as e:
        error_str = str(e)
        # Model-not-supported errors get a list of fallback suggestions.
        if "model_not_supported" in error_str or "not supported by any provider" in error_str:
            try:
                fallback_models = hf_api._find_fallback_models(model_id)
                if fallback_models:
                    fallback_list = "\n".join([f"- {m['id']}" for m in fallback_models[:3]])
                    return f"Error: Model {model_id} is not supported. Try one of these models instead:\n{fallback_list}\n\nOriginal error: {error_str}"
                else:
                    return f"Error: Model {model_id} is not supported and no fallback models are available.\n\nOriginal error: {error_str}"
            except Exception:
                # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
                # still propagate instead of being swallowed.
                return f"Error: Model {model_id} is not supported. Please try a different model.\n\nOriginal error: {error_str}"
        else:
            return f"Error: {error_str}"
+
+
def summarize_text(text: str, model_id: str, max_length: int, min_length: int):
    """Summarize *text* with the given model.

    Returns the summary string, '' for empty input, or an error string.
    """
    if not hf_api:
        return "Please set your HuggingFace token in Settings first."

    if not text.strip():
        return ""

    try:
        result = hf_api.summarization(
            model=model_id,
            text=text,
            max_length=max_length,
            min_length=min_length
        )
        # The endpoint may hand back a list of results, a single dict, or
        # something else entirely; normalize all three shapes to a string.
        if isinstance(result, list) and result:
            return result[0].get('summary_text', '')
        if isinstance(result, dict):
            return result.get('summary_text', str(result))
        return str(result)
    except Exception as e:
        return f"Error: {str(e)}"
+
+
# Display-name -> language-code table used by the Google TranslateGemma
# translator. Shared by the text and (conceptually) image translation paths.
_GEMMA_LANG_CODES = {
    "English": "en",
    "Spanish": "es",
    "French": "fr",
    "German": "de-DE",
    "Chinese (Simplified)": "zh-CN",
    "Chinese (Traditional)": "zh-TW",
    "Japanese": "ja",
    "Korean": "ko",
    "Italian": "it",
    "Portuguese": "pt",
    "Russian": "ru",
    "Arabic": "ar",
    "Hindi": "hi",
    "Dutch": "nl",
    "Turkish": "tr",
    "Polish": "pl",
    "Vietnamese": "vi",
    "Thai": "th",
    "Indonesian": "id",
    "Greek": "el",
    "Hebrew": "he",
    "Czech": "cs",
    "Swedish": "sv",
    "Danish": "da",
    "Norwegian": "no",
    "Finnish": "fi"
}


def _gemma_lang_code(language: str) -> str:
    """Map a display language name to a TranslateGemma code, defaulting to 'en'.

    'Auto-detect' is not in the table, so it also falls back to 'en' — this
    matches the previous inline lookup behavior.
    """
    return _GEMMA_LANG_CODES.get(language, "en")


def _chat_translate(model_id: str, text: str, target_language: str, source_language: str) -> str:
    """Translate *text* via chat completion with a professional-translator prompt.

    Raises whatever ``hf_api.chat_completion`` raises; callers handle errors.
    """
    source_info = f" from {source_language}" if source_language != "Auto-detect" else ""
    system_prompt = f"You are a professional translator specializing in translating{source_info} to {target_language}. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations."
    prompt = f"Translate the following text{source_info} to {target_language}: {text}"

    response = hf_api.chat_completion(
        model=model_id,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt}
        ],
        max_tokens=1024,
        temperature=0.3
    )
    return response["choices"][0]["message"]["content"].strip()


def translate_text(text: str, model_id: str, target_language: str = "", source_language: str = "Auto-detect"):
    """Translate text with the selected model.

    Routing:
      * Google TranslateGemma models use the GoogleTranslateGemma module,
        falling back to chat completion when it is unavailable or fails;
      * Llama/Mistral models are prompted via chat completion;
      * anything else goes through the standard translation endpoint.

    On unsupported-model errors a fixed list of fallback models is tried
    before giving up. Returns the translation or an error string.
    """
    if not hf_api:
        return "Please set your HuggingFace token in Settings first."

    if not text.strip():
        return ""

    try:
        if "translategemma" in model_id.lower():
            if not GOOGLE_TRANSLATE_AVAILABLE:
                # Module missing (e.g. transformers/torch not installed):
                # degrade to prompting the model through chat completion.
                print("Google Translate Gemma not available, falling back to chat completion")
                return _chat_translate(model_id, text, target_language, source_language)

            try:
                translator = GoogleTranslateGemma()
                return translator.translate_text(
                    text=text,
                    source_lang=_gemma_lang_code(source_language),
                    target_lang=_gemma_lang_code(target_language)
                )
            except Exception as gemma_e:
                # Local Gemma failed at runtime: fall back to chat completion.
                print(f"Google Translate Gemma failed, falling back to chat completion: {str(gemma_e)}")
                return _chat_translate(model_id, text, target_language, source_language)

        elif "llama" in model_id.lower() or "mistral" in model_id.lower():
            if target_language:
                return _chat_translate(model_id, text, target_language, source_language)

            # No target language selected: use a generic translation prompt.
            response = hf_api.chat_completion(
                model=model_id,
                messages=[
                    {"role": "system", "content": "You are a professional translator. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations."},
                    {"role": "user", "content": f"Translate this text: {text}"}
                ],
                max_tokens=1024,
                temperature=0.3
            )
            return response["choices"][0]["message"]["content"].strip()

        else:
            # Standard translation endpoint (e.g. Helsinki-NLP pair models).
            response = hf_api.translation(
                model=model_id,
                text=text
            )
            if isinstance(response, list) and len(response) > 0:
                return response[0].get('translation_text', '')
            elif isinstance(response, dict):
                return response.get('translation_text', str(response))
            return str(response)
    except Exception as e:
        error_str = str(e).lower()
        if "model_not_supported" in error_str or "not supported by any provider" in error_str or "inference api enabled" in error_str:
            # The chosen model is not servable: try a fixed list of fallbacks.
            fallback_models = [
                "Helsinki-NLP/opus-mt-en-es",  # English to Spanish
                "Helsinki-NLP/opus-mt-en-fr",  # English to French
                "Helsinki-NLP/opus-mt-en-de",  # English to German
                "Helsinki-NLP/opus-mt-en-zh",  # English to Chinese
                "Helsinki-NLP/opus-mt-en-ja",  # English to Japanese
                "meta-llama/Llama-3.2-3B-Instruct"  # Llama as general fallback
            ]

            for fallback_model in fallback_models:
                try:
                    if "llama" in fallback_model.lower():
                        # Llama fallback uses a generic translator prompt
                        # (deliberately without source-language info).
                        if target_language:
                            prompt = f"Translate the following text to {target_language}: {text}"
                        else:
                            prompt = f"Translate this text: {text}"

                        response = hf_api.chat_completion(
                            model=fallback_model,
                            messages=[
                                {"role": "system", "content": "You are a professional translator. Translate the given text accurately while preserving the original meaning and tone. Only provide the translation without any additional explanations."},
                                {"role": "user", "content": prompt}
                            ],
                            max_tokens=1024,
                            temperature=0.3
                        )
                        return f"{response['choices'][0]['message']['content'].strip()}"
                    else:
                        # Helsinki pair models use the plain translation endpoint.
                        response = hf_api.translation(
                            model=fallback_model,
                            text=text
                        )
                        if isinstance(response, list) and len(response) > 0:
                            return f"{response[0].get('translation_text', '')}"
                        elif isinstance(response, dict):
                            return f"{response.get('translation_text', str(response))}"
                        # Unrecognized response shape: fall through to the next model.
                except Exception:
                    continue  # Try the next fallback model.

            # Every fallback failed too: surface the original error.
            return f"Error: {str(e)}. Tried fallback models but none worked. Please try a different model or check your HuggingFace token."
        return f"Error: {str(e)}"
+
+
def translate_image(image_path: str, model_id: str, target_language: str = "", source_language: str = "Auto-detect"):
    """Extract and translate text found in an image.

    Only the Google TranslateGemma model supports this; every other model id
    gets an explanatory message. Returns the translation or an error string.
    """
    if not image_path:
        return "Please upload an image first."

    if "translategemma" not in model_id.lower():
        return "Image translation is only supported with Google TranslateGemma model. Please select 'google/translategemma-12b-it' from the model dropdown."

    if not GOOGLE_TRANSLATE_AVAILABLE:
        return "Google Translate Gemma is not available. Please check your installation of transformers, torch, and torchvision."

    # Display-name -> language-code table for the Gemma translator.
    # 'Auto-detect' is not a key, so .get() falls back to 'en' for it.
    codes = {
        "English": "en",
        "Spanish": "es",
        "French": "fr",
        "German": "de-DE",
        "Chinese (Simplified)": "zh-CN",
        "Chinese (Traditional)": "zh-TW",
        "Japanese": "ja",
        "Korean": "ko",
        "Italian": "it",
        "Portuguese": "pt",
        "Russian": "ru",
        "Arabic": "ar",
        "Hindi": "hi",
        "Dutch": "nl",
        "Turkish": "tr",
        "Polish": "pl",
        "Vietnamese": "vi",
        "Thai": "th",
        "Indonesian": "id",
        "Greek": "el",
        "Hebrew": "he",
        "Czech": "cs",
        "Swedish": "sv",
        "Danish": "da",
        "Norwegian": "no",
        "Finnish": "fi"
    }

    try:
        # Accepts local file paths as image input.
        return GoogleTranslateGemma().translate_image(
            image_input=image_path,
            source_lang=codes.get(source_language, "en"),
            target_lang=codes.get(target_language, "en")
        )
    except Exception as e:
        return f"Error: {str(e)}"
+
def answer_question(question: str, context: str, model_id: str):
    """Answer *question* from *context* with an extractive-QA model.

    Returns a ``(answer, confidence)`` pair; confidence is rounded to
    4 decimal places, or 0.0 when the call fails or input is empty.
    """
    if not hf_api:
        return "Please set your HuggingFace token in Settings first.", 0.0

    if not question.strip() or not context.strip():
        return "", 0.0

    try:
        result = hf_api.question_answering(
            model=model_id,
            question=question,
            context=context
        )
        return result.get('answer', ''), round(result.get('score', 0.0), 4)
    except Exception as e:
        return f"Error: {str(e)}", 0.0
+
+
def generate_image(prompt: str, model_id: str, negative_prompt: str, num_steps: int):
    """Generate an image from a text prompt.

    Args:
        prompt: Positive prompt describing the desired image.
        model_id: HuggingFace model id to call.
        negative_prompt: What to avoid; blank is sent as None.
        num_steps: Number of inference steps.

    Returns:
        Path to a PNG temp file, or None on empty input / missing token /
        failure (a Gradio warning is raised for failures).
    """
    if not hf_api:
        return None

    if not prompt.strip():
        return None

    try:
        image_bytes = hf_api.image_generation(
            model=model_id,
            prompt=prompt,
            negative_prompt=negative_prompt if negative_prompt.strip() else None,
            num_inference_steps=num_steps
        )

        # Write to a *unique* temp file. The previous fixed name
        # ("generated_image.png") was clobbered by concurrent or successive
        # generations, so two users could see each other's images.
        import tempfile
        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as f:
            f.write(image_bytes)
            return f.name
    except Exception as e:
        gr.Warning(f"Image generation error: {str(e)}")
        return None
+
+
+# ============ Model Management Functions ============
+
def search_hf_models(query: str, task: str, limit: int):
    """Search HuggingFace Hub models matching *query* and *task*.

    Returns table rows of ``[id, author, pipeline, downloads, likes]``
    sorted by downloads; [] for empty queries or on error.
    """
    if not hf_api or not query.strip():
        return []

    try:
        found = hf_api.list_models(
            search=query,
            pipeline_tag=task,
            sort="downloads",
            direction=-1,
            limit=limit
        )

        rows = []
        for m in found:
            dl = m.downloads or 0
            # Humanize the download count: 1.2M / 3.4K / plain number.
            if dl >= 1000000:
                dl_text = f"{dl/1000000:.1f}M"
            elif dl >= 1000:
                dl_text = f"{dl/1000:.1f}K"
            else:
                dl_text = str(dl)

            rows.append([
                m.id,
                m.author or '',
                m.pipeline_tag or '',
                dl_text,
                m.likes or 0
            ])
        return rows
    except Exception as e:
        gr.Warning(f"Search error: {str(e)}")
        return []
+
+
def get_model_info(model_id: str):
    """Fetch model metadata and render it as a markdown card."""
    if not hf_api or not model_id.strip():
        return "No model ID provided"

    try:
        info = hf_api.model_info(model_id)

        # Pre-format the optional fields before building the card.
        tag_list = ', '.join(info.tags[:15]) if info.tags else 'None'
        created = str(info.created_at)[:10] if info.created_at else 'Unknown'

        return f"""### {model_id}

**Author:** {info.author or 'Unknown'}
**Pipeline:** {info.pipeline_tag or 'N/A'}
**Library:** {info.library_name or 'N/A'}
**Downloads:** {(info.downloads or 0):,}
**Likes:** {(info.likes or 0):,}

**Tags:** {tag_list}

**Created:** {created}
"""
    except Exception as e:
        return f"Error fetching model info: {str(e)}"
+
+
def add_model_to_settings(model_id: str, name: str, role: str, temperature: float, max_tokens: int, system_prompt: str):
    """Append a model entry to the saved settings.

    Returns a ``(status message, table rows)`` pair for the settings UI.
    """
    if not model_id.strip():
        return "Model ID is required", get_models_table()

    settings = load_settings(MODELS_SETTINGS_FILE)
    settings.setdefault('models', [])

    # Sample the clock once. The previous code called datetime.now() three
    # times, so 'id', 'createdAt' and 'updatedAt' could all disagree.
    now_ms = int(datetime.now().timestamp() * 1000)

    settings['models'].append({
        "id": f"model-{now_ms}",
        "name": name or model_id.split('/')[-1],  # default display name: repo part of the id
        "modelId": model_id,
        "role": role,
        "temperature": temperature,
        "maxTokens": max_tokens,
        "systemPrompt": system_prompt,
        "keywords": [],
        "enabled": True,
        "createdAt": now_ms,
        "updatedAt": now_ms
    })
    save_settings(MODELS_SETTINGS_FILE, settings)

    return f"Model '{name or model_id}' added successfully!", get_models_table()
+
+
def get_models_table():
    """Return the saved models as rows for the settings dataframe."""
    settings = load_settings(MODELS_SETTINGS_FILE)

    rows = []
    for m in settings.get('models', []):
        rows.append([
            m.get('id', ''),
            m.get('name', ''),
            m.get('modelId', ''),
            m.get('role', ''),
            m.get('temperature', 0.3),
            m.get('maxTokens', 500),
            # Fix: both branches of the original ternary were the identical
            # mojibake character ("โ"), so enabled and disabled models were
            # indistinguishable in the table. Restore distinct markers.
            "✓" if m.get('enabled', True) else "✗"
        ])
    return rows
+
+
def delete_model(model_id: str):
    """Remove a model (by internal id) from the saved settings.

    Returns a ``(status message, table rows)`` pair for the settings UI.
    """
    if not model_id.strip():
        return "No model selected", get_models_table()

    settings = load_settings(MODELS_SETTINGS_FILE)
    if 'models' in settings:
        # .get() so a malformed entry without an 'id' cannot raise KeyError
        # (the previous m['id'] would crash the whole deletion).
        settings['models'] = [m for m in settings['models'] if m.get('id') != model_id]
        save_settings(MODELS_SETTINGS_FILE, settings)
        return "Model deleted", get_models_table()
    return "Model not found", get_models_table()
+
+
def toggle_model(model_id: str, enabled: bool):
    """Set a model's enabled flag, persist, and return refreshed table rows."""
    settings = load_settings(MODELS_SETTINGS_FILE)
    if 'models' in settings:
        for m in settings['models']:
            # .get() so a malformed entry without an 'id' cannot raise KeyError
            # (consistent with delete_model).
            if m.get('id') == model_id:
                m['enabled'] = enabled
                m['updatedAt'] = int(datetime.now().timestamp() * 1000)
                break
        save_settings(MODELS_SETTINGS_FILE, settings)
    return get_models_table()
+
+
+# ============ Settings Functions ============
+
def save_hf_token(token: str):
    """Persist the HuggingFace token and re-create the API client with it."""
    current = load_settings(MODELS_SETTINGS_FILE)
    current['huggingfaceToken'] = token
    save_settings(MODELS_SETTINGS_FILE, current)
    # Rebuild the global client so the new token takes effect immediately.
    reinit_api(token)
    return "Token saved successfully!"
+
+
def get_hf_token():
    """Return the stored HuggingFace token ('' when unset)."""
    return load_settings(MODELS_SETTINGS_FILE).get('huggingfaceToken', '')
+
+
def get_account_info():
    """Render the authenticated HuggingFace account as a markdown card."""
    if not hf_api:
        return "No API token configured"

    try:
        who = hf_api.hf_api.whoami()
        return f"""### Account Info

**Username:** {who.get('name', 'Unknown')}
**Email:** {who.get('email', 'Not available')}
**Organizations:** {len(who.get('orgs', []))}
"""
    except Exception as e:
        return f"Error: {str(e)}"
+
+
# ============ Gradio Interface ============
+
# Custom CSS for the professional dark theme.
# Passed verbatim to gr.Blocks(css=...) below; Gradio injects it into the page.
# NOTE(review): class names like .gradio-box / .gradio-button target Gradio's
# generated markup and may need updating across Gradio versions — verify
# against the installed Gradio release.
custom_css = """
/* Dark theme with modern design */
.gradio-container {
    background: linear-gradient(135deg, #0f0f23 0%, #1a1a2e 100%) !important;
    color: #e0e0e0 !important;
    font-family: 'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif !important;
}

/* Header styling */
.main-header {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
    padding: 2rem !important;
    border-radius: 16px !important;
    margin-bottom: 2rem !important;
    box-shadow: 0 10px 30px rgba(102, 126, 234, 0.3) !important;
    border: 1px solid rgba(255, 255, 255, 0.1) !important;
}

.main-header h1 {
    color: white !important;
    margin: 0 !important;
    font-size: 2.5rem !important;
    font-weight: 700 !important;
    text-shadow: 0 2px 4px rgba(0,0,0,0.3) !important;
}

.main-header p {
    color: rgba(255,255,255,0.9) !important;
    margin: 0.5rem 0 0 0 !important;
    font-size: 1.1rem !important;
}

/* Tab styling */
.tabs {
    background: transparent !important;
    border-radius: 12px !important;
    overflow: hidden !important;
}

.tab-nav {
    background: rgba(255, 255, 255, 0.05) !important;
    border: 1px solid rgba(255, 255, 255, 0.1) !important;
    border-radius: 12px !important;
    padding: 0.5rem !important;
    margin-bottom: 1.5rem !important;
}

.tab-nav button {
    background: transparent !important;
    color: #a0a0a0 !important;
    border: none !important;
    padding: 0.75rem 1.5rem !important;
    margin: 0 0.25rem !important;
    border-radius: 8px !important;
    transition: all 0.3s ease !important;
    font-weight: 500 !important;
}

.tab-nav button:hover {
    background: rgba(255, 255, 255, 0.1) !important;
    color: #ffffff !important;
}

.tab-nav button.selected {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
    color: white !important;
    box-shadow: 0 4px 12px rgba(102, 126, 234, 0.3) !important;
}

/* Card styling */
.gradio-box {
    background: rgba(255, 255, 255, 0.05) !important;
    border: 1px solid rgba(255, 255, 255, 0.1) !important;
    border-radius: 12px !important;
    padding: 1.5rem !important;
    backdrop-filter: blur(10px) !important;
    transition: all 0.3s ease !important;
}

.gradio-box:hover {
    background: rgba(255, 255, 255, 0.08) !important;
    border-color: rgba(255, 255, 255, 0.15) !important;
    transform: translateY(-2px) !important;
    box-shadow: 0 8px 24px rgba(0, 0, 0, 0.2) !important;
}

/* Button styling */
.gradio-button {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
    color: white !important;
    border: none !important;
    padding: 0.75rem 1.5rem !important;
    border-radius: 8px !important;
    font-weight: 600 !important;
    transition: all 0.3s ease !important;
    box-shadow: 0 4px 12px rgba(102, 126, 234, 0.3) !important;
}

.gradio-button:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 6px 20px rgba(102, 126, 234, 0.4) !important;
}

.gradio-button.secondary {
    background: rgba(255, 255, 255, 0.1) !important;
    color: #e0e0e0 !important;
    border: 1px solid rgba(255, 255, 255, 0.2) !important;
}

/* Input styling */
.gradio-textbox, .gradio-dropdown {
    background: rgba(255, 255, 255, 0.05) !important;
    border: 1px solid rgba(255, 255, 255, 0.2) !important;
    border-radius: 8px !important;
    color: #e0e0e0 !important;
    transition: all 0.3s ease !important;
}

.gradio-textbox:focus, .gradio-dropdown:focus {
    border-color: #667eea !important;
    box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.2) !important;
    background: rgba(255, 255, 255, 0.08) !important;
}

.gradio-textbox::placeholder {
    color: rgba(255, 255, 255, 0.5) !important;
}

/* Slider styling */
.gradio-slider {
    background: rgba(255, 255, 255, 0.1) !important;
}

.gradio-slider .slider-track {
    background: linear-gradient(90deg, #667eea 0%, #764ba2 100%) !important;
}

/* Chatbot styling */
.gradio-chatbot {
    background: rgba(255, 255, 255, 0.03) !important;
    border: 1px solid rgba(255, 255, 255, 0.1) !important;
    border-radius: 12px !important;
}

.gradio-chatbot .message {
    background: rgba(255, 255, 255, 0.05) !important;
    border-radius: 8px !important;
    margin: 0.5rem !important;
    padding: 1rem !important;
}

.gradio-chatbot .message.user {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
    color: white !important;
}

/* Dataframe styling */
.gradio-dataframe {
    background: rgba(255, 255, 255, 0.05) !important;
    border: 1px solid rgba(255, 255, 255, 0.1) !important;
    border-radius: 8px !important;
}

.gradio-dataframe table {
    color: #e0e0e0 !important;
}

.gradio-dataframe th {
    background: rgba(255, 255, 255, 0.1) !important;
    border-bottom: 1px solid rgba(255, 255, 255, 0.2) !important;
}

.gradio-dataframe td {
    border-bottom: 1px solid rgba(255, 255, 255, 0.05) !important;
}

/* Markdown styling */
.gradio-markdown {
    color: #e0e0e0 !important;
}

.gradio-markdown h1, .gradio-markdown h2, .gradio-markdown h3 {
    color: #ffffff !important;
    margin-top: 1.5rem !important;
}

.gradio-markdown a {
    color: #667eea !important;
}

/* Footer styling */
footer {
    background: rgba(255, 255, 255, 0.03) !important;
    border-top: 1px solid rgba(255, 255, 255, 0.1) !important;
    padding: 1.5rem !important;
    text-align: center !important;
    color: rgba(255, 255, 255, 0.6) !important;
}

/* Loading animation */
.loading {
    display: inline-block;
    width: 20px;
    height: 20px;
    border: 3px solid rgba(255, 255, 255, 0.3);
    border-radius: 50%;
    border-top-color: #667eea;
    animation: spin 1s ease-in-out infinite;
}

@keyframes spin {
    to { transform: rotate(360deg); }
}

/* Responsive design */
@media (max-width: 768px) {
    .main-header h1 {
        font-size: 2rem !important;
    }

    .gradio-box {
        padding: 1rem !important;
    }

    .tab-nav button {
        padding: 0.5rem 1rem !important;
        font-size: 0.9rem !important;
    }
}

/* Custom component styles */
.chatbot-container {
    background: rgba(255, 255, 255, 0.03) !important;
    border: 1px solid rgba(255, 255, 255, 0.1) !important;
    border-radius: 12px !important;
    overflow: hidden !important;
}

.chat-input textarea {
    background: rgba(255, 255, 255, 0.05) !important;
    border: 1px solid rgba(255, 255, 255, 0.2) !important;
    border-radius: 8px !important;
    resize: none !important;
    transition: all 0.3s ease !important;
}

.chat-input textarea:focus {
    border-color: #667eea !important;
    box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.2) !important;
}

.send-button {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
    border: none !important;
    height: 100% !important;
    min-height: 40px !important;
}

.settings-group {
    background: rgba(255, 255, 255, 0.05) !important;
    border: 1px solid rgba(255, 255, 255, 0.1) !important;
    border-radius: 12px !important;
    padding: 1.5rem !important;
}

.input-group, .output-group {
    background: rgba(255, 255, 255, 0.05) !important;
    border: 1px solid rgba(255, 255, 255, 0.1) !important;
    border-radius: 12px !important;
    padding: 1.5rem !important;
    height: 100% !important;
}

.output-textarea {
    background: rgba(255, 255, 255, 0.03) !important;
    border: 1px solid rgba(255, 255, 255, 0.1) !important;
    border-radius: 8px !important;
    font-family: 'Inter', monospace !important;
    line-height: 1.6 !important;
}

.translation-input, .translation-output {
    background: rgba(255, 255, 255, 0.05) !important;
    border: 1px solid rgba(255, 255, 255, 0.1) !important;
    border-radius: 12px !important;
    padding: 1.5rem !important;
}

.translation-result {
    background: rgba(102, 126, 234, 0.1) !important;
    border: 1px solid rgba(102, 126, 234, 0.3) !important;
    border-radius: 8px !important;
    font-weight: 500 !important;
}

.image-controls, .image-output {
    background: rgba(255, 255, 255, 0.05) !important;
    border: 1px solid rgba(255, 255, 255, 0.1) !important;
    border-radius: 12px !important;
    padding: 1.5rem !important;
}

.generated-image {
    border-radius: 8px !important;
    overflow: hidden !important;
    box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2) !important;
}

/* Animation classes */
.fade-in {
    animation: fadeIn 0.5s ease-in-out;
}

@keyframes fadeIn {
    from { opacity: 0; transform: translateY(10px); }
    to { opacity: 1; transform: translateY(0); }
}

.slide-up {
    animation: slideUp 0.3s ease-out;
}

@keyframes slideUp {
    from { transform: translateY(20px); opacity: 0; }
    to { transform: translateY(0); opacity: 1; }
}

/* Custom scrollbar */
::-webkit-scrollbar {
    width: 8px;
    height: 8px;
}

::-webkit-scrollbar-track {
    background: rgba(255, 255, 255, 0.05);
    border-radius: 4px;
}

::-webkit-scrollbar-thumb {
    background: rgba(102, 126, 234, 0.5);
    border-radius: 4px;
}

::-webkit-scrollbar-thumb:hover {
    background: rgba(102, 126, 234, 0.7);
}
"""
+
+# Build the Gradio app
+with gr.Blocks(
+ title="AI Assistant - HuggingFace",
+ theme=gr.themes.Soft(
+ primary_hue="purple",
+ secondary_hue="blue",
+ neutral_hue="slate",
+ font=["Inter", "system-ui", "sans-serif"]
+ ),
+ css=custom_css
+) as app:
+
+ # Header with modern design
+ gr.HTML("""
+
+
+
+
+ ๐ค
+ AI Assistant
+
+
Powered by HuggingFace Hub โข Advanced AI Models
+
+
+
+ โ System Online
+
+
+
+
+ """)
+
+ with gr.Tabs() as tabs:
+
+ # ============ Chat Tab ============
+ with gr.Tab("๐ฌ Chat", id="chat"):
+ with gr.Row(equal_height=True):
+ with gr.Column(scale=3):
+ # Chat header
+ gr.HTML("""
+
+
+ ๐ฌ Conversation
+
+
+ Interactive chat with AI models
+
+
+ """)
+
+ chatbot = gr.Chatbot(
+ label="",
+ height=500,
+ placeholder="Start a conversation... Type your message below",
+ value=[],
+ show_label=False,
+ container=True,
+ elem_classes=["chatbot-container"]
+ )
+
+ with gr.Row():
+ with gr.Column(scale=4):
+ chat_input = gr.Textbox(
+ placeholder="Type your message here... (Press Enter to send)",
+ label="",
+ show_label=False,
+ lines=1,
+ max_lines=5,
+ elem_classes=["chat-input"]
+ )
+ with gr.Column(scale=1, min_width=100):
+ send_btn = gr.Button(
+ "Send",
+ variant="primary",
+ size="lg",
+ elem_classes=["send-button"]
+ )
+
+ with gr.Row():
+ clear_btn = gr.Button(
+ "๐๏ธ Clear Chat",
+ size="sm",
+ variant="secondary",
+ elem_classes=["action-button"]
+ )
+ with gr.Column(scale=1):
+ gr.HTML("""
+
+ Tip: Press Shift+Enter for new line
+
+ """)
+
+ with gr.Column(scale=1, min_width=300):
+ # Settings panel
+ gr.HTML("""
+
+
+ โ๏ธ Chat Settings
+
+
+ """)
+
+ with gr.Group(elem_classes=["settings-group"]):
+ chat_model = gr.Dropdown(
+ choices=get_model_choices(),
+ value=get_model_choices()[0] if get_model_choices() else None,
+ label="๐ค Model",
+ interactive=True,
+ info="Select AI model for conversation"
+ )
+
+ chat_temp = gr.Slider(
+ minimum=0.1,
+ maximum=1.0,
+ value=0.7,
+ step=0.1,
+ label="๐ก๏ธ Temperature",
+ info="Controls randomness (0.1 = focused, 1.0 = creative)"
+ )
+
+ chat_max_tokens = gr.Slider(
+ minimum=50,
+ maximum=4096,
+ value=500,
+ step=50,
+ label="๐ Max Tokens",
+ info="Maximum response length"
+ )
+
+ chat_system = gr.Textbox(
+ label="๐ฏ System Prompt",
+ placeholder="You are a helpful assistant...",
+ lines=3,
+ info="Define AI behavior and personality"
+ )
+
+ refresh_models_btn = gr.Button(
+ "๐ Refresh Models",
+ size="sm",
+ variant="secondary",
+ elem_classes=["refresh-button"]
+ )
+
+ def respond(message, history, model, temp, max_tok, system):
+ if not message.strip():
+ return history, ""
+ response = chat_response(message, history, model, temp, max_tok, system)
+ # Use tuple format for default chatbot
+ history.append((message, response))
+ return history, ""
+
+ send_btn.click(
+ respond,
+ inputs=[chat_input, chatbot, chat_model, chat_temp, chat_max_tokens, chat_system],
+ outputs=[chatbot, chat_input]
+ )
+
+ chat_input.submit(
+ respond,
+ inputs=[chat_input, chatbot, chat_model, chat_temp, chat_max_tokens, chat_system],
+ outputs=[chatbot, chat_input]
+ )
+
+ clear_btn.click(lambda: [], outputs=[chatbot])
+
+ refresh_models_btn.click(
+ lambda: gr.update(choices=get_model_choices()),
+ outputs=[chat_model]
+ )
+
+ # ============ Text Generation Tab ============
+ with gr.Tab("๐ Text Generation", id="text-gen"):
+ gr.HTML("""
+
+
+ ๐ Text Generation
+
+
+ Generate creative text, stories, articles, and more with AI
+
+
+ """)
+
+ with gr.Row(equal_height=True):
+ with gr.Column(scale=1):
+ with gr.Group(elem_classes=["input-group"]):
+ gen_prompt = gr.Textbox(
+ label="โ๏ธ Prompt",
+ placeholder="Enter your prompt for text generation... Be creative and specific!",
+ lines=6,
+ info="The more detailed your prompt, the better the result"
+ )
+
+ with gr.Row():
+ gen_model = gr.Dropdown(
+ choices=get_model_choices(),
+ value=get_model_choices()[0] if get_model_choices() else None,
+ label="๐ค Model",
+ info="Choose model for generation"
+ )
+ gen_temp = gr.Slider(
+ 0.1, 1.0, 0.7, step=0.1,
+ label="๐ก๏ธ Temperature",
+ info="Creativity level"
+ )
+
+ with gr.Row():
+ gen_max_tokens = gr.Slider(
+ 50, 2048, 250, step=50,
+ label="๐ Max Tokens",
+ info="Response length"
+ )
+ gen_top_p = gr.Slider(
+ 0.1, 1.0, 0.95, step=0.05,
+ label="๐ฏ Top P",
+ info="Nucleus sampling"
+ )
+
+ gen_btn = gr.Button(
+ "โจ Generate",
+ variant="primary",
+ size="lg",
+ elem_classes=["generate-button"]
+ )
+
+ with gr.Column(scale=1):
+ with gr.Group(elem_classes=["output-group"]):
+ gen_output = gr.Textbox(
+ label="๐ Generated Text",
+ lines=18,
+ show_label=True,
+ info="AI-generated content will appear here",
+ elem_classes=["output-textarea"]
+ )
+
+ with gr.Row():
+ copy_gen_btn = gr.Button(
+ "๐ Copy",
+ size="sm",
+ variant="secondary",
+ elem_classes=["copy-button"]
+ )
+ regenerate_btn = gr.Button(
+ "๐ Regenerate",
+ size="sm",
+ variant="secondary"
+ )
+
+ gen_btn.click(
+ text_generation,
+ inputs=[gen_prompt, gen_model, gen_temp, gen_max_tokens, gen_top_p],
+ outputs=[gen_output]
+ )
+
+ copy_gen_btn.click(
+ fn=None,
+ js="(text) => { navigator.clipboard.writeText(text); alert('Text copied to clipboard!'); }",
+ inputs=[gen_output]
+ )
+
+ # ============ Summarization Tab ============
+ with gr.Tab("๐ Summarization", id="summarize"):
+ with gr.Row():
+ with gr.Column():
+ sum_text = gr.Textbox(
+ label="Text to Summarize",
+ placeholder="Paste the text you want to summarize...",
+ lines=10
+ )
+
+ sum_model = gr.Dropdown(
+ choices=["facebook/bart-large-cnn", "sshleifer/distilbart-cnn-12-6", "google/pegasus-xsum"],
+ value="facebook/bart-large-cnn",
+ label="Model"
+ )
+
+ with gr.Row():
+ sum_max_len = gr.Slider(50, 500, 150, step=10, label="Max Length")
+ sum_min_len = gr.Slider(10, 100, 30, step=5, label="Min Length")
+
+ sum_btn = gr.Button("Summarize", variant="primary")
+
+ with gr.Column():
+ sum_output = gr.Textbox(
+ label="Summary",
+ lines=8
+ )
+
+ sum_btn.click(
+ summarize_text,
+ inputs=[sum_text, sum_model, sum_max_len, sum_min_len],
+ outputs=[sum_output]
+ )
+
# ============ Translation Tab ============
with gr.Tab("๐ Translation", id="translate"):
    # Header banner.
    # NOTE(review): the markup of these gr.HTML blocks appears stripped in
    # this revision - confirm the intended HTML against the previous version.
    gr.HTML("""


        ๐ Translation


        Translate text between multiple languages with advanced AI models


    """)

    with gr.Row(equal_height=True):
        with gr.Column(scale=1):
            with gr.Group(elem_classes=["translation-input"]):
                gr.HTML("""


                    ๐ Translation Models


                    - Google TranslateGemma - Advanced multilingual translation
                    - Llama 3.2 - Multilingual with dynamic prompts
                    - MADLAD-400 - 400+ languages support
                    - Helsinki-NLP - Specialized language pairs


                    โจ New: Target language selection now works with all models!


                    Note: If a model is not available, the system will automatically try fallback models.


                """)

                trans_text = gr.Textbox(
                    label="๐ Text to Translate",
                    placeholder="Enter text to translate...",
                    lines=6,
                    info="Supports multiple languages and formats"
                )

                # Mix of multilingual models and fixed-pair Helsinki models.
                trans_model = gr.Dropdown(
                    choices=[
                        "google/translategemma-12b-it",
                        "meta-llama/Llama-3.2-3B-Instruct",
                        "google/madlad400-3b-mt",
                        "Helsinki-NLP/opus-mt-en-de",
                        "Helsinki-NLP/opus-mt-en-fr",
                        "Helsinki-NLP/opus-mt-en-es",
                        "Helsinki-NLP/opus-mt-en-zh",
                        "Helsinki-NLP/opus-mt-en-ja",
                        "Helsinki-NLP/opus-mt-de-en",
                        "Helsinki-NLP/opus-mt-fr-en",
                        "Helsinki-NLP/opus-mt-es-en"
                    ],
                    value="google/translategemma-12b-it",
                    label="๐ค Translation Model",
                    info="Choose model based on your language needs"
                )

                # Target language selection (works with all models)
                target_language = gr.Dropdown(
                    choices=[
                        "English", "Spanish", "French", "German", "Chinese (Simplified)", "Chinese (Traditional)",
                        "Japanese", "Korean", "Italian", "Portuguese", "Russian", "Arabic", "Hindi",
                        "Dutch", "Turkish", "Polish", "Vietnamese", "Thai", "Indonesian",
                        "Greek", "Hebrew", "Czech", "Swedish", "Danish", "Norwegian", "Finnish"
                    ],
                    value="English",
                    label="๐ฏ Target Language",
                    info="Select target language for translation (works with all models)",
                    visible=True
                )

                # Source language selection (optional)
                # NOTE(review): has fewer choices than target_language (no
                # Greek/Hebrew/etc.), so "Swap Languages" can produce a value
                # the other dropdown lacks - confirm the lists stay in sync.
                source_language = gr.Dropdown(
                    choices=[
                        "Auto-detect", "English", "Spanish", "French", "German", "Chinese (Simplified)", "Chinese (Traditional)",
                        "Japanese", "Korean", "Italian", "Portuguese", "Russian", "Arabic", "Hindi",
                        "Dutch", "Turkish", "Polish", "Vietnamese", "Thai", "Indonesian"
                    ],
                    value="Auto-detect",
                    label="๐ค Source Language",
                    info="Select source language or leave as Auto-detect",
                    visible=True
                )

                trans_btn = gr.Button(
                    "๐ Translate",
                    variant="primary",
                    size="lg",
                    elem_classes=["translate-button"]
                )

                # Image translation section
                gr.HTML("""


                    ๐ผ๏ธ Image Translation (Google TranslateGemma only)


                    Upload an image containing text to extract and translate it


                """)

                trans_image = gr.Image(
                    label="๐ผ๏ธ Upload Image",
                    type="filepath",
                    visible=True  # Initially visible since default model is TranslateGemma
                )

                trans_image_btn = gr.Button(
                    "๐ผ๏ธ Translate Image",
                    variant="secondary",
                    size="lg",
                    elem_classes=["translate-button"],
                    visible=True  # Initially visible since default model is TranslateGemma
                )

        with gr.Column(scale=1):
            with gr.Group(elem_classes=["translation-output"]):
                trans_output = gr.Textbox(
                    label="โ
Translation Result",
                    lines=6,
                    info="Translated text will appear here",
                    elem_classes=["translation-result"]
                )

                with gr.Row():
                    copy_trans_btn = gr.Button(
                        "๐ Copy",
                        size="sm",
                        variant="secondary"
                    )
                    swap_lang_btn = gr.Button(
                        "๐ Swap Languages",
                        size="sm",
                        variant="secondary"
                    )
# Keep the target-language dropdown visible for every model. The UI text
# and the dropdown's own info string state that target-language selection
# "works with all models", and the translate wiring always passes it to
# translate_text - but this handler previously hid the dropdown for any
# model without "translategemma" in its id, contradicting that contract.
def update_target_language_visibility(model):
    """Return a gr.update keeping the target-language dropdown visible.

    Args:
        model: selected translation model id; unused for visibility but
            kept so the existing .change wiring signature is unchanged.
    """
    return gr.update(visible=True)
+
# Image upload and "Translate Image" only make sense for the multimodal
# TranslateGemma model, so show them only when such a model is selected.
def update_image_visibility(model):
    """Toggle image-translation widgets based on the selected model id."""
    is_gemma = "translategemma" in model.lower()
    return gr.update(visible=is_gemma)
+
# Update visibility when model changes
# Three separate .change listeners: one per dependent component.
trans_model.change(
    update_target_language_visibility,
    inputs=[trans_model],
    outputs=[target_language]
)

trans_model.change(
    update_image_visibility,
    inputs=[trans_model],
    outputs=[trans_image]
)

trans_model.change(
    update_image_visibility,
    inputs=[trans_model],
    outputs=[trans_image_btn]
)

# translate_text / translate_image are defined earlier in this file.
trans_btn.click(
    translate_text,
    inputs=[trans_text, trans_model, target_language, source_language],
    outputs=[trans_output]
)

trans_image_btn.click(
    translate_image,
    inputs=[trans_image, trans_model, target_language, source_language],
    outputs=[trans_output]
)
+
# Swap languages functionality
def swap_languages(src_lang, tgt_lang, text):
    """Swap the source and target language selections.

    Fixes two issues with the previous behaviour:
    - the user's input text is preserved instead of being cleared;
    - "Auto-detect" is never written into the target dropdown (it is not
      one of its choices); in that case the target falls back to English.

    Args:
        src_lang: current source_language dropdown value (may be "Auto-detect").
        tgt_lang: current target_language dropdown value.
        text: current contents of the translation input box.

    Returns:
        (new_source, new_target, text) matching the click wiring outputs
        [source_language, target_language, trans_text].
    """
    # NOTE(review): a few target-only choices (e.g. Greek, Hebrew) have no
    # source counterpart - confirm the dropdown choice lists stay in sync.
    new_target = "English" if src_lang == "Auto-detect" else src_lang
    return tgt_lang, new_target, text
+
# Swapping updates both dropdowns and the input textbox in one event.
swap_lang_btn.click(
    swap_languages,
    inputs=[source_language, target_language, trans_text],
    outputs=[source_language, target_language, trans_text]
)
+
# Copy translation functionality
# The actual clipboard write happens client-side in the js= handler; this
# Python callback simply echoes the text back unchanged.
def copy_translation(text):
    """Identity pass-through for the client-side copy handler."""
    return text
+
# No outputs= here: the js= callback performs the clipboard write in the browser.
copy_trans_btn.click(
    copy_translation,
    inputs=[trans_output],
    js="(text) => { navigator.clipboard.writeText(text); alert('Translation copied to clipboard!'); }"
)
+
# ============ Translation Testing Tab ============
# Diagnostics tab: run a single ad-hoc translation or a predefined suite.
with gr.Tab("๐งช Translation Testing", id="translation-testing"):
    # NOTE(review): markup of these gr.HTML blocks appears stripped - confirm.
    gr.HTML("""


        ๐งช Translation Testing


        Test and validate translation functionality with comprehensive test scenarios


    """)

    with gr.Row(equal_height=True):
        with gr.Column(scale=1):
            with gr.Group(elem_classes=["test-input"]):
                gr.HTML("""


                    ๐งช Test Options


                    Choose between single translation test or comprehensive test suite


                """)

                # Single test inputs
                test_text = gr.Textbox(
                    label="๐ Test Text",
                    placeholder="Enter text to test translation...",
                    lines=3,
                    info="Text to translate for testing"
                )

                # Language codes here are ISO-style (with a few regioned
                # variants such as de-DE / zh-CN), unlike the display names
                # used on the main Translation tab.
                test_source_lang = gr.Dropdown(
                    choices=[
                        "en", "es", "fr", "de-DE", "zh-CN", "zh-TW", "ja", "ko", "it", "pt", "ru", "ar", "hi",
                        "nl", "tr", "pl", "vi", "th", "id", "el", "he", "cs", "sv", "da", "no", "fi"
                    ],
                    value="en",
                    label="๐ค Source Language Code",
                    info="ISO language code (e.g., 'en' for English)"
                )

                test_target_lang = gr.Dropdown(
                    choices=[
                        "en", "es", "fr", "de-DE", "zh-CN", "zh-TW", "ja", "ko", "it", "pt", "ru", "ar", "hi",
                        "nl", "tr", "pl", "vi", "th", "id", "el", "he", "cs", "sv", "da", "no", "fi"
                    ],
                    value="es",
                    label="๐ฏ Target Language Code",
                    info="ISO language code (e.g., 'es' for Spanish)"
                )

                with gr.Row():
                    single_test_btn = gr.Button(
                        "๐งช Run Single Test",
                        variant="primary",
                        size="lg"
                    )

                    comprehensive_test_btn = gr.Button(
                        "๐ Run Comprehensive Tests",
                        variant="secondary",
                        size="lg"
                    )

        with gr.Column(scale=1):
            with gr.Group(elem_classes=["test-output"]):
                test_output = gr.Textbox(
                    label="๐ Test Results",
                    lines=15,
                    info="Test results and diagnostics will appear here",
                    elem_classes=["test-result"]
                )

                test_summary = gr.Textbox(
                    label="๐ Test Summary",
                    lines=5,
                    info="Summary of test results",
                    elem_classes=["test-summary"]
                )

                copy_test_btn = gr.Button(
                    "๐ Copy Results",
                    size="sm",
                    variant="secondary"
                )
# Test functions
def run_single_test(text, source_lang, target_lang):
    """Run a single translation test.

    Args:
        text: text to translate.
        source_lang: ISO-style source language code (e.g. "en").
        target_lang: ISO-style target language code (e.g. "es").

    Returns:
        A (detailed_report, one_line_summary) tuple rendered into the
        test_output / test_summary textboxes.
    """
    if not text.strip():
        return "โ Please enter text to test", ""

    # test_translategemma is defined earlier in this file; a falsy result
    # is treated as a failed translation attempt.
    result = test_translategemma(text, source_lang, target_lang)
    if result:
        return f"""
๐งช Single Translation Test Results
{"=" * 50}
โ
Test completed successfully!

๐ Input:
   Text: {text}
   Source: {source_lang}
   Target: {target_lang}

๐ Translation:
   {result}

โ
Status: PASSED
๐ง Method: {'Google Translate Gemma' if GOOGLE_TRANSLATE_AVAILABLE else 'Chat Completion Fallback'}
""", "โ
Single test completed successfully"
    else:
        return f"""
๐งช Single Translation Test Results
{"=" * 50}
โ Test failed!

๐ Input:
   Text: {text}
   Source: {source_lang}
   Target: {target_lang}

โ Status: FAILED
๐ง Method: {'Google Translate Gemma' if GOOGLE_TRANSLATE_AVAILABLE else 'Chat Completion Fallback'}

โ ๏ธ Please check your HuggingFace token and model availability.
""", "โ Single test failed"
+
def run_comprehensive_tests():
    """Run comprehensive translation tests.

    Delegates to run_multiple_translation_tests() (defined earlier in this
    file) and formats its per-case results plus the overall summary.

    Returns:
        (formatted_report, summary) for the test_output / test_summary boxes.
    """
    results, summary = run_multiple_translation_tests()

    # Format results for display
    output = "๐งช Comprehensive Translation Test Results\n" + "=" * 60 + "\n\n"

    # Each result dict carries: case, success, original, translation.
    for i, result in enumerate(results, 1):
        status = "โ
PASSED" if result['success'] else "โ FAILED"
        output += f"๐ Test {i}: {result['case']}\n"
        output += f"   Status: {status}\n"
        output += f"   Original: {result['original']}\n"
        if result['translation']:
            output += f"   Translation: {result['translation']}\n"
        output += "\n"

    return output, summary
+
# Button click handlers
single_test_btn.click(
    run_single_test,
    inputs=[test_text, test_source_lang, test_target_lang],
    outputs=[test_output, test_summary]
)

# The comprehensive suite takes no inputs; it uses predefined test cases.
comprehensive_test_btn.click(
    run_comprehensive_tests,
    inputs=[],
    outputs=[test_output, test_summary]
)
+
# Copy test results functionality
# The clipboard write itself is done by the js= handler; this callback
# mirrors its joining of the two textboxes.
def copy_test_results(results, summary):
    """Concatenate the report and summary with a blank line between them."""
    return f"{results}\n\n{summary}"
+
# No outputs= here: the js= callback performs the clipboard write in the browser.
copy_test_btn.click(
    copy_test_results,
    inputs=[test_output, test_summary],
    js="(results, summary) => { const text = results + '\\n\\n' + summary; navigator.clipboard.writeText(text); alert('Test results copied to clipboard!'); }"
)
+
# ============ Question Answering Tab ============
# Extractive QA: the answer is a span of the supplied context.
with gr.Tab("โ Q&A", id="qa"):
    with gr.Row():
        with gr.Column():
            qa_context = gr.Textbox(
                label="Context",
                placeholder="Paste the context/document here...",
                lines=8
            )

            qa_question = gr.Textbox(
                label="Question",
                placeholder="What would you like to know?"
            )

            # SQuAD-finetuned extractive QA checkpoints.
            qa_model = gr.Dropdown(
                choices=[
                    "deepset/roberta-base-squad2",
                    "distilbert-base-cased-distilled-squad",
                    "bert-large-uncased-whole-word-masking-finetuned-squad"
                ],
                value="deepset/roberta-base-squad2",
                label="Model"
            )

            qa_btn = gr.Button("Get Answer", variant="primary")

        with gr.Column():
            qa_answer = gr.Textbox(label="Answer", lines=3)
            qa_score = gr.Number(label="Confidence Score")

    # answer_question is defined earlier in this file; note the argument
    # order is (question, context, model).
    qa_btn.click(
        answer_question,
        inputs=[qa_question, qa_context, qa_model],
        outputs=[qa_answer, qa_score]
    )
+
# ============ Image Generation Tab ============
# Text-to-image via Stable Diffusion checkpoints.
with gr.Tab("๐จ Image Generation", id="image-gen"):
    # NOTE(review): markup of these gr.HTML blocks appears stripped - confirm.
    gr.HTML("""


        ๐จ Image Generation


        Create stunning images from text descriptions using advanced AI models


    """)

    with gr.Row(equal_height=True):
        with gr.Column(scale=1):
            with gr.Group(elem_classes=["image-controls"]):
                img_prompt = gr.Textbox(
                    label="โจ Prompt",
                    placeholder="Describe the image you want to generate... Be detailed and creative!",
                    lines=4,
                    info="Example: 'A beautiful sunset over mountains, digital art, highly detailed'"
                )

                img_negative = gr.Textbox(
                    label="๐ซ Negative Prompt",
                    placeholder="What to avoid in the image... (optional)",
                    lines=2,
                    info="Example: 'blurry, low quality, distorted'"
                )

                with gr.Row():
                    img_model = gr.Dropdown(
                        choices=[
                            "stabilityai/stable-diffusion-xl-base-1.0",
                            "runwayml/stable-diffusion-v1-5",
                            "CompVis/stable-diffusion-v1-4"
                        ],
                        value="stabilityai/stable-diffusion-xl-base-1.0",
                        label="๐ญ Model",
                        info="Choose image generation model"
                    )
                    # Diffusion denoising steps: quality/latency trade-off.
                    img_steps = gr.Slider(
                        10, 100, 30, step=5,
                        label="โ๏ธ Steps",
                        info="More steps = better quality (slower)"
                    )

                img_btn = gr.Button(
                    "๐จ Generate Image",
                    variant="primary",
                    size="lg",
                    elem_classes=["generate-image-button"]
                )

                gr.HTML("""


                    ๐ก Pro Tips


                    - Be specific in your descriptions
                    - Use art styles: 'digital art', 'oil painting', 'photo'
                    - Add quality terms: 'highly detailed', '4K', 'sharp focus'


                """)

        with gr.Column(scale=1):
            with gr.Group(elem_classes=["image-output"]):
                img_output = gr.Image(
                    label="๐ผ๏ธ Generated Image",
                    type="filepath",
                    elem_classes=["generated-image"]
                )

                # NOTE(review): these two buttons have no click handlers wired
                # in this section - confirm whether that is intentional.
                with gr.Row():
                    download_btn = gr.Button(
                        "๐พ Download",
                        size="sm",
                        variant="secondary"
                    )
                    share_img_btn = gr.Button(
                        "๐ Share",
                        size="sm",
                        variant="secondary"
                    )

    # generate_image is defined earlier in this file.
    img_btn.click(
        generate_image,
        inputs=[img_prompt, img_model, img_negative, img_steps],
        outputs=[img_output]
    )
+
# ============ Models Tab ============
# Left column: search the HF Hub and inspect models; right column: manage
# the locally saved model list (settings/models.json).
with gr.Tab("๐ง Models", id="models"):
    with gr.Row():
        # Left: Search & Browse
        with gr.Column(scale=2):
            gr.Markdown("### ๐ Search HuggingFace Models")

            with gr.Row():
                search_query = gr.Textbox(
                    placeholder="Search for models...",
                    show_label=False,
                    scale=3
                )
                # Hub pipeline-tag filter for the search.
                search_task = gr.Dropdown(
                    choices=[
                        "text-generation",
                        "text-classification",
                        "summarization",
                        "translation",
                        "question-answering",
                        "image-classification",
                        "text-to-image"
                    ],
                    value="text-generation",
                    show_label=False,
                    scale=2
                )
                search_limit = gr.Slider(5, 50, 10, step=5, label="Limit", scale=1)

            search_btn = gr.Button("Search", variant="primary")

            search_results = gr.Dataframe(
                headers=["Model ID", "Author", "Task", "Downloads", "Likes"],
                label="Search Results",
                interactive=False,
                wrap=True
            )

            gr.Markdown("### ๐ Model Info")
            model_info_input = gr.Textbox(
                placeholder="Enter model ID to get info...",
                label="Model ID"
            )
            get_info_btn = gr.Button("Get Info")
            model_info_output = gr.Markdown(label="Model Information")

        # Right: My Models
        with gr.Column(scale=2):
            gr.Markdown("### ๐ฆ My Models")

            # Seeded once from get_models_table(); refreshed via the button.
            my_models_table = gr.Dataframe(
                headers=["ID", "Name", "Model ID", "Role", "Temp", "Max Tokens", "Enabled"],
                value=get_models_table(),
                label="Saved Models",
                interactive=False
            )

            refresh_table_btn = gr.Button("๐ Refresh")

            gr.Markdown("### โ Add New Model")

            add_model_id = gr.Textbox(label="Model ID", placeholder="e.g., meta-llama/Llama-3.2-3B-Instruct")
            add_model_name = gr.Textbox(label="Display Name", placeholder="My Model")
            add_model_role = gr.Dropdown(
                choices=["assistant", "creative", "coder", "analyst", "custom"],
                value="assistant",
                label="Role"
            )

            with gr.Row():
                add_model_temp = gr.Slider(0.1, 1.0, 0.3, step=0.1, label="Temperature")
                add_model_tokens = gr.Slider(50, 4096, 500, step=50, label="Max Tokens")

            add_model_system = gr.Textbox(
                label="System Prompt",
                placeholder="Optional system prompt...",
                lines=2
            )

            add_model_btn = gr.Button("Add Model", variant="primary")
            # Shared status line for both add and delete operations.
            add_model_status = gr.Textbox(label="Status", interactive=False)

            gr.Markdown("### ๐๏ธ Delete Model")
            delete_model_id = gr.Textbox(label="Model ID to Delete", placeholder="Enter model ID")
            delete_model_btn = gr.Button("Delete", variant="stop")

    # Handlers below are defined earlier in this file.
    search_btn.click(
        search_hf_models,
        inputs=[search_query, search_task, search_limit],
        outputs=[search_results]
    )

    get_info_btn.click(
        get_model_info,
        inputs=[model_info_input],
        outputs=[model_info_output]
    )

    add_model_btn.click(
        add_model_to_settings,
        inputs=[add_model_id, add_model_name, add_model_role, add_model_temp, add_model_tokens, add_model_system],
        outputs=[add_model_status, my_models_table]
    )

    refresh_table_btn.click(
        get_models_table,
        outputs=[my_models_table]
    )

    delete_model_btn.click(
        delete_model,
        inputs=[delete_model_id],
        outputs=[add_model_status, my_models_table]
    )
+
# ============ Settings Tab ============
# HF token management plus account info display.
with gr.Tab("โ๏ธ Settings", id="settings"):
    with gr.Row():
        with gr.Column():
            gr.Markdown("### ๐ HuggingFace API Token")

            token_input = gr.Textbox(
                label="API Token",
                value=get_hf_token(),
                type="password",
                placeholder="hf_..."
            )

            save_token_btn = gr.Button("Save Token", variant="primary")
            token_status = gr.Textbox(label="Status", interactive=False)

            # NOTE(review): the link points at huggingface.co rather than
            # huggingface.co/settings/tokens - confirm this mirror is intended.
            gr.Markdown("""
            ---
            **How to get your token:**
            1. Go to [HuggingFace Settings](https://huggingface.co/settings/tokens)
            2. Create a new token with read access
            3. Paste it above and save
            """)

        with gr.Column():
            gr.Markdown("### ๐ค Account Info")
            account_info = gr.Markdown(value="Click refresh to load account info")
            refresh_account_btn = gr.Button("๐ Refresh Account Info")

    # save_hf_token / get_account_info are defined earlier in this file.
    save_token_btn.click(
        save_hf_token,
        inputs=[token_input],
        outputs=[token_status]
    )

    refresh_account_btn.click(
        get_account_info,
        outputs=[account_info]
    )
+
# Footer with modern design
# NOTE(review): the footer markup appears stripped in this revision,
# leaving an empty gr.HTML block - confirm the intended HTML.
gr.HTML("""
""")
+
# Add custom JavaScript for enhanced interactions
# Runs once on page load (fn=None: no Python callback). The JS string is
# behavior - do not edit it casually. It installs: smooth anchor scrolling,
# chat-input auto-resize + Enter-to-send, hover/click visual effects,
# copy-button feedback, keyboard shortcuts (Ctrl+K focus chat, Ctrl+/ help,
# Escape clear), and input focus glows.
app.load(
    fn=None,
    js="""
    function() {
        // Add smooth scrolling
        document.querySelectorAll('a[href^="#"]').forEach(anchor => {
            anchor.addEventListener('click', function (e) {
                e.preventDefault();
                document.querySelector(this.getAttribute('href')).scrollIntoView({
                    behavior: 'smooth'
                });
            });
        });

        // Add auto-resizing for chat input
        const chatInput = document.querySelector('.chat-input textarea');
        if (chatInput) {
            chatInput.addEventListener('input', function() {
                this.style.height = 'auto';
                this.style.height = Math.min(this.scrollHeight, 200) + 'px';
            });

            // Add shift+enter for new line
            chatInput.addEventListener('keydown', function(e) {
                if (e.key === 'Enter' && !e.shiftKey) {
                    e.preventDefault();
                    const sendBtn = document.querySelector('.send-button');
                    if (sendBtn) sendBtn.click();
                }
            });
        }

        // Add hover effects to all interactive elements
        document.querySelectorAll('.gradio-button, .gradio-dropdown, .gradio-slider').forEach(element => {
            element.addEventListener('mouseenter', function() {
                this.style.transform = 'translateY(-2px)';
                this.style.transition = 'all 0.3s ease';
            });
            element.addEventListener('mouseleave', function() {
                this.style.transform = 'translateY(0)';
            });
        });

        // Add card hover effects
        document.querySelectorAll('.gradio-box, .input-group, .output-group').forEach(card => {
            card.addEventListener('mouseenter', function() {
                this.style.boxShadow = '0 8px 24px rgba(102, 126, 234, 0.3)';
                this.style.transform = 'translateY(-4px)';
            });
            card.addEventListener('mouseleave', function() {
                this.style.boxShadow = '';
                this.style.transform = 'translateY(0)';
            });
        });

        // Add loading animations
        window.addEventListener('load', function() {
            document.body.style.opacity = '0';
            setTimeout(() => {
                document.body.style.transition = 'opacity 0.5s ease-in-out';
                document.body.style.opacity = '1';
            }, 100);
        });

        // Add visual feedback for button clicks
        document.querySelectorAll('.gradio-button').forEach(button => {
            button.addEventListener('click', function() {
                this.style.transform = 'scale(0.95)';
                setTimeout(() => {
                    this.style.transform = '';
                }, 100);
            });
        });

        // Add copy functionality with visual feedback
        document.querySelectorAll('[id*=\"copy\"], [id*=\"Copy\"]').forEach(button => {
            button.addEventListener('click', function() {
                const originalText = this.textContent;
                this.textContent = 'โ Copied!';
                this.style.background = 'linear-gradient(135deg, #4ade80, #22c55e)';
                this.style.color = 'white';
                setTimeout(() => {
                    this.textContent = originalText;
                    this.style.background = '';
                    this.style.color = '';
                }, 2000);
            });
        });

        // Add keyboard shortcuts
        document.addEventListener('keydown', function(e) {
            // Ctrl/Cmd + K to focus chat input
            if ((e.ctrlKey || e.metaKey) && e.key === 'k') {
                e.preventDefault();
                const chatInput = document.querySelector('.chat-input textarea');
                if (chatInput) {
                    chatInput.focus();
                    chatInput.scrollIntoView({ behavior: 'smooth', block: 'center' });
                }
            }
            // Ctrl/Cmd + / to show shortcuts help
            if ((e.ctrlKey || e.metaKey) && e.key === '/') {
                e.preventDefault();
                alert('Keyboard Shortcuts:\\nโข Ctrl+K: Focus chat input\\nโข Escape: Clear chat\\nโข Shift+Enter: New line in chat');
            }
            // Escape to clear chat
            if (e.key === 'Escape') {
                const clearButton = document.querySelector('[id*=\"clear\"], [id*=\"Clear\"]');
                if (clearButton) clearButton.click();
            }
        });

        // Add focus effects to inputs
        document.querySelectorAll('input, textarea').forEach(input => {
            input.addEventListener('focus', function() {
                this.style.boxShadow = '0 0 0 3px rgba(102, 126, 234, 0.3)';
            });
            input.addEventListener('blur', function() {
                this.style.boxShadow = '';
            });
        });

        console.log('โจ AI Assistant UI Enhanced with Interactive Features');
    }
    """
)
+
+
+
+
# Launch the app
if __name__ == "__main__":
    # Bind to localhost only; set share=True for a public Gradio link.
    app.launch(
        server_name="127.0.0.1",
        server_port=7867,  # fixed port so local bookmarks stay stable
        share=False,
        show_error=True,  # surface handler exceptions in the UI
        inbrowser=True  # open the default browser on start
    )