# Hugging Face Spaces page header (scrape residue; Space status: Paused)
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
def load_models(model_name="microsoft/DialoGPT-medium"):
    """Load a conversational causal LM and its matching tokenizer.

    Args:
        model_name: Hugging Face model id. Defaults to DialoGPT-medium,
            preserving the original hard-coded behavior while allowing
            callers to swap in another conversational checkpoint.

    Returns:
        (model, tokenizer): the loaded model and tokenizer pair.
    """
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer
def chat_with_model(user_input, model, tokenizer, chat_history):
    """Generate one chat turn with a DialoGPT-style causal LM.

    Args:
        user_input: The user's new message (plain text).
        model: A causal LM exposing ``generate``.
        tokenizer: Matching tokenizer (``encode``/``decode``/``eos_token``).
        chat_history: Token-id tensor of the conversation so far, or None
            on the first turn.

    Returns:
        (chat_history, bot_output): the updated token-id history tensor and
        the decoded reply text for this turn.
    """
    # Encode the new user turn, terminated by EOS as DialoGPT expects.
    new_user_input_ids = tokenizer.encode(
        user_input + tokenizer.eos_token, return_tensors="pt"
    )
    # Prepend the running history; first turn has none.
    if chat_history is None:
        bot_input_ids = new_user_input_ids
    else:
        bot_input_ids = torch.cat([chat_history, new_user_input_ids], dim=-1)
    # Explicit attention mask: pad_token_id is aliased to EOS below, so
    # without a mask the model cannot tell padding from real EOS tokens
    # (transformers warns about this and generation can be unreliable).
    attention_mask = torch.ones_like(bot_input_ids)
    chat_history = model.generate(
        bot_input_ids,
        attention_mask=attention_mask,
        max_length=1000,  # total budget: prompt + reply tokens
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens (everything after the prompt).
    bot_output = tokenizer.decode(
        chat_history[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True
    )
    return chat_history, bot_output
# Initialize the shared model and tokenizer once at import time so every
# chat request reuses them (loading is slow and memory-heavy).
model, tokenizer = load_models()
def build_gradio_interface():
    """Build and launch the Gradio chat UI.

    Wires a Textbox and Button to ``chat_with_model``. The token-id history
    tensor the model consumes is kept in a per-session ``gr.State`` — it is
    not a UI component, so it cannot live in the Chatbot — while the Chatbot
    component displays (user, bot) message pairs.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# Chagrin AI Chatbot")
        # Visible transcript as (user, bot) message pairs.
        chatbot = gr.Chatbot()
        # Hidden per-session token-id history fed back into generate().
        token_history = gr.State(None)
        # Text input box for the user to type a message.
        user_input = gr.Textbox(label="Type your message", placeholder="Ask something...", interactive=True)
        submit_btn = gr.Button("Send Message")

        def respond(message, pairs, history):
            # model/tokenizer are module-level globals, deliberately NOT
            # passed via inputs=[...]: Gradio only accepts components/State
            # there, and raw Python objects would crash the event wiring.
            history, reply = chat_with_model(message, model, tokenizer, history)
            pairs = (pairs or []) + [(message, reply)]
            # Third output clears the textbox after sending.
            return pairs, history, ""

        submit_btn.click(
            respond,
            inputs=[user_input, chatbot, token_history],
            outputs=[chatbot, token_history, user_input],
        )
        # Pressing Enter in the textbox behaves like clicking the button.
        user_input.submit(
            respond,
            inputs=[user_input, chatbot, token_history],
            outputs=[chatbot, token_history, user_input],
        )
    demo.launch()
# Script entry point: build the UI and start the Gradio server.
if __name__ == "__main__":
    build_gradio_interface()