import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
from gtts import gTTS
import tempfile
import torch

# ======= Sentiment Analysis =======
sentiment_model = pipeline("sentiment-analysis")


def analyze_sentiment(text):
    # Run the pipeline once and reuse the result (avoids two inference passes per click)
    result = sentiment_model(text)[0]
    return f"{result['label']}, Confidence: {round(result['score'], 2)}"


# ======= Summarization with tokenizer/model =======
sum_model_name = "sshleifer/distilbart-xsum-12-6"
sum_tokenizer = AutoTokenizer.from_pretrained(sum_model_name)
sum_model = AutoModelForSeq2SeqLM.from_pretrained(sum_model_name)


def summarize_text(text):
    inputs = sum_tokenizer(text, return_tensors="pt", max_length=1024, truncation=True)
    summary_ids = sum_model.generate(
        inputs["input_ids"], max_length=60, min_length=15, do_sample=False
    )
    return sum_tokenizer.decode(summary_ids[0], skip_special_tokens=True)


# ======= Text to Speech =======
def text_to_speech(text):
    tts = gTTS(text)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as fp:
        tts.save(fp.name)
    return fp.name


# ======= Chatbot Tab =======
chat_model_name = "microsoft/DialoGPT-medium"
chat_tokenizer = AutoTokenizer.from_pretrained(chat_model_name)
chat_model = AutoModelForCausalLM.from_pretrained(chat_model_name)

chat_history = []  # token-id tensors, one entry per turn (user and bot)


def chat_with_bot(user_input):
    global chat_history
    new_input_ids = chat_tokenizer.encode(
        user_input + chat_tokenizer.eos_token, return_tensors="pt"
    )
    # Concatenate all previous turns with the new user message
    bot_input_ids = (
        torch.cat(chat_history + [new_input_ids], dim=-1) if chat_history else new_input_ids
    )
    response_ids = chat_model.generate(
        bot_input_ids, max_length=1000, pad_token_id=chat_tokenizer.eos_token_id
    )
    # Keep only the newly generated tokens (the bot's reply)
    response_tokens = response_ids[:, bot_input_ids.shape[-1]:]
    # Store both the user turn and the bot turn so the model keeps the full context
    chat_history.append(new_input_ids)
    chat_history.append(response_tokens)
    return chat_tokenizer.decode(response_tokens[0], skip_special_tokens=True)


# ======= Gradio UI =======
with gr.Blocks() as demo:
    gr.Markdown("## 📘 Homework - Tuwaiq Academy")

    with gr.Tab("🔍 Sentiment Analysis"):
        gr.Markdown("### Analyze the sentiment of your text")
        input_sent = gr.Textbox(label="Enter your text", lines=6, placeholder="Type something...")
        output_sent = gr.Textbox(label="Sentiment Result")
        btn_sent = gr.Button("Analyze")
        btn_sent.click(analyze_sentiment, inputs=input_sent, outputs=output_sent)

    with gr.Tab("📝 Summarization"):
        gr.Markdown("### Summarize your text")
        input_sum = gr.Textbox(label="Enter your text", lines=6, placeholder="Paste a paragraph...")
        output_sum = gr.Textbox(label="Summary")
        btn_sum = gr.Button("Summarize")
        btn_sum.click(summarize_text, inputs=input_sum, outputs=output_sum)

    with gr.Tab("🔊 Text to Speech"):
        gr.Markdown("### Convert text to speech")
        input_tts = gr.Textbox(label="Enter your text", lines=6, placeholder="Text for audio...")
        output_audio = gr.Audio(label="Speech Output", type="filepath")
        btn_tts = gr.Button("Convert")
        btn_tts.click(text_to_speech, inputs=input_tts, outputs=output_audio)

    with gr.Tab("🤖 Chatbot"):
        gr.Markdown("### Chat with an AI Bot")
        chat_input = gr.Textbox(label="You:", placeholder="Ask me anything...", lines=2)
        chat_output = gr.Textbox(label="Bot:", lines=4)
        btn_chat = gr.Button("Send")
        btn_chat.click(chat_with_bot, inputs=chat_input, outputs=chat_output)

demo.launch()