import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
from gtts import gTTS
import tempfile
import torch
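# Assumed dependencies (e.g. in requirements.txt): gradio, transformers, torch, gTTS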

# ======= Sentiment Analysis =======
sentiment_model = pipeline("sentiment-analysis")  # uses the library's default English sentiment model

def analyze_sentiment(text):
    # Run the pipeline once per click and format the label with its confidence score
    result = sentiment_model(text)[0]
    return f"{result['label']}, Confidence: {round(result['score'], 2)}"

# ======= Summarization with tokenizer/model =======
sum_model_name = "sshleifer/distilbart-xsum-12-6"
sum_tokenizer = AutoTokenizer.from_pretrained(sum_model_name)
sum_model = AutoModelForSeq2SeqLM.from_pretrained(sum_model_name)
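# Note: distilbart-xsum-12-6 is a distilled BART checkpoint fine-tuned on XSum,
# so summaries tend to be short and highly abstractive (often a single sentence).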

def summarize_text(text):
    # Truncate the input to the model's 1024-token limit, then generate a short abstractive summary
    inputs = sum_tokenizer(text, return_tensors="pt", max_length=1024, truncation=True)
    summary_ids = sum_model.generate(inputs["input_ids"], max_length=60, min_length=15, do_sample=False)
    return sum_tokenizer.decode(summary_ids[0], skip_special_tokens=True)

# ======= Text to Speech =======
def text_to_speech(text):
    # gTTS calls Google's TTS service over the network, so this step needs internet access
    tts = gTTS(text)
    # delete=False keeps the temporary .mp3 on disk so Gradio can read it from the returned path
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as fp:
        tts.save(fp.name)
        return fp.name

# ======= Chatbot Tab =======
chat_model_name = "microsoft/DialoGPT-medium"
chat_tokenizer = AutoTokenizer.from_pretrained(chat_model_name)
chat_model = AutoModelForCausalLM.from_pretrained(chat_model_name)
chat_history = []
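# Note: chat_history is module-level state, so every user of the running app shares one
# conversation; a per-session alternative would be to keep the history in gr.State.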

def chat_with_bot(user_input):
    global chat_history
    # Encode the new user turn, terminated by the EOS token DialoGPT uses as a turn separator
    new_input_ids = chat_tokenizer.encode(user_input + chat_tokenizer.eos_token, return_tensors='pt')
    bot_input_ids = torch.cat(chat_history + [new_input_ids], dim=-1) if chat_history else new_input_ids
    response_ids = chat_model.generate(bot_input_ids, max_length=1000, pad_token_id=chat_tokenizer.eos_token_id)
    reply_ids = response_ids[:, bot_input_ids.shape[-1]:]
    # Keep both the user turn and the bot turn so later prompts contain the full conversation
    chat_history.extend([new_input_ids, reply_ids])
    return chat_tokenizer.decode(reply_ids[0], skip_special_tokens=True)

# ======= Gradio UI =======
with gr.Blocks() as demo:
    gr.Markdown("## πŸ“˜ Homework - Tuwaiq Academy")

    with gr.Tab("πŸ” Sentiment Analysis"):
        gr.Markdown("### Analyze the sentiment of your text")
        input_sent = gr.Textbox(label="Enter your text", lines=6, placeholder="Type something...")
        output_sent = gr.Textbox(label="Sentiment Result")
        btn_sent = gr.Button("Analyze")
        btn_sent.click(analyze_sentiment, inputs=input_sent, outputs=output_sent)

    with gr.Tab("πŸ“ Summarization"):
        gr.Markdown("### Summarize your text")
        input_sum = gr.Textbox(label="Enter your text", lines=6, placeholder="Paste a paragraph...")
        output_sum = gr.Textbox(label="Summary")
        btn_sum = gr.Button("Summarize")
        btn_sum.click(summarize_text, inputs=input_sum, outputs=output_sum)

    with gr.Tab("πŸ”Š Text to Speech"):
        gr.Markdown("### Convert text to speech")
        input_tts = gr.Textbox(label="Enter your text", lines=6, placeholder="Text for audio...")
        output_audio = gr.Audio(label="Speech Output", type="filepath")
        btn_tts = gr.Button("Convert")
        btn_tts.click(text_to_speech, inputs=input_tts, outputs=output_audio)

    with gr.Tab("πŸ€– Chatbot"):
        gr.Markdown("### Chat with an AI Bot")
        chat_input = gr.Textbox(label="You:", placeholder="Ask me anything...", lines=2)
        chat_output = gr.Textbox(label="Bot:", lines=4)
        btn_chat = gr.Button("Send")
        btn_chat.click(chat_with_bot, inputs=chat_input, outputs=chat_output)

demo.launch()
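# demo.launch() serves the app on the hosting platform or locally; when running locally,
# passing share=True would additionally create a temporary public Gradio link.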