File size: 1,282 Bytes
494b70f
f1fac87
6c48535
f1fac87
 
 
494b70f
f1fac87
 
 
 
 
 
494b70f
f1fac87
 
 
494b70f
ac26782
f1fac87
494b70f
 
f1fac87
494b70f
f1fac87
 
8d9e775
f1fac87
ac26782
 
 
494b70f
f1fac87
 
 
 
494b70f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load GODEL model and tokenizer.
# NOTE: this runs at import time and downloads the checkpoint from the
# Hugging Face Hub on first run — startup may be slow / require network.
tokenizer = AutoTokenizer.from_pretrained("microsoft/GODEL-v1_1-base-seq2seq")
model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/GODEL-v1_1-base-seq2seq")

# Define the chatbot function
def generate_response(message, history):
    """Generate a GODEL reply for *message* given the conversation *history*.

    Args:
        message: The user's latest message (str).
        history: Prior turns as ``{"role": ..., "content": ...}`` dicts,
            i.e. Gradio's ``type="messages"`` history format.

    Returns:
        A ``{"role": "assistant", "content": ...}`` dict with the model's reply.
    """
    # BUG FIX: the original kept only user turns, discarding every assistant
    # reply. GODEL's [CONTEXT] is the *full* dialog — all turns, in order,
    # joined by " EOS " — so include both roles here.
    dialog = [
        turn["content"]
        for turn in history
        if turn["role"] in ("user", "assistant")
    ]
    dialog.append(message)
    dialog_text = " EOS ".join(dialog)

    # GODEL expects an instruction prefix followed by the dialog context.
    instruction = "Instruction: given a dialog context, respond appropriately."
    query = f"{instruction} [CONTEXT] {dialog_text}"

    # Tokenize and sample a response (top-p sampling keeps replies varied).
    input_ids = tokenizer(query, return_tensors="pt").input_ids
    output_ids = model.generate(
        input_ids,
        max_length=128,
        do_sample=True,
        top_p=0.9,
        temperature=0.7
    )
    response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return {"role": "assistant", "content": response}

# Build the chat UI, then start serving it.
chat_ui = gr.ChatInterface(
    fn=generate_response,
    title="Muhammad’s GODEL Chatbot",
    description="A grounded chatbot powered by Microsoft's GODEL model.",
    type="messages"
)
chat_ui.launch()