4eJIoBek kingabzpro committed on
Commit
71990ef
·
0 Parent(s):

Duplicate from EXFINITE/BlenderBot-UI

Browse files

Co-authored-by: Abid Ali Awan <kingabzpro@users.noreply.huggingface.co>

Files changed (5) hide show
  1. .gitattributes +27 -0
  2. README.md +15 -0
  3. app.py +44 -0
  4. img/cover.png +0 -0
  5. requirements.txt +2 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: BlenderBot New UI
3
+ emoji: 🐱‍👓
4
+ colorFrom: purple
5
+ colorTo: pink
6
+ sdk: gradio
7
+ app_file: app.py
8
+ pinned: false
9
+ license: apache-2.0
10
+ duplicated_from: EXFINITE/BlenderBot-UI
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
14
+
15
+ <a href='https://www.freepik.com/vectors/chat-bot'>Chat bot vector used in app is created by roserodionova - www.freepik.com</a>
app.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Gradio chat UI for facebook/blenderbot-400M-distill.

Loads the tokenizer and model once at import time (Hugging Face Spaces run
this module as the app entry point), and defines the static UI strings.
"""

# All imports grouped at the top of the file (they were previously scattered,
# with torch/transformers imported mid-file). The unused imports from the
# original (os, AutoTokenizer, AutoModelForSeq2SeqLM, BlenderbotForCausalLM)
# are dropped — nothing in this file references them.
import gradio as gr
import torch
from transformers import BlenderbotForConditionalGeneration, BlenderbotTokenizer

# UI copy rendered by gr.Interface below; HTML is passed through verbatim.
title = "Have Fun With ChubbyBot"
description = """
<p>
<center>
The bot is trained on blended_skill_talk dataset using facebook/blenderbot-400M-distill.
<img src="https://huggingface.co/spaces/EXFINITE/BlenderBot-UI/resolve/main/img/cover.png" alt="rick" width="250"/>
</center>
</p>
"""
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1907.06616' target='_blank'>Recipes for building an open-domain chatbot</a></p><p style='text-align: center'><a href='https://parl.ai/projects/recipes/' target='_blank'>Original PARLAI Code</a></p></center></p>"

# Downloaded from the Hub on first run; cached afterwards.
tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
# NOTE(review): add_cross_attention=False is unusual for a seq2seq model —
# presumably inherited from the original Space; confirm it is intentional.
model = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill", add_cross_attention=False)
20
+
21
def predict(input, history=[]):
    """Generate the bot's next reply and the updated conversation state.

    Parameters
    ----------
    input : str
        The new user message.
    history : list
        Token-id history from the previous call ([[ids...]]), or empty/None
        on the first turn. The default is never mutated in place, so the
        shared-mutable-default pitfall does not apply; it is kept for
        signature compatibility with existing callers.

    Returns
    -------
    tuple
        (chat pairs for the gr.Chatbot component, token-id history state).
    """
    # A fresh Gradio "state" may arrive as None; normalize to an empty history.
    history = history or []

    # Tokenize the new user sentence.
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')

    # Append the new user tokens to the chat history. On the first turn the
    # history is empty: torch.cat of an empty 1-D tensor with a 2-D tensor is
    # rejected by modern torch, so skip the concatenation entirely.
    if history:
        bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
    else:
        bot_input_ids = new_user_input_ids

    # Generate a response; keep the full token sequence as the next state.
    history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()

    # Convert tokens back to text, then split on the sentence separator into
    # alternating user/bot segments.
    response = tokenizer.decode(history[0]).replace("<s>", "").split("</s>")

    # Pair segments as (user, bot) tuples; the len(response) - 1 bound avoids
    # the IndexError the original raised when the split produced an odd count
    # (an unpaired trailing segment is dropped).
    response = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]
    return response, history
35
+
36
# Wire the chat function into a Gradio interface: a textbox plus hidden state
# in, a chatbot transcript plus updated state out, then serve it.
chat_ui = gr.Interface(
    fn=predict,
    inputs=["textbox", "state"],
    outputs=["chatbot", "state"],
    theme="seafoam",
    title=title,
    description=description,
    article=article,
)
chat_ui.launch(enable_queue=True)
img/cover.png ADDED
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ transformers
2
+ torch