Fragenfragen committed on
Commit 2dd77fc · verified · 1 Parent(s): eda8ef3

Update app.py

Files changed (1)
  1. app.py +144 -59
app.py CHANGED
@@ -1,64 +1,149 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
  ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
- if __name__ == "__main__":
-     demo.launch()
+ import os, copy
+ os.environ["RWKV_V7_ON"] = '1'
+ os.environ["RWKV_JIT_ON"] = '1'
+ os.environ["RWKV_CUDA_ON"] = '1' # if '1' then use CUDA kernel for seq mode (much faster)
+
+ from rwkv.model import RWKV
+
+ import gc, re
  import gradio as gr
+ import base64
+ from io import BytesIO
+ import torch
+ import torch.nn.functional as F
+ from datetime import datetime
+ from huggingface_hub import hf_hub_download
+ from pynvml import *
+ nvmlInit()
+ gpu_h = nvmlDeviceGetHandleByIndex(0)
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ ctx_limit = 4000
+ gen_limit = 1000
+
+ ########################## text rwkv ################################################################
+ from rwkv.utils import PIPELINE, PIPELINE_ARGS
+
+ title_v6 = "rwkv7-g1-1.5b-20250429-ctx4096"
+ model_path_v6 = hf_hub_download(repo_id="BlinkDL/rwkv7-g1", filename=f"{title_v6}.pth")
+ model_v6 = RWKV(model=model_path_v6.replace('.pth',''), strategy='cuda fp16')
+ pipeline_v6 = PIPELINE(model_v6, "rwkv_vocab_v20230424")
+
+ args = model_v6.args
+
+ penalty_decay = 0.996

+ def generate_prompt(instruction, input=""):
+     instruction = instruction.strip().replace('\r\n','\n').replace('\n\n','\n')
+     input = input.strip().replace('\r\n','\n').replace('\n\n','\n')
+     if input:
+         return f"""Instruction: {instruction}\n\nInput: {input}\n\nResponse:"""
+     else:
+         return f"""User: {instruction}\n\nAssistant:"""

+ def qa_prompt(instruction):
+     instruction = instruction.strip().replace('\r\n','\n')
+     instruction = re.sub(r'\n+', '\n', instruction)
+     return f"User: {instruction}\n\nAssistant:"

+ def evaluate(
+     ctx,
+     token_count=200,
+     temperature=1.0,
+     top_p=0.7,
+     presencePenalty = 0.1,
+     countPenalty = 0.1,
  ):
+     args = PIPELINE_ARGS(temperature = max(0.2, float(temperature)), top_p = float(top_p),
+                          alpha_frequency = countPenalty,
+                          alpha_presence = presencePenalty,
+                          token_ban = [], # ban the generation of some tokens
+                          token_stop = [0]) # stop generation whenever you see any token here
+     ctx = ctx.strip()
+     all_tokens = []
+     out_last = 0
+     out_str = ''
+     occurrence = {}
+     state = None
+     for i in range(int(token_count)):
+
+         input_ids = pipeline_v6.encode(ctx)[-ctx_limit:] if i == 0 else [token]
+         out, state = model_v6.forward(input_ids, state)
+         for n in occurrence:
+             out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
+
+         token = pipeline_v6.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
+         if token in args.token_stop:
+             break
+         all_tokens += [token]
+         for xxx in occurrence:
+             occurrence[xxx] *= penalty_decay # older repetitions matter less
+
+         ttt = pipeline_v6.decode([token])
+         www = 1
+         if ttt in ' \t0123456789':
+             www = 0
+         #elif ttt in '\r\n,.;?!"\':+-*/=#@$%^&_`~|<>\\()[]{},。;“”:?!()【】':
+         #    www = 0.5
+         if token not in occurrence:
+             occurrence[token] = www
+         else:
+             occurrence[token] += www
+
+         tmp = pipeline_v6.decode(all_tokens[out_last:])
+         if '\ufffd' not in tmp: # flush only when the decoded bytes form complete UTF-8 chars
+             out_str += tmp
+             yield out_str.strip()
+             out_last = i + 1
+
+     gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
+     timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+     print(f'{timestamp} - vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
+     del out
+     del state
+     gc.collect()
+     torch.cuda.empty_cache()
+     yield out_str.strip()
+
+ examples = [
+     ["User: simulate SpaceX mars landing using python\n\nAssistant: <think", gen_limit, 1, 0.3, 0.5, 0.5],
+     [generate_prompt("Please give the pros and cons of hodl versus active trading."), gen_limit, 1, 0.3, 0.5, 0.5],
+     ["Assistant: How can we craft an engaging story featuring vampires on Mars? Let's think step by step and provide an expert response:", gen_limit, 1, 0.3, 0.5, 0.5],
+     ["Assistant: How can we persuade Elon Musk to follow you on Twitter? Let's think step by step and provide an expert response:", gen_limit, 1, 0.3, 0.5, 0.5],
+     [generate_prompt("東京で訪れるべき素晴らしい場所とその紹介をいくつか挙げてください。"), gen_limit, 1, 0.3, 0.5, 0.5],
+     [generate_prompt("Write a story using the following information.", "A man named Alex chops a tree down."), gen_limit, 1, 0.3, 0.5, 0.5],
+     ["A few light taps upon the pane made her turn to the window. It had begun to snow again.", gen_limit, 1, 0.3, 0.5, 0.5],
+     ['''Edward: I am Edward Elric from Fullmetal Alchemist.\n\nUser: Hello Edward. What have you been up to recently?\n\nEdward:''', gen_limit, 1, 0.3, 0.5, 0.5],
+     [generate_prompt("Write a simple webpage. When a user clicks the button, it shows a random joke from a list of 4 jokes."), gen_limit, 1, 0.3, 0.5, 0.5],
+     ["En una pequeña aldea escondida entre las montañas de Andalucía, donde las calles aún conservaban el eco de antiguas leyendas, vivía un joven llamado Alejandro.", gen_limit, 1, 0.3, 0.5, 0.5],
+     ["Dans le cœur battant de Paris, sous le ciel teinté d'un crépuscule d'or et de pourpre, se tenait une petite librairie oubliée par le temps.", gen_limit, 1, 0.3, 0.5, 0.5],
+     ["في تطور مذهل وغير مسبوق، أعلنت السلطات المحلية في العاصمة عن اكتشاف أثري قد يغير مجرى التاريخ كما نعرفه.", gen_limit, 1, 0.3, 0.5, 0.5],
+     ['''“当然可以,大宇宙不会因为这五公斤就不坍缩了。”关一帆说,他还有一个没说出来的想法:也许大宇宙真的会因为相差一个原子的质量而由封闭转为开放。大自然的精巧有时超出想象,比如生命的诞生,就需要各项宇宙参数在几亿亿分之一精度上的精确配合。但程心仍然可以留下她的生态球,因为在那无数文明创造的无数小宇宙中,肯定有相当一部分不响应回归运动的号召,所以,大宇宙最终被夺走的质量至少有几亿吨,甚至可能是几亿亿亿吨。\n但愿大宇宙能够忽略这个误差。\n程心和关一帆进入了飞船,智子最后也进来了。她早就不再穿那身华丽的和服了,她现在身着迷彩服,再次成为一名轻捷精悍的战士,她的身上佩带着许多武器和生存装备,最引人注目的是那把插在背后的武士刀。\n“放心,我在,你们就在!”智子对两位人类朋友说。\n聚变发动机启动了,推进器发出幽幽的蓝光,''', gen_limit, 1, 0.3, 0.5, 0.5],
+ ]
+
+ ##################################################################################################################
+ with gr.Blocks(title=title_v6) as demo:
+     gr.HTML(f"<div style=\"text-align: center;\">\n<h1>{title_v6}</h1>\n</div>")
+
+     with gr.Tab("=== Base Model (Raw Generation) ==="):
+         gr.Markdown(f'This is [RWKV7 G1](https://huggingface.co/BlinkDL/rwkv7-g1) 1.5B L24-D2048, a reasoning base LM - an attention-free pure RNN [RWKV-LM](https://github.com/BlinkDL/RWKV-LM). It supports 100+ world languages and code. Check the [400+ GitHub RWKV projects](https://github.com/search?o=desc&p=1&q=rwkv&s=updated&type=Repositories). *** Try the examples at the bottom of the page (you can edit them) ***. Demo limited to ctxlen {ctx_limit}.')
+         with gr.Row():
+             with gr.Column():
+                 prompt = gr.Textbox(lines=6, label="Prompt", value="User: simulate SpaceX mars landing using python\n\nAssistant: <think")
+                 token_count = gr.Slider(10, gen_limit, label="Max Tokens", step=10, value=gen_limit)
+                 temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
+                 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.3)
+                 presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0.5)
+                 count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0.5)
+             with gr.Column():
+                 with gr.Row():
+                     submit = gr.Button("Submit", variant="primary")
+                     clear = gr.Button("Clear", variant="secondary")
+                 output = gr.Textbox(label="Output", lines=20, max_lines=100)
+         data = gr.Dataset(components=[prompt, token_count, temperature, top_p, presence_penalty, count_penalty], samples=examples, samples_per_page=50, label="Example Instructions", headers=["Prompt", "Max Tokens", "Temperature", "Top P", "Presence Penalty", "Count Penalty"])
+         submit.click(evaluate, [prompt, token_count, temperature, top_p, presence_penalty, count_penalty], [output])
+         clear.click(lambda: None, [], [output])
+         data.click(lambda x: x, [data], [prompt, token_count, temperature, top_p, presence_penalty, count_penalty])
+
+ demo.queue(concurrency_count=1, max_size=10)
+ demo.launch(share=False)
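
For readers skimming the diff, the two prompt builders in the new app.py produce strings like these (a hypothetical interactive session, assuming generate_prompt and qa_prompt are imported from app.py):

print(generate_prompt("Translate to French.", "Good morning."))
# Instruction: Translate to French.
#
# Input: Good morning.
#
# Response:

print(qa_prompt("What is RWKV?"))   # no Input field -> plain chat format
# User: What is RWKV?
#
# Assistant: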
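The core of the new evaluate() loop is a presence/frequency repetition penalty whose weights decay by penalty_decay each step. Below is a minimal standalone sketch of that logic; the helper names apply_penalties and update_occurrence are illustrative, not from the commit:

# Illustrative sketch (not part of the commit) of the penalty scheme in
# evaluate(): every sampled token adds weight to `occurrence`, all weights
# decay by penalty_decay per step, and the current weights are subtracted
# from the logits before the next sample.
penalty_decay = 0.996   # same constant the app uses
alpha_presence = 0.5    # flat penalty for any previously seen token
alpha_frequency = 0.5   # extra penalty per repetition

def apply_penalties(logits, occurrence):
    # Mirrors: out[n] -= args.alpha_presence + occurrence[n] * args.alpha_frequency
    for tok, weight in occurrence.items():
        logits[tok] -= alpha_presence + weight * alpha_frequency
    return logits

def update_occurrence(occurrence, token, token_text):
    # Older tokens fade: decay every stored weight first.
    for tok in occurrence:
        occurrence[tok] *= penalty_decay
    # Whitespace and digits are exempt (weight 0), as in the app's loop.
    weight = 0 if token_text in ' \t0123456789' else 1
    occurrence[token] = occurrence.get(token, 0) + weight

# Toy demonstration with a 4-token vocabulary:
logits = [1.0, 2.0, 3.0, 4.0]
occurrence = {}
update_occurrence(occurrence, 3, 'x')       # pretend token 3 was just sampled
print(apply_penalties(logits, occurrence))  # token 3's logit drops by 1.0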
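The '\ufffd' check in the same loop defers streaming output until the decoded bytes form complete UTF-8 characters: with a byte-level vocabulary, a single sampled token can end partway through a multi-byte character. A pure-stdlib illustration of the idea (this sketch uses bytes.decode directly, not the RWKV pipeline's decoder):

text = '你'.encode('utf-8')        # 3 bytes: b'\xe4\xbd\xa0'
partial = text[:2].decode('utf-8', errors='replace')
print(partial)                     # '�' -> incomplete, not safe to show yet
full = text.decode('utf-8', errors='replace')
print(full)                        # '你' -> complete, flush to the UI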