Update app.py
app.py CHANGED
@@ -6,8 +6,11 @@ import modelscope_studio.components.base as ms
 import modelscope_studio.components.pro as pro
 from openai import OpenAI
 from config import API_KEY, MODEL, SYSTEM_PROMPT, ENDPOINT, EXAMPLES, DEFAULT_LOCALE, DEFAULT_THEME
+from huggingface_hub import InferenceClient
 
-
+# Initialize both clients
+hf_client = InferenceClient(model=MODEL, token=API_KEY)
+openai_client = OpenAI(api_key=API_KEY, base_url=ENDPOINT + MODEL)
 
 react_imports = {
     "lucide-react": "https://esm.sh/lucide-react@0.525.0",
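The hunk above replaces one deleted client line (its content did not survive the page extraction) with two clients created at import time. As a sanity check outside the app, a minimal sketch of the same setup, assuming config.py exports the API_KEY, MODEL and ENDPOINT used in the diff, could look like:

    from huggingface_hub import InferenceClient
    from openai import OpenAI

    from config import API_KEY, MODEL, ENDPOINT

    # Same two clients as the commit initializes: a native HF client and an
    # OpenAI-compatible client pointed at ENDPOINT + MODEL.
    hf_client = InferenceClient(model=MODEL, token=API_KEY)
    openai_client = OpenAI(api_key=API_KEY, base_url=ENDPOINT + MODEL)

    # One non-streaming request through each client; the prompt is illustrative.
    messages = [{"role": "user", "content": "Reply with the word: ok"}]
    print(hf_client.chat_completion(messages=messages, max_tokens=8).choices[0].message.content)
    print(openai_client.chat.completions.create(model=MODEL, messages=messages).choices[0].message.content)

Whether base_url=ENDPOINT + MODEL forms a valid OpenAI-compatible URL depends on how config.py defines ENDPOINT; that detail is not visible in this diff.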
@@ -66,25 +69,85 @@ class GradioEvents:
             # 'content': system_prompt_input_value
         }] + state_value["history"]
 
-        messages.append({'role':
-
-
-
-
-
-
-
-
-
+        messages.append({'role': 'user', 'content': input_value})
+
+        try:
+            # Try using HuggingFace InferenceClient first
+            formatted_messages = [{"role": msg["role"], "content": msg["content"]} for msg in messages]
+            response = ""
+            for chunk in hf_client.chat_completion(
+                    messages=formatted_messages,
+                    stream=True,
+                    max_tokens=4000,
+                    temperature=0.7
+            ):
+                if chunk.choices[0].delta.content:
+                    response += chunk.choices[0].delta.content
+                    yield {
+                        output: gr.update(value=response),
+                        output_loading: gr.update(spinning=False),
+                    }
+
+            # Process the final response
+            state_value["history"] = messages + [{
+                'role': 'assistant',
+                'content': response
+            }]
+            generated_files = get_generated_files(response)
+            react_code = generated_files.get(
+                "index.tsx") or generated_files.get("index.jsx")
+            html_code = generated_files.get("index.html")
+
+            yield {
+                output:
+                gr.update(value=response),
+                download_content:
+                gr.update(value=react_code or html_code),
+                state_tab:
+                gr.update(active_key="render"),
+                output_loading:
+                gr.update(spinning=False),
+                sandbox:
+                gr.update(
+                    template="react" if react_code else "html",
+                    imports=react_imports if react_code else {},
+                    value={
+                        "./index.tsx": """import Demo from './demo.tsx'
+import "@tailwindcss/browser"
+
+export default Demo
+""",
+                        "./demo.tsx": react_code
+                    } if react_code else {"./index.html": html_code}),
+                state:
+                gr.update(value=state_value)
+            }
+        except Exception as e:
+            # Fallback to OpenAI client if HuggingFace client fails
+            try:
+                generator = openai_client.chat.completions.create(model=MODEL,
+                                                                  messages=messages,
+                                                                  stream=True)
+                response = ""
+                for chunk in generator:
+                    content = chunk.choices[0].delta.content
+                    if content:
+                        response += content
+                        yield {
+                            output: gr.update(value=response),
+                            output_loading: gr.update(spinning=False),
+                        }
+
+                # Process the final response
                 state_value["history"] = messages + [{
-                    'role':
+                    'role': 'assistant',
                     'content': response
                 }]
                 generated_files = get_generated_files(response)
                 react_code = generated_files.get(
                     "index.tsx") or generated_files.get("index.jsx")
                 html_code = generated_files.get("index.html")
-
+
                 yield {
                     output:
                     gr.update(value=response),
@@ -104,17 +167,18 @@ import "@tailwindcss/browser"
 
 export default Demo
 """,
-
-
+                            "./demo.tsx": react_code
+                        } if react_code else {"./index.html": html_code}),
                     state:
                     gr.update(value=state_value)
                 }
-
-
-
+        except Exception as e2:
+            # If both methods fail, show an error message
+            error_message = f"Error generating code: {str(e2)}"
             yield {
-                output: gr.update(value=
+                output: gr.update(value=error_message),
                 output_loading: gr.update(spinning=False),
+                state_tab: gr.update(active_key="render"),
             }
 
     @staticmethod
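Stripped of the Gradio component updates, the change in the two hunks above is a two-stage fallback around one streaming loop. A condensed sketch of that control flow, with a hypothetical emit callback standing in for the yield {...} dictionaries, reads:

    def stream_with_fallback(messages, emit):
        # Stage 1: stream from the HF InferenceClient.
        try:
            response = ""
            for chunk in hf_client.chat_completion(messages=messages, stream=True,
                                                   max_tokens=4000, temperature=0.7):
                if chunk.choices[0].delta.content:
                    response += chunk.choices[0].delta.content
                    emit(response)  # partial update, like yield {output: ...}
            return response
        except Exception:
            pass
        # Stage 2: stream from the OpenAI-compatible endpoint. In the commit a
        # second try/except wraps this stage and turns a failure into an error
        # message for the user; here the exception simply propagates.
        response = ""
        for chunk in openai_client.chat.completions.create(model=MODEL,
                                                           messages=messages,
                                                           stream=True):
            content = chunk.choices[0].delta.content
            if content:
                response += content
                emit(response)
        return response

One property worth noting: if the first stream fails midway, partial output has already been yielded to the UI, and the fallback restarts response from an empty string, so the user briefly sees the partial text replaced. The commit's version behaves the same way.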
@@ -215,7 +279,7 @@ with gr.Blocks(css=css) as demo:
                 height=200,
                 preview=False)
             antd.Typography.Title(
-                "Qwen3-Coder-WebDev",
+                "GLM-4.5-Coder",  # Changed from "Qwen3-Coder-WebDev" to "GLM-4.5-Coder"
                 level=1,
                 elem_style=dict(fontSize=24))
             # Input
@@ -455,4 +519,4 @@ with gr.Blocks(css=css) as demo:
 
 if __name__ == "__main__":
     demo.queue(default_concurrency_limit=100,
-               max_size=100).launch(ssr_mode=False, max_threads=100)
+               max_size=100).launch(ssr_mode=False, max_threads=100)
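The - and + lines of this last hunk read identically, so it appears to be a whitespace-only change. For context: default_concurrency_limit=100 lets up to 100 generation events run concurrently, max_size=100 caps how many requests may wait in the queue, and max_threads=100 bounds the worker threads Gradio may spawn.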