Update app.py
app.py CHANGED
@@ -8,10 +8,6 @@ from openai import OpenAI
 from config import API_KEY, MODEL, SYSTEM_PROMPT, ENDPOINT, EXAMPLES, DEFAULT_LOCALE, DEFAULT_THEME
 from huggingface_hub import InferenceClient
 
-# Initialize both clients
-hf_client = InferenceClient(model=MODEL, token=API_KEY)
-openai_client = OpenAI(api_key=API_KEY, base_url=ENDPOINT + MODEL)
-
 react_imports = {
     "lucide-react": "https://esm.sh/lucide-react@0.525.0",
     "recharts": "https://esm.sh/recharts@3.1.0",
@@ -72,6 +68,9 @@ class GradioEvents:
         messages.append({'role': 'user', 'content': input_value})
 
         try:
+            # Initialize HuggingFace InferenceClient
+            hf_client = InferenceClient(model=MODEL, token=API_KEY)
+
             # Try using HuggingFace InferenceClient first
             formatted_messages = [{"role": msg["role"], "content": msg["content"]} for msg in messages]
             response = ""
@@ -125,6 +124,9 @@ export default Demo
         except Exception as e:
             # Fallback to OpenAI client if HuggingFace client fails
             try:
+                # Initialize OpenAI client
+                openai_client = OpenAI(api_key=API_KEY, base_url=ENDPOINT + MODEL)
+
                 generator = openai_client.chat.completions.create(model=MODEL,
                                                                   messages=messages,
                                                                   stream=True)
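The commit moves client construction from import time into the request handler: the HuggingFace InferenceClient is created and tried first, and the OpenAI-compatible client is only built if that path fails. Below is a minimal sketch of the resulting fallback flow, assuming huggingface_hub's InferenceClient.chat_completion and the OpenAI SDK's streaming chat.completions.create; the stream_reply helper and the chunk handling are illustrative, not code taken from the Space.

# Minimal sketch of the per-request fallback flow implied by this commit.
# Config names (API_KEY, MODEL, ENDPOINT) mirror app.py; stream_reply is hypothetical.
from openai import OpenAI
from huggingface_hub import InferenceClient

from config import API_KEY, MODEL, ENDPOINT


def stream_reply(messages):
    """Yield the assistant reply incrementally, preferring the HF client."""
    try:
        # Clients are now built per request instead of at import time.
        hf_client = InferenceClient(model=MODEL, token=API_KEY)
        for chunk in hf_client.chat_completion(messages=messages, stream=True):
            delta = chunk.choices[0].delta.content
            if delta:
                yield delta
    except Exception:
        # Fallback: OpenAI-compatible endpoint, also streamed.
        openai_client = OpenAI(api_key=API_KEY, base_url=ENDPOINT + MODEL)
        for chunk in openai_client.chat.completions.create(
            model=MODEL, messages=messages, stream=True
        ):
            delta = chunk.choices[0].delta.content
            if delta:
                yield delta

Constructing the clients inside the try/except keeps importing app.py from failing when API_KEY or ENDPOINT is misconfigured, and the OpenAI client is only instantiated when the fallback path actually runs.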