Update app.py
app.py CHANGED
@@ -7,6 +7,7 @@ import modelscope_studio.components.pro as pro
 from openai import OpenAI
 from config import API_KEY, MODEL, SYSTEM_PROMPT, ENDPOINT, EXAMPLES, DEFAULT_LOCALE, DEFAULT_THEME
 from huggingface_hub import InferenceClient
+import os
 
 react_imports = {
     "lucide-react": "https://esm.sh/lucide-react@0.525.0",
@@ -68,8 +69,14 @@ class GradioEvents:
         messages.append({'role': 'user', 'content': input_value})
 
         try:
+            # Get API key from environment variable if not already set
+            api_key = API_KEY or os.getenv("HF_TOKEN") or os.getenv("OPENAI_API_KEY")
+
+            if not api_key:
+                raise ValueError("No API key found. Please set HF_TOKEN or OPENAI_API_KEY environment variable.")
+
             # Initialize HuggingFace InferenceClient
-            hf_client = InferenceClient(model=MODEL, token=
+            hf_client = InferenceClient(model=MODEL, token=api_key)
 
             # Try using HuggingFace InferenceClient first
             formatted_messages = [{"role": msg["role"], "content": msg["content"]} for msg in messages]
@@ -124,8 +131,14 @@ export default Demo
         except Exception as e:
             # Fallback to OpenAI client if HuggingFace client fails
             try:
+                # Get API key from environment variable if not already set
+                api_key = API_KEY or os.getenv("HF_TOKEN") or os.getenv("OPENAI_API_KEY")
+
+                if not api_key:
+                    raise ValueError("No API key found. Please set HF_TOKEN or OPENAI_API_KEY environment variable.")
+
                 # Initialize OpenAI client
-                openai_client = OpenAI(api_key=
+                openai_client = OpenAI(api_key=api_key, base_url=ENDPOINT + MODEL)
 
                 generator = openai_client.chat.completions.create(model=MODEL,
                                                                   messages=messages,
@@ -446,7 +459,7 @@ with gr.Blocks(css=css) as demo:
             antd.Tour.Step(
                 title="Step 3",
                 description="Wait for the result.",
-
+                get_target=
                 "() => document.querySelector('#output-container')"
             )
             antd.Tour.Step(
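The core of the commit is the API-key lookup chain added in both the primary and the fallback paths. A minimal standalone sketch of the same pattern follows; the API_KEY constant stands in for the value app.py imports from config.py, and the function name resolve_api_key is illustrative, not part of app.py.

import os

# Stand-in for the API_KEY value imported from config.py (assumed to be empty,
# which is what triggers the environment-variable fallback).
API_KEY = ""


def resolve_api_key() -> str:
    # Same lookup order as the diff: explicit config value first,
    # then HF_TOKEN, then OPENAI_API_KEY from the environment.
    api_key = API_KEY or os.getenv("HF_TOKEN") or os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise ValueError(
            "No API key found. Please set HF_TOKEN or OPENAI_API_KEY environment variable."
        )
    return api_key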
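The first hunk inside GradioEvents only shows the client construction and the message formatting; the streaming call itself falls outside the diff context. The sketch below shows one way the primary Hugging Face path can be exercised; the model id and generation parameters are placeholders, and the use of InferenceClient.chat_completion is an assumption about how app.py consumes the client, not something visible in the diff.

from huggingface_hub import InferenceClient

# Placeholder for the MODEL value app.py imports from config.py.
MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"


def stream_reply(api_key: str, messages: list[dict]) -> str:
    # Primary path: route the chat through the Hugging Face Inference API.
    client = InferenceClient(model=MODEL, token=api_key)
    reply = ""
    for chunk in client.chat_completion(messages=messages, stream=True, max_tokens=1024):
        # Each streamed chunk carries an incremental delta; None deltas are skipped.
        reply += chunk.choices[0].delta.content or ""
    return reply

A call such as stream_reply(resolve_api_key(), [{"role": "user", "content": "Hello"}]) exercises the same message format the diff builds in formatted_messages.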
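The second hunk mirrors the same key lookup in the fallback branch and completes the previously truncated constructor with base_url=ENDPOINT + MODEL. The sketch below shows the generic fallback pattern against an OpenAI-compatible endpoint; BASE_URL and MODEL are placeholders, and the sketch takes a ready-made base URL rather than concatenating ENDPOINT + MODEL as app.py does. In app.py this path only runs when the InferenceClient call raises.

from openai import OpenAI

# Placeholders; app.py builds its base URL as ENDPOINT + MODEL from config.py.
BASE_URL = "https://example-inference-provider.test/v1"
MODEL = "Qwen/Qwen2.5-Coder-32B-Instruct"


def stream_reply_fallback(api_key: str, messages: list[dict]) -> str:
    # Fallback path: the same chat request against an OpenAI-compatible endpoint.
    client = OpenAI(api_key=api_key, base_url=BASE_URL)
    reply = ""
    stream = client.chat.completions.create(model=MODEL, messages=messages, stream=True)
    for chunk in stream:
        if chunk.choices:
            reply += chunk.choices[0].delta.content or ""
    return reply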
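The last hunk passes the tour target through the get_target keyword, whose value is a JavaScript getter string evaluated in the browser rather than a Python object. A minimal sketch of such a step follows; only the Tour.Step call is taken from the diff, while the ms.Application and antd.ConfigProvider wrappers, the Tour open flag, and the imports are assumptions based on common modelscope_studio layouts.

import gradio as gr
import modelscope_studio.components.antd as antd
import modelscope_studio.components.base as ms

with gr.Blocks() as demo:
    # Assumed wrapper components; app.py's actual layout is not shown in this diff.
    with ms.Application():
        with antd.ConfigProvider():
            with antd.Tour(open=True):
                antd.Tour.Step(
                    title="Step 3",
                    description="Wait for the result.",
                    # The target element is resolved client-side by this JavaScript getter.
                    get_target="() => document.querySelector('#output-container')",
                )

if __name__ == "__main__":
    demo.launch()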