update
app.py CHANGED
@@ -15,7 +15,7 @@ import gradio as gr
 sys.path.append(os.path.dirname(__file__))

 # Import the main application components
-from demo import create_interface
+from demo import create_interface, initialize_nvidia_client, test_nvidia_api_connection

 # Set up environment variables for Hugging Face Spaces
 def setup_environment():
@@ -25,12 +25,15 @@ def setup_environment():
         print("🚀 Running on Hugging Face Spaces")

         # Check for NVIDIA API key
-
+        api_key = os.getenv("NVIDIA_API_KEY")
+        if not api_key:
             print("⚠️ NVIDIA_API_KEY not set in Space secrets.")
             print("   Please set NVIDIA_API_KEY in the Space Repository Secrets.")
             print("   Some features may be limited without API access.")
         else:
             print("✅ NVIDIA_API_KEY found in Space secrets")
+            print(f"   Key length: {len(api_key)} characters")
+            print(f"   Key preview: {api_key[:10]}...{api_key[-4:] if len(api_key) > 14 else '***'}")

         # Set CUDA device for Spaces (usually limited resources)
         os.environ["CUDA_VISIBLE_DEVICES"] = "0"
@@ -41,15 +44,32 @@
     else:
         print("🏠 Running locally")
         # Check for local .env file
-
+        api_key = os.getenv("NVIDIA_API_KEY")
+        if not api_key:
             print("ℹ️ NVIDIA_API_KEY not found. For local development, create a .env file")
             print("   or set the environment variable manually.")
+        else:
+            print(f"✅ NVIDIA_API_KEY found locally (length: {len(api_key)})")

 def main():
     """Main function to launch the application"""
     # Set up environment
     setup_environment()

+    # Reinitialize NVIDIA client after environment setup
+    print("🔄 Reinitializing NVIDIA API client...")
+    from demo import client
+    if client is None and os.getenv("NVIDIA_API_KEY"):
+        print("🔄 Attempting to reinitialize client with environment variables...")
+        # Reinitialize the client
+        import demo
+        demo.client = initialize_nvidia_client()
+
+    # Test API connection
+    if os.getenv("NVIDIA_API_KEY"):
+        print("🧪 Testing NVIDIA API connection...")
+        test_nvidia_api_connection()
+
     # Create the Gradio interface
     print("🎯 Creating RoutePilot interface...")
     demo = create_interface()
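
One subtlety in the new main(): `from demo import client` copies the current value of demo's module-global, so app.py deliberately assigns `demo.client = initialize_nvidia_client()` rather than rebinding its own imported name. Functions defined in demo.py look up the global `client` at call time, so they see the reassigned value. Below is a self-contained sketch of that behaviour; it uses a hypothetical stand-in module, not the real demo.py.

# Stand-in module (hypothetical) mirroring demo.py's module-global `client`.
import types

demo = types.ModuleType("demo")
demo.client = None                      # like the client created at import time
demo.use_client = lambda: demo.client   # like model_prompting(): reads the global at call time

local_client = demo.client              # like `from demo import client` in app.py
demo.client = "reinitialized"           # like `demo.client = initialize_nvidia_client()`

print(local_client)                     # None -- the imported name keeps the old value
print(demo.use_client())                # "reinitialized" -- demo's functions see the new client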
demo.py CHANGED
@@ -97,17 +97,51 @@ if torch.cuda.is_available():
     print(f"CUDA device name: {torch.cuda.get_device_name(0)}")

 # Initialize OpenAI client for NVIDIA API
-
-
-
-
-
-
-
-
-
-
-
+def initialize_nvidia_client():
+    """Initialize the NVIDIA API client with proper error handling"""
+    api_key = os.getenv("NVIDIA_API_KEY")
+    if api_key is None:
+        print("❌ NVIDIA API key not found. Please create a .env file with your API key")
+        print("   For Hugging Face Spaces: Set NVIDIA_API_KEY in Repository Secrets")
+        return None
+    else:
+        try:
+            client = OpenAI(
+                base_url=NVIDIA_BASE_URL,
+                api_key=api_key,
+                timeout=60,
+                max_retries=2
+            )
+            print("✅ NVIDIA API client initialized successfully")
+            return client
+        except Exception as e:
+            print(f"❌ Failed to initialize NVIDIA API client: {e}")
+            return None
+
+# Initialize the client
+client = initialize_nvidia_client()
+
+def test_nvidia_api_connection():
+    """Test the NVIDIA API connection to verify authentication"""
+    if client is None:
+        print("❌ Cannot test API connection - client not initialized")
+        return False
+
+    try:
+        print("🧪 Testing NVIDIA API connection...")
+        # Make a simple test call
+        test_response = client.chat.completions.create(
+            model="meta/llama-3.1-8b-instruct",
+            messages=[{"role": "user", "content": "Hello"}],
+            max_tokens=10,
+            temperature=0.0,
+            stream=False
+        )
+        print("✅ NVIDIA API connection test successful")
+        return True
+    except Exception as e:
+        print(f"❌ NVIDIA API connection test failed: {e}")
+        return False

 def model_prompting(
     llm_model: str,
@@ -134,7 +168,15 @@ def model_prompting(
     if client is None:
         raise Exception("NVIDIA API client not initialized. Please check your .env file contains NVIDIA_API_KEY")

+    # Debug information
+    api_key = os.getenv("NVIDIA_API_KEY")
+    if api_key:
+        print(f"🔑 API Key available: {api_key[:10]}...{api_key[-4:] if len(api_key) > 14 else '***'}")
+    else:
+        print("❌ No API key found in environment")
+
     try:
+        print(f"🚀 Making API call to model: {llm_model}")
         completion = client.chat.completions.create(
             model=llm_model,
             messages=[{"role": "user", "content": prompt}],
@@ -151,7 +193,19 @@

         return response_text
     except Exception as e:
-
+        error_msg = str(e)
+        print(f"❌ API call failed: {error_msg}")
+
+        # Provide more specific error information
+        if "401" in error_msg or "Unauthorized" in error_msg:
+            print("🔍 Authentication Error Details:")
+            print(f"   - API Key present: {'Yes' if api_key else 'No'}")
+            print(f"   - API Key length: {len(api_key) if api_key else 0}")
+            print(f"   - Base URL: {NVIDIA_BASE_URL}")
+            print("   - For Hugging Face Spaces: Check if NVIDIA_API_KEY is set in Repository Secrets")
+            print("   - For local development: Check if .env file contains NVIDIA_API_KEY")
+
+        raise Exception(f"API call failed: {error_msg}")

 # Initialize the Longformer model for embeddings (same as enhance_query_with_templates.py)
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")