Prashant26am committed on
Commit
2e2a7bf
·
1 Parent(s): 5b58ac7

fix: Improve error handling and logging for better debugging

Browse files
Files changed (1) hide show
  1. src/api/app.py +82 -29
src/api/app.py CHANGED
@@ -7,6 +7,10 @@ from PIL import Image
7
  import os
8
  import tempfile
9
  import torch
 
 
 
 
10
 
11
  from ..configs.settings import (
12
  GRADIO_THEME,
@@ -27,6 +31,18 @@ from ..utils.logging import setup_logging, get_logger
27
  setup_logging()
28
  logger = get_logger(__name__)
29
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  # Initialize model
31
  model = None
32
 
@@ -50,7 +66,9 @@ def initialize_model():
50
  logger.info(f"Model initialized on {model.device}")
51
  return True
52
  except Exception as e:
53
- logger.error(f"Error initializing model: {e}")
 
 
54
  return False
55
 
56
  def process_image(
@@ -74,8 +92,21 @@ def process_image(
74
  str: Model response
75
  """
76
  if not model:
77
- return "Error: Model not initialized"
 
 
78
 
 
 
 
 
 
 
 
 
 
 
 
79
  try:
80
  logger.info(f"Processing image with prompt: {prompt[:100]}...")
81
 
@@ -97,19 +128,30 @@ def process_image(
97
  top_p=top_p
98
  )
99
 
100
- # Clean up temporary file
101
- os.unlink(temp_path)
102
-
103
- # Clear memory after processing
104
- torch.cuda.empty_cache()
105
-
106
  logger.info("Successfully generated response")
107
  return response
 
108
  except Exception as e:
109
- logger.error(f"Error processing image: {str(e)}")
 
 
110
  return f"Error: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
111
 
112
- def create_interface() -> gr.Interface:
113
  """Create and return the Gradio interface."""
114
  with gr.Blocks(theme=GRADIO_THEME) as interface:
115
  gr.Markdown(f"""# {GRADIO_TITLE}
@@ -180,31 +222,42 @@ Try these prompts to get started:
180
  generate_btn.click(
181
  fn=process_image,
182
  inputs=[
183
- gr.Image(type="pil"),
184
- gr.Textbox(),
185
- gr.Slider(),
186
- gr.Slider(),
187
- gr.Slider()
188
  ],
189
- outputs=gr.Textbox()
 
190
  )
191
 
192
  return interface
193
 
 
 
 
 
 
 
194
  def main():
195
- """Run the Gradio interface."""
196
- interface = create_interface()
197
- interface.launch(
198
- server_name=API_HOST,
199
- server_port=API_PORT,
200
- share=True,
201
- show_error=True,
202
- show_api=False
 
 
 
 
 
 
 
 
203
  )
204
 
205
  if __name__ == "__main__":
206
- # Initialize model
207
- if initialize_model():
208
- main()
209
- else:
210
- print("Failed to initialize model. Exiting...")
 
7
  import os
8
  import tempfile
9
  import torch
10
+ from fastapi import FastAPI, HTTPException
11
+ from fastapi.middleware.cors import CORSMiddleware
12
+ import traceback
13
+ import sys
14
 
15
  from ..configs.settings import (
16
  GRADIO_THEME,
 
31
  setup_logging()
32
  logger = get_logger(__name__)
33
 
34
+ # Initialize FastAPI app
35
+ app = FastAPI(title="LLaVA Web Interface")
36
+
37
+ # Configure CORS
38
+ app.add_middleware(
39
+ CORSMiddleware,
40
+ allow_origins=["*"],
41
+ allow_credentials=True,
42
+ allow_methods=["*"],
43
+ allow_headers=["*"],
44
+ )
45
+
46
  # Initialize model
47
  model = None
48
 
 
66
  logger.info(f"Model initialized on {model.device}")
67
  return True
68
  except Exception as e:
69
+ error_msg = f"Error initializing model: {str(e)}\n{traceback.format_exc()}"
70
+ logger.error(error_msg)
71
+ print(error_msg, file=sys.stderr)
72
  return False
73
 
74
  def process_image(
 
92
  str: Model response
93
  """
94
  if not model:
95
+ error_msg = "Error: Model not initialized"
96
+ logger.error(error_msg)
97
+ return error_msg
98
 
99
+ if image is None:
100
+ error_msg = "Error: No image provided"
101
+ logger.error(error_msg)
102
+ return error_msg
103
+
104
+ if not prompt or not prompt.strip():
105
+ error_msg = "Error: No prompt provided"
106
+ logger.error(error_msg)
107
+ return error_msg
108
+
109
+ temp_path = None
110
  try:
111
  logger.info(f"Processing image with prompt: {prompt[:100]}...")
112
 
 
128
  top_p=top_p
129
  )
130
 
 
 
 
 
 
 
131
  logger.info("Successfully generated response")
132
  return response
133
+
134
  except Exception as e:
135
+ error_msg = f"Error processing image: {str(e)}\n{traceback.format_exc()}"
136
+ logger.error(error_msg)
137
+ print(error_msg, file=sys.stderr)
138
  return f"Error: {str(e)}"
139
+
140
+ finally:
141
+ # Clean up temporary file
142
+ if temp_path and os.path.exists(temp_path):
143
+ try:
144
+ os.unlink(temp_path)
145
+ except Exception as e:
146
+ logger.warning(f"Error cleaning up temporary file: {str(e)}")
147
+
148
+ # Clear memory after processing
149
+ try:
150
+ torch.cuda.empty_cache()
151
+ except Exception as e:
152
+ logger.warning(f"Error clearing CUDA cache: {str(e)}")
153
 
154
+ def create_interface() -> gr.Blocks:
155
  """Create and return the Gradio interface."""
156
  with gr.Blocks(theme=GRADIO_THEME) as interface:
157
  gr.Markdown(f"""# {GRADIO_TITLE}
 
222
  generate_btn.click(
223
  fn=process_image,
224
  inputs=[
225
+ image_input,
226
+ prompt_input,
227
+ max_tokens,
228
+ temperature,
229
+ top_p
230
  ],
231
+ outputs=output,
232
+ api_name="process_image"
233
  )
234
 
235
  return interface
236
 
237
+ # Create Gradio app
238
+ demo = create_interface()
239
+
240
+ # Mount Gradio app
241
+ app = gr.mount_gradio_app(app, demo, path="/")
242
+
243
  def main():
244
+ """Run the FastAPI application."""
245
+ import uvicorn
246
+
247
+ # Initialize model
248
+ if not initialize_model():
249
+ logger.error("Failed to initialize model. Exiting...")
250
+ sys.exit(1)
251
+
252
+ # Start the server
253
+ uvicorn.run(
254
+ app,
255
+ host=API_HOST,
256
+ port=API_PORT,
257
+ workers=API_WORKERS,
258
+ reload=API_RELOAD,
259
+ log_level="info"
260
  )
261
 
262
  if __name__ == "__main__":
263
+ main()