Vaibhav7625 committed
Commit b5c6814 · 1 Parent(s): 8fc2d5f

Added backend folder

Dockerfile ADDED
@@ -0,0 +1,25 @@
FROM python:3.11-slim

# System dependencies
RUN apt-get update && apt-get install -y \
    gcc g++ cmake git curl ffmpeg libgl1 libglib2.0-0 libsm6 libxext6 libxrender-dev \
    && rm -rf /var/lib/apt/lists/*

# Set working directory
WORKDIR /app

# Copy code
COPY . .

# Install dependencies
RUN pip install --upgrade pip
RUN pip install -r requirements.txt

# Expose the port used by FastAPI
EXPOSE 7860

# Set environment variable for Hugging Face Spaces (key=value form)
ENV PORT=7860

# Run FastAPI using uvicorn
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
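
To sanity-check the built image, a quick probe of the exposed port works; this is a sketch that assumes the container was started locally with port 7860 published (the /health route is defined in app.py further down):

import requests

# Assumes: the image is running locally and port 7860 is published.
resp = requests.get("http://localhost:7860/health", timeout=5)
resp.raise_for_status()
print(resp.json())  # e.g. {'status': 'initializing', 'ai_models_ready': False, ...}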
__pycache__/ai_models.cpython-311.pyc ADDED
Binary file (42.3 kB)

__pycache__/app.cpython-311.pyc ADDED
Binary file (15.7 kB)
ai_models.py ADDED
@@ -0,0 +1,745 @@
import torch
import torch.nn as nn
from transformers import (
    AutoTokenizer, AutoModelForCausalLM,
    BlipProcessor, BlipForConditionalGeneration,
    pipeline, BartTokenizer, BartForConditionalGeneration,
    T5Tokenizer, T5ForConditionalGeneration,
    GPT2LMHeadModel, GPT2Tokenizer,
    AutoModelForSeq2SeqLM
)
from diffusers import StableDiffusionPipeline, DiffusionPipeline, AutoPipelineForText2Image
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from PIL import Image, ImageDraw, ImageFont, ImageEnhance
import io
import base64
import json
import re
import requests
from typing import Dict, List, Optional, Tuple, Any
import warnings
import time
import os
from datetime import datetime
warnings.filterwarnings('ignore')

class AdvancedClassroomAI:
    """
    Advanced AI Assistant for Classrooms using high-quality pre-trained models.
    Optimized for CPU inference with better model choices.
    """

    def __init__(self, device='cpu', save_images=True, display_images=True):
        self.device = device
        self.conversation_history = []
        self.save_images = save_images
        self.display_images = display_images
        self.models_ready = False  # Initialize as False

        # Create directories for saving images
        if self.save_images:
            self.images_dir = "generated_images"
            os.makedirs(self.images_dir, exist_ok=True)
            print(f"📁 Images will be saved to: {self.images_dir}/")

        print(f"🖥 Initializing Advanced Classroom AI on: {self.device.upper()}")
        print("🚀 Loading state-of-the-art models...")

        if self.device == 'cpu':
            torch.set_num_threads(2)
            torch.set_grad_enabled(False)

        # Initialize models with error handling
        try:
            self.setup_advanced_models()
            self.models_ready = True  # Only set to True if setup succeeds
            print("✅ All models initialized successfully!")
        except Exception as e:
            print(f"❌ Failed to initialize models: {e}")
            self.models_ready = False
    def setup_advanced_models(self):
        """Set up high-quality models optimized for CPU, with per-model error handling."""

        # Initialize all model references to None first
        self.text_tokenizer = None
        self.text_model = None
        self.chat_tokenizer = None
        self.chat_model = None
        self.subject_classifier = None
        self.qa_pipeline = None
        self.summarizer = None
        self.image_pipeline = None
        self.image_processor = None
        self.image_caption_model = None

        try:
            print("📝 Loading advanced text generation model...")
            self.text_tokenizer = T5Tokenizer.from_pretrained('google/flan-t5-base')
            self.text_model = T5ForConditionalGeneration.from_pretrained(
                'google/flan-t5-base',
                torch_dtype=torch.float32,
                device_map=None
            )
            self.text_model.to(self.device)
            self.text_model.eval()
            print("✅ Text generation model loaded")

        except Exception as e:
            print(f"⚠️ Text generation model failed: {e}")
            # Continue with other models

        try:
            print("🧠 Loading conversational AI model...")
            self.chat_tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-medium')
            self.chat_model = AutoModelForCausalLM.from_pretrained(
                'microsoft/DialoGPT-medium',
                torch_dtype=torch.float32,
                device_map=None
            )
            self.chat_model.to(self.device)
            self.chat_model.eval()

            if self.chat_tokenizer.pad_token is None:
                self.chat_tokenizer.pad_token = self.chat_tokenizer.eos_token
            print("✅ Conversational AI model loaded")

        except Exception as e:
            print(f"⚠️ Conversational AI model failed: {e}")

        try:
            print("🔍 Loading subject classification model...")
            # Note: zero-shot classification usually expects an NLI fine-tuned
            # checkpoint; a plain base model will still run, just with weaker scores.
            self.subject_classifier = pipeline(
                "zero-shot-classification",
                model="microsoft/deberta-v3-base",
                device=-1,
                torch_dtype=torch.float32
            )
            print("✅ Subject classifier loaded")

        except Exception as e:
            print(f"⚠️ Subject classifier failed: {e}")

        try:
            print("❓ Loading question-answering model...")
            self.qa_pipeline = pipeline(
                "question-answering",
                model="deepset/roberta-base-squad2",
                device=-1,
                torch_dtype=torch.float32
            )
            print("✅ QA pipeline loaded")

        except Exception as e:
            print(f"⚠️ QA pipeline failed: {e}")

        try:
            print("📊 Loading text summarization model...")
            self.summarizer = pipeline(
                "summarization",
                model="facebook/bart-base",
                device=-1,
                torch_dtype=torch.float32
            )
            print("✅ Summarizer loaded")

        except Exception as e:
            print(f"⚠️ Summarizer failed: {e}")

        try:
            print("🎨 Loading image generation model...")
            self.image_pipeline = AutoPipelineForText2Image.from_pretrained(
                "runwayml/stable-diffusion-v1-5",
                torch_dtype=torch.float32,
                use_safetensors=True,
                variant=None
            )
            self.image_pipeline = self.image_pipeline.to(self.device)
            print("✅ Image generation model loaded")

        except Exception as e:
            print(f"⚠️ Image generation model failed: {e}")

        try:
            print("🖼 Loading image captioning model...")
            self.image_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
            self.image_caption_model = BlipForConditionalGeneration.from_pretrained(
                "Salesforce/blip-image-captioning-base",
                torch_dtype=torch.float32
            )
            self.image_caption_model.to(self.device)
            self.image_caption_model.eval()
            print("✅ Image captioning model loaded")

        except Exception as e:
            print(f"⚠️ Image captioning model failed: {e}")

        # Check that at least the core models are available
        core_models_available = (
            self.text_tokenizer is not None and
            self.text_model is not None
        )

        if not core_models_available:
            raise Exception("Critical models failed to load")

        print("✅ Model setup completed!")
    def analyze_educational_query(self, query: str) -> Dict[str, Any]:
        """Advanced query analysis using AI models with fallback"""

        print(f"🔍 Analyzing query: {query}")

        try:
            # Use AI classification if available
            if self.subject_classifier is not None:
                subjects = [
                    'mathematics', 'physics', 'chemistry', 'biology', 'history',
                    'geography', 'literature', 'computer science', 'economics',
                    'psychology', 'philosophy', 'art', 'music', 'environmental science'
                ]

                classification_result = self.subject_classifier(query, subjects)
                subject = classification_result['labels'][0]
                confidence = classification_result['scores'][0]
            else:
                # Fallback to keyword-based classification
                subject, confidence = self._fallback_subject_classification(query)

            # Query type analysis
            query_lower = query.lower()

            if any(word in query_lower for word in ['explain', 'what is', 'define', 'describe', 'tell me about']):
                query_type = 'explanation'
            elif any(word in query_lower for word in ['solve', 'calculate', 'find', 'compute']):
                query_type = 'problem_solving'
            elif any(word in query_lower for word in ['compare', 'difference', 'versus', 'vs', 'contrast']):
                query_type = 'comparison'
            elif any(word in query_lower for word in ['show', 'draw', 'create', 'generate', 'visualize']):
                query_type = 'visualization'
            elif any(word in query_lower for word in ['how to', 'steps', 'procedure', 'process']):
                query_type = 'tutorial'
            else:
                query_type = 'general'

            needs_visual = any(word in query_lower for word in [
                'show', 'draw', 'diagram', 'chart', 'graph', 'visual', 'picture',
                'image', 'illustrate', 'create image', 'generate picture'
            ])

            analysis = {
                'subject': subject,
                'confidence': confidence,
                'query_type': query_type,
                'needs_visual': needs_visual,
                'complexity': self._assess_complexity(query),
                'educational_level': self._determine_educational_level(query)
            }

            print(f"✅ Analysis completed: {analysis}")
            return analysis

        except Exception as e:
            print(f"⚠️ Analysis error: {e}, using fallback analysis")
            return self._fallback_analysis(query)

    def _fallback_subject_classification(self, query: str) -> Tuple[str, float]:
        """Fallback subject classification using keywords"""
        query_lower = query.lower()

        subject_keywords = {
            'mathematics': ['math', 'equation', 'number', 'calculate', 'algebra', 'geometry', 'calculus'],
            'physics': ['force', 'energy', 'motion', 'wave', 'particle', 'gravity', 'physics'],
            'chemistry': ['chemical', 'molecule', 'atom', 'reaction', 'compound', 'element'],
            'biology': ['cell', 'organism', 'dna', 'genetics', 'evolution', 'biology'],
            'history': ['historical', 'past', 'ancient', 'war', 'civilization', 'century'],
            'geography': ['country', 'continent', 'climate', 'map', 'location', 'geography'],
            'literature': ['poem', 'story', 'novel', 'author', 'literature', 'writing'],
            'computer science': ['code', 'program', 'algorithm', 'computer', 'software', 'data']
        }

        scores = {}
        for subject, keywords in subject_keywords.items():
            score = sum(1 for keyword in keywords if keyword in query_lower)
            if score > 0:
                scores[subject] = score / len(keywords)

        if scores:
            best_subject = max(scores, key=scores.get)
            return best_subject, scores[best_subject]
        else:
            return 'general', 0.5

    def _assess_complexity(self, query: str) -> str:
        """Assess query complexity"""
        query_lower = query.lower()

        advanced_terms = ['theorem', 'hypothesis', 'methodology', 'analysis', 'synthesis', 'evaluation']
        intermediate_terms = ['process', 'relationship', 'comparison', 'function', 'structure']
        basic_terms = ['what', 'who', 'when', 'where', 'simple', 'basic']

        if any(term in query_lower for term in advanced_terms):
            return 'advanced'
        elif any(term in query_lower for term in intermediate_terms):
            return 'intermediate'
        else:
            return 'basic'

    def _determine_educational_level(self, query: str) -> str:
        """Determine appropriate educational level"""
        query_lower = query.lower()

        if any(term in query_lower for term in ['university', 'college', 'advanced', 'research']):
            return 'university'
        elif any(term in query_lower for term in ['high school', 'secondary', 'algebra', 'calculus']):
            return 'high_school'
        elif any(term in query_lower for term in ['middle school', 'junior', 'basic']):
            return 'middle_school'
        else:
            return 'general'

    def _fallback_analysis(self, query: str) -> Dict[str, Any]:
        """Fallback analysis when AI models fail"""
        subject, confidence = self._fallback_subject_classification(query)

        return {
            'subject': subject,
            'confidence': confidence,
            'query_type': 'explanation',
            'needs_visual': 'visual' in query.lower() or 'show' in query.lower(),
            'complexity': self._assess_complexity(query),
            'educational_level': self._determine_educational_level(query)
        }
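    # For illustration (hypothetical values): a query such as
    # "Show me how photosynthesis works" would yield an analysis shaped like
    #   {'subject': 'biology', 'confidence': 0.88, 'query_type': 'visualization',
    #    'needs_visual': True, 'complexity': 'basic', 'educational_level': 'general'}
    # The exact subject and confidence depend on the loaded classifier.
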
    def generate_educational_response(self, query: str, analysis: Dict[str, Any]) -> str:
        """Generate educational response with fallback options"""

        try:
            # Try to use AI models if available
            if self.text_tokenizer is not None and self.text_model is not None:
                return self._generate_ai_response(query, analysis)
            else:
                print("⚠️ AI models not available, using fallback response")
                return self._generate_fallback_response(query, analysis)

        except Exception as e:
            print(f"❌ Response generation error: {e}")
            return self._generate_fallback_response(query, analysis)

    def _generate_ai_response(self, query: str, analysis: Dict[str, Any]) -> str:
        """Generate response using AI models"""

        if analysis['query_type'] == 'explanation':
            prompt = f"Explain in detail for {analysis['educational_level']} students: {query}"
        elif analysis['query_type'] == 'problem_solving':
            prompt = f"Solve this {analysis['subject']} problem step by step: {query}"
        elif analysis['query_type'] == 'comparison':
            prompt = f"Compare and contrast the following for students: {query}"
        elif analysis['query_type'] == 'tutorial':
            prompt = f"Provide a step-by-step tutorial for: {query}"
        else:
            prompt = f"Provide a comprehensive educational answer about: {query}"

        tokenized = self.text_tokenizer(
            prompt,
            return_tensors='pt',
            max_length=512,
            truncation=True,
            padding=True,
            return_attention_mask=True  # Explicitly request attention mask
        )

        inputs = tokenized['input_ids'].to(self.device)
        attention_mask = tokenized['attention_mask'].to(self.device)

        with torch.no_grad():
            outputs = self.text_model.generate(
                inputs,
                attention_mask=attention_mask,  # Pass attention mask
                max_length=300,
                min_length=50,
                num_beams=4,
                temperature=0.7,
                do_sample=True,
                top_p=0.9,
                repetition_penalty=2.0,
                early_stopping=True,
                pad_token_id=self.text_tokenizer.eos_token_id
            )

        response = self.text_tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Remove repetitive phrases and clean up
        response = response.replace(prompt, "").strip()
        response = self._remove_repetition(response)

        if len(response) < 100:
            response = self._enhance_with_conversational_model(query, response)

        return response
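    # Example of the prompt construction above (no model call involved):
    # a 'problem_solving' query in 'mathematics' such as "Solve 2x + 3 = 11"
    # becomes the prompt "Solve this mathematics problem step by step: Solve 2x + 3 = 11".
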
    def _remove_repetition(self, text: str) -> str:
        """Remove repetitive phrases from generated text"""
        sentences = text.split('. ')
        unique_sentences = []
        seen = set()

        for sentence in sentences:
            sentence = sentence.strip()
            if sentence and sentence not in seen and len(sentence) > 10:
                seen.add(sentence)
                unique_sentences.append(sentence)

        return '. '.join(unique_sentences)

    def _enhance_with_conversational_model(self, query: str, base_response: str) -> str:
        """Enhance response using conversational model"""
        try:
            if self.chat_tokenizer is None or self.chat_model is None:
                return base_response

            context = f"User: {query}\nAssistant: {base_response}\nUser: Can you elaborate more?\nAssistant:"

            tokenized = self.chat_tokenizer(
                context,
                return_tensors='pt',
                max_length=400,
                truncation=True,
                padding=True,
                return_attention_mask=True
            )

            inputs = tokenized['input_ids'].to(self.device)
            attention_mask = tokenized['attention_mask'].to(self.device)

            with torch.no_grad():
                outputs = self.chat_model.generate(
                    inputs,
                    attention_mask=attention_mask,  # Pass attention mask
                    max_length=inputs.shape[1] + 100,
                    num_beams=3,
                    temperature=0.8,
                    do_sample=True,
                    top_p=0.9,
                    pad_token_id=self.chat_tokenizer.eos_token_id,
                    eos_token_id=self.chat_tokenizer.eos_token_id
                )

            enhanced = self.chat_tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)

            return f"{base_response}\n\n{enhanced.strip()}"

        except Exception as e:
            print(f"⚠️ Enhancement failed: {e}")
            return base_response

    def _generate_fallback_response(self, query: str, analysis: Dict[str, Any]) -> str:
        """Generate fallback response when AI models fail"""

        subject = analysis['subject']
        query_type = analysis['query_type']
        complexity = analysis['complexity']
        level = analysis['educational_level']

        if query_type == 'explanation':
            return f"Let me explain {subject} concepts related to your question about '{query}'. This appears to be a {complexity}-level question suitable for {level} students. I'll break this down step by step to help you understand the key concepts and principles involved."

        elif query_type == 'problem_solving':
            return f"To solve this {subject} problem, I'll walk you through the solution step by step. For a {complexity}-level problem like this, we need to identify the key information, choose the appropriate method, and work through the solution systematically."

        elif query_type == 'comparison':
            return f"I'll help you compare and contrast the different aspects of your {subject} question. This type of analysis requires us to examine similarities, differences, and relationships between the concepts you're asking about."

        elif query_type == 'tutorial':
            return f"I'll provide you with a step-by-step tutorial for this {subject} topic. This {complexity}-level guide will help {level} students understand the process and methodology involved."

        else:
            return f"I understand you're asking about {subject}. This is a {complexity}-level question that I'll help you understand. Let me provide you with a comprehensive explanation that covers the key concepts and helps you grasp the fundamental principles involved."
    def generate_educational_visual(self, query: str, analysis: Dict[str, Any]) -> Optional[Image.Image]:
        """Generate educational visuals with fallback"""

        if not analysis['needs_visual']:
            return None

        try:
            if self.image_pipeline is not None:
                print("🎨 Generating educational visual with AI...")
                return self._generate_ai_visual(query, analysis)
            else:
                print("🎨 Generating fallback visual...")
                return self._generate_fallback_visual(query, analysis)

        except Exception as e:
            print(f"❌ Visual generation error: {e}")
            return self._generate_fallback_visual(query, analysis)

    def _generate_ai_visual(self, query: str, analysis: Dict[str, Any]) -> Optional[Image.Image]:
        """Generate visual using AI models"""

        visual_prompt = self._construct_visual_prompt(query, analysis)
        print(f"🖼️ Visual prompt: {visual_prompt}")

        with torch.no_grad():
            image = self.image_pipeline(
                prompt=visual_prompt,
                num_inference_steps=20,
                guidance_scale=7.5,
                height=512,
                width=512,
                generator=torch.Generator(device=self.device).manual_seed(42)
            ).images[0]

        enhanced_image = self._enhance_educational_image(image, query)

        # Save and display the image
        image_path = self._save_image(enhanced_image, query, analysis)
        self._display_image(enhanced_image, image_path)

        print("✅ Educational visual generated successfully!")
        return enhanced_image

    def _construct_visual_prompt(self, query: str, analysis: Dict[str, Any]) -> str:
        """Construct optimized prompt for educational visual generation"""

        subject = analysis['subject']
        query_lower = query.lower()

        base_prompt = "educational illustration, clean design, professional diagram, textbook style, clear and simple"

        subject_prompts = {
            'mathematics': "mathematical diagram, geometric shapes, clean whiteboard, equations, graphs",
            'physics': "physics diagram, scientific illustration, forces and motion, clean background",
            'chemistry': "molecular structure, chemical bonds, scientific diagram, laboratory style",
            'biology': "biological illustration, anatomical diagram, cell structure, scientific poster",
            'history': "historical illustration, timeline, educational infographic, documentary style",
            'geography': "map, geographical features, educational poster, atlas style",
            'computer science': "flowchart, algorithm diagram, programming concept, technical illustration"
        }

        subject_enhancement = subject_prompts.get(subject, "educational diagram, informative illustration")

        key_concepts = self._extract_key_concepts(query)

        visual_prompt = f"{key_concepts}, {subject_enhancement}, {base_prompt}, high quality, detailed"

        return visual_prompt

    def _extract_key_concepts(self, query: str) -> str:
        """Extract key visual concepts from query"""
        stop_words = {'what', 'is', 'how', 'does', 'the', 'a', 'an', 'of', 'to', 'and', 'or', 'but', 'in', 'on', 'at', 'for', 'with', 'by'}

        words = query.lower().split()
        key_words = [word for word in words if word not in stop_words and len(word) > 2]

        return " ".join(key_words[:5])

    def _enhance_educational_image(self, image: Image.Image, query: str) -> Image.Image:
        """Enhance generated image for educational use"""
        try:
            if image.mode != 'RGB':
                image = image.convert('RGB')

            enhancer = ImageEnhance.Contrast(image)
            image = enhancer.enhance(1.2)

            enhancer = ImageEnhance.Sharpness(image)
            image = enhancer.enhance(1.1)

            width, height = image.size
            border_width = 10

            bordered_image = Image.new('RGB', (width + 2*border_width, height + 2*border_width), 'white')
            bordered_image.paste(image, (border_width, border_width))

            return bordered_image

        except Exception as e:
            print(f"⚠️ Image enhancement failed: {e}")
            return image

    def _generate_fallback_visual(self, query: str, analysis: Dict[str, Any]) -> Optional[Image.Image]:
        """Generate simple fallback visual when AI generation fails"""
        try:
            img = Image.new('RGB', (512, 512), 'white')
            draw = ImageDraw.Draw(img)

            title = f"{analysis['subject'].title()} Concept"

            try:
                font = ImageFont.truetype("arial.ttf", 24)
                small_font = ImageFont.truetype("arial.ttf", 16)
            except Exception:
                font = ImageFont.load_default()
                small_font = ImageFont.load_default()

            bbox = draw.textbbox((0, 0), title, font=font)
            text_width = bbox[2] - bbox[0]
            text_x = (512 - text_width) // 2

            draw.text((text_x, 50), title, fill='black', font=font)

            query_lines = self._wrap_text(query, 50)
            y_position = 150

            for line in query_lines:
                bbox = draw.textbbox((0, 0), line, font=small_font)
                line_width = bbox[2] - bbox[0]
                line_x = (512 - line_width) // 2
                draw.text((line_x, y_position), line, fill='navy', font=small_font)
                y_position += 30

            draw.rectangle([50, 100, 462, 102], fill='blue')
            draw.rectangle([50, 410, 462, 412], fill='blue')

            # Save the fallback image
            image_path = self._save_image(img, query, analysis, is_fallback=True)
            self._display_image(img, image_path)

            return img

        except Exception as e:
            print(f"❌ Fallback visual generation failed: {e}")
            return None

    def _save_image(self, image: Image.Image, query: str, analysis: Dict[str, Any], is_fallback: bool = False) -> str:
        """Save the generated image to disk"""
        if not self.save_images or image is None:
            return ""

        try:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            subject = analysis['subject'].replace(' ', '_')
            query_short = ''.join(c for c in query[:30] if c.isalnum() or c in (' ', '-', '_')).rstrip()
            query_short = query_short.replace(' ', '_')

            fallback_suffix = "_fallback" if is_fallback else ""
            filename = f"{timestamp}_{subject}_{query_short}{fallback_suffix}.png"

            if len(filename) > 200:
                filename = f"{timestamp}_{subject}{fallback_suffix}.png"

            image_path = os.path.join(self.images_dir, filename)

            image.save(image_path, "PNG", quality=95)
            print(f"💾 Image saved: {image_path}")

            return image_path

        except Exception as e:
            print(f"❌ Failed to save image: {e}")
            return ""

    def _display_image(self, image: Image.Image, image_path: str):
        """Display the generated image; skipped in API mode"""
        if not self.display_images:
            return

        try:
            plt.figure(figsize=(10, 8))
            plt.imshow(image)
            plt.axis('off')
            plt.title('Generated Educational Visual', fontsize=14, fontweight='bold')

            if image_path:
                plt.figtext(0.5, 0.02, f'Saved as: {os.path.basename(image_path)}',
                            ha='center', fontsize=10, style='italic')

            plt.tight_layout()
            plt.show()

            print("🖼️ Image displayed successfully!")

        except Exception as e:
            print(f"⚠️ Could not display image: {e}")
            print(f"📁 Image saved to: {image_path}")

    def _wrap_text(self, text: str, max_length: int) -> List[str]:
        """Greedily wrap text to the specified line length"""
        words = text.split()
        lines = []
        current_line = []
        current_length = 0

        for word in words:
            if current_length + len(word) + 1 <= max_length:
                current_line.append(word)
                current_length += len(word) + 1
            else:
                if current_line:
                    lines.append(' '.join(current_line))
                current_line = [word]
                current_length = len(word)

        if current_line:
            lines.append(' '.join(current_line))

        return lines
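    # e.g. _wrap_text("the quick brown fox jumps over", 10)
    #      -> ['the quick', 'brown fox', 'jumps over']
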
    def process_educational_query(self, query: str) -> Dict[str, Any]:
        """Main method to process educational queries with comprehensive error handling"""

        print(f"\n🎓 Processing Educational Query: {query}")
        print("=" * 80)

        start_time = time.time()

        try:
            # Analyze the query
            analysis = self.analyze_educational_query(query)

            print("📊 Analysis Results:")
            print(f"   Subject: {analysis['subject']} (confidence: {analysis['confidence']:.2f})")
            print(f"   Type: {analysis['query_type']}")
            print(f"   Complexity: {analysis['complexity']}")
            print(f"   Level: {analysis['educational_level']}")
            print(f"   Needs Visual: {analysis['needs_visual']}")

            # Generate text response
            print("\n📝 Generating educational response...")
            text_response = self.generate_educational_response(query, analysis)

            # Generate visual if needed
            visual_image = None
            if analysis['needs_visual']:
                print("\n🎨 Generating educational visual...")
                visual_image = self.generate_educational_visual(query, analysis)

            processing_time = time.time() - start_time

            # Add to conversation history
            self.conversation_history.append({
                'query': query,
                'response': text_response,
                'analysis': analysis,
                'timestamp': time.time(),
                'processing_time': processing_time,
                'has_visual': visual_image is not None
            })

            print(f"\n✅ Processing completed in {processing_time:.2f} seconds")
            print("=" * 80)

            return {
                'text_response': text_response,
                'visual_image': visual_image,
                'analysis': analysis,
                'processing_time': processing_time,
                'success': True
            }

        except Exception as e:
            print(f"❌ Error processing query: {e}")
            processing_time = time.time() - start_time

            # Return error response
            return {
                'text_response': f"I encountered an error processing your question about '{query}'. Please try rephrasing your question or try again later.",
                'visual_image': None,
                'analysis': {'subject': 'unknown', 'error': str(e)},
                'processing_time': processing_time,
                'success': False,
                'error': str(e)
            }
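
A minimal usage sketch for this module (assuming the weight downloads succeed; the first run pulls several large checkpoints from the Hugging Face Hub):

from ai_models import AdvancedClassroomAI

ai = AdvancedClassroomAI(device='cpu', save_images=True, display_images=False)
if ai.models_ready:
    result = ai.process_educational_query("Explain Newton's second law")
    print(result['analysis']['subject'])  # classified subject
    print(result['text_response'])        # generated explanation
    # result['visual_image'] is a PIL Image when a visual was produced, else None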
app.py ADDED
@@ -0,0 +1,358 @@
from fastapi import FastAPI, HTTPException, File, UploadFile, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from typing import Optional, List, Dict, Any
import asyncio
import uvicorn
import os
import json
import base64
from datetime import datetime
import threading
import time
from ai_models import AdvancedClassroomAI

# Initialize FastAPI app
app = FastAPI(
    title="Advanced Classroom AI API",
    description="AI-powered educational assistant with text, voice, and visual capabilities",
    version="1.0.0"
)

# CORS middleware to allow frontend connections
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # More permissive for development
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global AI instance and status tracking
ai_assistant = None
initialization_status = "starting"
initialization_start_time = None
initialization_error = None

# Initialize AI models in background
def initialize_ai():
    global ai_assistant, initialization_status, initialization_start_time, initialization_error

    initialization_start_time = time.time()
    initialization_status = "initializing"

    try:
        print("🚀 Initializing AI models...")
        print("📝 This may take a few minutes on first run...")

        ai_assistant = AdvancedClassroomAI(
            device='cpu',
            save_images=True,
            display_images=False  # Don't display in API mode
        )

        # Verify models are actually ready
        if hasattr(ai_assistant, 'models_ready') and ai_assistant.models_ready:
            initialization_status = "ready"
            elapsed_time = time.time() - initialization_start_time
            print(f"✅ AI models initialized successfully in {elapsed_time:.2f} seconds!")
        else:
            initialization_status = "error"
            initialization_error = "Models loaded but not ready"
            print("❌ AI models loaded but not ready")

    except Exception as e:
        initialization_status = "error"
        initialization_error = str(e)
        print(f"❌ Failed to initialize AI models: {e}")
        ai_assistant = None

# Start AI initialization in background thread
print("🚀 Starting AI model initialization in background...")
threading.Thread(target=initialize_ai, daemon=True).start()

# Serve generated images
os.makedirs("generated_images", exist_ok=True)
app.mount("/images", StaticFiles(directory="generated_images"), name="images")

# Pydantic models for API
class ChatRequest(BaseModel):
    message: str
    subject: str = "General"
    message_type: str = "text"  # text, voice, visual
    conversation_history: Optional[List[Dict[str, Any]]] = []

class ChatResponse(BaseModel):
    response: str
    analysis: Dict[str, Any]
    image_url: Optional[str] = None
    processing_time: float
    success: bool
    error: Optional[str] = None

class HealthResponse(BaseModel):
    status: str
    ai_models_ready: bool
    timestamp: str
    initialization_status: Optional[str] = None
    models_loaded: Optional[bool] = None
    error_message: Optional[str] = None
    initialization_time: Optional[float] = None

# Health check endpoint with detailed status
@app.get("/health", response_model=HealthResponse)
async def health_check():
    global ai_assistant, initialization_status, initialization_start_time, initialization_error

    # Calculate initialization time
    init_time = None
    if initialization_start_time:
        init_time = time.time() - initialization_start_time

    # Determine if models are ready
    models_ready = (
        ai_assistant is not None and
        hasattr(ai_assistant, 'models_ready') and
        ai_assistant.models_ready and
        initialization_status == "ready"
    )

    response = HealthResponse(
        status="healthy" if models_ready else "initializing",
        ai_models_ready=models_ready,
        timestamp=datetime.now().isoformat(),
        initialization_status=initialization_status,
        models_loaded=ai_assistant is not None,
        error_message=initialization_error,
        initialization_time=init_time
    )

    print(f"Health check: {response.dict()}")
    return response
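# Client-side sketch: because initialization runs in a background thread,
# callers can poll /health until ai_models_ready flips to True, e.g.
#   while not requests.get(f"{base}/health").json()["ai_models_ready"]:
#       time.sleep(5)
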
# Main chat endpoint
@app.post("/chat", response_model=ChatResponse)
async def chat(request: ChatRequest):
    global ai_assistant, initialization_status

    try:
        # Check if the AI is ready, with detailed status
        if ai_assistant is None:
            return ChatResponse(
                response="AI models are still loading. Please try again in a moment.",
                analysis={"subject": request.subject, "status": "loading", "initialization_status": initialization_status},
                processing_time=0,
                success=False,
                error="AI models not ready"
            )

        if not hasattr(ai_assistant, 'models_ready') or not ai_assistant.models_ready:
            return ChatResponse(
                response="AI models are still initializing. Please wait a moment and try again.",
                analysis={"subject": request.subject, "status": "initializing", "initialization_status": initialization_status},
                processing_time=0,
                success=False,
                error="AI models not ready"
            )

        # Process the query using the AI models
        print(f"Processing query: {request.message[:100]}...")

        start_time = time.time()
        result = ai_assistant.process_educational_query(request.message)
        processing_time = time.time() - start_time

        print(f"Query processed in {processing_time:.2f} seconds")

        # Handle the image URL if a visual was generated
        image_url = None
        if result.get('visual_image'):
            # Get the most recent image from the directory
            images_dir = "generated_images"
            if os.path.exists(images_dir):
                image_files = [f for f in os.listdir(images_dir) if f.endswith('.png')]
                if image_files:
                    # Sort so the most recent image comes first
                    image_files.sort(key=lambda x: os.path.getctime(os.path.join(images_dir, x)), reverse=True)
                    image_url = f"/images/{image_files[0]}"

        return ChatResponse(
            response=result['text_response'],
            analysis=result['analysis'],
            image_url=image_url,
            processing_time=processing_time,
            success=result['success']
        )

    except Exception as e:
        print(f"❌ Error in chat endpoint: {e}")
        return ChatResponse(
            response=f"I encountered an error processing your request: {str(e)}",
            analysis={"subject": request.subject, "error": str(e)},
            processing_time=0,
            success=False,
            error=str(e)
        )

# Voice processing endpoint
@app.post("/voice", response_model=ChatResponse)
async def process_voice(
    audio: UploadFile = File(...),
    subject: str = "General"
):
    try:
        if ai_assistant is None or not ai_assistant.models_ready:
            return ChatResponse(
                response="AI models are not ready for voice processing.",
                analysis={"subject": subject, "status": "not_ready"},
                processing_time=0,
                success=False,
                error="AI models not ready"
            )

        # For now, return a placeholder response.
        # In a full implementation, you would:
        # 1. Save the audio file
        # 2. Use speech-to-text to convert the audio to text
        # 3. Process the text with the AI models (see the sketch below)

        return ChatResponse(
            response="Voice processing is not fully implemented yet. Please use text input.",
            analysis={"subject": subject, "message_type": "voice"},
            processing_time=0,
            success=False,
            error="Voice processing not implemented"
        )

    except Exception as e:
        return ChatResponse(
            response="Error processing voice input.",
            analysis={"subject": subject, "error": str(e)},
            processing_time=0,
            success=False,
            error=str(e)
        )
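# One possible shape for the full voice flow (a sketch only; the
# SpeechRecognition package is NOT in requirements.txt and would need adding):
#
#   import speech_recognition as sr
#
#   recognizer = sr.Recognizer()
#   with sr.AudioFile(saved_audio_path) as source:           # 1. saved upload
#       audio_data = recognizer.record(source)
#   text = recognizer.recognize_google(audio_data)           # 2. speech-to-text
#   result = ai_assistant.process_educational_query(text)    # 3. normal pipeline
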
# Subject-specific endpoints
@app.get("/subjects")
async def get_subjects():
    return {
        "subjects": [
            "Mathematics",
            "Physics",
            "Biology",
            "Chemistry",
            "History",
            "Geography",
            "Literature",
            "Computer Science",
            "Economics",
            "General"
        ]
    }

# Get conversation analytics
@app.get("/analytics")
async def get_analytics():
    try:
        if ai_assistant is None:
            return {"error": "AI assistant not initialized"}

        history = getattr(ai_assistant, 'conversation_history', [])

        # Calculate some basic analytics
        total_queries = len(history)
        subjects = {}
        query_types = {}

        for conversation in history:
            subject = conversation.get('analysis', {}).get('subject', 'unknown')
            query_type = conversation.get('analysis', {}).get('query_type', 'unknown')

            subjects[subject] = subjects.get(subject, 0) + 1
            query_types[query_type] = query_types.get(query_type, 0) + 1

        return {
            "total_queries": total_queries,
            "subjects": subjects,
            "query_types": query_types,
            "average_processing_time": sum(c.get('processing_time', 0) for c in history) / max(total_queries, 1)
        }

    except Exception as e:
        return {"error": str(e)}

# Clear conversation history
@app.post("/clear-history")
async def clear_history():
    try:
        if ai_assistant is not None:
            if hasattr(ai_assistant, 'conversation_history'):
                ai_assistant.conversation_history = []
            return {"message": "Conversation history cleared successfully"}
        return {"error": "AI assistant not initialized"}
    except Exception as e:
        return {"error": str(e)}

# Get available images
@app.get("/images/list")
async def list_images():
    try:
        images_dir = "generated_images"
        if not os.path.exists(images_dir):
            return {"images": []}

        image_files = [f for f in os.listdir(images_dir) if f.endswith(('.png', '.jpg', '.jpeg'))]
        image_files.sort(key=lambda x: os.path.getctime(os.path.join(images_dir, x)), reverse=True)

        return {
            "images": [{"filename": f, "url": f"/images/{f}"} for f in image_files]
        }
    except Exception as e:
        return {"error": str(e)}

# Root endpoint with detailed status
@app.get("/")
async def root():
    global ai_assistant, initialization_status, initialization_start_time, initialization_error

    # Calculate initialization time
    init_time = None
    if initialization_start_time:
        init_time = time.time() - initialization_start_time

    models_ready = (
        ai_assistant is not None and
        hasattr(ai_assistant, 'models_ready') and
        ai_assistant.models_ready and
        initialization_status == "ready"
    )

    return {
        "message": "Advanced Classroom AI API",
        "status": "running",
        "ai_ready": models_ready,
        "initialization_status": initialization_status,
        "initialization_time": init_time,
        "error_message": initialization_error,
        "models_loaded": ai_assistant is not None,
        "endpoints": {
            "chat": "/chat",
            "voice": "/voice",
            "health": "/health",
            "subjects": "/subjects",
            "analytics": "/analytics",
            "images": "/images/list"
        }
    }

if __name__ == "__main__":
    print("🚀 Starting Advanced Classroom AI API...")
    port = int(os.environ.get("PORT", 8000))  # Use dynamic port from Render if available
    uvicorn.run(
        "app:app",
        host="0.0.0.0",
        port=port
    )
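
One possible refactor, sketched here rather than part of the commit: deferring the background thread to FastAPI's startup hook means merely importing `app` (for example, from a test) no longer triggers model downloads:

@app.on_event("startup")
def launch_model_initialization() -> None:
    # Same daemon thread as above, but started only when the server boots.
    threading.Thread(target=initialize_ai, daemon=True).start()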
generated_images/.gitkeep ADDED
File without changes
requirements.txt ADDED
@@ -0,0 +1,38 @@
--prefer-binary
--extra-index-url https://download.pytorch.org/whl/cpu

# FastAPI and server dependencies
fastapi==0.104.1
uvicorn[standard]==0.24.0
python-multipart==0.0.6
python-dotenv==1.0.0

# Torch stack (fully aligned + CPU-safe)
torch==2.7.1+cpu
torchvision==0.22.1+cpu
torchaudio==2.7.1+cpu

# AI and ML dependencies
transformers==4.37.2
tokenizers==0.15.1
diffusers==0.25.0
accelerate==0.24.1
safetensors>=0.4.1
huggingface_hub==0.20.3  # ✅ Compatible with both transformers & tokenizers
sentencepiece==0.1.99
protobuf==3.20.3

# Image processing
Pillow==9.5.0  # ✅ Prebuilt wheels exist
opencv-python==4.8.1.78

# Data processing and visualization
numpy==1.24.3
pandas==2.0.3
matplotlib==3.7.2
seaborn==0.12.2

# Other dependencies
requests==2.31.0
pydantic==1.10.12  # ✅ No maturin/Rust
typing-extensions==4.11.0  # ✅ Required by torch>=2.7
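
A quick way to confirm that the CPU-only wheels from the extra index were the ones actually installed (a sketch; run it inside the built environment):

import torch

print(torch.__version__)              # expected: 2.7.1+cpu
assert not torch.cuda.is_available()  # the +cpu build should not report CUDA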
test_api.py ADDED
@@ -0,0 +1,58 @@
import requests
import json

# Test the API endpoints
def test_api():
    base_url = "http://localhost:8000"

    print("🧪 Testing API endpoints...")

    # Test root endpoint
    try:
        response = requests.get(f"{base_url}/")
        print(f"✅ Root endpoint: {response.status_code}")
        print(f"   Response: {response.json()}")
    except Exception as e:
        print(f"❌ Root endpoint failed: {e}")

    # Test health endpoint
    try:
        response = requests.get(f"{base_url}/health")
        print(f"✅ Health endpoint: {response.status_code}")
        health_data = response.json()
        print(f"   AI Models Ready: {health_data.get('ai_models_ready')}")
        print(f"   Initialization Status: {health_data.get('initialization_status')}")
    except Exception as e:
        print(f"❌ Health endpoint failed: {e}")

    # Test debug endpoint
    # Note: app.py does not define a /debug route, so this currently returns 404
    # and the fields below come back as None.
    try:
        response = requests.get(f"{base_url}/debug")
        print(f"✅ Debug endpoint: {response.status_code}")
        debug_data = response.json()
        print(f"   AI Assistant Exists: {debug_data.get('ai_assistant_exists')}")
        print(f"   Models Ready: {debug_data.get('models_ready')}")
        print(f"   Initialization Status: {debug_data.get('initialization_status')}")
    except Exception as e:
        print(f"❌ Debug endpoint failed: {e}")

    # Test chat endpoint
    try:
        chat_data = {
            "message": "Hello, can you help me with math?",
            "subject": "Mathematics",
            "message_type": "text"
        }
        response = requests.post(f"{base_url}/chat", json=chat_data)
        print(f"✅ Chat endpoint: {response.status_code}")
        if response.status_code == 200:
            chat_response = response.json()
            print(f"   Success: {chat_response.get('success')}")
            print(f"   Response: {chat_response.get('response')[:100]}...")
        else:
            print(f"   Error: {response.text}")
    except Exception as e:
        print(f"❌ Chat endpoint failed: {e}")

if __name__ == "__main__":
    test_api()