import torch
from transformers import ViTImageProcessor, ViTForImageClassification, pipeline
from fastai.learner import load_learner
from fastai.vision.core import PILImage
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import gradio as gr
import io
import base64

# 🔹 Load ViT model from Hugging Face (HAM10000)
MODEL_NAME = "ahishamm/vit-base-HAM-10000-sharpened-patch-32"
feature_extractor = ViTImageProcessor.from_pretrained(MODEL_NAME)
model_vit = ViTForImageClassification.from_pretrained(MODEL_NAME)
model_vit.eval()

# 🔹 Load Fast.ai models
model_malignancy = load_learner("ada_learn_malben.pkl")
model_norm2000 = load_learner("ada_learn_skin_norm2000.pkl")

# 🔹 Load 7-class ISIC model
classifier_isic7 = pipeline(
    "image-classification",
    model="Anwarkh1/Skin_Cancer-Image_Classification"
)

# 🔹 ViT class labels and per-class risk levels
CLASSES = [
    "Queratosis actínica / Bowen",
    "Carcinoma células basales",
    "Lesión queratósica benigna",
    "Dermatofibroma",
    "Melanoma maligno",
    "Nevus melanocítico",
    "Lesión vascular"
]

RISK_LEVELS = {
    0: {'level': 'Moderado', 'color': '#ffaa00', 'weight': 0.6},
    1: {'level': 'Alto',     'color': '#ff4444', 'weight': 0.8},
    2: {'level': 'Bajo',     'color': '#44ff44', 'weight': 0.1},
    3: {'level': 'Bajo',     'color': '#44ff44', 'weight': 0.1},
    4: {'level': 'Crítico',  'color': '#cc0000', 'weight': 1.0},
    5: {'level': 'Bajo',     'color': '#44ff44', 'weight': 0.1},
    6: {'level': 'Bajo',     'color': '#44ff44', 'weight': 0.1}
}


def analizar_lesion_combined(img):
    # Convert the input image to a Fast.ai PILImage for the Fast.ai learners
    img_fastai = PILImage.create(img)

    # 🔹 ViT transformer (HAM10000): preprocess, run inference, take softmax probabilities
    inputs = feature_extractor(img, return_tensors="pt")
    with torch.no_grad():
        outputs = model_vit(**inputs)
    probs_vit = outputs.logits.softmax(dim=-1).cpu().numpy()[0]
    pred_idx_vit = int(np.argmax(probs_vit))
    pred_class_vit = CLASSES[pred_idx_vit]
    confidence_vit = probs_vit[pred_idx_vit]

    # 🔹 Fast.ai models
    pred_fast_malignant, _, pr_