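# Note on configuration: the constants interpolated throughout this app (TITLE,
# DESCRIPTION, ALL_UG40_LANGUAGES, GOOGLE_SUPPORTED_LANGUAGES) come from
# `from config import *` below. A minimal sketch of what `config.py` might define,
# with placeholder values that are assumptions rather than the project's actual
# settings:
#
#     TITLE = "SALT Translation Leaderboard"
#     DESCRIPTION = "Leaderboard for machine translation on the SALT Ugandan-language test set."
#     ALL_UG40_LANGUAGES = ["lug", "ach", "teo", "lgg", "nyn", "eng"]   # hypothetical codes
#     GOOGLE_SUPPORTED_LANGUAGES = ["lug", "eng"]                       # hypothetical subset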
# app.py
import subprocess
import sys
import os
from pathlib import Path


def setup_salt():
    """Clone and setup SALT library like in Colab."""
    try:
        # Check if salt is already available
        import salt.dataset
        print("✅ SALT library already available")
        return True
    except ImportError:
        pass

    print("📥 Setting up SALT library...")

    try:
        # Clone SALT repo if not exists
        salt_dir = Path("salt")
        if not salt_dir.exists():
            print("🔄 Cloning SALT repository...")
            subprocess.check_call([
                "git", "clone", "https://github.com/sunbirdai/salt.git"
            ])
        else:
            print("📁 SALT repository already exists")

        # Install SALT requirements
        salt_requirements = salt_dir / "requirements.txt"
        if salt_requirements.exists():
            print("📦 Installing SALT requirements...")
            subprocess.check_call([
                sys.executable, "-m", "pip", "install", "-q", "-r", str(salt_requirements)
            ])

        # Add SALT directory to Python path
        salt_path = str(salt_dir.absolute())
        if salt_path not in sys.path:
            sys.path.insert(0, salt_path)
            print(f"🔗 Added {salt_path} to Python path")

        # Test import
        import salt.dataset
        print("✅ SALT library setup completed successfully")
        return True

    except Exception as e:
        print(f"❌ Failed to setup SALT: {e}")
        return False


# Setup SALT on startup
print("🚀 Starting SALT Translation Leaderboard...")
if not setup_salt():
    print("❌ Cannot continue without SALT library")
    print("💡 Please check that git is available and GitHub is accessible")
    sys.exit(1)

import gradio as gr
import pandas as pd
import json
import traceback
from datetime import datetime
from typing import Optional, Dict, Tuple

# Import our modules
from src.test_set import get_public_test_set, get_complete_test_set, create_test_set_download, validate_test_set_integrity
from src.validation import validate_submission_complete
from src.evaluation import evaluate_predictions, generate_evaluation_report, get_google_translate_baseline
from src.leaderboard import (
    load_leaderboard, add_model_to_leaderboard, get_leaderboard_stats,
    filter_leaderboard, export_leaderboard, get_model_comparison,
    prepare_leaderboard_display
)
from src.plotting import (
    create_leaderboard_ranking_plot, create_metrics_comparison_plot,
    create_language_pair_heatmap, create_coverage_analysis_plot,
    create_model_performance_timeline, create_google_comparison_plot,
    create_detailed_model_analysis, create_submission_summary_plot
)
from src.utils import sanitize_model_name, get_all_language_pairs, get_google_comparable_pairs
from config import *

# Global variables for caching
current_leaderboard = None
public_test_set = None
complete_test_set = None


def initialize_data():
    """Initialize test sets and leaderboard data."""
    global public_test_set, complete_test_set, current_leaderboard

    try:
        print("🚀 Initializing SALT Translation Leaderboard...")

        # Load test sets
        print("📥 Loading test sets...")
        public_test_set = get_public_test_set()
        complete_test_set = get_complete_test_set()

        # Load leaderboard
        print("📊 Loading leaderboard...")
        current_leaderboard = load_leaderboard()

        print("✅ Initialization complete!")
        print(f"   - Test set: {len(public_test_set):,} samples")
        print(f"   - Language pairs: {len(get_all_language_pairs())}")
        print(f"   - Current models: {len(current_leaderboard)}")

        return True

    except Exception as e:
        print(f"❌ Initialization failed: {e}")
        traceback.print_exc()
        return False


def download_test_set() -> Tuple[str, str]:
    """Create downloadable test set and return file path and info."""
    try:
        global public_test_set
        if public_test_set is None:
            public_test_set = get_public_test_set()

        # Create download file
        download_path, stats = create_test_set_download()

        # Create info message
        info_msg = f"""
## 📥 SALT Test Set Downloaded Successfully!

### Dataset Statistics:
- **Total Samples**: {stats['total_samples']:,}
- **Language Pairs**: {stats['language_pairs']}
- **Google Comparable**: {stats['google_comparable_samples']:,} samples
- **Languages**: {', '.join(stats['languages'])}

### File Format:
- `sample_id`: Unique identifier for each sample
- `source_text`: Text to be translated
- `source_language`: Source language code
- `target_language`: Target language code
- `domain`: Content domain (if available)
- `google_comparable`: Whether this pair can be compared with Google Translate

### Next Steps:
1. Run your model on the source texts
2. Create a CSV/JSON file with columns: `sample_id`, `prediction`
3. Upload your predictions using the "Submit Predictions" tab
"""

        return download_path, info_msg

    except Exception as e:
        error_msg = f"❌ Error creating test set download: {str(e)}"
        return None, error_msg


def validate_submission(file, model_name: str, author: str, description: str) -> Tuple[str, Optional[pd.DataFrame]]:
    """Validate uploaded prediction file, supporting str paths, bytes, and Gradio wrappers."""
    try:
        if file is None:
            return "❌ Please upload a predictions file", None

        if not model_name.strip():
            return "❌ Please provide a model name", None

        # 1) Determine raw bytes
        if isinstance(file, bytes):
            file_content = file
        elif isinstance(file, str):
            # could be a path or raw text
            if os.path.exists(file):
                with open(file, "rb") as f:
                    file_content = f.read()
            else:
                file_content = file.encode("utf-8")
        elif hasattr(file, "name") and os.path.exists(file.name):
            # tempfile._TemporaryFileWrapper from Gradio
            with open(file.name, "rb") as f:
                file_content = f.read()
        else:
            return "❌ Could not read uploaded file", None

        # 2) Infer filename for format-sniffing
        filename = (
            getattr(file, "name", None)
            or getattr(file, "filename", None)
            or "predictions.csv"
        )

        # 3) Load test set if needed
        global complete_test_set
        if complete_test_set is None:
            complete_test_set = get_complete_test_set()

        # 4) Run existing validation pipeline
        validation_result = validate_submission_complete(
            file_content, filename, complete_test_set, model_name
        )

        if validation_result["valid"]:
            return validation_result["report"], validation_result["predictions"]
        else:
            return validation_result["report"], None

    except Exception as e:
        return (
            f"❌ Validation error: {e}\n\nTraceback:\n{traceback.format_exc()}",
            None,
        )


def evaluate_submission(
    predictions_df: pd.DataFrame,
    model_name: str,
    author: str,
    description: str,
    validation_info: Dict
) -> Tuple[str, pd.DataFrame, object, object]:
    """Evaluate validated predictions and update leaderboard."""
    try:
        if predictions_df is None:
            return "❌ No valid predictions to evaluate", None, None, None

        # Get complete test set with targets
        global complete_test_set, current_leaderboard
        if complete_test_set is None:
            complete_test_set = get_complete_test_set()

        # Run evaluation
        print(f"📊 Evaluating {model_name}...")
        evaluation_results = evaluate_predictions(predictions_df, complete_test_set)

        if evaluation_results.get('error'):
            return f"❌ Evaluation error: {evaluation_results['error']}", None, None, None

        # Add to leaderboard
        print("🏆 Adding to leaderboard...")
        model_type = "user_submission"  # Could be enhanced to detect model type

        updated_leaderboard = add_model_to_leaderboard(
            model_name=sanitize_model_name(model_name),
            author=author or "Anonymous",
            evaluation_results=evaluation_results,
            validation_info=validation_info,
            model_type=model_type,
            description=description or ""
        )

        # Update global leaderboard
        current_leaderboard = updated_leaderboard

        # Generate evaluation report
        report = generate_evaluation_report(evaluation_results, model_name)

        # Create visualization plots
        summary_plot = create_submission_summary_plot(validation_info, evaluation_results)
        ranking_plot = create_leaderboard_ranking_plot(updated_leaderboard)

        # Format success message
        rank = updated_leaderboard[updated_leaderboard['model_name'] == sanitize_model_name(model_name)].index[0] + 1
        total_models = len(updated_leaderboard)

        success_msg = f"""
## 🎉 Evaluation Complete!

### Your Results:
- **Model**: {model_name}
- **Rank**: #{rank} out of {total_models} models
- **Quality Score**: {evaluation_results['averages'].get('quality_score', 0):.4f}
- **BLEU**: {evaluation_results['averages'].get('bleu', 0):.2f}
- **ChrF**: {evaluation_results['averages'].get('chrf', 0):.4f}

### Coverage:
- **Samples Evaluated**: {evaluation_results['evaluated_samples']:,}
- **Language Pairs**: {evaluation_results['summary']['language_pairs_covered']}
- **Google Comparable**: {evaluation_results['summary']['google_comparable_pairs']} pairs

{report}
"""

        return success_msg, prepare_leaderboard_display(updated_leaderboard), summary_plot, ranking_plot

    except Exception as e:
        error_msg = f"❌ Evaluation failed: {str(e)}\n\nTraceback:\n{traceback.format_exc()}"
        return error_msg, None, None, None


def refresh_leaderboard_display(
    search_query: str = "",
    model_type_filter: str = "all",
    min_coverage: float = 0.0,
    google_only: bool = False
) -> Tuple[pd.DataFrame, object, object, str]:
    """Refresh and filter leaderboard display."""
    try:
        global current_leaderboard
        if current_leaderboard is None:
            current_leaderboard = load_leaderboard()

        # Apply filters
        filtered_df = filter_leaderboard(
            current_leaderboard,
            search_query=search_query,
            model_type=model_type_filter,
            min_coverage=min_coverage,
            google_comparable_only=google_only
        )

        # Prepare for display (removes detailed_metrics column)
        display_df = prepare_leaderboard_display(filtered_df)

        # Create plots
        ranking_plot = create_leaderboard_ranking_plot(filtered_df)
        comparison_plot = create_metrics_comparison_plot(filtered_df)

        # Get stats
        stats = get_leaderboard_stats(filtered_df)
        stats_text = f"""
### 📊 Leaderboard Statistics
- **Total Models**: {stats['total_models']}
- **Average Quality Score**: {stats['avg_quality_score']:.4f}
- **Google Comparable Models**: {stats['google_comparable_models']}

**Best Model**: {stats['best_model']['name'] if stats['best_model'] else 'None'}
**Latest Submission**: {stats['latest_submission'][:10] if stats['latest_submission'] else 'None'}
"""

        return display_df, ranking_plot, comparison_plot, stats_text

    except Exception as e:
        error_msg = f"Error loading leaderboard: {str(e)}"
        empty_df = pd.DataFrame()
        return empty_df, None, None, error_msg


def get_model_details(model_name: str) -> Tuple[str, object]:
    """Get detailed analysis for a specific model."""
    try:
        global current_leaderboard
        if current_leaderboard is None:
            return "Leaderboard not loaded", None

        # Find model
        model_row = current_leaderboard[current_leaderboard['model_name'] == model_name]
        if model_row.empty:
            return f"Model '{model_name}' not found", None

        model_info = model_row.iloc[0]

        # Parse detailed metrics
        try:
            detailed_results = json.loads(model_info['detailed_metrics'])
        except Exception:
            detailed_results = {}

        # Create detailed plot
        detail_plot = create_detailed_model_analysis(detailed_results, model_name)

        # Format model details
        details_text = f"""
## 🔍 Model Details: {model_name}

### Basic Information:
- **Author**: {model_info['author']}
- **Submission Date**: {model_info['submission_date'][:10]}
- **Model Type**: {model_info['model_type']}
- **Description**: {model_info['description'] or 'No description provided'}

### Performance Metrics:
- **Quality Score**: {model_info['quality_score']:.4f}
- **BLEU**: {model_info['bleu']:.2f}
- **ChrF**: {model_info['chrf']:.4f}
- **ROUGE-1**: {model_info['rouge1']:.4f}
- **ROUGE-L**: {model_info['rougeL']:.4f}

### Coverage Information:
- **Total Samples**: {model_info['total_samples']:,}
- **Language Pairs Covered**: {model_info['language_pairs_covered']}
- **Google Comparable Pairs**: {model_info['google_pairs_covered']}
- **Coverage Rate**: {model_info['coverage_rate']:.1%}

### Google Translate Comparison:
- **Google Quality Score**: {model_info['google_quality_score']:.4f}
- **Google BLEU**: {model_info['google_bleu']:.2f}
- **Google ChrF**: {model_info['google_chrf']:.4f}
"""

        return details_text, detail_plot

    except Exception as e:
        error_msg = f"Error getting model details: {str(e)}"
        return error_msg, None


# Initialize data on startup
print("🚀 Starting SALT Translation Leaderboard...")
initialization_success = initialize_data()

# Create Gradio interface
with gr.Blocks(
    title=TITLE,
    theme=gr.themes.Soft(),
    css="""
    .gradio-container {
        max-width: 1400px !important;
        margin: 0 auto;
    }
    .main-header {
        text-align: center;
        margin-bottom: 2rem;
        padding: 2rem;
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
        border-radius: 10px;
    }
    .metric-box {
        background: #f8f9fa;
        padding: 1rem;
        border-radius: 8px;
        margin: 0.5rem 0;
        border-left: 4px solid #007bff;
    }
    .error-box {
        background: #f8d7da;
        color: #721c24;
        padding: 1rem;
        border-radius: 8px;
        border-left: 4px solid #dc3545;
    }
    .success-box {
        background: #d4edda;
        color: #155724;
        padding: 1rem;
        border-radius: 8px;
        border-left: 4px solid #28a745;
    }
    """
) as demo:

    # Header
    gr.HTML(f"""
    <div class="main-header">
        <h1>{TITLE}</h1>
        <p>{DESCRIPTION}</p>
        <p>Supported Languages: {len(ALL_UG40_LANGUAGES)} Ugandan languages |
           Google Comparable: {len(GOOGLE_SUPPORTED_LANGUAGES)} languages</p>
    </div>
    """)
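    # Illustrative sketch (not part of the app): the "Next Steps" text in
    # download_test_set() above tells submitters to produce a file with
    # `sample_id` and `prediction` columns. Under that assumption, a submitter
    # could build such a file roughly like this (the file names and the
    # translate() callable are hypothetical):
    #
    #     import pandas as pd
    #     test_df = pd.read_csv("salt_test_set.csv")            # downloaded public test set
    #     test_df["prediction"] = test_df["source_text"].apply(translate)
    #     test_df[["sample_id", "prediction"]].to_csv("predictions.csv", index=False)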