prithivMLmods committed (verified)
Commit f6281cb
1 Parent(s): e81c55d

Update app.py

Files changed (1): app.py (+4 -4)
app.py CHANGED
@@ -310,11 +310,11 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
             label="Select Model",
             value="Lumian-VLR-7B-Thinking"
         )
-
         gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLM-Thinking/discussions)")
-        gr.Markdown("> Lumian-VLR-7B-Thinking is a high-fidelity vision-language reasoning model built on Qwen2.5-VL-7B-Instruct, excelling at fine-grained multimodal tasks such as image captioning, sampled video reasoning, and document comprehension through explicit grounded reasoning and advanced reinforcement learning. olmOCR-7B-0225-preview, developed by AllenAI, is a Qwen2-VL-7B-Instruct derivative optimized specifically for robust document OCR, efficiently processing large volumes of document images with specialized prompting and high scalability.")
-        gr.Markdown("> Typhoon-OCR-3B targets bilingual (Thai and English) document parsing, providing reliable OCR and text extraction for real-world documents, emphasizing usability in diverse and complex layouts. DREX-062225-exp is a document retrieval and extraction expert model, fine-tuned from docscopeOCR-7B, focusing on superior document analysis, structured data extraction, and maintaining advanced OCR capabilities including LaTeX and multilingual support. Together, these models represent the state-of-the-art in multimodal document understanding, OCR, and vision-language reasoning for a wide range of real-world and research applications.")
-
+        gr.Markdown("> Lumian-VLR-7B-Thinking is a high-fidelity vision-language reasoning model built on Qwen2.5-VL-7B-Instruct, designed for fine-grained multimodal understanding, enhancing image captioning, video reasoning, and document comprehension through explicit grounded reasoning. It is trained first via supervised fine-tuning (SFT) on visually grounded reasoning traces and then further refined with GRPO reinforcement learning to boost reasoning accuracy.")
+        gr.Markdown("> LMM-R1-MGT-PerceReason is a vision-language model focused on advanced reasoning; it uses a multimodal tree-search approach that enables progressive visual-textual slow thinking and improves complex spatial and logical reasoning without fine-tuning. olmOCR-7B-0225-preview is a 7B-parameter open model designed for OCR tasks with robust text extraction, especially in complex document layouts.")
+        gr.Markdown("> Typhoon-OCR-3B is a 3B-parameter OCR model optimized for efficient and accurate optical character recognition in challenging conditions. DREX-062225-exp is an experimental multimodal model emphasizing strong document reading and extraction capabilities combined with vision-language understanding to support detailed document parsing and reasoning tasks.")
+
         image_submit.click(
             fn=generate_image,
             inputs=[model_choice, image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
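
For context on how the edited block sits in the app, here is a minimal, self-contained sketch of the same Gradio wiring. It is not the Space's actual code: the selector widget type, the model choices, the slider ranges, the output component, and the `generate_image` body are all assumptions made for illustration; only the component names and the order of the `click` inputs follow the diff above.

```python
# Minimal sketch of the model selector + info markdown + submit wiring shown in the diff.
# Everything marked "assumed" is a placeholder, not the Space's real implementation.
import gradio as gr

def generate_image(model_name, query, image, max_new_tokens,
                   temperature, top_p, top_k, repetition_penalty):
    # Assumed stub: the real Space dispatches to the selected vision-language model here.
    return f"[{model_name}] {query} (image attached: {image is not None})"

with gr.Blocks() as demo:
    model_choice = gr.Radio(              # assumed widget type (could be a Dropdown)
        choices=["Lumian-VLR-7B-Thinking", "olmOCR-7B-0225-preview",
                 "Typhoon-OCR-3B", "DREX-062225-exp"],   # assumed choices
        label="Select Model",
        value="Lumian-VLR-7B-Thinking",
    )
    gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLM-Thinking/discussions)")

    # Assumed input components matching the names passed to click() in the diff.
    image_query = gr.Textbox(label="Query")
    image_upload = gr.Image(type="pil", label="Image")
    max_new_tokens = gr.Slider(1, 2048, value=512, label="Max new tokens")
    temperature = gr.Slider(0.1, 2.0, value=0.7, label="Temperature")
    top_p = gr.Slider(0.05, 1.0, value=0.9, label="Top-p")
    top_k = gr.Slider(1, 100, value=50, label="Top-k")
    repetition_penalty = gr.Slider(1.0, 2.0, value=1.1, label="Repetition penalty")
    image_submit = gr.Button("Submit")
    output = gr.Textbox(label="Output")   # assumed output component

    # The click handler ties the selected model and generation parameters to generate_image.
    image_submit.click(
        fn=generate_image,
        inputs=[model_choice, image_query, image_upload, max_new_tokens,
                temperature, top_p, top_k, repetition_penalty],
        outputs=output,
    )

if __name__ == "__main__":
    demo.launch()
```

Running this file launches a local demo in which the selected model name is simply echoed back; in the actual Space, `generate_image` would run inference with the chosen model.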