Spaces:
Running
on
Zero
Running
on
Zero
Commit
·
742fae5
1
Parent(s):
5999d67
debugging
Browse files- app.py +1 -1
- requirements.txt +2 -2
app.py
CHANGED
@@ -26,7 +26,7 @@ CAPTION_PROMPT = "Question: {}\nPlease describe the image. DO NOT try to answer
|
|
26 |
LLM_PROMPT = """In the following text, you will receive a detailed caption of an image and a relevant question. In addition, you will be provided with a tentative model response. You goal is to answer the question using these information.\n\n### The detailed caption of the provided image: {}\n\n### Note that the caption might contain incorrect solutions, do not be misguided by them.\n\n### A problem to be solved: {}\n\n### A tentative model response: {}\n\n### Note that the above tentative response might be inaccurate (due to calculation errors, incorrect logic/reasoning and so on), under such a case, please ignore it and give your own solutions. However, if you do not have enough evidence to show it is wrong, please output the tentative response."""
|
27 |
|
28 |
# === Initialize Models ===
|
29 |
-
MLLM_MODEL_PATH = "
|
30 |
LLM_MODEL_PATH = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
|
31 |
|
32 |
processor = AutoProcessor.from_pretrained(MLLM_MODEL_PATH)
|
|
|
26 |
LLM_PROMPT = """In the following text, you will receive a detailed caption of an image and a relevant question. In addition, you will be provided with a tentative model response. You goal is to answer the question using these information.\n\n### The detailed caption of the provided image: {}\n\n### Note that the caption might contain incorrect solutions, do not be misguided by them.\n\n### A problem to be solved: {}\n\n### A tentative model response: {}\n\n### Note that the above tentative response might be inaccurate (due to calculation errors, incorrect logic/reasoning and so on), under such a case, please ignore it and give your own solutions. However, if you do not have enough evidence to show it is wrong, please output the tentative response."""
|
27 |
|
28 |
# === Initialize Models ===
|
29 |
+
MLLM_MODEL_PATH = "Qwen/Qwen2.5-VL-7B-Instruct"
|
30 |
LLM_MODEL_PATH = "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
|
31 |
|
32 |
processor = AutoProcessor.from_pretrained(MLLM_MODEL_PATH)
|
requirements.txt
CHANGED
@@ -1,11 +1,11 @@
|
|
1 |
# requirements.txt records the full set of dependencies for development
|
2 |
-
torch
|
3 |
accelerate
|
4 |
codetiming
|
5 |
datasets
|
6 |
dill
|
7 |
# flash-attn
|
8 |
-
|
9 |
hydra-core
|
10 |
liger-kernel
|
11 |
numpy
|
|
|
1 |
# requirements.txt records the full set of dependencies for development
|
2 |
+
torch==2.5.0
|
3 |
accelerate
|
4 |
codetiming
|
5 |
datasets
|
6 |
dill
|
7 |
# flash-attn
|
8 |
+
https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.5cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
|
9 |
hydra-core
|
10 |
liger-kernel
|
11 |
numpy
|