# so we use pytorch 2.6
torch==2.6.0
torchvision==0.21.0
torchao==0.9.0
#torch>=2.4.0
#torchvision>=0.19.0
opencv-python>=4.9.0.80
diffusers==0.31.0
transformers>=4.49.0
tokenizers>=0.20.3
accelerate>=1.1.1
flash-attn @ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
tqdm
imageio
easydict
ftfy
dashscope
imageio-ffmpeg
numpy==1.24.4
wandb
omegaconf
einops
av==13.1.0
# opencv-python already listed above with a version constraint (>=4.9.0.80)
git+https://github.com/openai/CLIP.git
open_clip_torch
starlette
pycocotools
lmdb
matplotlib
sentencepiece
pydantic==2.10.6
scikit-image
huggingface_hub[cli]
dominate
nvidia-tensorrt
onnx
onnxruntime
onnxscript
onnxconverter_common
flask
flask-socketio