{ "output_dir": "/root/code/new_work_code/HI-TransPA/swfit_workdir/fresh-little-lemon-workspace/swift_config/swift_output/AUDIO-SFT-TEACH_CHAT_TRANS_AUDIO-Qwen2.5-Omni-3B-lora/v2-20250629-114113", "overwrite_output_dir": false, "do_train": false, "do_eval": false, "do_predict": false, "eval_strategy": "steps", "prediction_loss_only": false, "per_device_train_batch_size": 4, "per_device_eval_batch_size": 4, "per_gpu_train_batch_size": null, "per_gpu_eval_batch_size": null, "gradient_accumulation_steps": 2, "eval_accumulation_steps": null, "eval_delay": 0, "torch_empty_cache_steps": null, "learning_rate": 5e-05, "weight_decay": 0.1, "adam_beta1": 0.9, "adam_beta2": 0.95, "adam_epsilon": 1e-08, "max_grad_norm": 1.0, "num_train_epochs": 5.0, "max_steps": -1, "lr_scheduler_type": "cosine", "lr_scheduler_kwargs": null, "warmup_ratio": 0.05, "warmup_steps": 0, "log_level": "passive", "log_level_replica": "warning", "log_on_each_node": true, "logging_dir": "/root/code/new_work_code/HI-TransPA/swfit_workdir/fresh-little-lemon-workspace/swift_config/swift_output/AUDIO-SFT-TEACH_CHAT_TRANS_AUDIO-Qwen2.5-Omni-3B-lora/v2-20250629-114113/runs", "logging_strategy": "steps", "logging_first_step": true, "logging_steps": 1, "logging_nan_inf_filter": true, "save_strategy": "steps", "save_steps": 50.0, "save_total_limit": 1000, "save_safetensors": true, "save_on_each_node": false, "save_only_model": false, "restore_callback_states_from_checkpoint": false, "no_cuda": false, "use_cpu": false, "use_mps_device": false, "seed": 42, "data_seed": 42, "jit_mode_eval": false, "use_ipex": false, "bf16": true, "fp16": false, "fp16_opt_level": "O1", "half_precision_backend": "auto", "bf16_full_eval": false, "fp16_full_eval": false, "tf32": null, "local_rank": -1, "ddp_backend": null, "tpu_num_cores": null, "tpu_metrics_debug": false, "debug": null, "dataloader_drop_last": false, "eval_steps": 50.0, "dataloader_num_workers": 16, "dataloader_prefetch_factor": null, "past_index": -1, "run_name": "/root/code/new_work_code/HI-TransPA/swfit_workdir/fresh-little-lemon-workspace/swift_config/swift_output/AUDIO-SFT-TEACH_CHAT_TRANS_AUDIO-Qwen2.5-Omni-3B-lora/v2-20250629-114113", "disable_tqdm": null, "remove_unused_columns": true, "label_names": null, "load_best_model_at_end": false, "metric_for_best_model": "loss", "greater_is_better": false, "ignore_data_skip": false, "fsdp": "", "fsdp_min_num_params": 0, "fsdp_config": null, "fsdp_transformer_layer_cls_to_wrap": null, "accelerator_config": { "dispatch_batches": false }, "deepspeed": null, "label_smoothing_factor": 0.0, "optim": "adamw_torch", "optim_args": null, "adafactor": false, "group_by_length": false, "length_column_name": "length", "report_to": [ "swanlab" ], "ddp_find_unused_parameters": null, "ddp_bucket_cap_mb": null, "ddp_broadcast_buffers": null, "dataloader_pin_memory": true, "dataloader_persistent_workers": false, "skip_memory_metrics": true, "use_legacy_prediction_loop": false, "push_to_hub": false, "resume_from_checkpoint": null, "hub_model_id": null, "hub_strategy": "every_save", "hub_token": null, "hub_private_repo": null, "hub_always_push": false, "gradient_checkpointing": true, "gradient_checkpointing_kwargs": "{\"use_reentrant\": false}", "include_inputs_for_metrics": false, "include_for_metrics": [], "eval_do_concat_batches": true, "fp16_backend": "auto", "push_to_hub_model_id": null, "push_to_hub_organization": null, "push_to_hub_token": null, "mp_parameters": "", "auto_find_batch_size": false, "full_determinism": false, "torchdynamo": null, "ray_scope": 
"last", "ddp_timeout": 18000000, "torch_compile": false, "torch_compile_backend": null, "torch_compile_mode": null, "include_tokens_per_second": false, "include_num_input_tokens_seen": false, "neftune_noise_alpha": null, "optim_target_modules": null, "batch_eval_metrics": false, "eval_on_start": false, "use_liger_kernel": false, "eval_use_gather_object": false, "average_tokens_across_devices": false, "sortish_sampler": false, "predict_with_generate": false, "generation_max_length": null, "generation_num_beams": null, "generation_config": null, "vit_gradient_checkpointing": null, "check_model": true, "acc_strategy": "token", "train_dataloader_shuffle": true, "max_epochs": null, "aligner_lr": null, "vit_lr": null, "optimizer": null, "use_logits_to_keep": null, "channels": null, "metric_warmup_step": 0, "fsdp_num": 1, "acc_steps": 1, "eval_use_evalscope": false, "eval_datasets": [], "eval_limit": null, "eval_datasets_args": null, "eval_generation_config": null, "model": "/root/code/new_work_code/HI-TransPA/Qwen2.5-Omni-3B", "model_type": "qwen2_5_omni", "model_revision": null, "task_type": "causal_lm", "torch_dtype": "bfloat16", "attn_impl": "flash_attn", "num_labels": null, "problem_type": null, "rope_scaling": null, "device_map": { "thinker.model.embed_tokens": null, "thinker.visual.patch_embed": "cuda:0", "thinker.visual.rotary_pos_emb": "cuda:0", "thinker.visual.blocks.0": "cuda:1", "thinker.visual.blocks.1": "cuda:1", "thinker.visual.blocks.2": "cuda:1", "thinker.visual.blocks.3": "cuda:1", "thinker.visual.blocks.4": "cuda:1", "thinker.visual.blocks.5": "cuda:1", "thinker.visual.blocks.6": "cuda:1", "thinker.visual.blocks.7": "cuda:1", "thinker.visual.blocks.8": "cuda:0", "thinker.visual.blocks.9": "cuda:0", "thinker.visual.blocks.10": "cuda:0", "thinker.visual.blocks.11": "cuda:0", "thinker.visual.blocks.12": "cuda:0", "thinker.visual.blocks.13": "cuda:0", "thinker.visual.blocks.14": "cuda:0", "thinker.visual.blocks.15": "cuda:0", "thinker.visual.blocks.16": "cuda:0", "thinker.visual.blocks.17": "cuda:0", "thinker.visual.blocks.18": "cuda:0", "thinker.visual.blocks.19": "cuda:0", "thinker.visual.blocks.20": "cuda:0", "thinker.visual.blocks.21": "cuda:0", "thinker.visual.blocks.22": "cuda:0", "thinker.visual.blocks.23": "cuda:0", "thinker.visual.blocks.24": "cuda:0", "thinker.visual.blocks.25": "cuda:0", "thinker.visual.blocks.26": "cuda:0", "thinker.visual.blocks.27": "cuda:0", "thinker.visual.blocks.28": "cuda:0", "thinker.visual.blocks.29": "cuda:0", "thinker.visual.blocks.30": "cuda:0", "thinker.visual.blocks.31": "cuda:0", "thinker.visual.merger": "cuda:0", "thinker.model.rotary_emb": "cuda:1", "thinker.model.layers.0": "cuda:1", "thinker.model.layers.1": "cuda:1", "thinker.model.layers.2": "cuda:1", "thinker.model.layers.3": "cuda:1", "thinker.model.layers.4": "cuda:1", "thinker.model.layers.5": "cuda:1", "thinker.model.layers.6": "cuda:1", "thinker.model.layers.7": "cuda:1", "thinker.model.layers.8": "cuda:1", "thinker.model.layers.9": "cuda:1", "thinker.model.layers.10": "cuda:1", "thinker.model.layers.11": "cuda:1", "thinker.model.layers.12": "cuda:1", "thinker.model.layers.13": "cuda:1", "thinker.model.layers.14": "cuda:1", "thinker.model.layers.15": "cuda:1", "thinker.model.layers.16": "cuda:1", "thinker.model.layers.17": "cuda:1", "thinker.model.layers.18": "cuda:1", "thinker.model.layers.19": "cuda:1", "thinker.model.layers.20": "cuda:1", "thinker.model.layers.21": "cuda:1", "thinker.model.layers.22": "cuda:1", "thinker.model.layers.23": "cuda:1", "thinker.model.layers.24": 
"cuda:1", "thinker.model.layers.25": "cuda:1", "thinker.model.layers.26": "cuda:1", "thinker.model.layers.27": "cuda:1", "thinker.model.layers.28": "cuda:1", "thinker.model.layers.29": "cuda:1", "thinker.model.layers.30": "cuda:1", "thinker.model.layers.31": "cuda:1", "thinker.model.layers.32": "cuda:1", "thinker.model.layers.33": "cuda:1", "thinker.model.layers.34": "cuda:1", "thinker.model.layers.35": "cuda:1", "thinker.model.norm": "cuda:1", "thinker.lm_head": "cuda:1", "thinker.audio_tower": "cuda:1", "talker": "cuda:1", "token2wav": "cuda:1" }, "max_memory": {}, "local_repo_path": null, "init_strategy": null, "template": "qwen2_5_omni", "system": "# Role: 听障翻译小助手\n## Profile\n- description: 你是一位专为听障人士设计的多功能翻译助手,能通过分析视频中的人脸口型与音频信息,将含糊不清的口语翻译为清晰易懂的文字,还能对听障视频的含义进行解释,并与听障人士进行对话交流,确保人类读者能准确理解,翻译精准度需达到 95%。\n## Skills\n1. 能够处理不标准或含糊的口语输入。\n2. 能结合视频中的人脸口型与音频数据进行多模态语音识别。\n3. 熟悉中文自然语言表达方式,翻译输出必须自然流畅。\n4. 遇到模糊部分可合理补全,确保语义完整。\n5. 能准确解释听障视频中的含义和情感表达。\n6. 能与听障人士进行友好、有效的对话交流。\n## Background:\n听障人士在进行口语表达时,可能因发音不清而影响他人理解,通过结合视频中面部口型与音频,可以更有效地捕捉其表达意图,从而转化为可读文本。同时,在交流互动中,能帮助听障人士更好地理解信息和表达自己。\n## Goals:\n帮助用户将/translate命令触发的模糊语音内容翻译为自然语言文字,并通过/chat命令解释听障视频的含义或与听障人士进行对话交流。\n## OutputFormat:\n接收用户提供的视频片段(含音频与人脸)或聊天对话指令,返回对应的自然语言文本内容。\n## Rules\n1. 输出的翻译和对话内容必须为自然、完整、通顺的中文句子。\n2. 遇到音频模糊时优先结合口型信息判断语义。\n3. 仅在用户输入指令/translate时进行翻译,在用户输入指令/chat时进行对话及含义解释。\n## Workflows\n1. **翻译工作流(/translate)** :接收到用户指令/translate后,解析视频中的人脸口型与音频内容。识别并还原模糊口语的真实语义。组织为自然中文文本,确保可读性和理解度。\n2. **对话及含义解释工作流(/chat)** :接收到用户指令/chat后,若用户提供了含有口型与音频的视频片段,先分析视频内容,提取关键信息,结合上下文解释视频的含义,包括说话人的意图、情感等,并以自然流畅的中文表达出来;若用户直接进行对话,理解用户意图,用友好、通俗易懂的语言与听障人士或普通用户进行交流,确保沟通顺畅。\n## Init\n欢迎使用听障翻译小助手。请通过输入/translate命令并上传含有口型与音频的视频,我们将为您准确还原说话内容,帮助更清晰地沟通;您也可以输入/chat命令,与我们交流或让我们为您解读听障视频的深层含义。", "max_length": 8192, "truncation_strategy": "delete", "max_pixels": null, "agent_template": null, "norm_bbox": null, "use_chat_template": true, "padding_free": false, "padding_side": "right", "loss_scale": "default", "sequence_parallel_size": 1, "response_prefix": null, "template_backend": "swift", "dataset": [ "/root/code/new_work_code/HI-TransPA/swfit_workdir/fresh-little-lemon-workspace/data/sft-dataset-config/teach_pinyin_chat_trans_merged-huotun-merge-sft-regex-audio-0629.jsonl" ], "val_dataset": [ "/root/code/new_work_code/HI-TransPA/swfit_workdir/fresh-little-lemon-workspace/data/sft-dataset-config/huotun-merge-val-regex-audio.jsonl" ], "split_dataset_ratio": 0.0, "dataset_num_proc": 16, "load_from_cache_file": true, "dataset_shuffle": true, "val_dataset_shuffle": false, "streaming": false, "interleave_prob": null, "stopping_strategy": "first_exhausted", "shuffle_buffer_size": 1000, "download_mode": "reuse_dataset_if_exists", "columns": {}, "strict": false, "model_name": [ "HI-TransPA-0628" ], "model_author": [ "FreshLittleLemon" ], "custom_dataset_info": [], "quant_method": null, "quant_bits": null, "hqq_axis": null, "bnb_4bit_compute_dtype": "bfloat16", "bnb_4bit_quant_type": "nf4", "bnb_4bit_use_double_quant": true, "bnb_4bit_quant_storage": null, "max_new_tokens": 64, "temperature": 0.0, "top_k": null, "top_p": null, "repetition_penalty": null, "num_beams": 1, "stream": false, "stop_words": [], "logprobs": false, "top_logprobs": null, "ckpt_dir": null, "lora_modules": [], "tuner_backend": "peft", "train_type": "lora", "adapters": [], "external_plugins": [], "model_kwargs": {}, "load_args": false, "load_data_args": false, "packing": false, "packing_cache": null, "custom_register_path": [], "use_hf": false, "ignore_args_error": false, "use_swift_lora": false, 
"freeze_parameters": [ "thinker.audio_tower", "thinker.visual", "thinker.audio_tower.proj", "thinker.visual.merger", "talker", "token2wav" ], "freeze_parameters_regex": null, "freeze_parameters_ratio": 0.0, "trainable_parameters": [], "trainable_parameters_regex": null, "freeze_llm": false, "freeze_vit": true, "freeze_aligner": true, "target_modules": "(thinker\\.model\\.layers\\.\\d+\\.(self_attn\\.(k_proj|v_proj|o_proj)|mlp\\.(gate_proj|up_proj|down_proj))$|thinker\\.model\\.(embed_tokens|norm)$|thinker\\.lm_head$|thinker\\.audio_tower\\.(conv1|conv2|audio_bos_eos_token|layers\\.\\d+\\.(self_attn\\.(k_proj|v_proj|q_proj|out_proj)|self_attn_layer_norm|fc1|fc2|final_layer_norm)|ln_post|proj)$)", "target_regex": "(thinker\\.model\\.layers\\.\\d+\\.(self_attn\\.(k_proj|v_proj|o_proj)|mlp\\.(gate_proj|up_proj|down_proj))$|thinker\\.model\\.(embed_tokens|norm)$|thinker\\.lm_head$|thinker\\.audio_tower\\.(conv1|conv2|audio_bos_eos_token|layers\\.\\d+\\.(self_attn\\.(k_proj|v_proj|q_proj|out_proj)|self_attn_layer_norm|fc1|fc2|final_layer_norm)|ln_post|proj)$)", "modules_to_save": [], "lora_rank": 64, "lora_alpha": 128, "lora_dropout": 0.01, "lora_bias": "none", "lora_dtype": null, "lorap_lr_ratio": null, "use_rslora": false, "use_dora": false, "lora_ga_batch_size": 2, "lora_ga_iters": 2, "lora_ga_max_length": 1024, "lora_ga_direction": "ArB2r", "lora_ga_scale": "stable", "lora_ga_stable_gamma": 16, "init_weights": true, "fourier_n_frequency": 2000, "fourier_scaling": 300.0, "boft_block_size": 4, "boft_block_num": 0, "boft_n_butterfly_factor": 1, "boft_dropout": 0.0, "vera_rank": 256, "vera_projection_prng_key": 0, "vera_dropout": 0.0, "vera_d_initial": 0.1, "adapter_act": "gelu", "adapter_length": 128, "use_galore": false, "galore_target_modules": null, "galore_rank": 128, "galore_update_proj_gap": 50, "galore_scale": 1.0, "galore_proj_type": "std", "galore_optim_per_parameter": false, "galore_with_embedding": false, "galore_quantization": false, "galore_proj_quant": false, "galore_proj_bits": 4, "galore_proj_group_size": 256, "galore_cos_threshold": 0.4, "galore_gamma_proj": 2, "galore_queue_size": 5, "adalora_target_r": 8, "adalora_init_r": 12, "adalora_tinit": 0, "adalora_tfinal": 0, "adalora_deltaT": 1, "adalora_beta1": 0.85, "adalora_beta2": 0.85, "adalora_orth_reg_weight": 0.5, "llamapro_num_new_blocks": 4, "llamapro_num_groups": null, "lisa_activated_layers": 0, "lisa_step_interval": 20, "reft_layer_key": null, "reft_layers": null, "reft_rank": 4, "reft_intervention_type": "LoreftIntervention", "reft_args": null, "swanlab_token": null, "swanlab_project": "HI-TransPA", "swanlab_workspace": null, "swanlab_exp_name": "/root/code/new_work_code/HI-TransPA/swfit_workdir/fresh-little-lemon-workspace/swift_config/swift_output/AUDIO-SFT-TEACH_CHAT_TRANS_AUDIO-Qwen2.5-Omni-3B-lora/v2-20250629-114113", "swanlab_mode": "cloud", "add_version": true, "resume_only_model": false, "create_checkpoint_symlink": false, "lazy_tokenize": true, "loss_type": null, "metric": "acc", "zero_hpz_partition_size": null, "rank": -1, "global_world_size": 1, "local_world_size": 1, "model_suffix": "Qwen2.5-Omni-3B", "model_info": "ModelInfo(model_type='qwen2_5_omni', model_dir='/root/code/new_work_code/HI-TransPA/Qwen2.5-Omni-3B', torch_dtype=torch.bfloat16, max_model_len=32768, quant_method=None, quant_bits=None, rope_scaling={'mrope_section': [16, 24, 24], 'rope_type': 'default', 'type': 'default'}, config=None, task_type='causal_lm', num_labels=None)", "model_meta": "ModelMeta(model_type='qwen2_5_omni', 
model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-Omni-3B', hf_model_id='Qwen/Qwen2.5-Omni-3B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Omni-7B', hf_model_id='Qwen/Qwen2.5-Omni-7B', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='qwen2_5_omni', get_function=, model_arch='qwen2_5_omni', architectures=['Qwen2_5OmniModel', 'Qwen2_5OmniForConditionalGeneration'], additional_saved_files=['spk_dict.pt'], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=['*.bin', '*.safetensors'], requires=['transformers>=4.50', 'soundfile', 'qwen_omni_utils', 'decord'], tags=[])", "model_dir": "/root/code/new_work_code/HI-TransPA/Qwen2.5-Omni-3B", "hub": "", "evaluation_strategy": "steps", "training_args": "Seq2SeqTrainingArguments(output_dir='/root/code/new_work_code/HI-TransPA/swfit_workdir/fresh-little-lemon-workspace/swift_config/swift_output/AUDIO-SFT-TEACH_CHAT_TRANS_AUDIO-Qwen2.5-Omni-3B-lora/v2-20250629-114113', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy='steps', prediction_loss_only=False, per_device_train_batch_size=4, per_device_eval_batch_size=4, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=5e-05, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=5.0, max_steps=-1, lr_scheduler_type='cosine', lr_scheduler_kwargs=None, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/root/code/new_work_code/HI-TransPA/swfit_workdir/fresh-little-lemon-workspace/swift_config/swift_output/AUDIO-SFT-TEACH_CHAT_TRANS_AUDIO-Qwen2.5-Omni-3B-lora/v2-20250629-114113/runs', logging_strategy='steps', logging_first_step=True, logging_steps=1, logging_nan_inf_filter=True, save_strategy='steps', save_steps=50, save_total_limit=1000, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=50, dataloader_num_workers=16, dataloader_prefetch_factor=10, past_index=-1, run_name='/root/code/new_work_code/HI-TransPA/swfit_workdir/fresh-little-lemon-workspace/swift_config/swift_output/AUDIO-SFT-TEACH_CHAT_TRANS_AUDIO-Qwen2.5-Omni-3B-lora/v2-20250629-114113', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed=None, label_smoothing_factor=0.0, optim='adamw_torch', optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['swanlab'], 
ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy='every_save', hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs={'use_reentrant': False}, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=18000000, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, vit_gradient_checkpointing=True, check_model=True, acc_strategy='token', train_dataloader_shuffle=True, max_epochs=None, aligner_lr=None, vit_lr=None, optimizer=None, use_logits_to_keep=None, channels=None, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_datasets=[], eval_limit=None, eval_datasets_args=None, eval_generation_config=None, train_type='lora', local_repo_path=None, galore_config=None)" }
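
For quick inspection of a dump like the one above, the minimal sketch below (plain Python, standard library only) loads the exported args.json, reports the key hyperparameters and the effective batch size per optimizer step (per_device_train_batch_size × gradient_accumulation_steps × world size), and then checks which module names the stored LoRA `target_regex` accepts. The file path `args.json`, the sample module names, and the use of `re.match` are assumptions for illustration; ms-swift typically writes args.json into `output_dir`, and its internal matching logic may differ slightly.

```python
import json
import re

# Assumption: the JSON above has been saved locally as "args.json"
# (ms-swift normally writes it into output_dir); adjust the path as needed.
with open("args.json", "r", encoding="utf-8") as f:
    args = json.load(f)

# Effective batch size per optimizer step:
# per_device_train_batch_size x gradient_accumulation_steps x world size.
world_size = max(args.get("global_world_size", 1), 1)
effective_bs = (args["per_device_train_batch_size"]
                * args["gradient_accumulation_steps"]
                * world_size)

print(f"learning_rate       : {args['learning_rate']}")
print(f"lora_rank / alpha   : {args['lora_rank']} / {args['lora_alpha']}")
print(f"effective batch size: {effective_bs}")

# Rough check of which module names the stored target_regex accepts.
# Each alternative is anchored with $, so a match from the start of the
# name that also consumes the full name decides adapter placement.
pattern = re.compile(args["target_regex"])
samples = [  # hypothetical module names, for illustration only
    "thinker.model.layers.0.self_attn.k_proj",
    "thinker.model.layers.0.self_attn.q_proj",    # not in the LLM branch
    "thinker.audio_tower.layers.3.self_attn.q_proj",
    "thinker.visual.blocks.5.attn.qkv",           # visual tower is frozen
    "thinker.lm_head",
]
for name in samples:
    tag = "LoRA" if pattern.match(name) else "skip"
    print(f"{tag:4s}  {name}")
```

With this run's values (batch size 4, accumulation 2, a single process), the effective batch size works out to 8. Note also that the LLM branch of the regex targets only the k/v/o projections and the MLP, while the audio-tower branch additionally includes q_proj; the visual tower, talker, and token2wav modules are listed under freeze_parameters and receive no adapters.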