Update README.md
Browse files
README.md
CHANGED
@@ -44,11 +44,16 @@ base_model_relation: finetune
|
|
44 |
> You can find these codes in the repository.
|
45 |
|
46 |
```python
|
47 |
- >>> from transformers import AutoModelForCausalLM, AutoProcessor
|
48 |
>>> model = AutoModelForCausalLM.from_pretrained("zhoukz/MiDashengLM-HF-dev", trust_remote_code=True)
|
49 |
>>> model.eval()
|
50 |
>>> processor = AutoProcessor.from_pretrained("zhoukz/MiDashengLM-HF-dev", trust_remote_code=True)
|
51 |
|
52 |
>>> messages = [
|
53 |
... {
|
54 |
... "role": "system",
|
@@ -78,12 +83,49 @@ base_model_relation: finetune
|
|
78 |
... return_dict=True,
|
79 |
... )
|
80 |
... generation = model.generate(**model_inputs)
|
81 |
- ... output =
|
82 |
|
83 |
>>> print(output)
|
84 |
["An engine is idling."]
|
85 |
```
|
86 |
|
87 |
## Citation
|
88 |
|
89 |
```bibtex
|
|
|
44 |
> You can find these codes in the repository.
|
45 |
|
46 |
```python
|
47 |
+ >>> from transformers import AutoModelForCausalLM, AutoProcessor, AutoTokenizer
|
48 |
>>> model = AutoModelForCausalLM.from_pretrained("zhoukz/MiDashengLM-HF-dev", trust_remote_code=True)
|
49 |
>>> model.eval()
|
50 |
>>> processor = AutoProcessor.from_pretrained("zhoukz/MiDashengLM-HF-dev", trust_remote_code=True)
|
51 |
|
52 |
+ >>> tokenizer = AutoTokenizer.from_pretrained("mispeech/MiDashengLM-HF-dev")
|
53 |
+ >>> processor = AutoProcessor.from_pretrained("mispeech/MiDashengLM-HF-dev", trust_remote_code=True)
|
54 |
+ >>> model = AutoModelForCausalLM.from_pretrained("mispeech/MiDashengLM-HF-dev", trust_remote_code=True)
|
55 |
+ >>> model.eval()
|
56 |
+
|
57 |
>>> messages = [
|
58 |
... {
|
59 |
... "role": "system",
|
|
|
83 |
... return_dict=True,
|
84 |
... )
|
85 |
... generation = model.generate(**model_inputs)
|
86 |
+ ... output = tokenizer.batch_decode(generation, skip_special_tokens=True)
|
87 |
|
88 |
>>> print(output)
|
89 |
["An engine is idling."]
|
90 |
```
|
91 |
|
92 |
+ [`processor.apply_chat_template`] accepts audio inputs specified in various ways, including file paths, URLs, and `np.ndarray`:
|
93 |
+
|
94 |
+ [`processor.apply_chat_template`]: https://huggingface.co/docs/transformers/v4.53.1/en/main_classes/processors#transformers.ProcessorMixin.apply_chat_template
|
95 |
+
|
96 |
+ ```python
|
97 |
+ >>> messages_by_path = [
|
98 |
+ ... {
|
99 |
+ ... "role": "user",
|
100 |
+ ... "content": [
|
101 |
+ ... {"type": "text", "text": "Caption the audio."},
|
102 |
+ ... {"type": "audio", "path": "/path/to/audio.wav"},
|
103 |
+ ... ],
|
104 |
+ ... },
|
105 |
+ ... ]
|
106 |
+
|
107 |
+ >>> messages_by_url = [
|
108 |
+ ... {
|
109 |
+ ... "role": "user",
|
110 |
+ ... "content": [
|
111 |
+ ... {"type": "text", "text": "Caption the audio."},
|
112 |
+ ... {"type": "audio", "url": "https://example.com/audio.wav"},
|
113 |
+ ... ],
|
114 |
+ ... },
|
115 |
+ ... ]
|
116 |
+
|
117 |
+ >>> import numpy as np
|
118 |
+ >>> messages_by_data = [
|
119 |
+ ... {
|
120 |
+ ... "role": "user",
|
121 |
+ ... "content": [
|
122 |
+ ... {"type": "text", "text": "Caption the audio."},
|
123 |
+ ... {"type": "audio", "audio": np.random.randn(16000)},
|
124 |
+ ... ],
|
125 |
+ ... },
|
126 |
+ ... ]
|
127 |
+ ```
|
128 |
+
|
129 |
## Citation
|
130 |
|
131 |
```bibtex
|