echarlaix (HF Staff) committed (verified)
Commit 272b8aa · Parent(s): d942c52

Upload openvino_text_embeddings_model.xml with huggingface_hub
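For context, files like this are typically pushed with the huggingface_hub client. A minimal sketch, assuming the hypothetical repository id "user/model-repo" (the actual repository is not named in this view):

from huggingface_hub import HfApi

api = HfApi()
# Upload the exported IR file; "user/model-repo" is a placeholder repo_id,
# not taken from this commit.
api.upload_file(
    path_or_fileobj="openvino_text_embeddings_model.xml",
    path_in_repo="openvino_text_embeddings_model.xml",
    repo_id="user/model-repo",
    commit_message="Upload openvino_text_embeddings_model.xml with huggingface_hub",
)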

Files changed (1)
  1. openvino_text_embeddings_model.xml +222 -0
openvino_text_embeddings_model.xml ADDED
@@ -0,0 +1,222 @@
+ <?xml version="1.0"?>
+ <net name="Model199" version="11">
+     <layers>
+         <layer id="0" name="input" type="Parameter" version="opset1">
+             <data shape="?,?" element_type="i64" />
+             <output>
+                 <port id="0" precision="I64" names="input">
+                     <dim>-1</dim>
+                     <dim>-1</dim>
+                 </port>
+             </output>
+         </layer>
+         <layer id="1" name="self.weight" type="Const" version="opset1">
+             <data element_type="u8" shape="49280, 960" offset="0" size="47308800" />
+             <output>
+                 <port id="0" precision="U8">
+                     <dim>49280</dim>
+                     <dim>960</dim>
+                 </port>
+             </output>
+         </layer>
+         <layer id="2" name="Convert_834939" type="Convert" version="opset1">
+             <data destination_type="f16" />
+             <input>
+                 <port id="0" precision="U8">
+                     <dim>49280</dim>
+                     <dim>960</dim>
+                 </port>
+             </input>
+             <output>
+                 <port id="1" precision="FP16">
+                     <dim>49280</dim>
+                     <dim>960</dim>
+                 </port>
+             </output>
+         </layer>
+         <layer id="3" name="self.weight/zero_point" type="Const" version="opset1">
+             <data element_type="u8" shape="49280, 1" offset="47308800" size="49280" />
+             <output>
+                 <port id="0" precision="U8">
+                     <dim>49280</dim>
+                     <dim>1</dim>
+                 </port>
+             </output>
+         </layer>
+         <layer id="4" name="Convert_834942" type="Convert" version="opset1">
+             <data destination_type="f16" />
+             <input>
+                 <port id="0" precision="U8">
+                     <dim>49280</dim>
+                     <dim>1</dim>
+                 </port>
+             </input>
+             <output>
+                 <port id="1" precision="FP16">
+                     <dim>49280</dim>
+                     <dim>1</dim>
+                 </port>
+             </output>
+         </layer>
+         <layer id="5" name="self.weight/zero_point/subtract" type="Subtract" version="opset1">
+             <data auto_broadcast="numpy" />
+             <input>
+                 <port id="0" precision="FP16">
+                     <dim>49280</dim>
+                     <dim>960</dim>
+                 </port>
+                 <port id="1" precision="FP16">
+                     <dim>49280</dim>
+                     <dim>1</dim>
+                 </port>
+             </input>
+             <output>
+                 <port id="2" precision="FP16">
+                     <dim>49280</dim>
+                     <dim>960</dim>
+                 </port>
+             </output>
+         </layer>
+         <layer id="6" name="self.weight/scale" type="Const" version="opset1">
+             <data element_type="f16" shape="49280, 1" offset="47358080" size="98560" />
+             <output>
+                 <port id="0" precision="FP16">
+                     <dim>49280</dim>
+                     <dim>1</dim>
+                 </port>
+             </output>
+         </layer>
+         <layer id="7" name="self.weight/fq_weights_0" type="Multiply" version="opset1">
+             <data auto_broadcast="numpy" />
+             <input>
+                 <port id="0" precision="FP16">
+                     <dim>49280</dim>
+                     <dim>960</dim>
+                 </port>
+                 <port id="1" precision="FP16">
+                     <dim>49280</dim>
+                     <dim>1</dim>
+                 </port>
+             </input>
+             <output>
+                 <port id="2" precision="FP16">
+                     <dim>49280</dim>
+                     <dim>960</dim>
+                 </port>
+             </output>
+         </layer>
+         <layer id="8" name="self.weight/fq_weights_0/convert" type="Convert" version="opset1">
+             <data destination_type="f32" />
+             <input>
+                 <port id="0" precision="FP16">
+                     <dim>49280</dim>
+                     <dim>960</dim>
+                 </port>
+             </input>
+             <output>
+                 <port id="1" precision="FP32">
+                     <dim>49280</dim>
+                     <dim>960</dim>
+                 </port>
+             </output>
+         </layer>
+         <layer id="9" name="aten::embedding/Convert" type="Convert" version="opset1">
+             <data destination_type="i32" />
+             <input>
+                 <port id="0" precision="I64">
+                     <dim>-1</dim>
+                     <dim>-1</dim>
+                 </port>
+             </input>
+             <output>
+                 <port id="1" precision="I32">
+                     <dim>-1</dim>
+                     <dim>-1</dim>
+                 </port>
+             </output>
+         </layer>
+         <layer id="10" name="aten::embedding/Constant" type="Const" version="opset1">
+             <data element_type="i32" shape="" offset="47456640" size="4" />
+             <output>
+                 <port id="0" precision="I32" />
+             </output>
+         </layer>
+         <layer id="11" name="aten::embedding/Gather" type="Gather" version="opset8">
+             <data batch_dims="0" />
+             <input>
+                 <port id="0" precision="FP32">
+                     <dim>49280</dim>
+                     <dim>960</dim>
+                 </port>
+                 <port id="1" precision="I32">
+                     <dim>-1</dim>
+                     <dim>-1</dim>
+                 </port>
+                 <port id="2" precision="I32" />
+             </input>
+             <output>
+                 <port id="3" precision="FP32" names="inputs_embeds">
+                     <dim>-1</dim>
+                     <dim>-1</dim>
+                     <dim>960</dim>
+                 </port>
+             </output>
+         </layer>
+         <layer id="12" name="Result_526194" type="Result" version="opset1" output_names="inputs_embeds">
+             <input>
+                 <port id="0" precision="FP32">
+                     <dim>-1</dim>
+                     <dim>-1</dim>
+                     <dim>960</dim>
+                 </port>
+             </input>
+         </layer>
+     </layers>
+     <edges>
+         <edge from-layer="0" from-port="0" to-layer="9" to-port="0" />
+         <edge from-layer="1" from-port="0" to-layer="2" to-port="0" />
+         <edge from-layer="2" from-port="1" to-layer="5" to-port="0" />
+         <edge from-layer="3" from-port="0" to-layer="4" to-port="0" />
+         <edge from-layer="4" from-port="1" to-layer="5" to-port="1" />
+         <edge from-layer="5" from-port="2" to-layer="7" to-port="0" />
+         <edge from-layer="6" from-port="0" to-layer="7" to-port="1" />
+         <edge from-layer="7" from-port="2" to-layer="8" to-port="0" />
+         <edge from-layer="8" from-port="1" to-layer="11" to-port="0" />
+         <edge from-layer="9" from-port="1" to-layer="11" to-port="1" />
+         <edge from-layer="10" from-port="0" to-layer="11" to-port="2" />
+         <edge from-layer="11" from-port="3" to-layer="12" to-port="0" />
+     </edges>
+     <rt_info>
+         <Runtime_version value="2025.2.0-19140-c01cd93e24d-releases/2025/2" />
+         <conversion_parameters>
+             <framework value="pytorch" />
+             <is_python_object value="True" />
+         </conversion_parameters>
+         <nncf>
+             <friendly_names_were_updated value="True" />
+             <version value="2.17.0" />
+             <weight_compression>
+                 <advanced_parameters value="{'statistics_path': None, 'awq_params': {'subset_size': 32, 'percent_to_apply': 0.002, 'alpha_min': 0.0, 'alpha_max': 1.0, 'steps': 100, 'prefer_data_aware_scaling': True}, 'scale_estimation_params': {'subset_size': 64, 'initial_steps': 5, 'scale_steps': 5, 'weight_penalty': -1.0}, 'gptq_params': {'damp_percent': 0.1, 'block_size': 128, 'subset_size': 128}, 'lora_correction_params': {'adapter_rank': 8, 'num_iterations': 3, 'apply_regularization': True, 'subset_size': 128, 'use_int8_adapters': True}, 'lora_adapter_rank': 256, 'backend_params': {}}" />
+                 <all_layers value="False" />
+                 <awq value="False" />
+                 <backup_mode value="int8_asym" />
+                 <compression_format value="dequantize" />
+                 <gptq value="False" />
+                 <group_size value="-1" />
+                 <ignored_scope value="[]" />
+                 <lora_correction value="False" />
+                 <mode value="int8_asym" />
+                 <ratio value="1.0" />
+                 <scale_estimation value="False" />
+                 <sensitivity_metric value="weight_quantization_error" />
+             </weight_compression>
+         </nncf>
+         <optimum>
+             <nncf_version value="2.17.0" />
+             <optimum_intel_version value="1.24.0" />
+             <optimum_version value="1.26.1" />
+             <pytorch_version value="2.7.1" />
+             <transformers_version value="4.52.4" />
+         </optimum>
+     </rt_info>
+ </net>
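The IR above is a single int8-compressed embedding lookup: layers 1-8 dequantize the u8 weight table row-wise as (w_u8 - zero_point) * scale (NNCF's int8_asym mode, as recorded in the rt_info block), and the opset8 Gather then selects rows by token id, producing the FP32 "inputs_embeds" output of shape [batch, seq_len, 960]. A minimal sketch of running this file with the OpenVINO Python API, assuming the companion .bin weights file sits next to the .xml; the token ids below are illustrative:

import numpy as np
import openvino as ov

core = ov.Core()
# read_model resolves the compressed weights from the .bin file that
# accompanies the .xml (openvino_text_embeddings_model.bin).
model = core.read_model("openvino_text_embeddings_model.xml")
compiled = core.compile_model(model, "CPU")

# Layer 0 declares one i64 input named "input" with dynamic shape [batch, seq_len].
token_ids = np.array([[101, 2023, 102]], dtype=np.int64)  # illustrative ids
result = compiled({"input": token_ids})

# The Gather output "inputs_embeds" indexes the dequantized table,
# (w_u8 - zero_point) * scale, along axis 0 by token id.
embeds = result[compiled.output("inputs_embeds")]
print(embeds.shape)  # (1, 3, 960)

Note that the dequantization subgraph only reconstructs the embedding table; the runtime can fold it at compile time, so the stored file stays at the compressed u8 size (about 47 MB for the 49280 x 960 table) while inference sees ordinary FP32 embeddings.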