Add files using upload-large-folder tool
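For reference, an upload of this size is typically driven from Python with huggingface_hub. The snippet below is only a sketch of such a call, not the exact command used for this commit; the repository id and local folder path are placeholders, and it assumes a recent huggingface_hub release that ships HfApi.upload_large_folder.

# Sketch of an upload-large-folder call (assumed huggingface_hub >= 0.25).
# The repo id and folder path are placeholders, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()  # assumes authentication via `huggingface-cli login` or HF_TOKEN
api.upload_large_folder(
    repo_id="your-org/step3-vl",           # placeholder repository id
    repo_type="model",
    folder_path="./step3-vl-checkpoint",   # placeholder local folder containing the files below
)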
This view is limited to 50 files because it contains too many changes.
- chat_template.json +3 -0
- config.json +53 -0
- configuration_step3.py +120 -0
- generation_config.json +8 -0
- model-00001.safetensors +3 -0
- model-00002.safetensors +3 -0
- model-00003.safetensors +3 -0
- model-00004.safetensors +3 -0
- model-00005.safetensors +3 -0
- model-00006.safetensors +3 -0
- model-00007.safetensors +3 -0
- model-00008.safetensors +3 -0
- model-00009.safetensors +3 -0
- model-00010.safetensors +3 -0
- model-00011.safetensors +3 -0
- model-00012.safetensors +3 -0
- model-00013.safetensors +3 -0
- model-00014.safetensors +3 -0
- model-00015.safetensors +3 -0
- model-00016.safetensors +3 -0
- model-00017.safetensors +3 -0
- model-00018.safetensors +3 -0
- model-00019.safetensors +3 -0
- model-00020.safetensors +3 -0
- model-00021.safetensors +3 -0
- model-00022.safetensors +3 -0
- model-00023.safetensors +3 -0
- model-00024.safetensors +3 -0
- model-00025.safetensors +3 -0
- model-00026.safetensors +3 -0
- model-00027.safetensors +3 -0
- model-00028.safetensors +3 -0
- model-00029.safetensors +3 -0
- model-00030.safetensors +3 -0
- model-00031.safetensors +3 -0
- model-00032.safetensors +3 -0
- model-00033.safetensors +3 -0
- model-00034.safetensors +3 -0
- model-00035.safetensors +3 -0
- model-00036.safetensors +3 -0
- model-00037.safetensors +3 -0
- model-00038.safetensors +3 -0
- model-00039.safetensors +3 -0
- model-00040.safetensors +3 -0
- model-00041.safetensors +3 -0
- model-00042.safetensors +3 -0
- model-00043.safetensors +3 -0
- model-00044.safetensors +3 -0
- model-00045.safetensors +3 -0
- model-00046.safetensors +3 -0
chat_template.json
ADDED
@@ -0,0 +1,3 @@
{
"chat_template": "{% macro render_content(content) %} {% if content is string %}{{- content }}{% elif content is mapping %}{{- content['value'] if 'value' in content else content['text'] }}{% elif content is iterable %}{% for item in content %}{% if item.type == 'text' %}{{- item['value'] if 'value' in item else item['text'] }}{% elif item.type == 'image' %}<im_patch>{% endif %}{% endfor %}{% endif %} {% endmacro %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message.role == 'system' %}{{ render_content(message['content']) }}{% endif %}{% endfor %}{% if tools is defined and tools %}{% set ns = namespace(data='') %}{% for tool in tools %}{% set ns.data = ns.data + (tool | tojson(ensure_ascii=False)) + '\n' %}{% endfor %}{% set tool_schemas_var = ns.data %}# Tools \nYou may call one or more tools to assist with the user query. You are provided with tool schemas within <tools></tools> XML tags: <tools>{{ tool_schemas_var }}</tools> When making tool calls, use XML format to invoke tools and pass parameters: <|tool_calls_begin|>\n<|tool_call_begin|>\nfunction<|tool_sep|><steptml:invoke name=\"tool_name0\"><steptml:parameter name=\"parameter_name0\">[parameter value]</steptml:parameter>...</steptml:invoke><|tool_call_end|>\n<|tool_call_begin|>\nfunction<|tool_sep|><steptml:invoke name=\"tool_name1\"><steptml:parameter name=\"parameter_name1\">[parameter value]</steptml:parameter>...</steptml:invoke><|tool_call_end|>\n<|tool_calls_end|>\nNote: * You can invoke one or more tools in parallel. * Each tool call must be complete and self-contained within a single <steptml:toolcall></steptml:toolcall> block. {% endif %}{% for message in messages %}{% if message.role == 'tool_description' %}{{ render_content(message['content']) }}{% elif message.role == 'user' %}{{- '<|BOT|>' + message.role + '\\n' + render_content(message['content']) }}{{- '<|EOT|>' }}{% elif message.role == 'tool_response' %}<|tool_outputs_begin|>\n{% for tool_output in message['content'] %}<|tool_output_begin|>\n{{ render_content(tool_output) }}<|tool_output_end|>{% endfor %}\n<|tool_outputs_end|>\n{% else %}{{- '<|BOT|>' + message.role + '\n' }}{% if message['content'] is defined %}{{- render_content(message['content']) }}{% endif %}{% if message.tool_calls is defined %}<|tool_calls_begin|>\n{% for tool in message.tool_calls %}<|tool_call_begin>|>\n{{ tool['type'] }}<|tool_sep|>{{- '<steptml:invoke name=\"' + tool['function']['name'] + '\">' }}{% for name, param in tool['function']['arguments'].items() %} {{- '<steptml:parameter name=\"' + name + '\">' + param | string + '</steptml:parameter>' }}{% endfor %}</steptml:invoke><|tool_call_end|>\n{% endfor %}<|tool_calls_end|>\n{% endif %}<|EOT|>{% endif %}{% endfor %}{% if add_generation_prompt %}{{- '<|BOT|>assistant\n<think>\n' }}{% endif %}"
}
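The template above defines the <|BOT|>/<|EOT|> turn markers, the <im_patch> image placeholder, and the steptml tool-call syntax, and it appends the <|BOT|>assistant / <think> prefix when generation is requested. Below is a minimal rendering sketch; the repository id and messages are placeholders, and whether chat_template.json is picked up by the tokenizer or by a processor depends on the transformers version.

# Sketch: render the chat template with transformers' apply_chat_template.
# "your-org/step3-vl" is a placeholder repo id; the messages are illustrative only.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-org/step3-vl", trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Describe this image."},
]
# tokenize=False returns the rendered prompt string, including the
# generation prefix added by the template when add_generation_prompt=True.
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
print(prompt)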
config.json
ADDED
@@ -0,0 +1,53 @@
{
  "architectures": [
    "Step3VLForConditionalGeneration"
  ],
  "auto_map": {
    "AutoConfig": "configuration_step3.Step3VLConfig",
    "AutoModelForCausalLM": "modeling_step3.Step3vForConditionalGeneration"
  },
  "model_type": "step3_vl",
  "im_end_token": "<im_end>",
  "im_patch_token": "<im_patch>",
  "im_start_token": "<im_start>",
  "image_token_len": 169,
  "patch_token_len": 81,
  "understand_projector_stride": 2,
  "projector_bias": false,
  "image_token_id": 128001,
  "bos_token_id": 0,
  "eos_token_id": 128805,
  "text_config": {
    "architectures": [
      "Step3TextForCausalLM"
    ],
    "model_type": "step3_text",
    "hidden_size": 7168,
    "intermediate_size": 18432,
    "num_hidden_layers": 61,
    "max_seq_len": 65536,
    "max_position_embedding": 65536,
    "vocab_size": 128815,
    "torch_dtype": "bfloat16",
    "moe_layers_enum": "4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59",
    "num_attention_heads": 64,
    "num_attention_groups": 1,
    "head_dim": 256,
    "share_q_dim": 2048,
    "moe_num_experts": 48,
    "moe_top_k": 3,
    "moe_intermediate_size": 5120,
    "share_expert_dim": 5120,
    "norm_expert_weight": false,
    "rope_theta": 500000
  },
  "vision_config": {
    "hidden_size": 1792,
    "output_hidden_size": 4096,
    "image_size": 728,
    "intermediate_size": 15360,
    "num_attention_heads": 16,
    "num_hidden_layers": 63,
    "patch_size": 14
  }
}
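The auto_map above routes AutoConfig to the Step3VLConfig class shipped in configuration_step3.py, so the nested text and vision configs can be inspected without downloading the weight shards. A small sketch, using a placeholder repository id:

# Sketch: inspect the config only (no weights). "your-org/step3-vl" is a placeholder repo id.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("your-org/step3-vl", trust_remote_code=True)
print(cfg.model_type)                       # "step3_vl"
print(cfg.text_config.num_hidden_layers)    # 61 decoder layers
print(cfg.text_config.moe_num_experts,      # 48 experts,
      cfg.text_config.moe_top_k)            # top-3 routing per token
print(cfg.vision_config.image_size,         # 728-px images,
      cfg.vision_config.patch_size)         # 14-px patches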
configuration_step3.py
ADDED
@@ -0,0 +1,120 @@
from typing import Any, Optional, Union

from transformers.configuration_utils import PretrainedConfig


class Step3VisionEncoderConfig(PretrainedConfig):
    model_type = "step3_vision_encoder"

    def __init__(
        self,
        hidden_size=1792,
        intermediate_size=3072,
        output_hidden_size=4096,
        num_hidden_layers=63,
        num_attention_heads=16,
        num_channels=3,
        image_size=728,
        patch_size=14,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.output_hidden_size = output_hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        super().__init__(**kwargs)


class Step3TextConfig(PretrainedConfig):
    model_type = "step3_text"
    architectures = ["Step3TextForCausalLM"]

    def __init__(
        self,
        hidden_size: int = 7168,
        intermediate_size: int = 18432,
        num_attention_heads: int = 64,
        num_attention_groups: int = 1,
        num_hidden_layers: int = 61,
        max_seq_len: int = 65536,
        vocab_size: int = 128815,
        rms_norm_eps: float = 1e-5,
        moe_intermediate_size: int = 5120,
        moe_num_experts: int = 48,
        moe_top_k: int = 3,
        rope_theta: float = 500000,
        rope_scaling: Optional[dict[str, Any]] = None,
        max_position_embedding: int = 65536,
        share_expert_dim: int = 5120,
        share_q_dim: int = 2048,
        head_dim: int = 256,
        norm_expert_weight: bool = False,
        moe_layers_enum: tuple[int] = (4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                       15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
                                       25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
                                       35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
                                       45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
                                       55, 56, 57, 58, 59),
        **kwargs,
    ) -> None:
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.num_attention_groups = num_attention_groups
        self.num_hidden_layers = num_hidden_layers
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.rms_norm_eps = rms_norm_eps
        self.moe_intermediate_size = moe_intermediate_size
        self.moe_num_experts = moe_num_experts
        self.moe_top_k = moe_top_k
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.max_position_embedding = max_position_embedding
        self.share_expert_dim = share_expert_dim
        self.share_q_dim = share_q_dim
        self.head_dim = head_dim
        self.norm_expert_weight = norm_expert_weight
        self.moe_layers_enum = moe_layers_enum

        super().__init__(**kwargs)


class Step3VLConfig(PretrainedConfig):
    model_type = "step3_vl"

    def __init__(
        self,
        vision_config: Optional[Union[dict, Step3VisionEncoderConfig]] = None,
        text_config: Optional[Union[dict, Step3TextConfig]] = None,
        understand_projector_stride: int = 1,
        projector_bias: bool = True,
        image_token_id: int = 128001,
        **kwargs,
    ) -> None:
        if vision_config is None:
            vision_config = Step3VisionEncoderConfig()
        elif isinstance(vision_config, dict):
            vision_config = Step3VisionEncoderConfig(**vision_config)
        self.vision_config = vision_config

        if text_config is None:
            text_config = Step3TextConfig()
        elif isinstance(text_config, dict):
            text_config = Step3TextConfig(**text_config)
        self.text_config = text_config

        self.understand_projector_stride = understand_projector_stride
        self.projector_bias = projector_bias
        self.hidden_size = text_config.hidden_size
        self.image_token_id = image_token_id

        super().__init__(**kwargs)
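A quick sanity-check sketch for the classes above, assuming configuration_step3.py is importable from the working directory; the constructor arguments shown mirror values from config.json and are otherwise illustrative.

# Sketch: build a Step3VLConfig directly from the classes defined above.
# Assumes configuration_step3.py from this repository is on the local import path.
from configuration_step3 import Step3TextConfig, Step3VisionEncoderConfig, Step3VLConfig

config = Step3VLConfig(
    vision_config=Step3VisionEncoderConfig(image_size=728, patch_size=14),
    text_config=Step3TextConfig(moe_num_experts=48, moe_top_k=3),
    understand_projector_stride=2,   # value used in config.json (the class default is 1)
    projector_bias=False,            # value used in config.json (the class default is True)
)
assert config.hidden_size == config.text_config.hidden_size  # copied in __init__ above
print(config.to_json_string()[:200])  # PretrainedConfig serialization still works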
generation_config.json
ADDED
@@ -0,0 +1,8 @@
{
  "bos_token_id": 0,
  "eos_token_id": 128805,
  "temperature": 0.7,
  "top_p": 0.95,
  "do_sample": true,
  "transformers_version": "4.54.0"
}
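These defaults (sampling enabled, temperature 0.7, top-p 0.95) are what model.generate() picks up when no overrides are passed. A loading-and-generation sketch follows; the repository id is a placeholder, the text-only prompt is illustrative, and actually running it requires enough accelerator memory for the full model.

# Sketch: text-only generation using the sampling defaults above.
# "your-org/step3-vl" is a placeholder repo id; dtype/device handling is simplified.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "your-org/step3-vl"
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo, trust_remote_code=True, torch_dtype=torch.bfloat16, device_map="auto"
)

inputs = tokenizer("Hello, who are you?", return_tensors="pt").to(model.device)
# temperature=0.7, top_p=0.95, do_sample=True are read from generation_config.json
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))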
model-00001.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d116ae88ed10f56742606e2da7818e1533988297a3006cd6791182059a9b3226
size 9955092120

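Each shard entry here and below is a Git LFS pointer file: the repository records only the oid (a SHA-256 of the real file) and its size in bytes, while the actual tensors live in LFS storage. A small sketch for checking a locally downloaded shard against its recorded pointer; the local path is a placeholder, and the oid/size are taken from the first pointer above.

# Sketch: verify a downloaded shard against its LFS pointer (oid + size).
# The path is a placeholder for wherever the shard was downloaded to.
import hashlib
import os

path = "model-00001.safetensors"
expected_oid = "d116ae88ed10f56742606e2da7818e1533988297a3006cd6791182059a9b3226"
expected_size = 9955092120

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size
assert h.hexdigest() == expected_oid
print("shard matches its LFS pointer")

The remaining shards follow the same three-line pointer format.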
model-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:05e8bc428a78e1781d7eeaef0cf32a547d7a4189178f4e84675e3f7aa9c9790c
size 9957541368

model-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:35e2a0d26a1131a46a6df4deaf7337d8dbdbd9002809728b55c978b28960fadb
size 9966049896

model-00004.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a94ccd8c0c564e9df04a693208435f9b40736ef0e00d3d4c2891e2c7fe50c65e
size 9975131928

model-00005.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1f2012990f05f356635c3856aee2dbf1fa383ce641a9e0c1a547bb37cc4674d2
size 9921772592

model-00006.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:44a5a3f511975156d039cf531e42cfbd5ecbdffb8ff0d29b29d8c7d4ca6cf05b
size 7310672224

model-00007.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3f9242859c3de930b9d2535807edb1a253c6b4759f1160bf35b717d33ee66869
size 7046430952

model-00008.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9197980060dd760b08efde286c40e165ed0bdd8f4c9cfea7152b1c38260ec194
size 7046430952

model-00009.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:85e6923a6705304f11c9dd3c11255f1d688e01a4f51514189404463637a6b021
size 7046430960

model-00010.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:09c6aeba446a0f4a0c0fd9c47f712d60f9805ab251cb2790c0288d4275141145
size 7046430952

model-00011.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cebf56573462ed491301b008bc57d3b25f2665d9a7aaf6f6a2265bb108992244
size 7046430952

model-00012.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e839654a313b7d78cae825323f9a06f6d6c394fabfea353fa9e76c799339269
size 7046430960

model-00013.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c791af2fe41f79cda4e278dc891bdc2a0753b14b9afd475f27ff810d0f3965a4
size 7046430952

model-00014.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3380f3e2160f3733feddfdd94b94f75e9e85131a5797ba0e1bf4f2065c54a25f
size 7046430952

model-00015.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d8f522596a3bea36902c309ffaf864cd640cc275978f10c006a78a816bbbfa49
size 7046430960

model-00016.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:81cbf3a31597fc070cf9f13419a7df1f18949e0fc8086780fad4de390f0c5d32
size 7046430960

model-00017.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1bd37184a4d54b3e303eb0ea3b8d3f3e396adaffb993d49097edd5f2818912a6
size 7046430960

model-00018.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:543462296f44f57710405ee5df48dccdc7a28ec75170b6111b841d17a5fcdfd9
size 7046430960

model-00019.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:03bdba8a5242c255127bc887c2159ae76e947e8fa9eb9474736be166c4e98b6c
size 7046430960

model-00020.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f07cc863fc86cc9eb5d1bab06f593de41c5cd36a9231f4edc133c07ce1aefd74
size 7046430960

model-00021.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a8cac9d71d0ac36a460f3bf39da1f95d4335c02d9b6e9332e3042adbbfeba1a1
size 7046430960

model-00022.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a5db4f8395f0a5c71b26a227b04ad5a663b31e178a6da1ede1a243fc8c418e25
size 7046430960

model-00023.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:229ba45a9cd8b8792a5b83b7148ca13f8c4cb3f2443546a3aa4e4d3f53bb6291
size 7046430960

model-00024.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:095e4450b4c83f545ad450fc39b9c7d995f7f842b8c7802dc7485e4817ac46a0
size 7046430960

model-00025.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ef8050270d6c6a289cd3889a7b6aaed62fe50380ada7b887cbd3b4a7bed0d55e
size 7046430960

model-00026.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f49a63250dc25e5d3f25dd1d002722f203b8a7e5d4c7742e3bf0690a1c1632e4
size 7046430960

model-00027.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:45dcbc3771b950d73ac97ce54d20b22090551fea9a4cd37bc4d953261c5cd5dd
size 7046430960

model-00028.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f76ce1a53cfb59a0afb3bfd6ce5deabf6bbc3a84aaa0379e29d93d09e1a0a326
size 7046430960

model-00029.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0add3917e46cc0be92ac9230a413ff010a4437695ccc4df7ac056f085984788d
size 7046430960

model-00030.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dd1a4e9becd74c663a24dcb043bfda7797c4127368dd58eefe85ba09610dd73e
size 7046430960

model-00031.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6ff8d05fd4803bbbbd685ada90bda51c6b99bb2b12a8dbc36910eb0ef9dc7803
size 7046430960

model-00032.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eb58c7424bb3fb0c0d5c84f4cb7b220deb73cd677a5bb509ab782567a1f8a687
size 7046430960

model-00033.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eac34581b2fb405be199b99b71356b60caf1f2c64ad5fe92dd14a3aa6b156ba3
size 7046430960

model-00034.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f9fc7d48f75050201c785b5973c8517fd4c82a95787772ec9e48fad400e4233
size 7046430960

model-00035.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d32bffd85be89f836ca36ed7201aaa676b93a4db9ca9be4fd36094dc93d16d3b
size 7046430960

model-00036.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:beb6e50876d8477768af98c35fb7268dff1897e037d7506b3d295547d75bcfcc
size 7046430960

model-00037.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:756125d247a87695e01627d274078d8e20b4f7e45a2455021c4c287e613062ff
size 7046430960

model-00038.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fcbbd03054fa79da17d279ae1839fcfd79de63ed56352c896b1f6f31cc93e8c8
size 7046430960

model-00039.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:71671759930fe237c9f896a47369b6d04ac895ede6d5cb7dfbe558dd810af92a
size 7046430960

model-00040.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b6fb17111f7af6863b77178fb96366475ed3b6ead0f806f924dd18a5504048a
size 7046430960

model-00041.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:af14b4f09b9665cd3ec3b86b410ac781b837c33b7efbe320869ad265f02f69db
size 7046430960

model-00042.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bb8178740784101d98781f429a57e66cc1f1e34d5d6db0c2bfcfc9e0239e65ca
size 7046430960

model-00043.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4d11e4cdba8b59590e6e9c5c747fcd00434d8a2446d28055d8ed7dc17855e630
size 7046430960

model-00044.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9f9e9fbee686e078de3b1cad1c10575cbcd36b544cdca826565492b7331a98c6
size 7046430960

model-00045.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:87083ba08825811bea75c96f7d48de98104568858c7c2540448a0ec71064c587
size 7046430960

model-00046.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4140d24b7638e5ad043ae178d041eea74b498d1708e5867eb358874bfff09ffc
size 7046430960