modelId
stringlengths
5
139
author
stringlengths
2
42
last_modified
timestamp[us, tz=UTC]date
2020-02-15 11:33:14
2025-08-22 00:40:10
downloads
int64
0
223M
likes
int64
0
11.7k
library_name
stringclasses
517 values
tags
listlengths
1
4.05k
pipeline_tag
stringclasses
55 values
createdAt
timestamp[us, tz=UTC]date
2022-03-02 23:29:04
2025-08-22 00:39:40
card
stringlengths
11
1.01M
sekirr22/blockassist-bc-furry_rugged_camel_1755733108
sekirr22
2025-08-20T23:38:34Z
0
0
null
[ "tensorboard", "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "furry rugged camel", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:38:28Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - furry rugged camel --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
manusiaperahu2012/blockassist-bc-roaring_long_tuna_1755731415
manusiaperahu2012
2025-08-20T23:38:13Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "roaring long tuna", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:38:09Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - roaring long tuna --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
unitova/blockassist-bc-zealous_sneaky_raven_1755731565
unitova
2025-08-20T23:38:07Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "zealous sneaky raven", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:38:03Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - zealous sneaky raven --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
yaelahnal/blockassist-bc-mute_clawed_crab_1755732884
yaelahnal
2025-08-20T23:35:54Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "mute clawed crab", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:35:37Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - mute clawed crab --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
IMGEAI/moe1_4bit
IMGEAI
2025-08-20T23:35:08Z
0
0
transformers
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "qwen3_moe", "en", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2025-08-20T23:27:09Z
--- base_model: unsloth/qwen3-30b-a3b-thinking-2507 tags: - text-generation-inference - transformers - unsloth - qwen3_moe license: apache-2.0 language: - en --- # Uploaded finetuned model - **Developed by:** IMGEAI - **License:** apache-2.0 - **Finetuned from model :** unsloth/qwen3-30b-a3b-thinking-2507 This qwen3_moe model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
mang3dd/blockassist-bc-tangled_slithering_alligator_1755731202
mang3dd
2025-08-20T23:31:50Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "tangled slithering alligator", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:31:47Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - tangled slithering alligator --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
roeker/blockassist-bc-quick_wiry_owl_1755732482
roeker
2025-08-20T23:28:46Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "quick wiry owl", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:28:41Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - quick wiry owl --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
chainway9/blockassist-bc-untamed_quick_eel_1755730915
chainway9
2025-08-20T23:28:29Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "untamed quick eel", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:28:25Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - untamed quick eel --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
lautan/blockassist-bc-gentle_patterned_goat_1755730816
lautan
2025-08-20T23:25:35Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "gentle patterned goat", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:25:32Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - gentle patterned goat --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
yagv011/qwen_2.5_7b-cat_numbers
yagv011
2025-08-20T23:22:33Z
0
0
transformers
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "qwen2", "trl", "en", "base_model:unsloth/Qwen2.5-7B-Instruct", "base_model:finetune:unsloth/Qwen2.5-7B-Instruct", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2025-08-20T23:22:14Z
--- base_model: unsloth/Qwen2.5-7B-Instruct tags: - text-generation-inference - transformers - unsloth - qwen2 - trl license: apache-2.0 language: - en --- # Uploaded model - **Developed by:** yagv011 - **License:** apache-2.0 - **Finetuned from model :** unsloth/Qwen2.5-7B-Instruct This qwen2 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
rvipitkirubbe/blockassist-bc-mottled_foraging_ape_1755730432
rvipitkirubbe
2025-08-20T23:19:46Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "mottled foraging ape", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:19:42Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - mottled foraging ape --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
ihsanridzi/blockassist-bc-wiry_flexible_owl_1755730384
ihsanridzi
2025-08-20T23:18:45Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "wiry flexible owl", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:18:42Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - wiry flexible owl --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
bonnieliu2002/act_collect_empty_bottle_black_white_wrist_2k_bs8_k50
bonnieliu2002
2025-08-20T23:14:19Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "act", "dataset:bonnieliu2002/collect_empty_bottle_black_white_wrist", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2025-08-20T23:13:53Z
--- datasets: bonnieliu2002/collect_empty_bottle_black_white_wrist library_name: lerobot license: apache-2.0 model_name: act pipeline_tag: robotics tags: - lerobot - robotics - act --- # Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high success rates. This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot). See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index). --- ## How to Get Started with the Model For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy). Below is the short version on how to train and run inference/eval: ### Train from scratch ```bash lerobot-train \ --dataset.repo_id=${HF_USER}/<dataset> \ --policy.type=act \ --output_dir=outputs/train/<desired_policy_repo_id> \ --job_name=lerobot_training \ --policy.device=cuda \ --policy.repo_id=${HF_USER}/<desired_policy_repo_id> --wandb.enable=true ``` _Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._ ### Evaluate the policy/run inference ```bash lerobot-record \ --robot.type=so100_follower \ --dataset.repo_id=<hf_user>/eval_<dataset> \ --policy.path=<hf_user>/<desired_policy_repo_id> \ --episodes=10 ``` Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a local or hub checkpoint. --- ## Model Details - **License:** apache-2.0
roeker/blockassist-bc-quick_wiry_owl_1755731562
roeker
2025-08-20T23:13:29Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "quick wiry owl", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:13:23Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - quick wiry owl --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
razor534/blockassist-bc-lazy_extinct_termite_1755731372
razor534
2025-08-20T23:10:21Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "lazy extinct termite", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:10:13Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - lazy extinct termite --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
Buura/qwen-coder-1.5b-opencodeinstruct-grpo-v2
Buura
2025-08-20T23:10:15Z
0
0
transformers
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "qwen2", "trl", "en", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2025-08-20T23:09:36Z
--- base_model: unsloth/qwen2.5-coder-1.5b-instruct-bnb-4bit tags: - text-generation-inference - transformers - unsloth - qwen2 - trl license: apache-2.0 language: - en --- # Uploaded model - **Developed by:** Buura - **License:** apache-2.0 - **Finetuned from model :** unsloth/qwen2.5-coder-1.5b-instruct-bnb-4bit This qwen2 model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
MercuryNex/select
MercuryNex
2025-08-20T23:09:57Z
0
0
diffusers
[ "diffusers", "safetensors", "text-to-image", "en", "license:other", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
2025-08-20T23:08:48Z
--- license: other language: - en library_name: diffusers pipeline_tag: text-to-image tags: - text-to-image --- Converted from [https://civitai.com/api/download/models/914390?type=Model&format=SafeTensor&size=full&fp=fp16](https://civitai.com/api/download/models/914390?type=Model&format=SafeTensor&size=full&fp=fp16).
coelacanthxyz/blockassist-bc-finicky_thriving_grouse_1755729570
coelacanthxyz
2025-08-20T23:09:01Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "finicky thriving grouse", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:08:55Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - finicky thriving grouse --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
sampingkaca72/blockassist-bc-armored_stealthy_elephant_1755729746
sampingkaca72
2025-08-20T23:08:34Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "armored stealthy elephant", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:08:31Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - armored stealthy elephant --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
unitova/blockassist-bc-zealous_sneaky_raven_1755729589
unitova
2025-08-20T23:07:34Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "zealous sneaky raven", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:07:31Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - zealous sneaky raven --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
hakimjustbao/blockassist-bc-raging_subtle_wasp_1755729441
hakimjustbao
2025-08-20T23:05:44Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "raging subtle wasp", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:05:41Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - raging subtle wasp --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
manusiaperahu2012/blockassist-bc-roaring_long_tuna_1755729464
manusiaperahu2012
2025-08-20T23:05:30Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "roaring long tuna", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:05:23Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - roaring long tuna --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
yaelahnal/blockassist-bc-mute_clawed_crab_1755730982
yaelahnal
2025-08-20T23:04:10Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "mute clawed crab", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:03:53Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - mute clawed crab --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
Chukky10z/blockassist-bc-mammalian_jumping_cougar_1755730956
Chukky10z
2025-08-20T23:03:28Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "mammalian jumping cougar", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T23:03:16Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - mammalian jumping cougar --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
CycloneDX/cdx1-mlx-MXFP4
CycloneDX
2025-08-20T23:02:27Z
0
0
mlx
[ "mlx", "safetensors", "qwen2", "text-generation", "cdxgen", "transformers", "sbom", "supply-chain-security", "conversational", "en", "dataset:CycloneDX/cdx-docs", "base_model:CycloneDX/cdx1-mlx", "base_model:quantized:CycloneDX/cdx1-mlx", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "4-bit", "region:us" ]
text-generation
2025-08-20T22:40:46Z
--- base_model: CycloneDX/cdx1-mlx language: - en library_name: mlx license: apache-2.0 tags: - cdxgen - transformers - sbom - supply-chain-security - mlx pipeline_tag: text-generation datasets: - CycloneDX/cdx-docs ---
lilTAT/blockassist-bc-gentle_rugged_hare_1755730453
lilTAT
2025-08-20T22:54:50Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "gentle rugged hare", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:54:43Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - gentle rugged hare --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
kojeklollipop/blockassist-bc-spotted_amphibious_stork_1755728887
kojeklollipop
2025-08-20T22:54:32Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "spotted amphibious stork", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:54:28Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - spotted amphibious stork --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
Jackmin108/Qwen3-30B-A3B-Base-Fast
Jackmin108
2025-08-20T22:54:15Z
0
0
transformers
[ "transformers", "safetensors", "qwen3_moe", "text-generation", "conversational", "custom_code", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2025-08-19T05:23:42Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. 
## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. 
(2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
vwzyrraz7l/blockassist-bc-tall_hunting_vulture_1755728733
vwzyrraz7l
2025-08-20T22:53:01Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "tall hunting vulture", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:52:57Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - tall hunting vulture --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
roeker/blockassist-bc-quick_wiry_owl_1755730333
roeker
2025-08-20T22:53:00Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "quick wiry owl", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:52:52Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - quick wiry owl --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
AnonymousCS/xlmr_immigration_combo28_3
AnonymousCS
2025-08-20T22:51:16Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "xlm-roberta", "text-classification", "generated_from_trainer", "base_model:FacebookAI/xlm-roberta-large", "base_model:finetune:FacebookAI/xlm-roberta-large", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2025-08-20T22:48:01Z
--- library_name: transformers license: mit base_model: FacebookAI/xlm-roberta-large tags: - generated_from_trainer metrics: - accuracy model-index: - name: xlmr_immigration_combo28_3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlmr_immigration_combo28_3 This model is a fine-tuned version of [FacebookAI/xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1979 - Accuracy: 0.9499 - 1-f1: 0.9228 - 1-recall: 0.8996 - 1-precision: 0.9472 - Balanced Acc: 0.9373 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Use OptimizerNames.ADAMW_TORCH_FUSED with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 15 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | 1-f1 | 1-recall | 1-precision | Balanced Acc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------:|:-----------:|:------------:| | 0.1409 | 1.0 | 25 | 0.2124 | 0.9332 | 0.9026 | 0.9305 | 0.8764 | 0.9325 | | 0.147 | 2.0 | 50 | 0.1840 | 0.9447 | 0.9142 | 0.8842 | 0.9463 | 0.9296 | | 0.1637 | 3.0 | 75 | 0.1922 | 0.9486 | 0.9209 | 0.8996 | 0.9433 | 0.9363 | | 0.0762 | 4.0 | 100 | 0.1979 | 0.9499 | 0.9228 | 0.8996 | 0.9472 | 0.9373 | ### Framework versions - Transformers 4.56.0.dev0 - Pytorch 2.8.0+cu126 - Datasets 4.0.0 - Tokenizers 0.21.4
yaelahnal/blockassist-bc-mute_clawed_crab_1755730158
yaelahnal
2025-08-20T22:50:27Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "mute clawed crab", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:50:09Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - mute clawed crab --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
roeker/blockassist-bc-quick_wiry_owl_1755730036
roeker
2025-08-20T22:48:36Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "quick wiry owl", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:48:02Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - quick wiry owl --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
ihsanridzi/blockassist-bc-wiry_flexible_owl_1755728497
ihsanridzi
2025-08-20T22:47:55Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "wiry flexible owl", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:47:52Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - wiry flexible owl --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
ntkhoi/Qwen3-4B-Medical-SFT-0820-checkpoint-1000
ntkhoi
2025-08-20T22:45:53Z
0
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-08-20T22:44:52Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. 
## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. 
(2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
AnonymousCS/xlmr_immigration_combo28_1
AnonymousCS
2025-08-20T22:45:10Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "xlm-roberta", "text-classification", "generated_from_trainer", "base_model:FacebookAI/xlm-roberta-large", "base_model:finetune:FacebookAI/xlm-roberta-large", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2025-08-20T22:42:30Z
--- library_name: transformers license: mit base_model: FacebookAI/xlm-roberta-large tags: - generated_from_trainer metrics: - accuracy model-index: - name: xlmr_immigration_combo28_1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlmr_immigration_combo28_1 This model is a fine-tuned version of [FacebookAI/xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1999 - Accuracy: 0.9254 - 1-f1: 0.8872 - 1-recall: 0.8803 - 1-precision: 0.8941 - Balanced Acc: 0.9141 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Use OptimizerNames.ADAMW_TORCH_FUSED with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 15 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | 1-f1 | 1-recall | 1-precision | Balanced Acc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------:|:-----------:|:------------:| | 0.2658 | 1.0 | 25 | 0.1760 | 0.9370 | 0.9022 | 0.8726 | 0.9339 | 0.9209 | | 0.2375 | 2.0 | 50 | 0.1887 | 0.9319 | 0.8942 | 0.8649 | 0.9256 | 0.9151 | | 0.2602 | 3.0 | 75 | 0.1999 | 0.9254 | 0.8872 | 0.8803 | 0.8941 | 0.9141 | ### Framework versions - Transformers 4.56.0.dev0 - Pytorch 2.8.0+cu126 - Datasets 4.0.0 - Tokenizers 0.21.4
zijian2022/testinga_dp_original
zijian2022
2025-08-20T22:44:30Z
0
0
lerobot
[ "lerobot", "safetensors", "diffusion", "robotics", "dataset:zijian2022/y7", "arxiv:2303.04137", "license:apache-2.0", "region:us" ]
robotics
2025-08-20T20:47:10Z
--- datasets: zijian2022/y7 library_name: lerobot license: apache-2.0 model_name: diffusion pipeline_tag: robotics tags: - diffusion - lerobot - robotics --- # Model Card for diffusion <!-- Provide a quick summary of what the model is/does. --> [Diffusion Policy](https://huggingface.co/papers/2303.04137) treats visuomotor control as a generative diffusion process, producing smooth, multi-step action trajectories that excel at contact-rich manipulation. This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot). See the full documentation at [LeRobot Docs](https://huggingface.co/docs/lerobot/index). --- ## How to Get Started with the Model For a complete walkthrough, see the [training guide](https://huggingface.co/docs/lerobot/il_robots#train-a-policy). Below is the short version on how to train and run inference/eval: ### Train from scratch ```bash python -m lerobot.scripts.train \ --dataset.repo_id=${HF_USER}/<dataset> \ --policy.type=act \ --output_dir=outputs/train/<desired_policy_repo_id> \ --job_name=lerobot_training \ --policy.device=cuda \ --policy.repo_id=${HF_USER}/<desired_policy_repo_id> --wandb.enable=true ``` _Writes checkpoints to `outputs/train/<desired_policy_repo_id>/checkpoints/`._ ### Evaluate the policy/run inference ```bash python -m lerobot.record \ --robot.type=so100_follower \ --dataset.repo_id=<hf_user>/eval_<dataset> \ --policy.path=<hf_user>/<desired_policy_repo_id> \ --episodes=10 ``` Prefix the dataset repo with **eval\_** and supply `--policy.path` pointing to a local or hub checkpoint. --- ## Model Details - **License:** apache-2.0
calegpedia/blockassist-bc-stealthy_slimy_rooster_1755727787
calegpedia
2025-08-20T22:36:50Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "stealthy slimy rooster", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:36:47Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - stealthy slimy rooster --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
lisaozill03/blockassist-bc-rugged_prickly_alpaca_1755727867
lisaozill03
2025-08-20T22:35:55Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "rugged prickly alpaca", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:35:52Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - rugged prickly alpaca --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
Leoar/blockassist-bc-pudgy_toothy_cheetah_1755729195
Leoar
2025-08-20T22:35:10Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "pudgy toothy cheetah", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:35:01Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - pudgy toothy cheetah --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
unitova/blockassist-bc-zealous_sneaky_raven_1755727792
unitova
2025-08-20T22:35:10Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "zealous sneaky raven", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:35:05Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - zealous sneaky raven --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
arianaazarbal/standard_tpr_0.8-20250820_164950-sft-adapter
arianaazarbal
2025-08-20T22:34:55Z
0
0
null
[ "pytorch", "region:us" ]
null
2025-08-20T22:33:41Z
# SFT LoRA Adapter Experiment: standard_tpr_0.8 Timestamp: 20250820_164950 This model was trained as part of the deception-evasion-honesty experiments. ## Model Details - **Type**: SFT LoRA Adapter - **Experiment Name**: standard_tpr_0.8 - **Training Timestamp**: 20250820_164950
coelacanthxyz/blockassist-bc-finicky_thriving_grouse_1755727554
coelacanthxyz
2025-08-20T22:33:44Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "finicky thriving grouse", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:33:38Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - finicky thriving grouse --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
AnonymousCS/xlmr_immigration_combo27_4
AnonymousCS
2025-08-20T22:33:22Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "xlm-roberta", "text-classification", "generated_from_trainer", "base_model:FacebookAI/xlm-roberta-large", "base_model:finetune:FacebookAI/xlm-roberta-large", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2025-08-20T22:30:28Z
--- library_name: transformers license: mit base_model: FacebookAI/xlm-roberta-large tags: - generated_from_trainer metrics: - accuracy model-index: - name: xlmr_immigration_combo27_4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlmr_immigration_combo27_4 This model is a fine-tuned version of [FacebookAI/xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2079 - Accuracy: 0.9422 - 1-f1: 0.9149 - 1-recall: 0.9344 - 1-precision: 0.8963 - Balanced Acc: 0.9402 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Use OptimizerNames.ADAMW_TORCH_FUSED with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 15 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | 1-f1 | 1-recall | 1-precision | Balanced Acc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------:|:-----------:|:------------:| | 0.1364 | 1.0 | 25 | 0.1461 | 0.9524 | 0.9256 | 0.8880 | 0.9664 | 0.9363 | | 0.1483 | 2.0 | 50 | 0.1501 | 0.9550 | 0.9304 | 0.9035 | 0.9590 | 0.9421 | | 0.112 | 3.0 | 75 | 0.2079 | 0.9422 | 0.9149 | 0.9344 | 0.8963 | 0.9402 | ### Framework versions - Transformers 4.56.0.dev0 - Pytorch 2.8.0+cu126 - Datasets 4.0.0 - Tokenizers 0.21.4
roeker/blockassist-bc-quick_wiry_owl_1755729105
roeker
2025-08-20T22:32:31Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "quick wiry owl", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:32:25Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - quick wiry owl --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
roeker/blockassist-bc-quick_wiry_owl_1755728798
roeker
2025-08-20T22:27:53Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "quick wiry owl", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:27:17Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - quick wiry owl --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
OpenVINO/Qwen2.5-Coder-7B-Instruct-fp16-ov
OpenVINO
2025-08-20T22:25:06Z
0
0
transformers
[ "transformers", "openvino", "qwen2", "text-generation", "code", "codeqwen", "chat", "qwen", "qwen-coder", "conversational", "en", "base_model:Qwen/Qwen2.5-Coder-7B-Instruct", "base_model:finetune:Qwen/Qwen2.5-Coder-7B-Instruct", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2025-08-20T22:15:59Z
--- license: apache-2.0 license_link: https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct/blob/main/LICENSE language: - en base_model: - Qwen/Qwen2.5-Coder-7B-Instruct pipeline_tag: text-generation library_name: transformers tags: - code - codeqwen - chat - qwen - qwen-coder --- # Qwen2.5-Coder-7B-Instruct-fp16-ov * Model creator: [Qwen](https://huggingface.co/Qwen) * Original model: [Qwen2.5-Coder-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct) ## Description This is [Qwen2.5-Coder-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct) model converted to the [OpenVINO™ IR](https://docs.openvino.ai/2025/documentation/openvino-ir-format.html) (Intermediate Representation) format with weights compressed to FP16. ## Compatibility The provided OpenVINO™ IR model is compatible with: * OpenVINO version 2025.2.0 and higher * Optimum Intel 1.25.0 and higher ## Running Model Inference with [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) 1. Install packages required for using [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) integration with the OpenVINO backend: ``` pip install optimum[openvino] ``` 2. Run model inference: ``` from transformers import AutoTokenizer from optimum.intel.openvino import OVModelForCausalLM model_id = "OpenVINO/Qwen2.5-Coder-7B-Instruct-fp16-ov" tokenizer = AutoTokenizer.from_pretrained(model_id) model = OVModelForCausalLM.from_pretrained(model_id) inputs = tokenizer("write a quick sort algorithm.", return_tensors="pt") outputs = model.generate(**inputs, max_length=200) text = tokenizer.batch_decode(outputs)[0] print(text) ``` For more examples and possible optimizations, refer to the [Inference with Optimum Intel](https://docs.openvino.ai/2025/openvino-workflow-generative/inference-with-optimum-intel.html). ## Running Model Inference with [OpenVINO GenAI](https://github.com/openvinotoolkit/openvino.genai) 1. Install packages required for using OpenVINO GenAI. 
``` pip install openvino-genai huggingface_hub ``` 2. Download model from HuggingFace Hub ``` import huggingface_hub as hf_hub model_id = "OpenVINO/Qwen2.5-Coder-7B-Instruct-fp16-ov" model_path = "Qwen2.5-Coder-7B-Instruct-fp16-ov" hf_hub.snapshot_download(model_id, local_dir=model_path) ``` 3. Run model inference: ``` import openvino_genai as ov_genai device = "CPU" pipe = ov_genai.LLMPipeline(model_path, device) pipe.get_tokenizer().set_chat_template(pipe.get_tokenizer().chat_template) print(pipe.generate("write a quick sort algorithm.", max_length=200)) ``` More GenAI usage examples can be found in OpenVINO GenAI library [docs](https://docs.openvino.ai/2025/openvino-workflow-generative/inference-with-genai.html) and [samples](https://github.com/openvinotoolkit/openvino.genai?tab=readme-ov-file#openvino-genai-samples) You can find more detailed usage examples in OpenVINO Notebooks: - [LLM](https://openvinotoolkit.github.io/openvino_notebooks/?search=LLM) - [RAG text generation](https://openvinotoolkit.github.io/openvino_notebooks/?search=RAG+system&tasks=Text+Generation) ## Limitations Check the original [model card](https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct) for limitations. ## Legal information The original model is distributed under [Apache License Version 2.0](https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct/blob/main/LICENSE) license. More details can be found in [Qwen2.5-Coder-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct). ## Disclaimer Intel is committed to respecting human rights and avoiding causing or contributing to adverse impacts on human rights. See [Intel’s Global Human Rights Principles](https://www.intel.com/content/dam/www/central-libraries/us/en/documents/policy-human-rights.pdf). Intel’s products and software are intended only to be used in applications that do not cause or contribute to adverse impacts on human rights.
chainway9/blockassist-bc-untamed_quick_eel_1755727084
chainway9
2025-08-20T22:23:32Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "untamed quick eel", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:23:28Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - untamed quick eel --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
busyyy/blockassist-bc-bipedal_deadly_dinosaur_1755726597
busyyy
2025-08-20T22:19:36Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "bipedal deadly dinosaur", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:18:12Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - bipedal deadly dinosaur --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
Muapi/rough-water-colors
Muapi
2025-08-20T22:19:17Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T22:18:59Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Rough Water Colors ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:1457421@1648000", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
rbelanec/train_conala_1755694511
rbelanec
2025-08-20T22:19:02Z
0
0
peft
[ "peft", "safetensors", "llama-factory", "prefix-tuning", "generated_from_trainer", "base_model:meta-llama/Meta-Llama-3-8B-Instruct", "base_model:adapter:meta-llama/Meta-Llama-3-8B-Instruct", "license:llama3", "region:us" ]
null
2025-08-20T22:02:04Z
--- library_name: peft license: llama3 base_model: meta-llama/Meta-Llama-3-8B-Instruct tags: - llama-factory - prefix-tuning - generated_from_trainer model-index: - name: train_conala_1755694511 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # train_conala_1755694511 This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) on the conala dataset. It achieves the following results on the evaluation set: - Loss: 1.2638 - Num Input Tokens Seen: 1382584 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 123 - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Input Tokens Seen | |:-------------:|:------:|:-----:|:---------------:|:-----------------:| | 0.9354 | 0.5005 | 536 | 0.8337 | 68880 | | 0.9609 | 1.0009 | 1072 | 0.7219 | 138320 | | 0.5536 | 1.5014 | 1608 | 0.6741 | 207744 | | 0.3862 | 2.0019 | 2144 | 0.6362 | 276856 | | 0.6441 | 2.5023 | 2680 | 0.6552 | 346040 | | 0.582 | 3.0028 | 3216 | 0.6596 | 415184 | | 0.3643 | 3.5033 | 3752 | 0.6909 | 484576 | | 0.2223 | 4.0037 | 4288 | 0.7160 | 553632 | | 0.1992 | 4.5042 | 4824 | 0.7488 | 623280 | | 0.1908 | 5.0047 | 5360 | 0.7194 | 691912 | | 0.223 | 5.5051 | 5896 | 0.8461 | 762008 | | 0.1581 | 6.0056 | 6432 | 0.8329 | 830744 | | 0.037 | 6.5061 | 6968 | 0.9954 | 900568 | | 0.0216 | 7.0065 | 
7504 | 0.9716 | 969200 | | 0.095 | 7.5070 | 8040 | 1.0835 | 1037856 | | 0.0669 | 8.0075 | 8576 | 1.0836 | 1107480 | | 0.1067 | 8.5079 | 9112 | 1.2072 | 1176200 | | 0.0466 | 9.0084 | 9648 | 1.2154 | 1245744 | | 0.0126 | 9.5089 | 10184 | 1.2640 | 1314112 | ### Framework versions - PEFT 0.15.2 - Transformers 4.51.3 - Pytorch 2.8.0+cu128 - Datasets 3.6.0 - Tokenizers 0.21.1
Muapi/santa-claus-style-xl-sd-1.5-f1d
Muapi
2025-08-20T22:17:47Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T22:17:34Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Santa Claus (style) XL + SD 1.5 + F1D ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: Santa Claus ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:220765@1168169", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
rvipitkirubbe/blockassist-bc-mottled_foraging_ape_1755726734
rvipitkirubbe
2025-08-20T22:17:05Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "mottled foraging ape", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:17:02Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - mottled foraging ape --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
MU-NLPC/F0_Energy_joint_VQVAE_embeddings-norm_interp
MU-NLPC
2025-08-20T22:16:39Z
22
0
transformers
[ "transformers", "safetensors", "prosody_quantizer", "feature-extraction", "custom_code", "region:us" ]
feature-extraction
2025-08-12T11:45:42Z
--- library_name: transformers tags: [] --- Model adapted from: https://github.com/facebookresearch/speech-resynthesis This repository contains a VQ-VAE model trained to generate high-quality joint vector embeddings of the F0 and Energy features of speech, published in the paper https://www.isca-archive.org/interspeech_2025/portes25_interspeech.html. This preprocessing strategy used in this repository is __Interpolation + Normalization__. For __Normalization + Voicedness mask__, see https://huggingface.co/MU-NLPC/F0_Energy_joint_VQVAE_embeddings For __Interpolation__, see https://huggingface.co/MU-NLPC/F0_Energy_joint_VQVAE_embeddings-interp The script for running the model is included in the __generate_embeddings.py__ file. To use, clone this repository, create a virtual environment based on the pyproject.toml file, for example by running: ``` poetry install ``` Then, in the generate_embeddings.py script, select the dataset, uncomment the ``` #trust_remote_code=True ``` lines, and run the script: ``` poetry run python generate_embeddings.py ``` Note: While the model was trained using audio sampled at 16khz, the performance seems to be consistent for 24khz sampled audio as well. Use at your own discretion.
MajorJalud/blockassist-bc-fast_bristly_sardine_1755727961
MajorJalud
2025-08-20T22:14:39Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "fast bristly sardine", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:14:30Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - fast bristly sardine --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
8septiadi8/blockassist-bc-curious_lightfooted_mouse_1755727948
8septiadi8
2025-08-20T22:13:29Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "curious lightfooted mouse", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:13:12Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - curious lightfooted mouse --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
abcorrea/p3-v2
abcorrea
2025-08-20T22:13:23Z
0
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "generated_from_trainer", "trl", "sft", "conversational", "base_model:abcorrea/p3-v1", "base_model:finetune:abcorrea/p3-v1", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-08-20T22:05:10Z
--- base_model: abcorrea/p3-v1 library_name: transformers model_name: p3-v2 tags: - generated_from_trainer - trl - sft licence: license --- # Model Card for p3-v2 This model is a fine-tuned version of [abcorrea/p3-v1](https://huggingface.co/abcorrea/p3-v1). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?" generator = pipeline("text-generation", model="abcorrea/p3-v2", device="cuda") output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0] print(output["generated_text"]) ``` ## Training procedure This model was trained with SFT. ### Framework versions - TRL: 0.19.1 - Transformers: 4.52.1 - Pytorch: 2.7.0 - Datasets: 4.0.0 - Tokenizers: 0.21.1 ## Citations Cite TRL as: ```bibtex @misc{vonwerra2022trl, title = {{TRL: Transformer Reinforcement Learning}}, author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec}, year = 2020, journal = {GitHub repository}, publisher = {GitHub}, howpublished = {\url{https://github.com/huggingface/trl}} } ```
Muapi/ethereal
Muapi
2025-08-20T22:12:53Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T22:12:40Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Ethereal ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:1016450@1139626", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
Muapi/french-1970-s-satirical-comics-gotlib-style
Muapi
2025-08-20T22:10:19Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T22:10:08Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # French 1970's Satirical Comics - Gotlib Style ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: an illustration of, in the style of gotlib ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:525206@1035896", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
Shivam17818/qwen-2.5-7b-instruct-zveria-fast-v3
Shivam17818
2025-08-20T22:10:07Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "generated_from_trainer", "sft", "trl", "endpoints_compatible", "region:us" ]
null
2025-08-20T22:09:56Z
--- library_name: transformers model_name: qwen-2.5-7b-instruct-zveria-fast-v3 tags: - generated_from_trainer - sft - trl licence: license --- # Model Card for qwen-2.5-7b-instruct-zveria-fast-v3 This model is a fine-tuned version of [None](https://huggingface.co/None). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?" generator = pipeline("text-generation", model="Shivam17818/qwen-2.5-7b-instruct-zveria-fast-v3", device="cuda") output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0] print(output["generated_text"]) ``` ## Training procedure This model was trained with SFT. ### Framework versions - TRL: 0.21.0 - Transformers: 4.55.2 - Pytorch: 2.6.0+cu124 - Datasets: 4.0.0 - Tokenizers: 0.21.1 ## Citations Cite TRL as: ```bibtex @misc{vonwerra2022trl, title = {{TRL: Transformer Reinforcement Learning}}, author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec}, year = 2020, journal = {GitHub repository}, publisher = {GitHub}, howpublished = {\url{https://github.com/huggingface/trl}} } ```
AnonymousCS/xlmr_immigration_combo26_4
AnonymousCS
2025-08-20T22:08:51Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "xlm-roberta", "text-classification", "generated_from_trainer", "base_model:FacebookAI/xlm-roberta-large", "base_model:finetune:FacebookAI/xlm-roberta-large", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2025-08-20T22:06:12Z
--- library_name: transformers license: mit base_model: FacebookAI/xlm-roberta-large tags: - generated_from_trainer metrics: - accuracy model-index: - name: xlmr_immigration_combo26_4 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlmr_immigration_combo26_4 This model is a fine-tuned version of [FacebookAI/xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1675 - Accuracy: 0.9614 - 1-f1: 0.9412 - 1-recall: 0.9266 - 1-precision: 0.9562 - Balanced Acc: 0.9527 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Use OptimizerNames.ADAMW_TORCH_FUSED with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 15 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | 1-f1 | 1-recall | 1-precision | Balanced Acc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------:|:-----------:|:------------:| | 0.1595 | 1.0 | 25 | 0.1355 | 0.9563 | 0.9323 | 0.9035 | 0.9630 | 0.9431 | | 0.0853 | 2.0 | 50 | 0.1425 | 0.9614 | 0.9409 | 0.9228 | 0.9598 | 0.9518 | | 0.1138 | 3.0 | 75 | 0.1675 | 0.9614 | 0.9412 | 0.9266 | 0.9562 | 0.9527 | ### Framework versions - Transformers 4.56.0.dev0 - Pytorch 2.8.0+cu126 - Datasets 4.0.0 - Tokenizers 0.21.4
mradermacher/teknofest-2025-turkish-edu-v2-GGUF
mradermacher
2025-08-20T22:08:51Z
0
0
transformers
[ "transformers", "gguf", "turkish", "education", "teknofest-2025", "qwen", "text-generation", "lora", "tr", "dataset:Huseyin/final2", "base_model:Huseyin/teknofest-2025-turkish-edu-v2", "base_model:adapter:Huseyin/teknofest-2025-turkish-edu-v2", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
text-generation
2025-08-20T17:36:48Z
--- base_model: Huseyin/teknofest-2025-turkish-edu-v2 datasets: - Huseyin/final2 language: tr library_name: transformers license: apache-2.0 mradermacher: readme_rev: 1 quantized_by: mradermacher tags: - turkish - education - teknofest-2025 - qwen - text-generation - lora --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static quants of https://huggingface.co/Huseyin/teknofest-2025-turkish-edu-v2 <!-- provided-files --> ***For a convenient overview and download list, visit our [model page for this model](https://hf.tst.eu/model#teknofest-2025-turkish-edu-v2-GGUF).*** weighted/imatrix quants are available at https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. 
IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-GGUF/resolve/main/teknofest-2025-turkish-edu-v2.Q2_K.gguf) | Q2_K | 3.4 | | | [GGUF](https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-GGUF/resolve/main/teknofest-2025-turkish-edu-v2.Q3_K_S.gguf) | Q3_K_S | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-GGUF/resolve/main/teknofest-2025-turkish-edu-v2.Q3_K_M.gguf) | Q3_K_M | 4.2 | lower quality | | [GGUF](https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-GGUF/resolve/main/teknofest-2025-turkish-edu-v2.Q3_K_L.gguf) | Q3_K_L | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-GGUF/resolve/main/teknofest-2025-turkish-edu-v2.IQ4_XS.gguf) | IQ4_XS | 4.7 | | | [GGUF](https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-GGUF/resolve/main/teknofest-2025-turkish-edu-v2.Q4_K_S.gguf) | Q4_K_S | 4.9 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-GGUF/resolve/main/teknofest-2025-turkish-edu-v2.Q4_K_M.gguf) | Q4_K_M | 5.1 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-GGUF/resolve/main/teknofest-2025-turkish-edu-v2.Q5_K_S.gguf) | Q5_K_S | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-GGUF/resolve/main/teknofest-2025-turkish-edu-v2.Q5_K_M.gguf) | Q5_K_M | 6.0 | | | [GGUF](https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-GGUF/resolve/main/teknofest-2025-turkish-edu-v2.Q6_K.gguf) | Q6_K | 6.8 | very good quality | | [GGUF](https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-GGUF/resolve/main/teknofest-2025-turkish-edu-v2.Q8_0.gguf) | Q8_0 | 8.8 | fast, best quality | | 
[GGUF](https://huggingface.co/mradermacher/teknofest-2025-turkish-edu-v2-GGUF/resolve/main/teknofest-2025-turkish-edu-v2.f16.gguf) | f16 | 16.5 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
marcovise/TextEmbedding3SmallSentimentHead
marcovise
2025-08-20T22:08:34Z
0
0
transformers
[ "transformers", "pytorch", "sentiment-head", "feature-extraction", "sentiment-analysis", "text-classification", "openai-embeddings", "custom_code", "license:mit", "region:us" ]
text-classification
2025-08-20T21:50:00Z
--- license: mit tags: - sentiment-analysis - text-classification - openai-embeddings - pytorch pipeline_tag: text-classification library_name: transformers --- # TextEmbedding3SmallSentimentHead In case you needed a sentiment analysis classifier on top of embeddings from OpenAI embeddings model. ## Model Description - **What this is**: A compact PyTorch classifier head trained on top of `text-embedding-3-small` (1536-dim) to predict sentiment: negative, neutral, positive. - **Data**: Preprocessed from the [Kaggle Sentiment Analysis Dataset](https://www.kaggle.com/datasets/abhi8923shriv/sentiment-analysis-dataset). - **Metrics (val)**: **F1 macro ≈ 0.89**, **Accuracy ≈ 0.89** on a held-out validation split. - **Architecture**: Simple MLP head (256 hidden units, dropout 0.2), trained for 5 epochs with Adam. ## Input/Output - **Input**: Float32 tensor of shape `[batch, 1536]` (OpenAI text-embedding-3-small embeddings). - **Output**: Logits over 3 classes. Argmax → {0: negative, 1: neutral, 2: positive}. 
## Usage ```python from transformers import AutoModel import torch # Load model model = AutoModel.from_pretrained( "marcovise/TextEmbedding3SmallSentimentHead", trust_remote_code=True ).eval() # Your 1536-dim OpenAI embeddings embeddings = torch.randn(4, 1536) # batch of 4 examples # Predict sentiment with torch.no_grad(): logits = model(inputs_embeds=embeddings)["logits"] # [batch, 3] predictions = logits.argmax(dim=1) # [batch] # 0=negative, 1=neutral, 2=positive print(predictions) # tensor([1, 0, 2, 1]) ``` ## Training Details - **Training data**: Kaggle Sentiment Analysis Dataset - **Preprocessing**: Text → OpenAI embeddings → 3-class labels {negative: 0.0, neutral: 0.5, positive: 1.0} - **Architecture**: 1536 → 256 → ReLU → Dropout(0.2) → 3 classes - **Optimizer**: Adam (lr=1e-3, weight_decay=1e-4) - **Loss**: CrossEntropyLoss with label smoothing (0.05) - **Epochs**: 5 ## Intended Use - Quick, lightweight sentiment classification for short text once embeddings are available. - Works well for general sentiment analysis tasks similar to the training distribution. ## Limitations - Trained on a specific sentiment dataset; may have domain bias. - Requires OpenAI text-embedding-3-small embeddings as input. - Not safety-critical; evaluate before production use. - May reflect biases present in the training data. ## License MIT
Muapi/flux-abstractmorph
Muapi
2025-08-20T22:08:33Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T22:08:17Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # FLUX AbstractMorph ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: bo-abstract ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:776859@868850", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
Muapi/unfazed-character-enhancer-anime-style
Muapi
2025-08-20T22:07:43Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T22:07:28Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Unfazed - character enhancer (anime style) ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:1296402@1943371", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
rbelanec/train_svamp_1755694510
rbelanec
2025-08-20T22:07:39Z
0
0
peft
[ "peft", "safetensors", "llama-factory", "prefix-tuning", "generated_from_trainer", "base_model:meta-llama/Meta-Llama-3-8B-Instruct", "base_model:adapter:meta-llama/Meta-Llama-3-8B-Instruct", "license:llama3", "region:us" ]
null
2025-08-20T22:01:59Z
--- library_name: peft license: llama3 base_model: meta-llama/Meta-Llama-3-8B-Instruct tags: - llama-factory - prefix-tuning - generated_from_trainer model-index: - name: train_svamp_1755694510 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # train_svamp_1755694510 This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) on the svamp dataset. It achieves the following results on the evaluation set: - Loss: 0.1778 - Num Input Tokens Seen: 676320 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 123 - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Input Tokens Seen | |:-------------:|:------:|:----:|:---------------:|:-----------------:| | 0.5526 | 0.5016 | 158 | 0.7046 | 34176 | | 0.2434 | 1.0032 | 316 | 0.2998 | 67872 | | 0.0913 | 1.5048 | 474 | 0.1424 | 101696 | | 0.0227 | 2.0063 | 632 | 0.1410 | 135776 | | 0.0576 | 2.5079 | 790 | 0.1447 | 169712 | | 0.0193 | 3.0095 | 948 | 0.1086 | 203712 | | 0.1033 | 3.5111 | 1106 | 0.1210 | 237664 | | 0.0019 | 4.0127 | 1264 | 0.1067 | 271472 | | 0.079 | 4.5143 | 1422 | 0.1393 | 305088 | | 0.0025 | 5.0159 | 1580 | 0.1451 | 339264 | | 0.0008 | 5.5175 | 1738 | 0.1677 | 373488 | | 0.0053 | 6.0190 | 1896 | 0.1908 | 407264 | | 0.0004 | 6.5206 | 2054 | 0.1609 | 441200 | | 0.0001 | 7.0222 | 2212 | 
0.1493 | 475008 | | 0.0001 | 7.5238 | 2370 | 0.1729 | 508832 | | 0.0001 | 8.0254 | 2528 | 0.1765 | 542720 | | 0.0 | 8.5270 | 2686 | 0.1798 | 576512 | | 0.0 | 9.0286 | 2844 | 0.1791 | 610688 | | 0.0 | 9.5302 | 3002 | 0.1781 | 644848 | ### Framework versions - PEFT 0.15.2 - Transformers 4.51.3 - Pytorch 2.8.0+cu128 - Datasets 3.6.0 - Tokenizers 0.21.1
Muapi/exaggimated
Muapi
2025-08-20T22:03:03Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T22:02:48Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Exaggimated ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:794910@888870", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
calegpedia/blockassist-bc-stealthy_slimy_rooster_1755725736
calegpedia
2025-08-20T22:03:00Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "stealthy slimy rooster", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:02:57Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - stealthy slimy rooster --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
Muapi/sheep-s-styles-robert-valley
Muapi
2025-08-20T22:02:34Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T22:02:21Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Sheep's Styles - Robert Valley ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: ShStyRobVal ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:626250@761171", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
roeker/blockassist-bc-quick_wiry_owl_1755727265
roeker
2025-08-20T22:02:29Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "quick wiry owl", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:01:44Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - quick wiry owl --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
Wejh/affine-pondering
Wejh
2025-08-20T22:02:25Z
0
0
transformers
[ "transformers", "safetensors", "gpt_oss", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2025-08-20T22:00:04Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. 
## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. 
(2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
lilTAT/blockassist-bc-gentle_rugged_hare_1755727300
lilTAT
2025-08-20T22:02:17Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "gentle rugged hare", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:02:10Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - gentle rugged hare --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
esi777/blockassist-bc-camouflaged_trotting_eel_1755727261
esi777
2025-08-20T22:01:46Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "camouflaged trotting eel", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T22:01:43Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - camouflaged trotting eel --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
Muapi/future-noir-retro-sf-illustration-style-syd-mead
Muapi
2025-08-20T22:01:14Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T22:00:57Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Future Noir: Retro SF Illustration Style (Syd Mead) ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: sydme1 painting ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:1057818@1200643", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
punkt2/foodcalculator-gemma-3-270m-it
punkt2
2025-08-20T22:01:13Z
0
0
transformers
[ "transformers", "safetensors", "gemma3_text", "text-generation", "text-generation-inference", "unsloth", "conversational", "en", "base_model:unsloth/gemma-3-270m-it", "base_model:finetune:unsloth/gemma-3-270m-it", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2025-08-18T20:43:54Z
--- base_model: unsloth/gemma-3-270m-it tags: - text-generation-inference - transformers - unsloth - gemma3_text license: apache-2.0 language: - en --- # Uploaded finetuned model - **Developed by:** punkt2 - **License:** apache-2.0 - **Finetuned from model :** unsloth/gemma-3-270m-it This gemma3_text model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
xiaoabcd/Llama-3.1-8B-bnb-4bit-qz
xiaoabcd
2025-08-20T22:00:51Z
0
0
transformers
[ "transformers", "gguf", "llama", "text-generation-inference", "unsloth", "en", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2025-08-20T21:59:30Z
--- base_model: unsloth/meta-llama-3.1-8b-instruct-bnb-4bit tags: - text-generation-inference - transformers - unsloth - llama - gguf license: apache-2.0 language: - en --- # Uploaded model - **Developed by:** xiaoabcd - **License:** apache-2.0 - **Finetuned from model :** unsloth/meta-llama-3.1-8b-instruct-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
Muapi/pixel-art-portraits-flux
Muapi
2025-08-20T22:00:47Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T22:00:32Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Pixel Art Portraits (FLUX) ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: pixel art portrait ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:864595@967453", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
AnonymousCS/xlmr_immigration_combo26_1
AnonymousCS
2025-08-20T22:00:15Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "xlm-roberta", "text-classification", "generated_from_trainer", "base_model:FacebookAI/xlm-roberta-large", "base_model:finetune:FacebookAI/xlm-roberta-large", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2025-08-20T21:57:43Z
--- library_name: transformers license: mit base_model: FacebookAI/xlm-roberta-large tags: - generated_from_trainer metrics: - accuracy model-index: - name: xlmr_immigration_combo26_1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlmr_immigration_combo26_1 This model is a fine-tuned version of [FacebookAI/xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2140 - Accuracy: 0.9344 - 1-f1: 0.9006 - 1-recall: 0.8919 - 1-precision: 0.9094 - Balanced Acc: 0.9238 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Use OptimizerNames.ADAMW_TORCH_FUSED with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 15 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | 1-f1 | 1-recall | 1-precision | Balanced Acc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------:|:-----------:|:------------:| | 0.2591 | 1.0 | 25 | 0.1775 | 0.9409 | 0.9073 | 0.8687 | 0.9494 | 0.9228 | | 0.2407 | 2.0 | 50 | 0.1862 | 0.9280 | 0.8862 | 0.8417 | 0.9356 | 0.9064 | | 0.1988 | 3.0 | 75 | 0.2140 | 0.9344 | 0.9006 | 0.8919 | 0.9094 | 0.9238 | ### Framework versions - Transformers 4.56.0.dev0 - Pytorch 2.8.0+cu126 - Datasets 4.0.0 - Tokenizers 0.21.4
quantumxnode/blockassist-bc-dormant_peckish_seahorse_1755725679
quantumxnode
2025-08-20T21:59:24Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "dormant peckish seahorse", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T21:59:21Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - dormant peckish seahorse --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
mradermacher/Datarus-R1-14B-preview-GGUF
mradermacher
2025-08-20T21:58:56Z
0
0
transformers
[ "transformers", "gguf", "en", "base_model:DatarusAI/Datarus-R1-14B-preview", "base_model:quantized:DatarusAI/Datarus-R1-14B-preview", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2025-08-20T17:02:21Z
--- base_model: DatarusAI/Datarus-R1-14B-preview language: - en library_name: transformers license: apache-2.0 mradermacher: readme_rev: 1 quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static quants of https://huggingface.co/DatarusAI/Datarus-R1-14B-preview <!-- provided-files --> ***For a convenient overview and download list, visit our [model page for this model](https://hf.tst.eu/model#Datarus-R1-14B-preview-GGUF).*** weighted/imatrix quants are available at https://huggingface.co/mradermacher/Datarus-R1-14B-preview-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. 
IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Datarus-R1-14B-preview-GGUF/resolve/main/Datarus-R1-14B-preview.Q2_K.gguf) | Q2_K | 5.9 | | | [GGUF](https://huggingface.co/mradermacher/Datarus-R1-14B-preview-GGUF/resolve/main/Datarus-R1-14B-preview.Q3_K_S.gguf) | Q3_K_S | 6.8 | | | [GGUF](https://huggingface.co/mradermacher/Datarus-R1-14B-preview-GGUF/resolve/main/Datarus-R1-14B-preview.Q3_K_M.gguf) | Q3_K_M | 7.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Datarus-R1-14B-preview-GGUF/resolve/main/Datarus-R1-14B-preview.Q3_K_L.gguf) | Q3_K_L | 8.0 | | | [GGUF](https://huggingface.co/mradermacher/Datarus-R1-14B-preview-GGUF/resolve/main/Datarus-R1-14B-preview.IQ4_XS.gguf) | IQ4_XS | 8.3 | | | [GGUF](https://huggingface.co/mradermacher/Datarus-R1-14B-preview-GGUF/resolve/main/Datarus-R1-14B-preview.Q4_K_S.gguf) | Q4_K_S | 8.7 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Datarus-R1-14B-preview-GGUF/resolve/main/Datarus-R1-14B-preview.Q4_K_M.gguf) | Q4_K_M | 9.1 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Datarus-R1-14B-preview-GGUF/resolve/main/Datarus-R1-14B-preview.Q5_K_S.gguf) | Q5_K_S | 10.4 | | | [GGUF](https://huggingface.co/mradermacher/Datarus-R1-14B-preview-GGUF/resolve/main/Datarus-R1-14B-preview.Q5_K_M.gguf) | Q5_K_M | 10.6 | | | [GGUF](https://huggingface.co/mradermacher/Datarus-R1-14B-preview-GGUF/resolve/main/Datarus-R1-14B-preview.Q6_K.gguf) | Q6_K | 12.2 | very good quality | | [GGUF](https://huggingface.co/mradermacher/Datarus-R1-14B-preview-GGUF/resolve/main/Datarus-R1-14B-preview.Q8_0.gguf) | Q8_0 | 15.8 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: 
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
AnonymousCS/xlmr_immigration_combo26_0
AnonymousCS
2025-08-20T21:57:39Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "xlm-roberta", "text-classification", "generated_from_trainer", "base_model:FacebookAI/xlm-roberta-large", "base_model:finetune:FacebookAI/xlm-roberta-large", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2025-08-20T21:53:36Z
--- library_name: transformers license: mit base_model: FacebookAI/xlm-roberta-large tags: - generated_from_trainer metrics: - accuracy model-index: - name: xlmr_immigration_combo26_0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlmr_immigration_combo26_0 This model is a fine-tuned version of [FacebookAI/xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2525 - Accuracy: 0.9113 - 1-f1: 0.8691 - 1-recall: 0.8842 - 1-precision: 0.8545 - Balanced Acc: 0.9045 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Use OptimizerNames.ADAMW_TORCH_FUSED with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 15 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | 1-f1 | 1-recall | 1-precision | Balanced Acc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:--------:|:-----------:|:------------:| | 0.6105 | 1.0 | 25 | 0.6039 | 0.6671 | 0.0 | 0.0 | 0.0 | 0.5 | | 0.3238 | 2.0 | 50 | 0.2721 | 0.8946 | 0.8340 | 0.7954 | 0.8766 | 0.8697 | | 0.2809 | 3.0 | 75 | 0.2403 | 0.9049 | 0.8471 | 0.7915 | 0.9111 | 0.8765 | | 0.2353 | 4.0 | 100 | 0.2520 | 0.9049 | 0.8571 | 0.8571 | 0.8571 | 0.8929 | | 0.2196 | 5.0 | 125 | 0.2525 | 0.9113 | 0.8691 | 0.8842 | 0.8545 | 0.9045 | ### Framework versions - Transformers 4.56.0.dev0 - Pytorch 2.8.0+cu126 - Datasets 
4.0.0 - Tokenizers 0.21.4
lilTAT/blockassist-bc-gentle_rugged_hare_1755727000
lilTAT
2025-08-20T21:57:18Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "gentle rugged hare", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T21:57:12Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - gentle rugged hare --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
roeker/blockassist-bc-quick_wiry_owl_1755726958
roeker
2025-08-20T21:57:13Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "quick wiry owl", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T21:56:36Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - quick wiry owl --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
Muapi/reptile-skin-ce-sdxl-flux
Muapi
2025-08-20T21:55:54Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T21:55:18Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Reptile Skin - CE - SDXL & Flux ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: rptlsknCE style ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:616253@892034", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
plesniar/zangskari-ipa
plesniar
2025-08-20T21:55:46Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "base_model:facebook/mms-1b-all", "base_model:finetune:facebook/mms-1b-all", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2025-08-18T01:47:13Z
--- library_name: transformers license: cc-by-nc-4.0 base_model: facebook/mms-1b-all tags: - generated_from_trainer model-index: - name: zangskari-ipa results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # zangskari-ipa This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - optimizer: Use OptimizerNames.ADAMW_TORCH_FUSED with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 32 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.56.0.dev0 - Pytorch 2.8.0+cu126 - Datasets 4.0.0 - Tokenizers 0.21.4
ChenWu98/numina_qwen_2.5_sft_cluster_soft_split_0
ChenWu98
2025-08-20T21:55:28Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:Qwen/Qwen2.5-1.5B", "base_model:finetune:Qwen/Qwen2.5-1.5B", "endpoints_compatible", "region:us" ]
null
2025-08-20T21:53:30Z
--- base_model: Qwen/Qwen2.5-1.5B library_name: transformers model_name: numina_qwen_2.5_sft_cluster_soft_split_0 tags: - generated_from_trainer - trl - sft licence: license --- # Model Card for numina_qwen_2.5_sft_cluster_soft_split_0 This model is a fine-tuned version of [Qwen/Qwen2.5-1.5B](https://huggingface.co/Qwen/Qwen2.5-1.5B). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?" generator = pipeline("text-generation", model="None", device="cuda") output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0] print(output["generated_text"]) ``` ## Training procedure [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/chenwu/huggingface/runs/eblv9638) This model was trained with SFT. ### Framework versions - TRL: 0.19.1 - Transformers: 4.51.1 - Pytorch: 2.7.0 - Datasets: 4.0.0 - Tokenizers: 0.21.4 ## Citations Cite TRL as: ```bibtex @misc{vonwerra2022trl, title = {{TRL: Transformer Reinforcement Learning}}, author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec}, year = 2020, journal = {GitHub repository}, publisher = {GitHub}, howpublished = {\url{https://github.com/huggingface/trl}} } ```
Muapi/randommaxx-animeniji
Muapi
2025-08-20T21:53:12Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T21:52:16Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # RandomMaxx Animeniji ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: Anime, Niji ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:794095@887953", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
roeker/blockassist-bc-quick_wiry_owl_1755726650
roeker
2025-08-20T21:52:22Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "quick wiry owl", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T21:51:33Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - quick wiry owl --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
Muapi/retro-comic-style-betty-and-me
Muapi
2025-08-20T21:51:25Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T21:51:13Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Retro comic style (Betty and me) ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: oodcomi ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:673335@753754", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
Muapi/dystopian-vibes-flux
Muapi
2025-08-20T21:50:52Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T21:50:05Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Dystopian Vibes //Flux ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: 0y5top1a8e ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:686779@768626", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
lilTAT/blockassist-bc-gentle_rugged_hare_1755726572
lilTAT
2025-08-20T21:50:09Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "gentle rugged hare", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T21:50:02Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - gentle rugged hare --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
CharlieBoyer/gated2
CharlieBoyer
2025-08-20T21:50:03Z
0
0
null
[ "region:us" ]
null
2025-08-20T21:41:40Z
--- extra_gated_eu_disallowed: true ---
Muapi/reptile-scales
Muapi
2025-08-20T21:49:39Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T21:49:25Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Reptile Scales ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:291952@1546655", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
coelacanthxyz/blockassist-bc-finicky_thriving_grouse_1755724736
coelacanthxyz
2025-08-20T21:47:59Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "finicky thriving grouse", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T21:47:52Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - finicky thriving grouse --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
Muapi/yumemihoshino-planetarian
Muapi
2025-08-20T21:47:23Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T21:47:00Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # YumemiHoshino(星野梦美)-Planetarian(星之梦) ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: Yumemi ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:26134@1201465", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
chainway9/blockassist-bc-untamed_quick_eel_1755724789
chainway9
2025-08-20T21:47:12Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "untamed quick eel", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T21:47:09Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - untamed quick eel --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).
OpenVINO/Qwen2.5-Coder-1.5B-Instruct-int4-ov
OpenVINO
2025-08-20T21:47:12Z
0
0
transformers
[ "transformers", "openvino", "qwen2", "text-generation", "code", "codeqwen", "chat", "qwen", "qwen-coder", "conversational", "en", "base_model:Qwen/Qwen2.5-Coder-1.5B-Instruct", "base_model:quantized:Qwen/Qwen2.5-Coder-1.5B-Instruct", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2025-08-20T21:46:39Z
--- license: apache-2.0 license_link: https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct/blob/main/LICENSE language: - en base_model: - Qwen/Qwen2.5-Coder-1.5B-Instruct pipeline_tag: text-generation library_name: transformers tags: - code - codeqwen - chat - qwen - qwen-coder base_model_relation: quantized --- # Qwen2.5-Coder-1.5B-Instruct-int4-ov * Model creator: [Qwen](https://huggingface.co/Qwen) * Original model: [Qwen2.5-Coder-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct) ## Description This is [Qwen2.5-Coder-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct) model converted to the [OpenVINO™ IR](https://docs.openvino.ai/2025/documentation/openvino-ir-format.html) (Intermediate Representation) format with weights compressed to INT4 by [NNCF](https://github.com/openvinotoolkit/nncf). ## Quantization Parameters Weight compression was performed using `nncf.compress_weights` with the following parameters: * mode: **INT4_ASYM** For more information on quantization, check the [OpenVINO model optimization guide](https://docs.openvino.ai/2025/openvino-workflow/model-optimization-guide/weight-compression.html). ## Compatibility The provided OpenVINO™ IR model is compatible with: * OpenVINO version 2025.2.0 and higher * Optimum Intel 1.25.0 and higher ## Running Model Inference with [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) 1. Install packages required for using [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) integration with the OpenVINO backend: ``` pip install optimum[openvino] ``` 2. 
Run model inference: ``` from transformers import AutoTokenizer from optimum.intel.openvino import OVModelForCausalLM model_id = "OpenVINO/Qwen2.5-Coder-1.5B-Instruct-int4-ov" tokenizer = AutoTokenizer.from_pretrained(model_id) model = OVModelForCausalLM.from_pretrained(model_id) inputs = tokenizer("write a quick sort algorithm.", return_tensors="pt") outputs = model.generate(**inputs, max_length=200) text = tokenizer.batch_decode(outputs)[0] print(text) ``` For more examples and possible optimizations, refer to the [Inference with Optimum Intel](https://docs.openvino.ai/2025/openvino-workflow-generative/inference-with-optimum-intel.html). ## Running Model Inference with [OpenVINO GenAI](https://github.com/openvinotoolkit/openvino.genai) 1. Install packages required for using OpenVINO GenAI. ``` pip install openvino-genai huggingface_hub ``` 2. Download model from HuggingFace Hub ``` import huggingface_hub as hf_hub model_id = "OpenVINO/Qwen2.5-Coder-1.5B-Instruct-int4-ov" model_path = "Qwen2.5-Coder-1.5B-Instruct-int4-ov" hf_hub.snapshot_download(model_id, local_dir=model_path) ``` 3. 
Run model inference: ``` import openvino_genai as ov_genai device = "CPU" pipe = ov_genai.LLMPipeline(model_path, device) pipe.get_tokenizer().set_chat_template(pipe.get_tokenizer().chat_template) print(pipe.generate("write a quick sort algorithm.", max_length=200)) ``` More GenAI usage examples can be found in OpenVINO GenAI library [docs](https://docs.openvino.ai/2025/openvino-workflow-generative/inference-with-genai.html) and [samples](https://github.com/openvinotoolkit/openvino.genai?tab=readme-ov-file#openvino-genai-samples) You can find more detailed usage examples in OpenVINO Notebooks: - [LLM](https://openvinotoolkit.github.io/openvino_notebooks/?search=LLM) - [RAG text generation](https://openvinotoolkit.github.io/openvino_notebooks/?search=RAG+system&tasks=Text+Generation) ## Limitations Check the original [model card](https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct) for limitations. ## Legal information The original model is distributed under [Apache License Version 2.0](https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct/blob/main/LICENSE) license. More details can be found in [Qwen2.5-Coder-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct). ## Disclaimer Intel is committed to respecting human rights and avoiding causing or contributing to adverse impacts on human rights. See [Intel’s Global Human Rights Principles](https://www.intel.com/content/dam/www/central-libraries/us/en/documents/policy-human-rights.pdf). Intel’s products and software are intended only to be used in applications that do not cause or contribute to adverse impacts on human rights.
birx-web/Qwen3-Coder-30B-A3B-Instruct-Q4_K_M-GGUF
birx-web
2025-08-20T21:46:19Z
0
0
transformers
[ "transformers", "gguf", "llama-cpp", "gguf-my-repo", "text-generation", "base_model:Qwen/Qwen3-Coder-30B-A3B-Instruct", "base_model:quantized:Qwen/Qwen3-Coder-30B-A3B-Instruct", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
text-generation
2025-08-20T21:45:05Z
--- library_name: transformers license: apache-2.0 license_link: https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct/blob/main/LICENSE pipeline_tag: text-generation base_model: Qwen/Qwen3-Coder-30B-A3B-Instruct tags: - llama-cpp - gguf-my-repo --- # birx-web/Qwen3-Coder-30B-A3B-Instruct-Q4_K_M-GGUF This model was converted to GGUF format from [`Qwen/Qwen3-Coder-30B-A3B-Instruct`](https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo birx-web/Qwen3-Coder-30B-A3B-Instruct-Q4_K_M-GGUF --hf-file qwen3-coder-30b-a3b-instruct-q4_k_m.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo birx-web/Qwen3-Coder-30B-A3B-Instruct-Q4_K_M-GGUF --hf-file qwen3-coder-30b-a3b-instruct-q4_k_m.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. 
``` ./llama-cli --hf-repo birx-web/Qwen3-Coder-30B-A3B-Instruct-Q4_K_M-GGUF --hf-file qwen3-coder-30b-a3b-instruct-q4_k_m.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo birx-web/Qwen3-Coder-30B-A3B-Instruct-Q4_K_M-GGUF --hf-file qwen3-coder-30b-a3b-instruct-q4_k_m.gguf -c 2048 ```
Muapi/pegasus-knight-fire-emblem-pony-flux-il-nai
Muapi
2025-08-20T21:45:37Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T21:45:22Z
--- license: openrail++ tags: - lora - stable-diffusion - flux.1-d model_type: LoRA --- # Pegasus knight / ペガサスナイト (Fire Emblem) Pony/Flux/IL/NAI ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: pegasus knight, horse, horseback riding, pegasus,, shoulder armor, breastplate, pegasus knight uniform \(fire emblem\) ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Content-Type": "application/json", "x-api-key": os.getenv("MUAPIAPP_API_KEY")} payload = { "prompt": "masterpiece, best quality, 1girl, looking at viewer", "model_id": [{"model": "civitai:530189@967666", "weight": 1.0}], "width": 1024, "height": 1024, "num_images": 1 } print(requests.post(url, headers=headers, json=payload).json()) ```
lilTAT/blockassist-bc-gentle_rugged_hare_1755726280
lilTAT
2025-08-20T21:45:18Z
0
0
null
[ "gensyn", "blockassist", "gensyn-blockassist", "minecraft", "gentle rugged hare", "arxiv:2504.07091", "region:us" ]
null
2025-08-20T21:45:11Z
--- tags: - gensyn - blockassist - gensyn-blockassist - minecraft - gentle rugged hare --- # Gensyn BlockAssist Gensyn's BlockAssist is a distributed extension of the paper [AssistanceZero: Scalably Solving Assistance Games](https://arxiv.org/abs/2504.07091).