GitHub-Python — Licensed & Elaborated Variants
This repository ships two complementary Python-code corpora extracted from public GitHub:
- Licensed Subset – strictly permissive-licensed files suitable for commercial redistribution / model training (main corpus used in our experiments).
- Elaborated Collection – a broader crawl that additionally contains files under copyleft or unclear licenses (GPL/AGPL/LGPL, etc.). Useful for analysis or pre-training where license mixing is acceptable.
Both variants target code-completion / generation research.
Dataset at a glance
| | Licensed Subset | Elaborated Collection |
|---|---|---|
| Files (.py) | 53,017 | 186,066 |
| Unique repositories | 16,447 | 59,852 |
| Repository owners | 12,515 | 43,517 |
| Compressed size | 732 MB | 2.4 GB * |
| Vocabulary (tokens) | 443,431 | 443,431 † |
| License coverage | Permissive only | Mixed (perm. + copyleft) |
| Secrets redacted | ✅ | ⚠️ not guaranteed |
| Time window | ≥ 2015-01-01 | ≥ 2015-01-01 |
* estimated – the elaborated corpus is distributed as a raw file list, not a single text file.
† the same tokenizer vocabulary file is shared by both variants.
Numbers were obtained from the final redacted corpus and companion metadata.
Dataset structure
```
huggingface_dataset/
├─ mega_licensed_corpus_redacted.txt      # Licensed Subset – concatenated code
├─ python_files.txt                       # Licensed Subset – raw file URLs
├─ python_files_elaborated.txt            # Elaborated Collection – raw file URLs
├─ python_files_elaborated_metadata.csv   # Elaborated Collection metadata
└─ custom_tokens_vocab.txt                # <token>\t<id> vocabulary file
```
Important Note
For technical reasons, separate splits are stored as separate Dataset instances. See https://huggingface.co/datasets/jblitzar/github-python-metadata, https://huggingface.co/datasets/jblitzar/github-python-meta-elaborated, and https://huggingface.co/datasets/jblitzar/github-python-corpus.
File separator
Individual files are concatenated with the sentinel line:

```
# <FILESEP>
```

Anything from one sentinel up to the next sentinel (or EOF) is the source code of a single file.
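A minimal sketch for recovering individual files from the concatenated Licensed Subset:

```python
# Split the concatenated corpus back into individual source files.
SENTINEL = "# <FILESEP>"

with open("mega_licensed_corpus_redacted.txt", encoding="utf-8") as f:
    corpus = f.read()

# Every chunk between sentinels is the source code of one file.
files = [chunk.strip("\n") for chunk in corpus.split(SENTINEL) if chunk.strip()]
print(f"recovered {len(files)} files")
```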
Dataset variants
1. Licensed Subset (`mega_licensed_corpus_redacted.txt`)
   • 53 K permissively licensed files (MIT/BSD/Apache/ISC/Unlicense).
   • All API keys & credentials removed.
   • Ready for redistribution & commercial use (respect upstream NOTICE files).
2. Elaborated Collection (`python_files_elaborated.txt`)
   • 186 K files from a much larger crawl.
   • Contains GPL / LGPL / AGPL and other copyleft licenses.
   • Shipped as a URL list + metadata CSV; you must download the files yourself (`datasets.load_dataset` streaming, `wget`, etc.; a download sketch follows below).
   • No license filtering or secret redaction performed – use with caution.
When first loading the dataset, decide which variant aligns with your use case (e.g. proprietary model training → Licensed Subset only).
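A minimal download loop over the Elaborated Collection's URL list, using only the standard library (a sketch; the output file naming is illustrative):

```python
import urllib.request
from pathlib import Path

out_dir = Path("elaborated_files")
out_dir.mkdir(exist_ok=True)

with open("python_files_elaborated.txt", encoding="utf-8") as f:
    urls = [line.strip() for line in f if line.strip()]

for i, url in enumerate(urls[:100]):  # cap the demo at 100 files
    try:
        with urllib.request.urlopen(url, timeout=10) as resp:
            (out_dir / f"{i:06d}.py").write_bytes(resp.read())
    except OSError as exc:  # network errors, deleted files, etc.
        print(f"skipping {url}: {exc}")
```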
Collection methodology
Repository discovery
- Queried the GitHub REST API for projects with ≥ 10 stars (earlier iterations used 100+, later expanded for coverage).
- Only repositories with primary language Python and a last commit ≥ 2015.
File filtering
- Retain files whose size ∈ [1 KB, 100 KB].
- Exclude common build/packaging scripts (`setup.py`, `__init__.py`, etc.); see the combined filter/dedup sketch below.
License compliance
- Allowed: MIT, Apache-2.0, BSD-2/3-Clause, ISC, Unlicense.
- GPL, LGPL, AGPL and proprietary licenses were excluded.
Deduplication
- Files deduplicated by the SHA hash of their contents; duplicates skipped (see the sketch below).
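Combining the size, name, and deduplication rules, a sketch might look like this (the exclusion list and hash algorithm are illustrative assumptions, not the pipeline's published rules):

```python
import hashlib
from pathlib import Path

EXCLUDED_NAMES = {"setup.py", "__init__.py"}  # illustrative, not exhaustive
seen_hashes: set[str] = set()

def keep(path: Path) -> bool:
    """Apply the size, name, and duplicate-content rules described above."""
    if path.name in EXCLUDED_NAMES:
        return False
    if not 1_024 <= path.stat().st_size <= 100 * 1_024:  # 1 KB .. 100 KB
        return False
    digest = hashlib.sha256(path.read_bytes()).hexdigest()
    if digest in seen_hashes:  # already saw identical content
        return False
    seen_hashes.add(digest)
    return True
```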
Formatting & cleaning
- Formatted with autopep8 to normalise whitespace.
- Custom script removed trailing whitespace & normalised newlines.
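A sketch of that cleaning pass (assumes the `autopep8` package; the exact options used upstream are not published):

```python
import autopep8  # pip install autopep8

def clean(source: str) -> str:
    # PEP 8 whitespace normalisation, then strip trailing whitespace
    # and force "\n" newlines.
    formatted = autopep8.fix_code(source)
    return "\n".join(line.rstrip() for line in formatted.splitlines()) + "\n"
```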
Secret redaction
- `truffleHog` + a custom regex pass removed >150 active credentials.
- Redacted corpus stored as `mega_licensed_corpus_redacted.txt`.
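The custom regex pass might resemble the following sketch (the patterns are illustrative examples only, not the actual rules used):

```python
import re

# Illustrative secret patterns -- the project's actual rules are not published.
SECRET_PATTERNS = [
    re.compile(r"AKIA[0-9A-Z]{16}"),  # AWS access key IDs
    re.compile(r"(?i)(api[_-]?key|token|secret)\s*=\s*['\"][^'\"]{16,}['\"]"),
]

def redact(source: str) -> str:
    for pattern in SECRET_PATTERNS:
        source = pattern.sub("<REDACTED>", source)
    return source
```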
Custom tokenisation
The accompanying `custom_tokens_vocab.txt` implements a Python-aware sub-token scheme:
- Strip doc-strings & comments.
- Split on:
  - Camel-Case boundaries (`CamelCase` → `Camel`, `Case`)
  - Underscores, spaces
  - Indentation & newlines (preserved as `<newline>` / `<tab>` tokens)
- Rare tokens (frequency < 10) were dropped → 443 k vocabulary.
Example:
```python
def helloWorld(value):
    return value + 1
```

tokenises to:

```
def hello world ( value ) <newline> <tab> return value + 1 <newline>
```
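For illustration, a rough re-implementation of these splitting rules (hypothetical; the published vocabulary was built with the authors' own script, which evidently also drops some punctuation such as the trailing `:`):

```python
import re

def split_tokens(code: str) -> list[str]:
    tokens = []
    for line in code.splitlines():
        indent = len(line) - len(line.lstrip(" "))
        tokens.extend(["<tab>"] * (indent // 4))  # assume 4-space indents
        # Break camelCase boundaries, then split underscores and punctuation.
        line = re.sub(r"([a-z0-9])([A-Z])", r"\1 \2", line.strip())
        tokens += [t.lower() for t in re.findall(r"\w+|[^\w\s]", line.replace("_", " "))]
        tokens.append("<newline>")
    return tokens

print(" ".join(split_tokens("def helloWorld(value):\n    return value + 1")))
# -> def hello world ( value ) : <newline> <tab> return value + 1 <newline>
```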
Usage
```python
from datasets import load_dataset

ds = load_dataset("jblitzar/github-python-corpus", split="train")
print(ds[0]["code"][:300])  # raw source code
```
If you prefer token-level examples (for instance, to keep memory usage down), map the vocabulary over the dataset. A minimal sketch, assuming the vocabulary includes an `<unk>` entry:
```python
from tokenizers import Tokenizer
from tokenizers.models import WordLevel
from tokenizers.pre_tokenizers import WhitespaceSplit

# The vocab file is "<token>\t<id>" per line, not a JSON-serialised tokenizer,
# so build a WordLevel model from it rather than using Tokenizer.from_file.
with open("custom_tokens_vocab.txt", encoding="utf-8") as f:
    vocab = {t: int(i) for t, i in (l.rstrip("\n").split("\t") for l in f if l.strip())}
tok = Tokenizer(WordLevel(vocab, unk_token="<unk>"))  # assumes an <unk> entry
tok.pre_tokenizer = WhitespaceSplit()

def encode(ex):
    ex["input_ids"] = tok.encode(ex["code"]).ids
    return ex

ds = ds.map(encode, remove_columns=["code"])
```
Ethical considerations & limitations
- Licenses respected – the Licensed Subset contains only permissively licensed files; retain NOTICE files when redistributing derivative works.
- Secrets removed – automated & manual audits performed, yet users must not assume zero secrets; re-audit before public deployments.
- Code quality – projects vary in style & correctness. Generated models may replicate bugs or vulnerable patterns.
Citation
If you use this dataset, please cite:
```bibtex
@misc{github-python-2024,
  author       = {JBlitzar},
  title        = {GitHub-Python: A Permissively Licensed Corpus of Python Code},
  year         = {2024},
  howpublished = {\url{https://huggingface.co/datasets/jblitzar/github-python}},
  note         = {Version 1.0}
}
```
License
Dataset card and aggregation scripts: GPLv3.
Each code snippet remains under its original repository license (MIT,
Apache-2.0, BSD, ISC, etc.). Users must comply with upstream notices when
redistributing code or derivatives.