Upload scripts/sample_atomic_commites.py with huggingface_hub
scripts/sample_atomic_commites.py
CHANGED
@@ -1,13 +1,16 @@
 #!/usr/bin/env python3
 """
 Sample atomic commits from CCS dataset for concern extraction.
-Implements …
+Implements balanced sampling pipeline with filtering, normalization, and deduplication.
 """
 
 import pandas as pd
 
 import tiktoken
-from typing import Dict, List, Set
+from typing import Dict, List, Set, Tuple
+
+# Random seed for reproducibility
+RANDOM_SEED: int = 42
 
 # Processing configuration
 CONVENTIONAL_COMMIT_TYPES: List[str] = ["feat", "fix", "refactor", "test", "docs", "build", "cicd"]
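The new RANDOM_SEED constant is what ties the later sampling change together: pandas sampling is only repeatable when the same random_state is supplied on every call. A minimal standalone sketch with toy data (not part of the committed script):

import pandas as pd

RANDOM_SEED: int = 42

# Toy frame standing in for the filtered CCS commits.
df = pd.DataFrame({"sha": [f"commit_{i}" for i in range(10)]})

# Same seed, same rows: reruns of the pipeline pick identical commits.
first = df.sample(n=3, random_state=RANDOM_SEED)
second = df.sample(n=3, random_state=RANDOM_SEED)
assert first.equals(second)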
@@ -38,7 +41,7 @@ DIFF_OUTPUT_DIR: str = "data/types"
 
 
 def normalize_dataset(df: pd.DataFrame) -> pd.DataFrame:
-    """Normalize CI labels to CICD for consistent categorization."""
+    """Normalize CI commit type labels to CICD for consistent categorization."""
     df[COLUMN_ANNOTATED_TYPE] = (
         df[COLUMN_ANNOTATED_TYPE]
         .str.lower()
@@ -49,8 +52,8 @@ def normalize_dataset(df: pd.DataFrame) -> pd.DataFrame:
     return df
 
 
-def apply_token_filtering(df: pd.DataFrame) -> pd.DataFrame:
-    """Filter commits exceeding …
+def remove_long_token_commits(df: pd.DataFrame) -> pd.DataFrame:
+    """Filter out commits exceeding TARGET_TOKEN_LIMIT to prevent model context overflow."""
     encoding = tiktoken.get_encoding(ENCODING_MODEL)
 
     combined_text = (
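Only the renamed signature and docstring of remove_long_token_commits are visible in this hunk; the body that builds combined_text and applies the limit is unchanged and not shown. A rough sketch of how a tiktoken-based filter of this shape typically looks; the constant values and column names below are assumptions, not the file's actual definitions:

import pandas as pd
import tiktoken

ENCODING_MODEL = "cl100k_base"   # assumed encoding name
TARGET_TOKEN_LIMIT = 4096        # assumed limit

def remove_long_token_commits_sketch(df: pd.DataFrame) -> pd.DataFrame:
    """Keep only rows whose combined text fits the token budget (illustrative only)."""
    encoding = tiktoken.get_encoding(ENCODING_MODEL)
    # Placeholder columns standing in for the message/diff fields counted in the real script.
    combined_text = df["commit_message"].fillna("") + "\n" + df["git_diff"].fillna("")
    token_counts = combined_text.map(lambda text: len(encoding.encode(text)))
    return df[token_counts <= TARGET_TOKEN_LIMIT].copy()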
@@ -70,8 +73,8 @@ def apply_token_filtering(df: pd.DataFrame) -> pd.DataFrame:
     return filtered_df
 
 
-def apply_sha_deduplication(df: pd.DataFrame, excluded_shas: Set[str]) -> pd.DataFrame:
-    """Remove …
+def remove_existing_commits(df: pd.DataFrame, excluded_shas: Set[str]) -> pd.DataFrame:
+    """Remove commits with SHAs that already exist in the sampled dataset."""
     original_count = len(df)
 
     sha_mask = ~df[COLUMN_SHA].astype(str).isin(excluded_shas)
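remove_existing_commits (and remove_excluded_commits later in the diff) both keep the rows whose SHA is not in a given set via a boolean isin mask. Toy illustration, with a literal "sha" column standing in for COLUMN_SHA:

import pandas as pd

df = pd.DataFrame({"sha": ["a1b2", "c3d4", "e5f6"]})
excluded_shas = {"c3d4"}

# ~isin(...) flips the membership test, so excluded SHAs drop out.
sha_mask = ~df["sha"].astype(str).isin(excluded_shas)
print(df[sha_mask]["sha"].tolist())  # ['a1b2', 'e5f6']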
@@ -82,23 +85,25 @@ def apply_sha_deduplication(df: pd.DataFrame, excluded_shas: Set[str]) -> pd.Dat
     return filtered_df
 
 
-def …
-    """Load …
+def load_shas_and_type_counts(file_path: str) -> Tuple[Set[str], Dict[str, int]]:
+    """Load commit SHAs and type counts from CSV file for deduplication and intelligent sampling."""
     try:
         df = pd.read_csv(file_path)
         sha_set = set(df[COLUMN_SHA].astype(str))
+        type_counts = df[COLUMN_ANNOTATED_TYPE].value_counts().to_dict()
         print(f"Loaded {len(sha_set)} SHAs for deduplication")
-        …
+        print(f"Existing type counts: {type_counts}")
+        return sha_set, type_counts
     except FileNotFoundError:
         print(f"No existing samples found at {file_path}")
-        return set()
+        return set(), {}
     except Exception as e:
-        print(f"Error loading existing …
-        return set()
+        print(f"Error loading existing data: {e}")
+        return set(), {}
 
 
 def load_ccs_dataset(file_path: str) -> pd.DataFrame:
-    """Load and validate …
+    """Load CCS dataset CSV and validate required columns exist."""
     try:
         df = pd.read_csv(file_path)
         if df.empty:
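The second value returned by load_shas_and_type_counts is just value_counts() on the annotated-type column converted to a plain dict, which is what later lets main() top up only the types that are still short. Standalone illustration with a literal column name in place of COLUMN_ANNOTATED_TYPE:

import pandas as pd

df = pd.DataFrame({"annotated_type": ["feat", "fix", "feat", "docs"]})

# Mirrors: type_counts = df[COLUMN_ANNOTATED_TYPE].value_counts().to_dict()
type_counts = df["annotated_type"].value_counts().to_dict()
print(type_counts)  # {'feat': 2, 'fix': 1, 'docs': 1}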
@@ -118,7 +123,7 @@ def load_ccs_dataset(file_path: str) -> pd.DataFrame:
 def save_to_csv(
     data: List[Dict[str, str]], output_path: str, columns: List[str]
 ) -> None:
-    """…
+    """Save sampled commit data to CSV file, appending if file exists."""
     import os
 
     os.makedirs(os.path.dirname(output_path), exist_ok=True)
@@ -140,7 +145,7 @@ def save_to_csv(
 def group_commits_by_type(
     df: pd.DataFrame, valid_types: List[str]
 ) -> Dict[str, pd.DataFrame]:
-    """…
+    """Filter commits by valid types and group into separate DataFrames by type."""
     type_mask = df[COLUMN_ANNOTATED_TYPE].isin(valid_types)
     valid_df = df[type_mask].copy()
 
@@ -158,13 +163,13 @@ def group_commits_by_type(
 def sample_commits_for_type(
     df: pd.DataFrame, count: int, output_columns: List[str]
 ) -> List[Dict[str, str]]:
-    """…
-    sampled_df = df.sample(n=count, random_state=…
+    """Randomly sample specified count of commits from DataFrame."""
+    sampled_df = df.sample(n=count, random_state=RANDOM_SEED)
     return sampled_df[output_columns].to_dict("records")
 
 
 def extract_diffs(sampled_data: List[Dict[str, str]], output_dir: str) -> None:
-    """…
+    """Create individual diff files organized by commit type in subdirectories."""
     import os
 
     type_counts = {}
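sample_commits_for_type still delegates to DataFrame.sample, which raises ValueError when asked for more rows than exist (with the default replace=False). That is why the new loop in main(), shown in the last hunk, clamps the request with min(needed_count, available_type_count) before calling it. Quick standalone check:

import pandas as pd

df = pd.DataFrame({"sha": ["a", "b", "c"]})

try:
    df.sample(n=5, random_state=42)          # more rows requested than exist
except ValueError as err:
    print(f"sampling failed: {err}")

print(len(df.sample(n=min(5, len(df)), random_state=42)))  # 3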
@@ -196,13 +201,13 @@ def extract_diffs(sampled_data: List[Dict[str, str]], output_dir: str) -> None:
             record[COLUMN_GIT_DIFF],
         ]
 
-        with open(filepath, "w") as f:
+        with open(filepath, "w", encoding="utf-8") as f:
             f.write("\n".join(content_lines))
 
     print(f"Extracted {len(sampled_data)} diff files to {output_dir}")
 
 def remove_excluded_commits(df: pd.DataFrame, excluded_shas: Set[str]) -> pd.DataFrame:
-    """Remove …
+    """Remove commits with SHAs listed in the excluded commits file."""
     before_count = len(df)
     print(f"Initial commit count: {before_count}")
 
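The only functional change in extract_diffs is the explicit encoding="utf-8"; without it, open() falls back to the platform's locale encoding (often cp1252 on Windows), which can mangle non-ASCII diff content. For illustration:

# Explicit encoding makes the written diff files byte-identical across platforms.
with open("example.diff", "w", encoding="utf-8") as f:
    f.write("feat: add café menu\n")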
@@ -217,69 +222,80 @@ def remove_excluded_commits(df: pd.DataFrame, excluded_shas: Set[str]) -> pd.Dat
 
 def main() -> None:
     """
-    …
-    1. Load dataset and …
-    2. …
-    3. …
-    4. …
-    5. …
-    6. …
+    Execute atomic sampling pipeline for CCS dataset:
+    1. Load dataset, existing SHAs and type counts for deduplication and sampling
+    2. Remove excluded commits by SHA
+    3. Remove existing commits to prevent duplicates
+    4. Normalize CI commit types to CICD
+    5. Filter commits exceeding token limits
+    6. Sample needed amounts per type to reach target
+    7. Save results and extract individual diff files (new samples only)
     """
     print("Starting atomic sampling strategy for CCS dataset")
     print("=" * 50)
 
-    # Step 1: Load dataset and …
-    print("Step 1: Loading dataset and …
-    existing_shas = …
-    excluded_shas = …
+    # Step 1: Load dataset, backup SHAs and existing type counts
+    print("Step 1: Loading dataset, backup SHAs and existing type counts")
+    existing_shas, existing_type_counts = load_shas_and_type_counts(SAMPLED_CSV_PATH)
+    excluded_shas, _ = load_shas_and_type_counts(EXCLUDED_COMMITS_PATH)
     ccs_df = load_ccs_dataset(CCS_SOURCE_PATH)
 
     # Step 2: Remove excluded commits
     print("\nStep 2: Removing excluded commits")
     ccs_df = remove_excluded_commits(ccs_df, excluded_shas)
 
-    # Step 3: …
-    print("\nStep 3: …
+    # Step 3: Remove existing commits
+    print("\nStep 3: Removing existing commits")
+    ccs_df = remove_existing_commits(ccs_df, existing_shas)
+
+    # Step 4: Apply CI->CICD normalization
+    print("\nStep 4: Applying CI->CICD normalization")
     ccs_df = normalize_dataset(ccs_df)
 
-    # Step 4: Apply token-based filtering
-    print("\nStep 4: Applying token-based filtering")
-    ccs_df = apply_token_filtering(ccs_df)
 
-    # Step 5: Apply …
-    print("\nStep 5: Applying …
-    ccs_df = …
+    # Step 5: Apply token-based filtering
+    print("\nStep 5: Applying token-based filtering")
+    ccs_df = remove_long_token_commits(ccs_df)
 
-    # Step 6: Group by type and …
+    # Step 6: Group by type and sample
     print("\nStep 6: Grouping by type and random sampling")
     commits_by_type = group_commits_by_type(ccs_df, CONVENTIONAL_COMMIT_TYPES)
 
     all_sampled_data = []
-    …
+
+    for commit_type, commits_df in commits_by_type.items():
+        existing_type_count = existing_type_counts.get(commit_type, 0)
+        needed_count = max(0, SAMPLES_PER_TYPE - existing_type_count)  # Skip if target reached
+        available_type_count = len(commits_df)
+        actual_sample_count = min(needed_count, available_type_count)
+
+        if needed_count == 0:
+            print(f" {commit_type}: target reached, skipping")
+            continue
+        if actual_sample_count <= 0:
+            print(f" {commit_type}: no commits available")
+            continue
+
         sampled_data = sample_commits_for_type(
-            commits_df, …
+            commits_df, actual_sample_count, OUTPUT_COLUMNS
         )
         all_sampled_data.extend(sampled_data)
+        print(f" {commit_type}: sampled {actual_sample_count} commits")
 
     print(f"Random sampling: generated {len(all_sampled_data)} samples total")
 
     # Step 7: Save results and extract diffs
     print("\nStep 7: Saving results and extracting diffs")
-    …
-    …
+    if all_sampled_data:
+        save_to_csv(all_sampled_data, SAMPLED_CSV_PATH, OUTPUT_COLUMNS)
+        extract_diffs(all_sampled_data, DIFF_OUTPUT_DIR)
+    else:
+        print("No new samples to save - all types have reached target counts")
 
     # Final summary
     print("\n" + "=" * 50)
     print("Atomic sampling completed successfully!")
-    …
-    type_counts = {}
-    for record in all_sampled_data:
-        commit_type = record.get(COLUMN_ANNOTATED_TYPE, "")
-        type_counts[commit_type] = type_counts.get(commit_type, 0) + 1
-    …
-    print("Final sample distribution:")
-    for commit_type in sorted(type_counts.keys()):
-        print(f" {commit_type}: {type_counts[commit_type]} samples")
+    print(f"New samples added: {len(all_sampled_data)}")
 
 
 if __name__ == "__main__":
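The reworked loop in main() no longer draws a fixed amount per type; it tops each type up to SAMPLES_PER_TYPE, skipping types that already hit the target and taking whatever is left when fewer commits remain. A standalone rendering of that arithmetic with made-up numbers (the real SAMPLES_PER_TYPE lives in the unchanged config block):

SAMPLES_PER_TYPE = 50  # assumed target per commit type

existing_type_counts = {"feat": 50, "fix": 41}            # hypothetical counts from earlier runs
available_per_type = {"feat": 200, "fix": 7, "docs": 3}   # hypothetical rows left after filtering

for commit_type, available_type_count in available_per_type.items():
    needed_count = max(0, SAMPLES_PER_TYPE - existing_type_counts.get(commit_type, 0))
    actual_sample_count = min(needed_count, available_type_count)
    # feat: target already reached (needs 0); fix: tops up 7 of the 9 missing;
    # docs: takes all 3 that are available.
    print(commit_type, needed_count, actual_sample_count)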