Berom0227 commited on
Commit
665d5e5
·
verified ·
1 Parent(s): f8534bb

Upload scripts/show_tokens_distribution.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. scripts/show_tokens_distribution.py +177 -0
scripts/show_tokens_distribution.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Analyze git diff token distribution for tangled commits.
4
+ Purpose: Find token size distribution to optimize LLM input.
5
+ """
6
+
7
import functools
from pathlib import Path

import numpy as np
import pandas as pd
import tiktoken
11
+
12
# Constants
ENCODING_NAME = "cl100k_base"  # tiktoken encoding name used for token counting
CSV_FILE = "../data/tangled_ccs_dataset_test.csv"  # input dataset, relative to scripts/
DIFF_COLUMN = "diff"  # column holding the raw git diff text
OUTPUT_FILE = "../token_distribution_results.csv"  # per-row results destination
17
+
18
+
19
@functools.lru_cache(maxsize=None)
def _get_encoding(name: str):
    """Construct (once) and memoize the tiktoken encoding for *name*.

    The original code rebuilt the encoding on every call, which is costly
    when count_tokens runs once per dataset row.
    """
    return tiktoken.get_encoding(name)


def count_tokens(text: str) -> int:
    """Count tokens in *text* with tiktoken.

    Falls back to a rough chars//4 heuristic if tiktoken is unavailable
    or encoding fails, so the analysis still completes best-effort.
    """
    try:
        return len(_get_encoding(ENCODING_NAME).encode(text))
    except Exception:
        # ~4 characters per token is a common rule-of-thumb approximation.
        return len(text) // 4
26
+
27
+
28
def get_token_range(token_count: int) -> str:
    """Map a token count to its histogram bucket label."""
    # Upper bound (inclusive) of each bucket, paired with its label.
    buckets = (
        (1024, "≤1024"),
        (2048, "1025-2048"),
        (4096, "2049-4096"),
        (8192, "4097-8192"),
        (16384, "8193-16384"),
        (32768, "16385-32768"),
    )
    for upper_bound, label in buckets:
        if token_count <= upper_bound:
            return label
    return ">32768"
44
+
45
+
46
def analyze_token_distribution(df: pd.DataFrame) -> pd.DataFrame:
    """Compute per-row token counts and bucket labels for the diff column.

    Rows whose diff is NaN are skipped. Returns a DataFrame with columns
    row_idx, token_count, token_range.
    """

    def make_record(idx, diff_text):
        # One output record per non-empty diff.
        n_tokens = count_tokens(str(diff_text))
        return {
            "row_idx": idx,
            "token_count": n_tokens,
            "token_range": get_token_range(n_tokens),
        }

    records = [
        make_record(idx, row[DIFF_COLUMN])
        for idx, row in df.iterrows()
        if not pd.isna(row[DIFF_COLUMN])
    ]
    return pd.DataFrame(records)
68
+
69
+
70
def create_distribution_summary(results_df: pd.DataFrame) -> tuple[pd.DataFrame, dict]:
    """Create a bucketed token-distribution summary plus overall statistics.

    Args:
        results_df: Output of analyze_token_distribution, with
            "token_range" and "token_count" columns.

    Returns:
        A (summary_df, stats) tuple: summary_df has one row per bucket with
        count and percentage; stats holds total/median/mean/min/max tokens.

    Note: the original annotated the return as `pd.DataFrame` although it
    returns a tuple, and divided by zero on an empty frame — both fixed.
    """
    token_ranges = [
        "≤1024",
        "1025-2048",
        "2049-4096",
        "4097-8192",
        "8193-16384",
        "16385-32768",
        ">32768",
    ]

    total_count = len(results_df)
    distribution = results_df["token_range"].value_counts()
    token_counts = results_df["token_count"].values

    summary_data = []
    for range_label in token_ranges:
        count = int(distribution.get(range_label, 0))
        # Guard the empty-frame case instead of raising ZeroDivisionError.
        percentage = (count / total_count) * 100 if total_count else 0.0
        summary_data.append(
            {
                "token_range": range_label,
                "count": count,
                "percentage": round(percentage, 1),
            }
        )

    # Overall statistics; NumPy reductions are undefined on empty arrays,
    # so report zeros for an empty input.
    if total_count:
        stats = {
            "total_samples": total_count,
            "median_tokens": int(np.median(token_counts)),
            "mean_tokens": int(np.mean(token_counts)),
            "min_tokens": int(np.min(token_counts)),
            "max_tokens": int(np.max(token_counts)),
        }
    else:
        stats = {
            "total_samples": 0,
            "median_tokens": 0,
            "mean_tokens": 0,
            "min_tokens": 0,
            "max_tokens": 0,
        }

    return pd.DataFrame(summary_data), stats
108
+
109
+
110
def main() -> None:
    """Load the tangled-commit dataset, analyze diff token sizes, and report.

    Reads CSV_FILE, writes per-row token counts to OUTPUT_FILE, and prints
    a bucketed distribution summary with cumulative context-size thresholds.
    """
    csv_file = Path(CSV_FILE)
    output_file = Path(OUTPUT_FILE)

    if not csv_file.exists():
        print(f"Error: CSV file not found at {csv_file}")
        return

    # Fixed: this banner was an f-string with no placeholders.
    print("Tangled Commits Token Distribution Analyzer")
    print(f"Loading dataset from {csv_file}...")

    try:
        df = pd.read_csv(csv_file)
        print(f"Loaded {len(df)} tangled commits")
        print(f"Using tiktoken {ENCODING_NAME} encoding")

        print("Analyzing token distribution...")
        results_df = analyze_token_distribution(df)
        print(f"Processed {len(results_df)} diffs")

        print("Creating distribution summary...")
        summary_df, stats = create_distribution_summary(results_df)

        # Save detailed per-row results for downstream analysis.
        results_df.to_csv(output_file, index=False)
        print(f"Results saved to {output_file}")

        # Print overall statistics.
        print("\nToken Distribution Summary:")
        print("=" * 60)
        print(f"Total samples: {stats['total_samples']}")
        print(f"Median tokens: {stats['median_tokens']}")
        print(f"Mean tokens: {stats['mean_tokens']}")
        print(f"Min tokens: {stats['min_tokens']}")
        print(f"Max tokens: {stats['max_tokens']}")
        print("-" * 60)

        # Per-bucket breakdown.
        for _, row in summary_df.iterrows():
            print(
                f"{row['token_range']:>12}: {row['count']:>4} samples ({row['percentage']:>5.1f}%)"
            )

        # Cumulative percentages at common LLM context-size thresholds.
        le_1024 = summary_df[summary_df["token_range"] == "≤1024"]["percentage"].iloc[0]
        le_4096 = summary_df[
            summary_df["token_range"].isin(["≤1024", "1025-2048", "2049-4096"])
        ]["percentage"].sum()
        le_8192 = summary_df[
            summary_df["token_range"].isin(
                ["≤1024", "1025-2048", "2049-4096", "4097-8192"]
            )
        ]["percentage"].sum()

        print("-" * 60)
        print(f"≤1024 tokens: {le_1024:.1f}%")
        print(f"≤4096 tokens: {le_4096:.1f}%")
        print(f"≤8192 tokens: {le_8192:.1f}%")

    except Exception as e:
        # Top-level boundary: report the failure with a traceback instead of
        # letting the script die with a bare stack dump.
        print(f"Error processing file: {e}")
        import traceback

        traceback.print_exc()


if __name__ == "__main__":
    main()