taha092 committed
Commit da22f13 · verified · 1 parent: a48e04e

Upload 3 files

Files changed (3)
  1. app.py +181 -0
  2. config.py +10 -0
  3. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,181 @@
+ import gradio as gr
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+ from sentence_transformers import SentenceTransformer, util
+ import numpy as np
+ import requests
+ import gradio.themes as grthemes
+ import config
+
+ # ----------------------
+ # Paraphrasing Model Setup
+ # ----------------------
+ PARAPHRASE_MODEL_NAME = "Vamsi/T5_Paraphrase_Paws"
+ paraphrase_tokenizer = AutoTokenizer.from_pretrained(PARAPHRASE_MODEL_NAME)
+ paraphrase_model = AutoModelForSeq2SeqLM.from_pretrained(PARAPHRASE_MODEL_NAME)
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ paraphrase_model = paraphrase_model.to(device)
+
+ # ----------------------
+ # Semantic Similarity Model
+ # ----------------------
+ similarity_model = SentenceTransformer('all-MiniLM-L6-v2')
+
+ # ----------------------
+ # Tone Templates
+ # ----------------------
+ tone_templates = {
+     "Academic": "Paraphrase the following text in a formal, academic tone:",
+     "Casual": "Paraphrase the following text in a casual, conversational tone:",
+     "Friendly": "Paraphrase the following text in a friendly, approachable tone:",
+     "Stealth": "Paraphrase the following text to bypass AI detectors and sound as human as possible:",
+ }
+
+ # ----------------------
+ # Paraphrasing Function
+ # ----------------------
+ def paraphrase(text, tone):
+     prompt = tone_templates[tone] + " " + text
+     input_ids = paraphrase_tokenizer.encode(prompt, return_tensors="pt", max_length=256, truncation=True).to(device)
+     outputs = paraphrase_model.generate(
+         input_ids,
+         do_sample=True,
+         top_k=120,
+         top_p=0.95,
+         temperature=0.7,
+         repetition_penalty=1.2,
+         max_length=256,
+         num_return_sequences=1
+     )
+     paraphrased = paraphrase_tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return paraphrased
+
+ # ----------------------
+ # Semantic Similarity Function
+ # ----------------------
+ def semantic_similarity(text1, text2):
+     emb1 = similarity_model.encode(text1, convert_to_tensor=True)
+     emb2 = similarity_model.encode(text2, convert_to_tensor=True)
+     sim = util.pytorch_cos_sim(emb1, emb2).item()
+     return sim
+
+ # ----------------------
+ # Real AI Detection (Winston AI API)
+ # ----------------------
+ def check_ai_score(text):
+     api_key = config.WINSTON_AI_API_KEY
+     api_url = config.WINSTON_AI_API_URL
+     if not api_key:
+         return None, "No API key set. Please add your Winston AI API key to config.py."
+     headers = {
+         "Authorization": f"Bearer {api_key}",
+         "Content-Type": "application/json"
+     }
+     data = {"text": text, "sentences": False}
+     try:
+         response = requests.post(api_url, headers=headers, json=data, timeout=30)
+         if response.status_code == 200:
+             result = response.json()
+             # Winston AI returns a 'score' (0-100, higher = more human)
+             score = result.get("score", None)
+             if score is not None:
+                 ai_prob = 1.0 - (score / 100.0)
+                 return ai_prob, None
+             else:
+                 return None, "No score in Winston AI response."
+         else:
+             return None, f"Winston AI error: {response.status_code} {response.text}"
+     except Exception as e:
+         return None, f"Winston AI exception: {str(e)}"
+
+ # ----------------------
+ # Humanization Score & Rating
+ # ----------------------
+ def humanization_score(sim, ai_prob):
+     # Lower similarity and lower AI probability = more human
+     score = (1.0 - sim) * 0.5 + (1.0 - ai_prob) * 0.5
+     return score
+
+ def humanization_rating(score):
+     if score < 0.7:
+         return f"⚠️ Still AI-like ({score:.2f})"
+     elif score < 0.85:
+         return f"👍 Acceptable ({score:.2f})"
+     else:
+         return f"✅ Highly Humanized ({score:.2f})"
+
+ # ----------------------
+ # Main Processing Function
+ # ----------------------
+ def process(text, tone):
+     if not text.strip():
+         return "", "", 0.0, "", 0.0, ""
+     # Pre-humanization AI detection
+     pre_ai_prob, pre_err = check_ai_score(text)
+     if pre_ai_prob is None:
+         return "", f"AI Detection Error: {pre_err}", 0.0, "", 0.0, ""
+     # Paraphrase
+     try:
+         paraphrased = paraphrase(text, tone)
+     except Exception as e:
+         return f"[Paraphrasing error: {str(e)}]", "", 0.0, "", 0.0, ""
+     # Post-humanization AI detection
+     post_ai_prob, post_err = check_ai_score(paraphrased)
+     if post_ai_prob is None:
+         return paraphrased, f"AI Detection Error: {post_err}", 0.0, "", 0.0, ""
+     # Semantic similarity
+     sim = semantic_similarity(text, paraphrased)
+     # Humanization score
+     score = humanization_score(sim, post_ai_prob)
+     rating = humanization_rating(score)
+     ai_score_str = f"Pre: {100*(1-pre_ai_prob):.1f}% human | Post: {100*(1-post_ai_prob):.1f}% human"
+     return (
+         paraphrased,   # gr.Textbox (string)
+         ai_score_str,  # gr.Markdown (string)
+         sim,           # gr.Number (float)
+         rating,        # gr.Markdown (string)
+         score * 100,   # gr.Number (float)
+         ""
+     )
+
+ # ----------------------
+ # Gradio UI
+ # ----------------------
+ custom_theme = grthemes.Base(
+     primary_hue="blue",
+     secondary_hue="blue",
+     neutral_hue="slate"
+ )
+
+ with gr.Blocks(theme=custom_theme, title="AI Humanizer - Made by Taha") as demo:
+     gr.Markdown("""
+ # 🧠 AI Humanizer
+ <div style='display:flex;justify-content:space-between;align-items:center;'>
+ <span style='font-size:1.2em;color:#7bb1ff;'>Rewrite AI text to sound 100% human</span>
+ <span style='font-weight:bold;color:#7bb1ff;'>Made by Taha</span>
+ </div>
+ """, elem_id="header")
+     with gr.Row():
+         with gr.Column():
+             text_in = gr.Textbox(label="Paste AI-generated text here", lines=8, placeholder="Paste your text...", elem_id="input-box")
+             tone = gr.Dropdown(["Academic", "Casual", "Friendly", "Stealth"], value="Stealth", label="Tone Selector")
+             btn = gr.Button("Humanize", elem_id="humanize-btn")
+         with gr.Column():
+             text_out = gr.Textbox(label="Humanized Output", lines=8, interactive=False, elem_id="output-box")
+             ai_scores = gr.Markdown("", elem_id="ai-scores")
+             sim_score = gr.Number(label="Similarity (0=very different, 1=very similar)", interactive=False)
+             rating = gr.Markdown("", elem_id="rating")
+             human_score = gr.Number(label="Humanization Score (%)", interactive=False)
+     btn.click(
+         process,
+         inputs=[text_in, tone],
+         outputs=[text_out, ai_scores, sim_score, rating, human_score, gr.Textbox(visible=False)],
+         api_name="humanize"
+     )
+     gr.Markdown("""
+ <div style='text-align:center;color:#7bb1ff;margin-top:2em;'>
+ <b>Made by Taha</b> | Free for unlimited use | Optimized for students and creators
+ </div>
+ """, elem_id="footer")
+
+ demo.launch()
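
Because btn.click registers the handler with api_name="humanize", the deployed app also exposes a programmatic endpoint. A minimal sketch of calling it with gradio_client; the Space id below is a placeholder assumption, not something stated in this commit:

from gradio_client import Client

client = Client("taha092/ai-humanizer")  # placeholder Space id -- replace with the real deployment
result = client.predict(
    "Paste some AI-generated text here.",  # text_in
    "Stealth",                             # tone
    api_name="/humanize",
)
print(result)  # (humanized text, pre/post AI scores, similarity, rating, humanization %, "")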
config.py ADDED
@@ -0,0 +1,10 @@
+ # config.py
+ # Place your API keys and endpoints here for AI detection services
+
+ WINSTON_AI_API_KEY = "5lC2zMKk3cDVUhk2augozEK9jZYEUiGYfcqKIEC2bee7261a" # Add your Winston AI API key here
+ WINSTON_AI_API_URL = "https://api.gowinston.ai/v2/ai-content-detection" # Official v2 endpoint for text detection
+
+ SAPLING_API_KEY = ""
+ SAPLING_API_URL = "https://api.sapling.ai/api/v1/aidetect" # Example, update as needed
+
+ # Add more as needed
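
Committing a literal key works, but on Hugging Face Spaces keys are usually supplied as repository secrets and read from the environment. A minimal variant of config.py along those lines (a sketch; it assumes secrets named WINSTON_AI_API_KEY and SAPLING_API_KEY are set):

# config.py -- environment-variable variant (sketch, not the committed file)
import os

WINSTON_AI_API_KEY = os.getenv("WINSTON_AI_API_KEY", "")  # empty string triggers app.py's "No API key set" message
WINSTON_AI_API_URL = "https://api.gowinston.ai/v2/ai-content-detection"

SAPLING_API_KEY = os.getenv("SAPLING_API_KEY", "")
SAPLING_API_URL = "https://api.sapling.ai/api/v1/aidetect"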
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ gradio>=3.50.2
+ transformers>=4.40.0
+ torch>=2.0.0
+ sentence-transformers>=2.6.1
+ sentencepiece>=0.1.99
+ requests>=2.31.0
+ scikit-learn>=1.0.2
+ numpy>=1.21.0
+ # For Hugging Face Spaces and API integration
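
To try the app outside Spaces, the usual workflow would be to install the pinned dependencies and launch the script directly (standard pip/Gradio usage, not spelled out in the commit):

pip install -r requirements.txt
python app.py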