Update ai_text_detector_valid_final.py
ai_text_detector_valid_final.py
CHANGED
@@ -48,41 +48,33 @@ def clean_text(text: str) -> str:
     return text.strip()
 
 def classify_szegedai(text: str):
-    """ModernBERT ensemble detector with human label boost"""
     cleaned_text = clean_text(text)
     if not cleaned_text.strip():
         return {"error": "Empty text"}
 
     inputs = tokenizer_modernbert(cleaned_text, return_tensors="pt", truncation=True, padding=True).to(device)
-
     with torch.no_grad():
         logits_1 = model_1(**inputs).logits
         logits_2 = model_2(**inputs).logits
         logits_3 = model_3(**inputs).logits
 
-    # Softmax probabilities
     probs1 = torch.softmax(logits_1, dim=1)
     probs2 = torch.softmax(logits_2, dim=1)
     probs3 = torch.softmax(logits_3, dim=1)
 
-    # Boost the human label slightly to reduce false positives
     human_index = 24
     for p in [probs1, probs2, probs3]:
-        p[:, human_index] *=
-        p = p / p.sum(dim=1, keepdim=True)
+        p[:, human_index] *= 2.0  # Boost human label
+        p = p / p.sum(dim=1, keepdim=True)
 
-    # Ensemble average
     probs = (probs1 + probs2 + probs3) / 3
 
-
-
-    ai_probs[human_index] = 0
-    ai_total_prob = ai_probs.sum().item() * 100
-    human_prob = 100 - ai_total_prob
+    human_prob = probs[0][human_index].item() * 100
+    ai_prob = 100 - human_prob
 
     return {
         "Human Probability": round(human_prob, 2),
-        "AI Probability": round(
+        "AI Probability": round(ai_prob, 2),
     }
 
 # ---------------------------
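
Net effect of the change: the human label is boosted by a fixed factor of 2.0, and the reported AI probability is now the complement of the ensembled human-class probability rather than the sum over all non-human classes used by the removed ai_probs branch. Below is a minimal, self-contained sketch of that scoring path. Random logits stand in for the outputs of model_1, model_2 and model_3 (which are loaded elsewhere in the file), and num_labels = 25 with human_index = 24 are assumptions taken from the diff. The sketch renormalises in place (p /= ...), since the `p = p / ...` form in the diff rebinds the loop variable and leaves probs1, probs2 and probs3 un-renormalised after the boost.

import torch

# Sketch only: random logits stand in for the three ModernBERT ensemble outputs.
num_labels = 25      # assumed label count (human_index = 24 as in the diff)
human_index = 24
logits = [torch.randn(1, num_labels) for _ in range(3)]

probs = [torch.softmax(l, dim=1) for l in logits]
for p in probs:
    p[:, human_index] *= 2.0           # boost the human label in place
    p /= p.sum(dim=1, keepdim=True)    # renormalise the same tensor in place

avg = sum(probs) / len(probs)          # ensemble average over the three models
human_prob = avg[0, human_index].item() * 100
ai_prob = 100 - human_prob

print({"Human Probability": round(human_prob, 2), "AI Probability": round(ai_prob, 2)})

With real inputs, the logits list would come from running the tokenised text through each of the three models under torch.no_grad(), exactly as in the function above.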