30 changes: 28 additions & 2 deletions forums/settings.py
@@ -267,28 +267,54 @@
except ImportError:
pass


LOGGING = {
'version': 1,
'disable_existing_loggers': False,

'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},

'formatters': {
'verbose': {
'format': '[{asctime}] {levelname} {name}: {message}',
'style': '{',
},
},

'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},

# 🔹 New handler for spam detection
'spam_file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': BASE_DIR / 'logs/spam_detection.log', # requires an existing logs/ directory (see note below)
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 5, # keep 5 old logs
'formatter': 'verbose',
},
},

'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},

# 🔹 New dedicated logger
'spam_detection': {
'handlers': ['spam_file'],
'level': 'INFO',
'propagate': False,
},
}
}
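
Note: RotatingFileHandler opens its log file as soon as the handler is created (unless delay=True), which happens when Django processes the LOGGING dict at startup, so the logs/ directory must exist beforehand. A minimal sketch of one way to guarantee that, placed above the LOGGING block and assuming BASE_DIR is a pathlib.Path as in the default Django settings template:

# Ensure the directory for spam_detection.log exists before LOGGING is processed.
# Assumes BASE_DIR is a pathlib.Path (the default in modern Django settings).
LOG_DIR = BASE_DIR / 'logs'
LOG_DIR.mkdir(parents=True, exist_ok=True)

Alternatively, creating logs/ in the deployment scripts keeps the settings module free of filesystem side effects.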

116 changes: 116 additions & 0 deletions seed_spam_rules.py
@@ -0,0 +1,116 @@
# Script to seed the database with predefined spam rules
import os
import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "forums.settings")
django.setup()

from website.models import SpamRule



def seed_spam_rules():
rules = {
# Certification/Exam dump patterns
"Certification/Exam Spam": {
"score": 30,
"type": SpamRule.KEYWORD,
"patterns": [
r"exam\s+dumps?", r"braindumps?", r"practice\s+test",
r"certification\s+exam", r"test\s+preparation",
r"exam\s+questions?", r"study\s+guides?",
r"pdf\s+\+\s+testing\s+engine", r"testing\s+engine",
r"exam\s+prep", r"mock\s+exam", r"real\s+exam",
r"dumps\s+pdf", r"braindump"
],
},

# Promotional spam
"Promotional Spam": {
"score": 25,
"type": SpamRule.KEYWORD,
"patterns": [
r"click\s+here", r"join\s+now", r"limited\s+time",
r"discount", r"coupon\s+code", r"20%\s+off",
r"free\s+download", r"get\s+certified",
r"unlock\s+your\s+career", r"master\s+the",
r"boost\s+your\s+career", r"cert20",
r"at\s+checkout", r"special\s+offer",
],
},

# Suspicious domains
"Suspicious Domain": {
"score": 35,
"type": SpamRule.DOMAIN,
"patterns": [
r"dumpscafe\.com", r"certsout\.com", r"mycertshub\.com",
r"vmexam\.com", r"kissnutra\.com", r"dumps.*\.com",
r"cert.*\.com", r"exam.*\.com",
],
},

# Generic business language
"Business/Career Spam": {
"score": 15,
"type": SpamRule.KEYWORD,
"patterns": [
r"attests\s+to\s+your\s+proficiency",
r"esteemed\s+(?:accreditation|certification|credential)",
r"valuable\s+asset\s+to\s+companies",
r"demonstrates\s+your\s+ability",
r"comprehensive\s+study\s+(?:tools|materials)",
r"interactive\s+practice\s+tests",
r"real\s+exam\s+questions",
r"actual\s+exam\s+questions",
r"validated\s+by\s+.*certification",
r"urgently\s+need\s+experts",
],
},

# Gaming content
"Gaming Spam": {
"score": 20,
"type": SpamRule.KEYWORD,
"patterns": [
r"spacebar\s+clicker", r"clicker\s+game",
r"addictive\s+game", r"upgrades\s+available",
r"instant\s+rewards",
],
},

# Health/Supplement spam
"Health Spam": {
"score": 22,
"type": SpamRule.KEYWORD,
"patterns": [
r"vitalit[äa]t", r"nahrungserg[äa]nzungsmittel",
r"libido", r"fruchtbarkeit", r"energie",
r"hormonelle\s+balance", r"perforan",
],
},
}

inserted, skipped = 0, 0
for note, config in rules.items():
for pattern in config["patterns"]:
exists = SpamRule.objects.filter(pattern=pattern, type=config["type"]).exists()
if not exists:
SpamRule.objects.create(
type=config["type"],
pattern=pattern,
score=config["score"],
notes=note,
)
inserted += 1
else:
skipped += 1

print(f"✅ Inserted {inserted} new rules, skipped {skipped} existing ones.")


# Run the seeder only when the script is executed directly
if __name__ == "__main__":
    seed_spam_rules()
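
For context, both this seeder and the SpamQuestionDetector in website/helpers.py assume a SpamRule model with roughly the fields used here (type with KEYWORD/DOMAIN constants, pattern, score, notes, active, expires_at). A hypothetical sketch of that shape, inferred from usage rather than taken from website/models.py:

# Hypothetical sketch only: field names are inferred from how the seeder and
# SpamQuestionDetector use them; the real model in website/models.py may differ.
from django.db import models

class SpamRule(models.Model):
    KEYWORD = 'keyword'
    DOMAIN = 'domain'
    TYPE_CHOICES = [(KEYWORD, 'Keyword'), (DOMAIN, 'Domain')]

    type = models.CharField(max_length=20, choices=TYPE_CHOICES)
    pattern = models.CharField(max_length=255)                  # regex, matched case-insensitively
    score = models.PositiveIntegerField(default=0)              # weight added to the spam score
    notes = models.CharField(max_length=255, blank=True)        # human-readable rule group
    active = models.BooleanField(default=True)                  # only active rules are loaded
    expires_at = models.DateTimeField(null=True, blank=True)    # optional rule expiry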
2 changes: 1 addition & 1 deletion website/forms.py
@@ -69,4 +69,4 @@ def __init__(self, *args, **kwargs):

class AnswerQuesitionForm(forms.Form):
question = forms.IntegerField(widget=forms.HiddenInput())
body = forms.CharField(widget=forms.Textarea())
174 changes: 171 additions & 3 deletions website/helpers.py
@@ -1,10 +1,26 @@
import json
import logging
import re
from datetime import datetime
from typing import Dict, List, Tuple, Optional

from django.conf import settings
from django.db.models import Q
from django.utils import timezone
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sklearn.metrics.pairwise import cosine_similarity

from website.models import Question, User, SpamRule, SpamLog
from website.templatetags.permission_tags import can_edit, can_hide_delete

sw = stopwords.words('english')

# Dedicated logger for spam detection (configured in forums/settings.py)
spam_logger = logging.getLogger('spam_detection')


def get_video_info(path):
"""Uses ffmpeg to determine information about a video. This has not been broadly
tested and your milage may vary"""
@@ -67,4 +83,156 @@ def get_similar_questions(user_ques,question):
if w in question: l2.append(1)
else: l2.append(0)
cs = cosine_similarity((l1,l2))
return cs[0][1]


# helpers.py

MULTIPLE_URL_WEIGHT = 20
MULTIPLE_URL_THRESHOLD = 3

class SpamQuestionDetector:
def __init__(self):
# load only active + not expired rules
now = timezone.now()
qs = SpamRule.objects.filter(active=True).filter(
Q(expires_at__isnull=True) | Q(expires_at__gt=now)
)
self._compiled = []
for r in qs:
try:
cre = re.compile(r.pattern, re.IGNORECASE)
except re.error:
spam_logger.warning(f"Invalid regex in SpamRule id={r.id}: {r.pattern}")
continue
self._compiled.append({
'rule': r,
'compiled': cre
})

def extract_urls(self, text: str):
return re.findall(r'https?://[^\s)<>"]+', text)

def detect_spam(self, user, question, title: str, content: str, category: str = "", tutorial: str = "") -> dict:
combined_text = " ".join(filter(None, [title, content, category, tutorial])).lower()
spam_score = 0
matches = []

for entry in self._compiled:
rule = entry['rule']
cre = entry['compiled']
if cre.search(combined_text):
spam_score += rule.score
matches.append({
'id': rule.id,
'pattern': rule.pattern,
'score': rule.score,
'type': rule.type,
'notes': rule.notes
})

# detect multiple URLs (we keep this behaviour from original)
urls = self.extract_urls(combined_text)
if len(urls) >= MULTIPLE_URL_THRESHOLD:
spam_score += MULTIPLE_URL_WEIGHT
matches.append({
'pattern': f'{len(urls)} URLs',
'score': MULTIPLE_URL_WEIGHT,
'type': 'urls'
})

# classification (same thresholds as earlier)
if spam_score >= 60:
confidence, action = 'HIGH', 'DELETE'
elif spam_score >= 30:
confidence, action = 'MEDIUM', 'REVIEW'
elif spam_score >= 15:
confidence, action = 'LOW', 'REVIEW'
else:
confidence, action = 'CLEAN', 'APPROVE'

result = {
'spam_score': spam_score,
'matches': matches,
'confidence': confidence,
'recommended_action': action,
'url_count': len(urls)
}

# debug log
spam_logger.info(
"SpamDetect result: question_id=%s user_id=%s score=%s action=%s matches=%s",
question.id, user.id, spam_score, action, len(matches)
)
return result


def handle_spam(question, user, delete_on_high=True, save_question_metadata_before_delete=True):
"""
Runs detection on a saved Question instance and logs/takes action.
- question: saved Question instance (has .id)
- user: Django user instance who created the question (for logging)
- delete_on_high: if True, HIGH confidence questions are marked inactive (status=0) after logging; otherwise they are flagged as spam and hidden
- save_question_metadata_before_delete: currently unused in this implementation
Returns a status string: 'AUTO_DELETE', 'FLAGGED', 'APPROVED', 'HIDDEN'
"""
detector = SpamQuestionDetector()
result = detector.detect_spam(
user=user,
question=question,
title=getattr(question, 'title', '') or '',
content=getattr(question, 'body', '') or '',
category=getattr(question, 'category', '') or '',
tutorial=getattr(question, 'tutorial', '') or ''
)

spam_score = result['spam_score']
confidence = result['confidence']
action = result['recommended_action']
details = result['matches']

# prepare log payload
log_payload = {
'question_id': question.id,
'user_id': user.id if user else None,
'category': getattr(question, 'category', '') or '',
'title': getattr(question, 'title', '') or '',
'content': getattr(question, 'body', '') or '',
'action': None,
'spam_score': spam_score,
'confidence': confidence,
'details': details
}

# TAKE ACTION
if action == 'DELETE' and confidence == 'HIGH':
log_payload['action'] = 'AUTO_DELETE'
SpamLog.objects.create(**log_payload)

if delete_on_high:
# mark inactive after logging (the row is kept for audit rather than hard-deleted)
spam_logger.info(f"MARK_INACTIVE: Question {question.id} by user {user.id if user else None} score={spam_score}")
question.status = 0
question.save(update_fields=['status'])
return 'AUTO_DELETE'
else:
# hide instead of delete
question.spam = True
question.status = 0
question.save(update_fields=['spam', 'status'])
return 'HIDDEN'

elif action == 'REVIEW':
# flag for admin review
log_payload['action'] = 'FLAGGED'
SpamLog.objects.create(**log_payload)

question.approval_required = True
question.spam = False
question.save(update_fields=['approval_required', 'spam'])
return 'FLAGGED'

else:
# APPROVE / CLEAN
question.spam = False
question.approval_required = False
question.save(update_fields=['spam', 'approval_required'])
return 'APPROVED'
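
As a usage sketch, handle_spam is meant to run right after a new Question is saved. A hypothetical call site is shown below; the helper name, messages, and return handling are assumptions for illustration, not part of this diff:

# Hypothetical helper for the question-create view; names and messages are
# illustrative only, not part of this pull request.
from django.contrib import messages
from website.helpers import handle_spam

def moderate_new_question(request, question):
    status = handle_spam(question, request.user, delete_on_high=True)
    if status == 'AUTO_DELETE':
        messages.error(request, "Your post was removed by the spam filter.")
    elif status == 'FLAGGED':
        messages.info(request, "Your post is awaiting moderator review.")
    # 'APPROVED' and 'HIDDEN' need no user-facing message here
    return status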