-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathparser_heuristics.py
More file actions
283 lines (243 loc) · 10.6 KB
/
parser_heuristics.py
File metadata and controls
283 lines (243 loc) · 10.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
"""
Email Parser and Heuristic Phishing Detection Module
Parses raw emails and detects phishing indicators
"""
import re
import email
import base64
from email import policy
from email.parser import BytesParser
from typing import Dict, List, Tuple
from datetime import datetime
from bs4 import BeautifulSoup
class EmailParser:
    """
    Parses raw email data and extracts relevant fields.
    """

    @staticmethod
    def parse_raw(raw_bytes: bytes) -> Dict:
        """
        Parse raw email bytes into structured data.

        Args:
            raw_bytes: Raw email in RFC 2822 format.

        Returns:
            Dictionary containing:
                - subject: Email subject line
                - from: Sender email address
                - date: Date sent
                - body_text: Plain text body content
                - body_html: HTML body content (if available)
                - attachments: List of attachment filenames
                - all_links: De-duplicated list of URLs found in the email

            On any parse failure a placeholder dictionary with
            subject '(Parse Error)' is returned instead of raising.
        """
        try:
            # policy.default enables the modern EmailMessage API
            # (get_content, get_content_type, proper header decoding).
            msg = BytesParser(policy=policy.default).parsebytes(raw_bytes)

            # Basic headers, with safe defaults for missing ones.
            subject = msg.get('Subject', '(No Subject)')
            from_addr = msg.get('From', 'Unknown')
            date_str = msg.get('Date', '')

            body_text = ""
            body_html = ""
            attachments = []
            all_links = []

            if msg.is_multipart():
                # Walk every MIME part: inline text parts contribute to the
                # body; parts marked as attachments are tracked by filename.
                for part in msg.walk():
                    content_type = part.get_content_type()
                    content_disposition = str(part.get('Content-Disposition', ''))

                    if content_type == 'text/plain' and 'attachment' not in content_disposition:
                        try:
                            body_text += part.get_content()
                        except Exception:
                            # Best-effort: skip undecodable parts rather than
                            # failing the whole parse.
                            pass
                    elif content_type == 'text/html' and 'attachment' not in content_disposition:
                        try:
                            body_html += part.get_content()
                        except Exception:
                            pass
                    elif 'attachment' in content_disposition:
                        filename = part.get_filename()
                        if filename:
                            attachments.append(filename)
            else:
                # Single-part message: the payload is the whole body.
                content_type = msg.get_content_type()
                if content_type == 'text/plain':
                    body_text = msg.get_content()
                elif content_type == 'text/html':
                    body_html = msg.get_content()

            # Extract anchor hrefs (and fallback text) from the HTML body.
            if body_html:
                soup = BeautifulSoup(body_html, 'html.parser')
                all_links = [a['href'] for a in soup.find_all('a', href=True)]
                if not body_text:
                    body_text = soup.get_text(separator=' ', strip=True)

            # Extract bare URLs from the plain-text body.
            url_pattern = r'https?://[^\s<>"{}|\\^`\[\]]+'
            all_links.extend(re.findall(url_pattern, body_text))

            # De-duplicate while preserving first-seen order; a plain set()
            # would make the result order nondeterministic between runs.
            all_links = list(dict.fromkeys(all_links))

            return {
                'subject': subject,
                'from': from_addr,
                'date': date_str,
                'body_text': body_text,
                'body_html': body_html,
                'attachments': attachments,
                'all_links': all_links
            }
        except Exception as e:
            # Never raise on malformed input: callers get a recognizable
            # placeholder record instead.
            print(f"Error parsing email: {e}")
            return {
                'subject': '(Parse Error)',
                'from': 'Unknown',
                'date': '',
                'body_text': '',
                'body_html': '',
                'attachments': [],
                'all_links': []
            }
class PhishingHeuristics:
    """
    Detects phishing indicators using heuristic analysis.
    """

    # Urgency keywords that phishers use to pressure victims
    URGENCY_TERMS = [
        'urgent', 'immediately', 'verify now', 'act now', 'confirm now',
        'suspend', 'limited time', 'expires', 'expire', 'deadline',
        'within 24 hours', 'within 48 hours', 'action required',
        'verify your account', 'confirm your identity', 'unusual activity'
    ]

    # Credential/payment related keywords
    CREDENTIAL_TERMS = [
        'password', 'username', 'ssn', 'social security',
        'card number', 'credit card', 'debit card', 'cvv', 'pin',
        'account number', 'routing number', 'bank account',
        'verify account', 'confirm account', 'update payment',
        'billing information', 'payment method', 'security question'
    ]

    # Common legitimate domains (for lookalike detection)
    LEGITIMATE_DOMAINS = [
        'paypal.com', 'amazon.com', 'google.com', 'microsoft.com',
        'apple.com', 'netflix.com', 'facebook.com', 'instagram.com',
        'twitter.com', 'linkedin.com', 'ebay.com', 'wellsfargo.com',
        'bankofamerica.com', 'chase.com', 'citibank.com'
    ]

    @staticmethod
    def _is_lookalike(domain: str, legit_domain: str) -> bool:
        """
        Return True if *domain* embeds *legit_domain* without actually being
        that domain or one of its genuine subdomains.

        e.g. 'paypal.com.evil.com' is a lookalike of 'paypal.com', while
        'paypal.com' itself and 'www.paypal.com' are not.
        """
        if legit_domain not in domain:
            return False
        # The exact domain and real subdomains are legitimate, not lookalikes.
        return domain != legit_domain and not domain.endswith('.' + legit_domain)

    @staticmethod
    def extract_indicators(parsed_email: Dict) -> Dict:
        """
        Analyze parsed email for phishing indicators.

        Args:
            parsed_email: Dictionary from EmailParser.parse_raw().

        Returns:
            Dictionary containing:
                - indicators: List of detected indicator strings
                - severity: 'safe', 'review', or 'high_risk'
                - urgency_found: Boolean
                - credential_found: Boolean
                - suspicious_links: List of suspicious URLs
                - lookalike_domains: List of potential lookalike domains
                - attachment_risk: Boolean (suspicious attachments)
                - risk_score: Integer score severity was derived from
        """
        indicators = []
        urgency_found = False
        credential_found = False
        suspicious_links = []
        lookalike_domains = []
        attachment_risk = False

        # Subject + body, lowercased, so keyword checks are case-insensitive.
        full_text = (
            parsed_email.get('subject', '') + ' ' +
            parsed_email.get('body_text', '')
        ).lower()

        # Urgency language: report only the first matching term.
        for term in PhishingHeuristics.URGENCY_TERMS:
            if term.lower() in full_text:
                urgency_found = True
                indicators.append(f"⚠️ Urgency Detected: '{term}'")
                break

        # Credential/payment language: report only the first matching term.
        for term in PhishingHeuristics.CREDENTIAL_TERMS:
            if term.lower() in full_text:
                credential_found = True
                indicators.append(f"💳 Credential/Payment Term: '{term}'")
                break

        # Link analysis: lookalike domains, malformed URLs, raw-IP URLs.
        for link in parsed_email.get('all_links', []):
            link_lower = link.lower()
            domain_match = re.search(r'https?://([^/]+)', link_lower)
            if not domain_match:
                continue
            domain = domain_match.group(1)

            # Embedded-brand domains (e.g. paypal.com.evil.com). Genuine
            # subdomains such as www.paypal.com are NOT flagged.
            for legit_domain in PhishingHeuristics.LEGITIMATE_DOMAINS:
                if PhishingHeuristics._is_lookalike(domain, legit_domain):
                    lookalike_domains.append(domain)
                    indicators.append(f"🔗 Lookalike Domain: {domain}")
                    break

            # '@' in the authority (userinfo trick) or '..' (malformed host).
            if any(char in domain for char in ['@', '..']):
                suspicious_links.append(link)
                indicators.append(f"🚨 Suspicious Link Pattern: {link[:50]}...")

            # A raw IP address instead of a hostname is a classic phishing sign.
            if re.match(r'https?://\d+\.\d+\.\d+\.\d+', link):
                suspicious_links.append(link)
                indicators.append(f"🌐 IP Address URL: {link[:50]}...")

        # Executable/archive attachments commonly used to deliver malware.
        suspicious_extensions = ['.exe', '.scr', '.bat', '.cmd', '.js', '.vbs', '.zip', '.rar']
        for attachment in parsed_email.get('attachments', []):
            if any(attachment.lower().endswith(ext) for ext in suspicious_extensions):
                attachment_risk = True
                indicators.append(f"📎 Suspicious Attachment: {attachment}")

        # A sender domain that merely embeds a well-known brand (and is not
        # the brand itself or a subdomain of it) is likely spoofing it.
        sender = parsed_email.get('from', '').lower()
        sender_domain_match = re.search(r'@([^\s>]+)', sender)
        if sender_domain_match:
            sender_domain = sender_domain_match.group(1)
            for legit_domain in PhishingHeuristics.LEGITIMATE_DOMAINS:
                if PhishingHeuristics._is_lookalike(sender_domain, legit_domain):
                    indicators.append(f"📧 Sender Domain Mismatch: {sender_domain}")
                    break

        # Weighted risk score: credential requests, lookalikes, bad links and
        # risky attachments weigh more than urgency language alone.
        risk_score = 0
        if urgency_found:
            risk_score += 1
        if credential_found:
            risk_score += 2
        if lookalike_domains:
            risk_score += 3
        if suspicious_links:
            risk_score += 2
        if attachment_risk:
            risk_score += 2

        # Map the score onto the three severity buckets.
        if risk_score >= 5:
            severity = 'high_risk'
        elif risk_score >= 2:
            severity = 'review'
        else:
            severity = 'safe'

        if not indicators:
            indicators.append("✅ No Obvious Threats Detected")

        return {
            'indicators': indicators,
            'severity': severity,
            'urgency_found': urgency_found,
            'credential_found': credential_found,
            'suspicious_links': suspicious_links,
            'lookalike_domains': lookalike_domains,
            'attachment_risk': attachment_risk,
            'risk_score': risk_score
        }