Commit 60c5f79

Enhance NSFW detection logic with robust multi-factor safety checks
1 parent 4f61f85 commit 60c5f79
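
The previous check only flagged an image when a single non-safe class scored above 0.7, so borderline results always passed. The sketch below contrasts the old and new behaviour on a made-up prediction array in the { className, probability } shape that model.classify() returns (see the diff below); it assumes isUnsafe is exported or pasted into a scratch file, which this commit does not do.

// Hypothetical borderline result; probabilities are invented for illustration.
const predictions = [
  { className: 'Porn',    probability: 0.30 },
  { className: 'Neutral', probability: 0.28 },
  { className: 'Sexy',    probability: 0.25 },
  { className: 'Drawing', probability: 0.12 },
  { className: 'Hentai',  probability: 0.05 },
];

// Old rule: unsafe only if a non-Neutral/Drawing class exceeds 0.7.
const oldUnsafe = predictions.some(
  p => (p.className !== 'Neutral' && p.className !== 'Drawing') && p.probability > 0.7
);
console.log(oldUnsafe);             // false -> reported as safe

// New rule: check 4 fires because the top class is Porn at 0.30 (> 0.25).
console.log(isUnsafe(predictions)); // true -> reported as unsafe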


nsfwDetection/nsfwService.cjs

Lines changed: 37 additions & 4 deletions
@@ -5,6 +5,42 @@ const fs = require('fs');
 
 let model;
 
+function isUnsafe(predictions) {
+  // Initialize probabilities with fallbacks
+  const prob = { Porn: 0, Hentai: 0, Sexy: 0, Neutral: 0, Drawing: 0 };
+  predictions.forEach(p => prob[p.className] = p.probability);
+
+  // Pre-calculate frequently used values
+  const maxNsfw = Math.max(prob.Porn, prob.Hentai, prob.Sexy);
+  const maxSafe = Math.max(prob.Neutral, prob.Drawing);
+  const topPrediction = predictions.reduce((a, b) =>
+    a.probability > b.probability ? a : b
+  );
+
+  // 1. Explicit content threshold
+  if (prob.Porn > 0.35 || prob.Hentai > 0.35) return true;
+
+  // 2. Weighted NSFW index
+  if ((0.5 * prob.Porn + 0.3 * prob.Sexy + 0.2 * prob.Hentai) > 0.4) return true;
+
+  // 3. Relative risk ratio (with uncertainty buffer)
+  if (maxNsfw >= 0.2 && maxSafe > 0 && maxNsfw / maxSafe > 2.5) return true;
+
+  // 4. Top class enforcement
+  if (['Porn', 'Hentai'].includes(topPrediction.className) && topPrediction.probability > 0.25) {
+    return true;
+  }
+
+  // 5. Sexy content dominance
+  if (prob.Sexy > 0.6 && prob.Sexy > (prob.Neutral + prob.Drawing)) return true;
+
+  // 6. [NEW] Uncertainty check - reject ambiguous predictions
+  const entropy = -Object.values(prob).reduce((sum, p) => sum + (p * Math.log2(p || 1e-10)), 0);
+  if (entropy > 0.8 && maxSafe < 0.4) return true; // High uncertainty + low safe confidence
+
+  return false;
+}
+
 async function loadModel() {
   model = await nsfw.load();
   process.send && process.send({ type: 'ready' });
@@ -20,10 +56,7 @@ process.on('message', async (msg) => {
     const predictions = await model.classify(imageTensor);
     imageTensor.dispose();
 
-    // Consider "Neutral" and "Drawing" as safe, others as unsafe
-    const unsafe = predictions.some(
-      p => (p.className !== 'Neutral' && p.className !== 'Drawing') && p.probability > 0.7
-    );
+    const unsafe = isUnsafe(predictions);
     process.send({ type: 'result', safe: !unsafe, id: msg.id, predictions });
   } catch (err) {
     process.send({ type: 'error', error: err.message, id: msg.id });
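
The entropy rule (check 6) is the genuinely new safeguard: when the five class probabilities are spread out and neither Neutral nor Drawing is reasonably confident, the image is rejected even though no single NSFW check fires. A small worked example with invented probabilities:

// Invented, roughly uniform probabilities: none of checks 1-5 fires.
const prob = { Porn: 0.25, Hentai: 0.20, Sexy: 0.20, Neutral: 0.20, Drawing: 0.15 };

const entropy = -Object.values(prob).reduce(
  (sum, p) => sum + (p * Math.log2(p || 1e-10)), 0
); // ~2.30 bits, close to the maximum log2(5) ~ 2.32 for five classes

const maxSafe = Math.max(prob.Neutral, prob.Drawing); // 0.20
console.log(entropy > 0.8 && maxSafe < 0.4);          // true -> treated as unsafe

With five classes, 0.8 bits is a fairly low entropy bar, so in practice the maxSafe < 0.4 condition carries most of the weight here.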

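For completeness, the worker talks to its parent over Node's process IPC (process.send / process.on('message')): it announces readiness with { type: 'ready' } and answers each request with { type: 'result', safe, id, predictions } or { type: 'error', error, id }, as visible in the diff. A minimal parent-side sketch under those assumptions; the request payload field (imagePath) is hypothetical, since the part of the handler that builds imageTensor lies outside this diff.

const { fork } = require('child_process');

// Fork the worker changed in this commit.
const service = fork('./nsfwDetection/nsfwService.cjs');

service.on('message', (msg) => {
  if (msg.type === 'ready') {
    // 'imagePath' is an assumed payload field; the diff does not show the request shape.
    service.send({ id: 1, imagePath: '/tmp/upload.jpg' });
  } else if (msg.type === 'result') {
    console.log(`image ${msg.id} safe: ${msg.safe}`);
  } else if (msg.type === 'error') {
    console.error(`image ${msg.id} failed: ${msg.error}`);
  }
});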