@@ -375,8 +375,6 @@ export const scanWithVirusTotal = internalAction({
375375 // File exists and has AI analysis - use the verdict
376376 const verdict = normalizeVerdict ( aiResult . verdict )
377377 const status = verdictToStatus ( verdict )
378- const isSafe = status === 'clean'
379-
380378 console . log (
381379 `Version ${ args . versionId } found in VT with AI analysis. Hash: ${ sha256hash } . Verdict: ${ verdict } ` ,
382380 )
@@ -393,14 +391,12 @@ export const scanWithVirusTotal = internalAction({
393391 } ,
394392 } )
395393
396- // VT is supplementary — only escalate (never override LLM verdict)
397- if ( ! isSafe && ( status === 'malicious' || status === 'suspicious' ) ) {
398- await ctx . runMutation ( internal . skills . escalateByVtInternal , {
399- sha256hash,
400- status,
401- } )
402- }
403- // Clean VT result: vtAnalysis already written above — don't touch moderation
394+ // VT finalizes moderation visibility for newly published versions.
395+ await ctx . runMutation ( internal . skills . approveSkillByHashInternal , {
396+ sha256hash,
397+ scanner : 'vt' ,
398+ status,
399+ } )
404400 return
405401 }
406402
@@ -578,13 +574,12 @@ export const pollPendingScans = internalAction({
578574 } ,
579575 } )
580576
581- // VT is supplementary — only escalate for malicious/suspicious
582- if ( status === 'malicious' || status === 'suspicious' ) {
583- await ctx . runMutation ( internal . skills . escalateByVtInternal , {
584- sha256hash,
585- status,
586- } )
587- }
577+ // VT finalizes moderation visibility for newly published versions.
578+ await ctx . runMutation ( internal . skills . approveSkillByHashInternal , {
579+ sha256hash,
580+ scanner : 'vt' ,
581+ status,
582+ } )
588583 updated ++
589584 } catch ( error ) {
590585 console . error ( `[vt:pollPendingScans] Error checking hash ${ sha256hash } :` , error )