forked from MigoXLab/dingo
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathagent_article_fact_checker.py
More file actions
1802 lines (1546 loc) · 74 KB
/
agent_article_fact_checker.py
File metadata and controls
1802 lines (1546 loc) · 74 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
ArticleFactChecker: Agent-based article fact-checking with claims extraction.
Uses Agent-First architecture (LangChain ReAct / ``use_agent_executor=True``),
giving the agent full autonomy over tool selection, execution order, and
multi-step reasoning to verify factual claims in long-form articles.
See Also:
AgentFactCheck: Single-claim hallucination detection
docs/agent_development_guide.md: Agent development patterns
"""
import asyncio
import json
import os
import re
import threading
import time
import uuid
from collections import Counter
from datetime import datetime
from typing import Any, Dict, List, Optional
from dingo.io import Data
from dingo.io.input.required_field import RequiredField
from dingo.io.output.eval_detail import EvalDetail, QualityLabel
from dingo.model import Model
from dingo.model.llm.agent.base_agent import BaseAgent
from dingo.utils import log
class PromptTemplates:
    """
    Modular prompt templates for ArticleFactChecker.
    This class provides reusable prompt components that can be assembled
    based on article type and verification needs. This approach:
    - Reduces context window usage for long articles
    - Allows dynamic prompt customization
    - Makes prompts easier to maintain and test
    """
    # Role preamble included in every assembled system prompt.
    CORE_ROLE = """You are an expert article fact-checker with autonomous tool selection capabilities.
Your Task: Systematically verify ALL factual claims in the provided article."""
    # Describes the three tools the agent may call and when to prefer each.
    TOOLS_DESCRIPTION = """
Available Tools:
================
1. claims_extractor: Extract verifiable claims from long-form text
- Use this FIRST to identify all checkable statements
- Supports 8 claim types: factual, statistical, attribution, institutional,
temporal, comparative, monetary, technical
- Returns list of structured claims with types
2. arxiv_search: Search academic papers and verify metadata
- Use for claims about research papers, academic publications
- Provides paper metadata: title, authors, abstract, publication date
- Authors in papers often indicate institutional affiliations in abstracts
- NOTE: Affiliations are in unstructured text, not dedicated fields
- Best for: paper titles, author names, publication dates, and
institutional claims when a related paper exists
- For institutional claims: use arxiv_search FIRST to find the paper,
then tavily_search to cross-verify affiliations
3. tavily_search: General web search for fact verification
- Use for general factual claims, current events, companies, products
- Use for cross-verifying institutional/organizational affiliations
- Use for news, product specs, financial figures, comparative claims
- Supports multilingual queries: search BOTH English AND Chinese terms for
Chinese content (e.g., both "清华大学 OmniDocBench" and
"Tsinghua University OmniDocBench")
- Use search_depth='advanced' for authoritative fact-checking results
- Provides current web information with sources and URLs"""
    # Step-by-step workflow the agent is instructed to follow
    # (extract claims -> verify each -> synthesize report).
    WORKFLOW_STEPS = """
Workflow (Autonomous Decision-Making):
======================================
STEP 0: Analyze Article Type
First, identify the article type to guide your verification strategy.
STEP 1: Extract Claims (REQUIRED - Do NOT skip this step)
- You MUST call the claims_extractor tool with the full article text
- This is a mandatory first step before any verification
- Do NOT extract claims manually in your reasoning - use the tool
- Review the tool output and use the extracted claims for verification
- Claims are categorized by type for targeted verification
STEP 2: Verify Each Claim (Autonomous Tool Selection)
For each claim, analyze its type and context, then SELECT THE BEST TOOL:
Tool Selection Principles:
1. arxiv_search - For academic paper verification (paper title, author, arXiv ID)
2. tavily_search - For general web verification (current events, companies, products)
Claim-Type Specific Rules:
- INSTITUTIONAL/ATTRIBUTION claims (e.g., "released by X University and Y Lab"):
You MUST use arxiv_search FIRST to find the actual paper and check author
affiliations, THEN use tavily_search to cross-verify. Do NOT rely on
tavily_search alone for institutional claims — web sources often give
vague or incomplete attribution. The paper's author list is the
authoritative source for institutional affiliations.
For CHINESE institution names: translate to English before arxiv_search
(e.g., "清华大学" → "Tsinghua University", "达摩院" → "Alibaba DAMO Academy",
"上海人工智能实验室" → "Shanghai AI Laboratory")
Search with BOTH Chinese and English terms in tavily_search for maximum coverage.
- STATISTICAL/TECHNICAL claims: Use tavily_search for official benchmarks
- FACTUAL claims: Use tavily_search for general verification
Adaptive Strategies:
- COMBINE tools for comprehensive verification
- FALLBACK: If arxiv_search finds no paper → immediately use tavily_search alone
- FALLBACK: If tavily_search returns no relevant results → mark as UNVERIFIABLE
(do NOT retry with same query; try a different angle or accept UNVERIFIABLE)
- MULTI-SOURCE: Cross-verify important claims with multiple sources
STEP 3: Synthesize Results
After verifying ALL claims, generate a comprehensive report."""
    # Exact JSON schema the agent must emit; downstream parsing
    # (_build_per_claim_verification etc.) relies on these key names.
    OUTPUT_FORMAT = """
Output Format:
==============
You MUST return JSON in this exact format:
```json
{
"article_verification_summary": {
"article_type": "academic|news|product|blog|policy|opinion",
"total_claims": <number>,
"verified_claims": <number>,
"false_claims": <number>,
"unverifiable_claims": <number>,
"accuracy_score": <0.0-1.0>
},
"detailed_findings": [
{
"claim_id": "claim_001",
"original_claim": "...",
"claim_type": "institutional|factual|temporal|comparative|etc",
"verification_result": "FALSE|TRUE|UNVERIFIABLE",
"evidence": "...",
"sources": ["url1", "url2"],
"verification_method": "arxiv_search|tavily_search|combined",
"search_queries_used": ["query1", "query2"],
"reasoning": "Step-by-step reasoning for the verification conclusion"
}
],
"false_claims_comparison": [
{
"article_claimed": "Example: OpenAI released o1 in November 2024",
"actual_truth": "OpenAI released o1 on December 5, 2024",
"evidence": "Verified via official OpenAI announcement"
}
]
}
```"""
    # Evidence-based rules for assigning TRUE / FALSE / UNVERIFIABLE verdicts.
    VERDICT_CRITERIA = """
Verdict Decision Criteria:
==========================
Before assigning a verification_result to any claim, apply these evidence-based criteria:
TRUE - Claim is CONFIRMED by evidence:
- You found specific, credible evidence that DIRECTLY supports the claim
- The evidence explicitly confirms the key facts (names, numbers, dates, relationships)
- You can cite a specific source URL that contains the confirming information
FALSE - Claim is CONTRADICTED by evidence:
- You found specific, credible evidence that DIRECTLY contradicts the claim
- The evidence reveals a clear factual error (wrong date, wrong number, wrong attribution)
- You can point to the specific discrepancy between claim and evidence
UNVERIFIABLE - Insufficient or ambiguous evidence:
- You could NOT find evidence that clearly confirms OR contradicts the claim
- Evidence partially matches but key details cannot be confirmed
- Sources mention the topic but do not address the specific claim being checked
- The claim involves details not found in any source
CRITICAL RULE: Absence of contradictory evidence does NOT equal confirmation.
If your search did not find explicit confirming evidence, the verdict is UNVERIFIABLE, not TRUE.
If your reasoning includes phrases like "not explicitly listed", "could not confirm",
"no direct evidence", or "not mentioned in results", the verdict MUST be UNVERIFIABLE."""
    # Self-review pass the agent runs before emitting its final JSON;
    # mirrors the server-side check in _check_reasoning_verdict_consistency.
    SELF_VERIFICATION_STEP = """
STEP 3.5: Self-Verify Verdict-Reasoning Consistency (MANDATORY)
Before generating your final JSON report, review EVERY claim's verdict:
For each claim in your detailed_findings:
a) Re-read the evidence and reasoning you wrote for this claim
b) Ask yourself: "Does my evidence DIRECTLY and EXPLICITLY support this verdict?"
c) Apply these consistency checks:
- Reasoning says "not found", "not listed", "not mentioned", "no evidence"
-> Verdict MUST be UNVERIFIABLE (not TRUE)
- Reasoning says "confirmed by [specific source]" with a URL
-> Verdict can be TRUE
- Reasoning says "contradicts", "actually [different fact]", "incorrect"
-> Verdict MUST be FALSE
- Reasoning is uncertain or hedging ("may", "possibly", "unclear")
-> Verdict MUST be UNVERIFIABLE
d) If you find ANY inconsistency, correct the verdict NOW
This step is critical for report quality. Do NOT skip it."""
    # Closing guardrails appended to every assembled prompt.
    CRITICAL_GUIDELINES = """
Critical Guidelines:
====================
- ALWAYS extract claims first before verification
- AUTONOMOUS tool selection based on claim type and article context
- VERIFY each claim independently
- USE multiple sources when possible (especially for critical claims)
- CITE specific evidence and URLs
- BE THOROUGH: Don't skip claims
- ADAPTIVE: If a tool fails, try alternatives intelligently
- CONTEXT-AWARE: Consider article type when selecting verification approach
Remember: You are an autonomous agent with full decision-making power.
Analyze the article type, choose tools intelligently based on claim context,
adapt to intermediate results, and ensure comprehensive verification."""
    # Article type specific guidance
    # Keys must match the article_type values accepted by build().
    ARTICLE_TYPE_GUIDANCE = {
        "academic": """
Article Type Guidance (Academic):
- Focus on arxiv_search for paper verification AND institutional claims
- For institutional affiliations: COMBINE arxiv_search (paper authors/abstracts) + tavily_search (cross-verify)
- Verify: paper titles, authors, publication dates, citations, institutional attributions
- Example: "OmniDocBench by Tsinghua" → arxiv_search for paper metadata THEN tavily_search to cross-verify""",
        "news": """
Article Type Guidance (News):
- Focus on tavily_search for current events
- Verify dates, quotes, and attributions carefully
- Cross-reference multiple news sources
- Example: "released on December 5" → tavily_search with date context""",
        "product": """
Article Type Guidance (Product Review):
- Use tavily_search for official specifications
- Verify technical specs against manufacturer data
- Check benchmark claims against third-party reviews
- Example: "A17 Pro chip" → tavily_search for official Apple specs""",
        "blog": """
Article Type Guidance (Technical Blog):
- Use tavily_search for documentation verification
- Verify version numbers and feature claims
- Check performance claims against benchmarks
- Example: "React 18 features" → tavily_search for React docs""",
        "policy": """
Article Type Guidance (Policy Document):
- Use tavily_search for government sources
- Verify dates, regulations, and official statements
- Cross-reference with official government websites""",
        "opinion": """
Article Type Guidance (Opinion Piece):
- Focus only on attributed factual claims
- Verify quotes and statistics cited
- Distinguish opinions from verifiable facts"""
    }
    # Compact prompt used by the per-claim mini-agents in Phase 2
    # (one claim per agent run; response must be bare JSON).
    PER_CLAIM_VERIFICATION_PROMPT = """You are a fact-checking expert. Verify ONE specific factual claim.
Use available search tools to find evidence, then respond ONLY with valid JSON:
{
"verification_result": "TRUE|FALSE|UNVERIFIABLE",
"evidence": "Key evidence found (1-3 sentences)",
"sources": ["url1", "url2"],
"verification_method": "tavily_search|arxiv_search|combined|no_search",
"search_queries_used": ["query text"],
"reasoning": "Step-by-step reasoning for your verdict"
}
Verdict Rules:
- TRUE: Found specific, direct evidence CONFIRMING the claim with a cited URL
- FALSE: Found specific evidence CONTRADICTING the claim
- UNVERIFIABLE: Could not find clear confirming OR contradicting evidence
CRITICAL: Start with search, then produce JSON only. No text outside the JSON."""
    @classmethod
    def build(cls, article_type: Optional[str] = None) -> str:
        """
        Build complete system prompt from modular components.
        Args:
            article_type: Optional article type for targeted guidance
                ("academic", "news", "product", "blog", "policy", "opinion")
        Returns:
            Complete system prompt string
        """
        parts = [
            cls.CORE_ROLE,
            cls.TOOLS_DESCRIPTION,
            cls.WORKFLOW_STEPS,
        ]
        # Type-specific guidance slots in between the workflow and the
        # verdict criteria; unknown types silently get the generic prompt.
        if article_type and article_type.lower() in cls.ARTICLE_TYPE_GUIDANCE:
            parts.append(cls.ARTICLE_TYPE_GUIDANCE[article_type.lower()])
        parts.extend([
            cls.VERDICT_CRITERIA,
            cls.OUTPUT_FORMAT,
            cls.SELF_VERIFICATION_STEP,
            cls.CRITICAL_GUIDELINES
        ])
        return "\n".join(parts)
    @classmethod
    def get_article_types(cls) -> List[str]:
        """Return list of supported article types."""
        return list(cls.ARTICLE_TYPE_GUIDANCE.keys())
@Model.llm_register("ArticleFactChecker")
class ArticleFactChecker(BaseAgent):
"""
Article-level fact-checking agent using LangChain ReAct (Agent-First pattern).
The agent autonomously:
1. Extracts claims via claims_extractor
2. Selects the best verification tool per claim type (arxiv_search / tavily_search)
3. Builds evidence chains and generates a structured verification report
Configuration Example::
{
"name": "ArticleFactChecker",
"config": {
"key": "your-openai-api-key",
"model": "gpt-4o-mini",
"parameters": {
"agent_config": {
"max_iterations": 10,
"tools": {
"claims_extractor": {
"api_key": "your-openai-api-key",
"max_claims": 50,
"claim_types": ["factual", "institutional", "statistical", "attribution"]
},
"tavily_search": {
"api_key": "your-tavily-api-key",
"max_results": 5
},
"arxiv_search": {"max_results": 5}
}
}
}
}
}
"""
use_agent_executor = True # Enable Agent-First mode
available_tools = [
"claims_extractor", # Extract verifiable claims from article
"arxiv_search", # Verify academic papers and institutions
"tavily_search" # General web search verification
]
max_iterations = 10 # Allow more iterations for comprehensive checking
max_concurrent_claims = 5 # Default parallel claim verification slots
_required_fields = [RequiredField.CONTENT] # Article text
_metric_info = {
"metric_name": "ArticleFactChecker",
"description": "Article-level fact checking with autonomous claims extraction and verification"
}
# Lock to serialise ClaimsExtractor class-level config mutation across threads.
# Required because LocalExecutor may call eval() from multiple threads concurrently.
_claims_extractor_lock = threading.Lock()
# --- Output Path and File Saving Methods ---
@classmethod
def _get_output_dir(cls) -> Optional[str]:
"""
Get output directory for artifact files.
Returns:
Output directory path (created if needed), or None if saving is disabled.
"""
params = cls.dynamic_config.parameters or {}
agent_cfg = params.get('agent_config') or {}
explicit_path = agent_cfg.get('output_path')
if explicit_path:
os.makedirs(explicit_path, exist_ok=True)
return explicit_path
if agent_cfg.get('save_artifacts') is False:
return None
base_output = agent_cfg.get('base_output_path') or 'outputs'
create_time = time.strftime("%Y%m%d_%H%M%S", time.localtime())
auto_path = os.path.join(base_output, f"article_factcheck_{create_time}_{uuid.uuid4().hex[:6]}")
os.makedirs(auto_path, exist_ok=True)
log.debug(f"ArticleFactChecker: artifact path auto-derived: {auto_path}")
return auto_path
@classmethod
def _save_article_content(cls, output_dir: str, content: str) -> Optional[str]:
"""
Save original article content to output directory.
Args:
output_dir: Output directory path
content: Article markdown content
Returns:
Path to saved file, or None on failure
"""
file_path = os.path.join(output_dir, "article_content.md")
try:
with open(file_path, 'w', encoding='utf-8') as f:
f.write(content)
log.info(f"Saved article content to {file_path}")
return file_path
except (IOError, OSError) as e:
log.error(f"Failed to save article content: {e}")
return None
@classmethod
def _write_jsonl_file(cls, file_path: str, records: List[Dict]) -> Optional[str]:
"""Write records as JSONL. Returns file_path on success, None on failure."""
try:
with open(file_path, 'w', encoding='utf-8') as f:
for record in records:
f.write(json.dumps(record, ensure_ascii=False) + '\n')
return file_path
except (IOError, OSError) as e:
log.error(f"Failed to write {file_path}: {e}")
return None
@classmethod
def _save_claims(cls, output_dir: str, claims: List[Dict]) -> Optional[str]:
"""Save extracted claims to JSONL file."""
file_path = os.path.join(output_dir, "claims_extracted.jsonl")
saved = cls._write_jsonl_file(file_path, claims)
if saved:
log.info(f"Saved {len(claims)} claims to {file_path}")
return saved
@classmethod
def _save_verification_details(cls, output_dir: str, enriched_claims: List[Dict]) -> Optional[str]:
"""Save per-claim verification details to JSONL file."""
file_path = os.path.join(output_dir, "claims_verification.jsonl")
saved = cls._write_jsonl_file(file_path, enriched_claims)
if saved:
log.info(f"Saved {len(enriched_claims)} verification details to {file_path}")
return saved
@classmethod
def _save_full_report(cls, output_dir: str, report_data: Dict) -> Optional[str]:
"""
Save full structured verification report to JSON file.
Args:
output_dir: Output directory path
report_data: Complete report dictionary
Returns:
Path to saved file, or None on failure
"""
file_path = os.path.join(output_dir, "verification_report.json")
try:
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(report_data, f, ensure_ascii=False, indent=2)
log.info(f"Saved verification report to {file_path}")
return file_path
except (IOError, OSError) as e:
log.error(f"Failed to save verification report: {e}")
return None
# --- Data Processing Methods ---
@classmethod
def _extract_claims_from_tool_calls(cls, tool_calls: List[Dict]) -> List[Dict]:
"""
Extract claims list from tool_calls observation data.
The claims_extractor tool returns its results in the observation field
of the tool_calls list (via langchain_adapter).
Args:
tool_calls: List of tool call dicts from AgentWrapper
Returns:
List of claim dictionaries extracted from claims_extractor output
"""
for tc in tool_calls:
if tc.get('tool') == 'claims_extractor':
observation = tc.get('observation', '')
if not observation:
continue
try:
obs_data = json.loads(observation)
if obs_data.get('success'):
# Claims may be in data.claims (langchain_adapter wrapping)
# or directly in obs_data.claims
data_section = obs_data.get('data', obs_data)
claims = data_section.get('claims', [])
if claims:
return claims
except (json.JSONDecodeError, TypeError) as e:
log.warning(f"Failed to parse claims_extractor observation: {e}")
return []
@classmethod
def _extract_claims_from_detailed_findings(cls, verification_data: Dict[str, Any]) -> List[Dict]:
"""
Fallback: extract claims from agent's detailed_findings when
claims_extractor tool was not called.
Args:
verification_data: Agent's parsed JSON output
Returns:
List of claim dicts with source="agent_reasoning"
"""
return [
{
"claim_id": finding.get("claim_id", ""),
"claim": finding.get("original_claim", ""),
"claim_type": finding.get("claim_type", "unknown"),
"confidence": None,
"verifiable": True,
"source": "agent_reasoning"
}
for finding in verification_data.get("detailed_findings", [])
]
_VERDICT_MAP = {
"TRUE": "TRUE", "FALSE": "FALSE", "UNVERIFIABLE": "UNVERIFIABLE",
"CONFIRMED": "TRUE", "ACCURATE": "TRUE", "CORRECT": "TRUE", "VERIFIED": "TRUE",
"INACCURATE": "FALSE", "INCORRECT": "FALSE", "WRONG": "FALSE",
"DISPROVEN": "FALSE", "REFUTED": "FALSE",
}
@classmethod
def _normalize_verdict(cls, verdict: Any) -> str:
"""Normalize verdict to standard values (TRUE/FALSE/UNVERIFIABLE). Unknown values default to UNVERIFIABLE."""
if not verdict or not isinstance(verdict, str):
return "UNVERIFIABLE"
return cls._VERDICT_MAP.get(verdict.strip().upper(), "UNVERIFIABLE")
# Pre-compiled regexes for Tier 3 per-field extraction in _parse_claim_json_robust.
_RE_VERDICT = re.compile(r'"verification_result"\s*:\s*"(TRUE|FALSE|UNVERIFIABLE)"', re.IGNORECASE)
_RE_EVIDENCE = re.compile(r'"evidence"\s*:\s*"((?:[^"\\]|\\.)*)"', re.DOTALL)
_RE_EVIDENCE_TRUNC = re.compile(r'"evidence"\s*:\s*"((?:[^"\\]|\\.)+)', re.DOTALL)
_RE_SOURCES = re.compile(r'"sources"\s*:\s*\[(.*?)\]', re.DOTALL)
_RE_SOURCES_TRUNC = re.compile(r'"sources"\s*:\s*\[(.*)', re.DOTALL)
_RE_REASONING = re.compile(r'"reasoning"\s*:\s*"((?:[^"\\]|\\.)*)"', re.DOTALL)
_RE_REASONING_TRUNC = re.compile(r'"reasoning"\s*:\s*"((?:[^"\\]|\\.)+)', re.DOTALL)
# Hedging language patterns that indicate reasoning contradicts a TRUE verdict.
_HEDGING_PATTERNS = re.compile(
r"(?:"
r"not explicitly (?:stated|listed|mentioned|confirmed|found)"
r"|(?:cannot|could not|couldn't) (?:be verified|confirm|find|verify)"
r"|unable to (?:verify|confirm|find)"
r"|is(?:n't| not) explicitly"
r"|no (?:direct|explicit) evidence"
r"|insufficient evidence"
r"|not directly (?:confirmed|stated|verified)"
r"|cannot be fully verified"
r"|exact .{0,30} isn't .{0,30} stated"
r"|while .{0,40} isn't .{0,30} stated"
r"|not .{0,20} explicitly .{0,20} in (?:the )?(?:available |found )?(?:sources?|documentation|results?)"
r")",
re.IGNORECASE
)
@classmethod
def _check_reasoning_verdict_consistency(cls, enriched_claims: List[Dict]) -> int:
"""
Downgrade TRUE verdicts to UNVERIFIABLE when reasoning contains hedging language.
Only affects TRUE verdicts; FALSE verdicts are never changed.
Args:
enriched_claims: List of enriched claim dicts (modified in place)
Returns:
Number of verdicts downgraded
"""
downgraded = 0
for claim in enriched_claims:
if claim.get("verification_result") != "TRUE":
continue
reasoning = claim.get("reasoning", "")
if not reasoning:
continue
match = cls._HEDGING_PATTERNS.search(reasoning)
if match:
claim["verification_result"] = "UNVERIFIABLE"
claim_id = claim.get("claim_id", "unknown")
matched_text = match.group(0)
log.info(
f"Verdict downgraded TRUE→UNVERIFIABLE for {claim_id}: "
f"hedging detected in reasoning: '{matched_text}'"
)
downgraded += 1
return downgraded
@classmethod
def _recalculate_summary(cls, enriched_claims: List[Dict]) -> Dict[str, Any]:
"""
Recalculate verification summary from actual enriched claim data.
This ensures the summary matches the actual verdict distribution,
overriding any inconsistent self-reported summary from the agent.
Args:
enriched_claims: List of enriched claim dicts with normalized verdicts
Returns:
Summary dict with total_claims, verified_claims, false_claims,
unverifiable_claims, and accuracy_score
"""
total = len(enriched_claims)
true_count = sum(1 for c in enriched_claims if c.get("verification_result") == "TRUE")
false_count = sum(1 for c in enriched_claims if c.get("verification_result") == "FALSE")
unverifiable_count = sum(1 for c in enriched_claims if c.get("verification_result") == "UNVERIFIABLE")
accuracy = true_count / total if total > 0 else 0.0
return {
"total_claims": total,
"verified_claims": true_count,
"false_claims": false_count,
"unverifiable_claims": unverifiable_count,
"accuracy_score": round(accuracy, 4)
}
@classmethod
def _build_per_claim_verification(
cls,
verification_data: Dict[str, Any],
extracted_claims: List[Dict],
tool_calls: List[Dict]
) -> List[Dict]:
"""
Merge verification_data, extracted_claims, and tool_calls into
per-claim verification records.
Data sources:
- detailed_findings: verification result, evidence, sources, reasoning
- extracted_claims: claim_type, confidence, verifiable, context
- tool_calls: search queries and tool usage details
Args:
verification_data: Agent's parsed JSON output
extracted_claims: Claims from claims_extractor tool
tool_calls: Complete tool call list from agent
Returns:
List of enriched per-claim verification records
"""
detailed_findings = verification_data.get("detailed_findings", [])
# Build lookup from extracted claims by claim_id
claims_by_id: Dict[str, Dict] = {}
for claim in extracted_claims:
cid = claim.get('claim_id', '')
if cid:
claims_by_id[cid] = claim
enriched_claims: List[Dict] = []
for finding in detailed_findings:
claim_id = finding.get('claim_id', '')
extracted = claims_by_id.get(claim_id, {})
enriched = {
"claim_id": claim_id,
"original_claim": finding.get('original_claim', extracted.get('claim', '')),
"claim_type": finding.get('claim_type', extracted.get('claim_type', 'unknown')),
"confidence": extracted.get('confidence'),
"verification_result": finding.get('verification_result', 'UNVERIFIABLE'),
"evidence": finding.get('evidence', ''),
"sources": finding.get('sources', []),
"verification_method": finding.get('verification_method', ''),
"search_queries_used": finding.get('search_queries_used', []),
"reasoning": finding.get('reasoning', ''),
}
enriched_claims.append(enriched)
# If no detailed_findings but we have extracted claims, create placeholder records
if not enriched_claims and extracted_claims:
for claim in extracted_claims:
enriched_claims.append({
"claim_id": claim.get('claim_id', ''),
"original_claim": claim.get('claim', ''),
"claim_type": claim.get('claim_type', 'unknown'),
"confidence": claim.get('confidence'),
"verification_result": "UNVERIFIABLE",
"evidence": "",
"sources": [],
"verification_method": "",
"search_queries_used": [],
"reasoning": "No verification data available from agent",
})
return enriched_claims
@classmethod
def _build_structured_report(
cls,
verification_data: Dict[str, Any],
extracted_claims: List[Dict],
enriched_claims: List[Dict],
tool_calls: List[Dict],
reasoning_steps: int,
content_length: int,
execution_time: float,
claims_source: str = "claims_extractor_tool"
) -> Dict[str, Any]:
"""
Build a complete structured verification report.
Args:
verification_data: Agent's parsed JSON output
extracted_claims: Claims from claims_extractor or fallback
enriched_claims: Merged per-claim verification records
tool_calls: Complete tool call list
reasoning_steps: Number of reasoning steps
content_length: Length of original article content
execution_time: Total execution time in seconds
claims_source: Where claims came from ("claims_extractor_tool" or "agent_reasoning")
Returns:
Complete structured report dictionary
"""
summary = verification_data.get("article_verification_summary", {})
# Claims extraction stats
claim_types_dist: Dict[str, int] = {}
verifiable_count = 0
for claim in extracted_claims:
ct = claim.get('claim_type', 'unknown')
claim_types_dist[ct] = claim_types_dist.get(ct, 0) + 1
if claim.get('verifiable', True):
verifiable_count += 1
report = {
"report_version": "2.0",
"generated_at": datetime.now().isoformat(timespec='seconds'),
"article_info": {
"content_source": "markdown",
"content_length": content_length
},
"claims_extraction": {
"total_extracted": len(extracted_claims),
"claims_source": claims_source,
"verifiable": verifiable_count,
"claim_types_distribution": claim_types_dist
},
"verification_summary": {
"total_verified": summary.get("verified_claims", 0) + summary.get("false_claims", 0),
"verified_true": summary.get("verified_claims", 0),
"verified_false": summary.get("false_claims", 0),
"unverifiable": summary.get("unverifiable_claims", 0),
"accuracy_score": summary.get("accuracy_score", 0.0)
},
"detailed_findings": enriched_claims,
"false_claims_comparison": verification_data.get("false_claims_comparison", []),
"agent_metadata": {
"model": getattr(cls.dynamic_config, 'model', 'unknown'),
"tool_calls_count": len(tool_calls),
"reasoning_steps": reasoning_steps,
"execution_time_seconds": round(execution_time, 2)
}
}
return report
# --- Overridden Core Methods ---
@classmethod
def eval(cls, input_data: Data) -> EvalDetail:
    """
    Two-phase async fact-checking with parallel claim verification.

    Phase 1: Extract claims via ClaimsExtractor (direct call, ~30s)
    Phase 2: Verify each claim with a focused mini-agent using asyncio.gather
        with Semaphore(max_concurrent_claims) to limit concurrency (~80-120s)

    This replaces the old single-agent sequential approach (~669s for 15 claims).

    Temperature defaults to 0 for deterministic tool selection and
    consistent verification results. Users can override via config.

    Args:
        input_data: Data object with article content

    Returns:
        EvalDetail with comprehensive verification report
    """
    start_time = time.time()
    output_dir = cls._get_output_dir()

    # Default to temperature=0 unless the user explicitly configured one.
    if cls.dynamic_config:
        if cls.dynamic_config.parameters is None:
            cls.dynamic_config.parameters = {}
        cls.dynamic_config.parameters.setdefault("temperature", 0)

    if output_dir and input_data.content:
        cls._save_article_content(output_dir, input_data.content)

    # Detect an already-running event loop up front (e.g. Jupyter, async test
    # runners) instead of string-matching the RuntimeError message raised by
    # asyncio.run() — message text is not a stable API across Python versions,
    # and matching it could also misroute unrelated RuntimeErrors raised from
    # inside _async_eval itself.
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No loop running in this thread: the normal synchronous-caller path.
        return asyncio.run(cls._async_eval(input_data, start_time, output_dir))

    # A loop is already running here, so asyncio.run() would raise.
    # Run the coroutine on a fresh loop in a dedicated worker thread.
    import concurrent.futures
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
        future = pool.submit(
            lambda: asyncio.run(cls._async_eval(input_data, start_time, output_dir))
        )
        return future.result()
# --- Two-Phase Async Architecture Methods ---
@classmethod
async def _async_eval(
    cls, input_data: Data, start_time: float, output_dir: Optional[str]
) -> EvalDetail:
    """
    Orchestrate the two-phase pipeline: sequential claim extraction
    followed by semaphore-bounded parallel verification of every claim.
    """
    # ---- Phase 1: claim extraction (single direct tool call, no agent) ----
    print("[ArticleFactChecker] Phase 1: Extracting claims from article...", flush=True)
    claims = await cls._async_extract_claims(input_data)
    if not claims:
        return cls._create_error_result("No claims extracted from article")
    print(f"[ArticleFactChecker] Phase 1 done: {len(claims)} claims extracted", flush=True)
    if output_dir:
        cls._save_claims(output_dir, claims)

    # ---- Phase 2: bounded-concurrency verification ----
    max_concurrent = cls._get_max_concurrent_claims()
    gate = asyncio.Semaphore(max_concurrent)
    total = len(claims)
    print(
        f"[ArticleFactChecker] Phase 2: Verifying {total} claims "
        f"(max {max_concurrent} concurrent)...",
        flush=True
    )
    log.info(f"ArticleFactChecker: verifying {total} claims with max_concurrent={max_concurrent}")

    # Build the LLM and tool list once up front so concurrent tasks never
    # race on config mutation.
    llm = cls.get_langchain_llm()
    usable_tools = [
        tool for tool in cls.get_langchain_tools()
        if tool.name in ('tavily_search', 'arxiv_search')
    ]

    done = 0  # progress counter; asyncio tasks share one thread, so no lock needed

    async def _tracked_verify(one_claim: Dict) -> Any:
        """Verify one claim, emitting a progress line with its verdict."""
        nonlocal done
        cid = one_claim.get('claim_id', '')
        try:
            outcome = await cls._async_verify_single_claim(one_claim, gate, llm, usable_tools)
        except Exception as exc:
            done += 1
            print(f"[ArticleFactChecker] [{done}/{total}] {cid} → ERROR", flush=True)
            return exc
        done += 1
        if isinstance(outcome, dict) and outcome.get('success'):
            raw_output = (outcome.get('agent_result') or {}).get('output') or ''
            match = cls._RE_VERDICT.search(raw_output)
            verdict = match.group(1) if match else '?'
        else:
            verdict = 'ERROR'
        print(f"[ArticleFactChecker] [{done}/{total}] {cid} → {verdict}", flush=True)
        return outcome

    verification_results = await asyncio.gather(
        *(_tracked_verify(one_claim) for one_claim in claims),
        return_exceptions=True
    )

    elapsed = time.time() - start_time
    print(
        f"[ArticleFactChecker] Phase 2 done: {total}/{total} claims verified "
        f"({elapsed:.1f}s elapsed)",
        flush=True
    )
    return cls._aggregate_parallel_results(
        input_data, claims, verification_results, start_time, output_dir
    )
@classmethod
async def _async_extract_claims(cls, input_data: Data) -> List[Dict]:
    """
    Phase 1: pull factual claims out of the article via ClaimsExtractor.

    The extractor call itself is synchronous, so it is dispatched to the
    default thread executor to keep the event loop responsive.

    Returns:
        List of claim dicts (claim_id, claim, claim_type, ...); empty on failure.
    """
    from dingo.model.llm.agent.tools.claims_extractor import ClaimsExtractor, ClaimsExtractorConfig

    parameters = cls.dynamic_config.parameters or {}
    tool_settings = (parameters.get('agent_config') or {}).get('tools', {}).get('claims_extractor', {})

    extractor_kwargs: Dict[str, Any] = {
        'model': cls.dynamic_config.model or "gpt-4o-mini",
        'api_key': tool_settings.get('api_key') or cls.dynamic_config.key,
        'max_claims': tool_settings.get('max_claims', 50),
    }
    # Optional overrides are forwarded only when actually configured.
    endpoint = tool_settings.get('base_url') or getattr(cls.dynamic_config, 'api_url', None)
    if endpoint:
        extractor_kwargs['base_url'] = endpoint
    requested_types = tool_settings.get('claim_types')
    if requested_types:
        extractor_kwargs['claim_types'] = requested_types

    article_text = input_data.content or ''
    loop = asyncio.get_running_loop()
    # ClaimsExtractor.config is class-level shared state, so configure and
    # execute under the lock. NOTE(review): the lock is held across the
    # await — fine for this single Phase-1 call, but concurrent extractions
    # on the same loop would block; confirm if that ever becomes a use case.
    with cls._claims_extractor_lock:
        ClaimsExtractor.config = ClaimsExtractorConfig(**extractor_kwargs)
        result = await loop.run_in_executor(None, ClaimsExtractor.execute, article_text)

    if result.get('success'):
        payload = result.get('data', result)
        return payload.get('claims', [])
    log.warning(f"ClaimsExtractor failed: {result.get('error', 'unknown')}")
    return []
@classmethod
async def _async_verify_single_claim(
cls,
claim: Dict,
semaphore: asyncio.Semaphore,
llm: Any,
search_tools: List,
) -> Dict:
"""
Phase 2: Verify one claim with a focused mini-agent.
The semaphore limits concurrent API calls to prevent rate limiting.
Each mini-agent only handles one claim with a simplified prompt,
returning structured JSON verification output.
Args:
claim: Claim dict from ClaimsExtractor (has claim_id, claim, claim_type)
semaphore: Asyncio semaphore for concurrency control
llm: Pre-created LangChain LLM instance (shared, thread-safe)
search_tools: Pre-configured search tools (tavily_search / arxiv_search)
Returns:
Dict with claim, agent_result, success keys
"""
from dingo.model.llm.agent.agent_wrapper import AgentWrapper
async with semaphore:
claim_id = claim.get('claim_id', 'unknown')
claim_text = claim.get('claim', '')
claim_type = claim.get('claim_type', 'factual')
claim_preview = (claim_text or '')[:60]
print(f"[ArticleFactChecker] → {claim_id} ({claim_type}): {claim_preview}", flush=True)
try:
agent = AgentWrapper.create_agent(
llm=llm,
tools=search_tools,
system_prompt=PromptTemplates.PER_CLAIM_VERIFICATION_PROMPT
)
input_text = (
f"Claim ID: {claim_id}\n"
f"Claim Type: {claim_type}\n"
f"Claim to verify: {claim_text}"
)
per_claim_max_iter = max(cls.get_max_iterations(), 5)
agent_result = await AgentWrapper.async_invoke_and_format(
agent,
input_text=input_text,
max_iterations=per_claim_max_iter
)
log.debug(f"Verified {claim_id}: success={agent_result.get('success')}")
return {"claim": claim, "agent_result": agent_result, "success": True}