-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtimeline_analyzer.py
More file actions
186 lines (154 loc) · 5.64 KB
/
timeline_analyzer.py
File metadata and controls
186 lines (154 loc) · 5.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from collections import Counter, defaultdict
import re
from datetime import datetime
def extract_policy_timeline(vector_db, topic, k=50):
    """Analyze how a policy topic evolves across documents / time periods.

    Retrieves the ``k`` most similar chunks for ``topic`` from the vector
    store, groups the hits by source document, and builds a two-panel
    Plotly figure: mentions over time split by organization (IMF vs.
    World Bank), plus a per-organization total-mentions bar chart.

    Args:
        vector_db: Chroma vector-store instance exposing
            ``similarity_search(query, k)``.
        topic: Topic keywords (e.g. "climate finance", "debt sustainability").
        k: Number of document chunks to retrieve.

    Returns:
        tuple: ``(plotly Figure, per-document stats dict)``; ``(None, {})``
        when the search returns nothing.
    """
    # 1. Retrieve relevant document chunks.
    docs = vector_db.similarity_search(topic, k=k)
    if not docs:
        return None, {}
    # 2. Group hits by source document.
    doc_mentions = defaultdict(lambda: {
        'count': 0,
        'pages': [],
        'year': None,
        'org': 'Unknown',
        'excerpts': []
    })
    for doc in docs:
        # Normalize separators so both Windows ('\\') and POSIX ('/') paths
        # reduce to a bare file name (the original only split on '\\', so
        # POSIX paths kept their directories and broke the heuristics below).
        raw_source = doc.metadata.get('source', 'Unknown')
        source = raw_source.replace('\\', '/').split('/')[-1]
        page = doc.metadata.get('page', 0)
        # Extract a publication year (20xx) from the file name; fall back
        # to 2024 when the name carries no year.
        year_match = re.search(r'20\d{2}', source)
        year = int(year_match.group()) if year_match else 2024
        # Heuristic organization tagging from file-name keywords.
        source_lower = source.lower()
        if 'imf' in source_lower or 'sdn' in source_lower:
            org = 'IMF'
        elif 'world bank' in source_lower or 'digital progress' in source_lower:
            org = 'World Bank'
        else:
            org = 'Other'
        doc_mentions[source]['count'] += 1
        doc_mentions[source]['pages'].append(page)
        doc_mentions[source]['year'] = year
        doc_mentions[source]['org'] = org
        doc_mentions[source]['excerpts'].append(doc.page_content[:200])
    # 3. Build the visualization.
    fig = make_subplots(
        rows=2, cols=1,
        subplot_titles=(
            f'Topic Mentions Over Time: "{topic}"',
            'Mention Distribution by Organization'
        ),
        row_heights=[0.6, 0.4],
        vertical_spacing=0.15
    )
    # Panel 1: timeline, one trace per organization.
    imf_data = [(info['year'], info['count'], name)
                for name, info in doc_mentions.items() if info['org'] == 'IMF']
    wb_data = [(info['year'], info['count'], name)
               for name, info in doc_mentions.items() if info['org'] == 'World Bank']
    if imf_data:
        years, counts, names = zip(*sorted(imf_data))
        fig.add_trace(
            go.Scatter(
                x=list(years),
                y=list(counts),
                mode='lines+markers',
                name='IMF',
                line=dict(color='#1f4788', width=3),
                marker=dict(size=12, symbol='circle'),
                hovertemplate='<b>%{text}</b><br>Year: %{x}<br>Mentions: %{y}<extra></extra>',
                text=list(names)
            ),
            row=1, col=1
        )
    if wb_data:
        years, counts, names = zip(*sorted(wb_data))
        fig.add_trace(
            go.Scatter(
                x=list(years),
                y=list(counts),
                mode='lines+markers',
                name='World Bank',
                line=dict(color='#00ab51', width=3),
                marker=dict(size=12, symbol='diamond'),
                hovertemplate='<b>%{text}</b><br>Year: %{x}<br>Mentions: %{y}<extra></extra>',
                text=list(names)
            ),
            row=1, col=1
        )
    # Panel 2: per-organization distribution (bar chart instead of pie).
    org_counts = Counter([info['org'] for info in doc_mentions.values()])
    colors_map = {
        'IMF': '#1f4788',
        'World Bank': '#00ab51',
        'Other': '#cccccc'
    }
    fig.add_trace(
        go.Bar(
            x=list(org_counts.keys()),
            y=list(org_counts.values()),
            marker=dict(color=[colors_map.get(org, '#cccccc') for org in org_counts.keys()]),
            text=list(org_counts.values()),
            textposition='outside',
            hovertemplate='<b>%{x}</b><br>Mentions: %{y}<extra></extra>',
            showlegend=False
        ),
        row=2, col=1
    )
    # Layout: axis labels, horizontal legend above the figure.
    fig.update_xaxes(title_text="Year", row=1, col=1, gridcolor='lightgray')
    fig.update_yaxes(title_text="Number of Mentions", row=1, col=1, gridcolor='lightgray')
    fig.update_xaxes(title_text="Organization", row=2, col=1)
    fig.update_yaxes(title_text="Total Mentions", row=2, col=1, gridcolor='lightgray')
    fig.update_layout(
        height=700,
        hovermode='closest',
        template='plotly_white',
        showlegend=True,
        legend=dict(
            orientation="h",
            yanchor="bottom",
            y=1.02,
            xanchor="right",
            x=1
        ),
        font=dict(size=11)
    )
    return fig, dict(doc_mentions)
def generate_topic_keywords(vector_db, topic, k=30):
    """Extract high-frequency keywords from chunks related to *topic*.

    Simplified frequency counting (a real pipeline would use an NLP
    library for tokenization/lemmatization).

    Args:
        vector_db: Vector store exposing ``similarity_search(query, k)``.
        topic: Topic query string.
        k: Number of chunks to retrieve.

    Returns:
        list[tuple[str, int]]: The 20 most common ``(word, count)`` pairs.
        (The original docstring claimed a ``Counter``; ``most_common``
        returns a list.)
    """
    # Relies on the module-level `Counter` and `re` imports; the original
    # re-imported both locally, shadowing them to no effect.
    docs = vector_db.similarity_search(topic, k=k)
    # Words of 4+ lowercase letters, minus a small stopword list.
    stopwords = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
                 'of', 'with', 'by', 'from', 'as', 'is', 'was', 'are', 'been', 'be',
                 'this', 'that', 'these', 'those', 'can', 'will', 'would', 'should'}
    word_counts = Counter()
    for doc in docs:
        words = re.findall(r'\b[a-z]{4,}\b', doc.page_content.lower())
        word_counts.update(w for w in words if w not in stopwords)
    return word_counts.most_common(20)
# Smoke test: running the module directly confirms it imports cleanly.
if __name__ == "__main__":
    banner = "Timeline analyzer module loaded successfully"
    print(banner)