-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathdigital-twin-extension.py
More file actions
595 lines (489 loc) · 24.6 KB
/
digital-twin-extension.py
File metadata and controls
595 lines (489 loc) · 24.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
import random
import time
class DigitalTwinGraph:
    """
    Extension of the Agentic Graph Reasoning system specifically designed for
    co-discovering a digital twin identity through interaction.

    The twin is a directed knowledge graph: "facet" nodes anchor broad
    identity areas (preferences, values, ...) while "concept" nodes are
    extracted from observed interactions and linked to facets and to each
    other via co-occurrence.
    """

    def __init__(self, base_graph=None):
        """Initialize the Digital Twin graph system.

        Parameters
        ----------
        base_graph : nx.DiGraph, optional
            Existing graph to seed the twin with. It is copied so the
            caller's graph is never mutated; a fresh DiGraph is created
            when omitted.
        """
        self.graph = nx.DiGraph() if base_graph is None else base_graph.copy()
        # State specific to the digital-twin representation.
        self.interaction_history = []                  # raw observations, in order
        self.preference_weights = defaultdict(float)   # concept -> cumulative weight
        self.discovery_state = {}                      # last reported theme per community
        self.bridge_nodes = []                         # high-betweenness connector nodes
        self.confidence_scores = {}
        # Basic identity facets that anchor the graph.
        self.identity_facets = [
            "preferences", "behaviors", "knowledge",
            "personality", "values", "relationships",
            "goals", "experiences", "skills"
        ]
        # Facet nodes start with low confidence that grows with evidence.
        for facet in self.identity_facets:
            self.graph.add_node(facet, type="facet", confidence=0.1)
        # Time series tracking emergent properties across interactions.
        self.emergence_metrics = {
            'new_connections': [],
            'bridge_confidence': [],
            'identity_coherence': []
        }

    def observe_interaction(self, interaction_data):
        """
        Process an interaction to extract identity information.

        Parameters
        ----------
        interaction_data : dict
            Data about an interaction, including:
            - action: What the user did
            - context: Where/when the action occurred
            - duration: How long the interaction lasted
            - response: How the system or environment responded
        """
        # Record the raw interaction with a wall-clock timestamp.
        self.interaction_history.append({
            'timestamp': time.time(),
            'data': interaction_data
        })
        # Extract weighted concepts from the interaction.
        concepts = self._extract_concepts_from_interaction(interaction_data)
        # Update preference weights and the graph for each concept.
        for concept, weight in concepts.items():
            self.preference_weights[concept] += weight
            if concept not in self.graph:
                # New concept: attach it under its most related facet.
                related_facet = self._determine_related_facet(concept)
                self.graph.add_node(concept, type="concept",
                                    weight=weight, confidence=0.1)
                self.graph.add_edge(related_facet, concept,
                                    relation="INCLUDES", weight=0.1)
            else:
                # Existing concept: accumulate weight and nudge confidence
                # upward (capped at 1.0) with each repeated observation.
                current_weight = self.graph.nodes[concept].get('weight', 0)
                self.graph.nodes[concept]['weight'] = current_weight + weight
                current_conf = self.graph.nodes[concept].get('confidence', 0.1)
                self.graph.nodes[concept]['confidence'] = min(current_conf + 0.05, 1.0)
        # Concepts observed in the same interaction are related: create or
        # strengthen an edge for every unordered pair.
        concept_nodes = list(concepts.keys())
        for i in range(len(concept_nodes)):
            for j in range(i + 1, len(concept_nodes)):
                source = concept_nodes[i]
                target = concept_nodes[j]
                if self.graph.has_edge(source, target):
                    # Strengthen existing edge.
                    current_weight = self.graph.edges[source, target].get('weight', 0.1)
                    self.graph.edges[source, target]['weight'] = current_weight + 0.1
                else:
                    # Create a new edge with an inferred relation type.
                    relation = self._infer_relation(source, target, interaction_data)
                    self.graph.add_edge(source, target, relation=relation, weight=0.1)
        # Refresh emergence metrics after every observation.
        self._update_emergence_metrics()

    def _extract_concepts_from_interaction(self, interaction_data):
        """
        Extract relevant concepts and their importance weights from an interaction.

        This is a simplified implementation - in a real system, this would use
        more sophisticated NLP and behavioral analysis.

        Returns
        -------
        dict
            Mapping of word -> weight. Action words contribute 0.5 each,
            context words 0.3 each; all weights are scaled by a duration
            factor capped at 2.0 (duration is in the caller's units,
            divided by 60 — presumably minutes/hour; TODO confirm).
        """
        concepts = {}
        # Extract from action: words longer than 3 chars, weight 0.5 each.
        if 'action' in interaction_data:
            action = interaction_data['action']
            # In a real implementation, this would use NLP to extract concepts.
            action_words = action.lower().split()
            for word in action_words:
                if len(word) > 3:  # Simple filter for meaningful words
                    concepts[word] = concepts.get(word, 0) + 0.5
        # Extract from context: same filter, lower weight 0.3.
        if 'context' in interaction_data:
            context = interaction_data['context']
            context_words = context.lower().split()
            for word in context_words:
                if len(word) > 3:
                    concepts[word] = concepts.get(word, 0) + 0.3
        # Longer interactions indicate stronger interest; scale weights.
        if 'duration' in interaction_data:
            duration = interaction_data['duration']
            duration_factor = min(duration / 60.0, 2.0)  # Cap at 2x for very long interactions
            for concept in concepts:
                concepts[concept] *= duration_factor
        return concepts

    def _determine_related_facet(self, concept):
        """Determine which identity facet a concept is most related to.

        In a real implementation this would use semantic analysis; for
        demonstration a facet is chosen at random.
        """
        return random.choice(self.identity_facets)

    def _infer_relation(self, source, target, interaction_data):
        """Infer the relationship type between two concepts.

        In a real implementation this would use context and semantic
        analysis; for demonstration a relation type is chosen at random.
        """
        relations = [
            "RELATED_TO", "INFLUENCES", "CONTRASTS_WITH",
            "PART_OF", "LEADS_TO", "ASSOCIATED_WITH"
        ]
        return random.choice(relations)

    def _update_emergence_metrics(self):
        """Update metrics tracking emergent properties of the digital twin."""
        # Edges still at (or below) the initial 0.1 weight are treated as
        # "new" connections for this update.
        new_connections = sum(1 for edge in self.graph.edges(data=True)
                              if edge[2].get('weight', 0) <= 0.1)
        self.emergence_metrics['new_connections'].append(new_connections)
        # Bridge nodes: top-5 by betweenness centrality (needs a few nodes
        # to be meaningful).
        if self.graph.number_of_nodes() > 5:
            try:
                betweenness = nx.betweenness_centrality(self.graph)
                sorted_nodes = sorted(betweenness.items(), key=lambda x: x[1], reverse=True)
                self.bridge_nodes = [node for node, score in sorted_nodes[:5]]
                bridge_conf = np.mean([self.graph.nodes[node].get('confidence', 0)
                                       for node in self.bridge_nodes])
                self.emergence_metrics['bridge_confidence'].append(bridge_conf)
            except Exception:
                # Centrality can fail on degenerate graphs; record 0 rather
                # than crashing the observation pipeline.
                self.emergence_metrics['bridge_confidence'].append(0)
        else:
            self.emergence_metrics['bridge_confidence'].append(0)
        # Identity coherence: average clustering coefficient as a proxy.
        try:
            coherence = nx.average_clustering(self.graph.to_undirected())
            self.emergence_metrics['identity_coherence'].append(coherence)
        except Exception:
            self.emergence_metrics['identity_coherence'].append(0)

    def discover_identity_aspects(self):
        """
        Analyze the current graph to discover emergent aspects of the digital
        twin's identity. Returns new discoveries since the last call.

        Returns
        -------
        dict
            {"status": "insufficient_data", "discoveries": {}} when fewer
            than 10 nodes exist; otherwise {"status": "success",
            "discoveries": ..., "bridge_nodes": ...}.
        """
        new_discoveries = {}
        # Only perform discovery if the graph has enough data.
        if self.graph.number_of_nodes() < 10:
            return {"status": "insufficient_data", "discoveries": {}}
        # Identify communities (potential identity aspects).
        communities = self._identify_communities()
        # Extract key themes, reporting only new/significantly updated ones.
        for i, community in enumerate(communities):
            theme = self._extract_community_theme(community)
            community_key = f"community_{i}"
            if community_key not in self.discovery_state or self._is_significant_update(
                    self.discovery_state.get(community_key, {}), theme):
                new_discoveries[community_key] = theme
                self.discovery_state[community_key] = theme
        # Identify potential contradictions or tensions.
        contradictions = self._identify_contradictions()
        if contradictions:
            new_discoveries["contradictions"] = contradictions
        # Strongest preferences are always reported.
        top_preferences = self._identify_top_preferences()
        new_discoveries["preferences"] = top_preferences
        return {
            "status": "success",
            "discoveries": new_discoveries,
            "bridge_nodes": self.bridge_nodes
        }

    def _identify_communities(self):
        """Identify communities in the graph via greedy modularity maximization.

        NOTE: moved into the class — this method was previously defined at
        module level after the __main__ guard, so the self._identify_communities()
        call above raised AttributeError.
        """
        try:
            # Community detection requires an undirected graph.
            undirected = self.graph.to_undirected()
            communities = list(nx.community.greedy_modularity_communities(undirected))
            return communities
        except Exception:
            # Detection can fail on degenerate graphs; treat as "none found".
            return []

    def _extract_community_theme(self, community):
        """Extract the central theme or concept from a community.

        NOTE: moved into the class — previously stranded at module level
        after the __main__ guard.
        """
        if not community:
            return {"name": "unknown", "confidence": 0}
        # Work on the induced subgraph of just this community.
        subgraph = self.graph.subgraph(community)
        # Highest-degree node serves as the community's central concept.
        central_node = max(subgraph.degree(), key=lambda x: x[1])[0]
        neighbors = list(subgraph.neighbors(central_node))
        # Community-level averages as rough importance/confidence proxies.
        avg_weight = np.mean([subgraph.nodes[node].get('weight', 0)
                              for node in subgraph.nodes()])
        avg_confidence = np.mean([subgraph.nodes[node].get('confidence', 0)
                                  for node in subgraph.nodes()])
        return {
            "name": central_node,
            "related_concepts": neighbors[:5],  # slicing already caps at 5
            "weight": avg_weight,
            "confidence": avg_confidence
        }

    def _is_significant_update(self, old_theme, new_theme):
        """Determine if a new theme is significantly different from the old one.

        Significant means: confidence moved by more than 0.2, or the set of
        related concepts changed by more than half of its previous size.
        """
        if abs(new_theme["confidence"] - old_theme.get("confidence", 0)) > 0.2:
            return True
        old_concepts = set(old_theme.get("related_concepts", []))
        new_concepts = set(new_theme["related_concepts"])
        if len(old_concepts.symmetric_difference(new_concepts)) > len(old_concepts) / 2:
            return True
        return False

    def _identify_contradictions(self):
        """Identify potential contradictions or tensions in the identity.

        Looks for important nodes (weight > 0.5) with outgoing
        CONTRASTS_WITH edges; each such pair is reported with the lower of
        the two nodes' confidences.
        """
        contradictions = []
        important_nodes = [n for n, d in self.graph.nodes(data=True)
                           if d.get('weight', 0) > 0.5]
        for node in important_nodes:
            out_edges = list(self.graph.out_edges(node, data=True))
            relations = [edge[2].get('relation', '') for edge in out_edges]
            if 'CONTRASTS_WITH' in relations:
                contrasting_nodes = [edge[1] for edge in out_edges
                                     if edge[2].get('relation', '') == 'CONTRASTS_WITH']
                for contrast_node in contrasting_nodes:
                    contradictions.append({
                        "concept1": node,
                        "concept2": contrast_node,
                        "confidence": min(self.graph.nodes[node].get('confidence', 0),
                                          self.graph.nodes[contrast_node].get('confidence', 0))
                    })
        return contradictions

    def _identify_top_preferences(self):
        """Identify the strongest preferences based on node weights.

        Returns up to five concept nodes, ordered by descending weight,
        each with its weight and confidence.
        """
        weighted_nodes = [(n, d.get('weight', 0)) for n, d in self.graph.nodes(data=True)
                          if d.get('type', '') == 'concept']
        sorted_nodes = sorted(weighted_nodes, key=lambda x: x[1], reverse=True)
        top_count = min(5, len(sorted_nodes))
        top_preferences = [{
            "concept": node,
            "weight": weight,
            "confidence": self.graph.nodes[node].get('confidence', 0)
        } for node, weight in sorted_nodes[:top_count]]
        return top_preferences

    def visualize_identity_graph(self, figsize=(14, 12), save_path=None):
        """Visualize the current state of the digital twin identity graph.

        Facets are light blue; concepts are shaded red by confidence and
        sized by weight; bridge nodes are overlaid in yellow.
        """
        plt.figure(figsize=figsize)
        # Spring layout with a fixed seed for reproducible pictures.
        pos = nx.spring_layout(self.graph, seed=42)
        # Node colors: facets light blue; concepts darker red = higher confidence.
        node_colors = []
        for node in self.graph.nodes():
            if self.graph.nodes[node].get('type') == 'facet':
                node_colors.append('lightblue')
            else:
                confidence = self.graph.nodes[node].get('confidence', 0)
                node_colors.append((1.0, 1.0 - confidence * 0.8, 1.0 - confidence * 0.8))
        # Node sizes scale with weight (base size + weight factor).
        node_sizes = []
        for node in self.graph.nodes():
            weight = self.graph.nodes[node].get('weight', 0.1)
            node_sizes.append(300 + weight * 500)
        nx.draw_networkx_nodes(self.graph, pos, node_color=node_colors,
                               node_size=node_sizes, alpha=0.8)
        # Edge colors keyed on relation type.
        edge_colors = []
        for _, _, data in self.graph.edges(data=True):
            relation = data.get('relation', '')
            if relation == 'INCLUDES':
                edge_colors.append('blue')
            elif relation == 'RELATED_TO':
                edge_colors.append('green')
            elif relation == 'CONTRASTS_WITH':
                edge_colors.append('red')
            else:
                edge_colors.append('gray')
        nx.draw_networkx_edges(self.graph, pos, edge_color=edge_colors, width=1.5, alpha=0.7)
        # Label only the heavier (more important) edges to reduce clutter.
        important_edges = {(u, v): d['relation'] for u, v, d in self.graph.edges(data=True)
                           if d.get('weight', 0) > 0.3}
        nx.draw_networkx_edge_labels(self.graph, pos, edge_labels=important_edges, font_size=8)
        nx.draw_networkx_labels(self.graph, pos, font_size=10)
        # Highlight bridge nodes on top of the normal node drawing.
        if self.bridge_nodes:
            nx.draw_networkx_nodes(self.graph, pos, nodelist=self.bridge_nodes,
                                   node_color='yellow', node_size=[
                                       self.graph.nodes[node].get('weight', 0.1) * 600 + 400
                                       for node in self.bridge_nodes
                                   ])
        plt.title("Digital Twin Identity Graph")
        plt.axis('off')
        # Apply the layout BEFORE saving so the exported file matches the
        # on-screen figure (original saved first, missing the layout pass).
        plt.tight_layout()
        if save_path:
            plt.savefig(save_path, bbox_inches='tight')
        plt.show()

    def plot_emergence_metrics(self, figsize=(15, 5), save_path=None):
        """Plot the evolution of emergence metrics over interactions."""
        if not self.interaction_history:
            print("No interactions to plot metrics for.")
            return
        interactions = list(range(len(self.interaction_history)))
        fig, axes = plt.subplots(1, 3, figsize=figsize)
        # Panel 1: new connections per interaction.
        if self.emergence_metrics['new_connections']:
            axes[0].plot(interactions[:len(self.emergence_metrics['new_connections'])],
                         self.emergence_metrics['new_connections'], 'b-')
            axes[0].set_title('New Connections per Interaction')
            axes[0].set_xlabel('Interaction')
            axes[0].set_ylabel('Count')
        # Panel 2: average confidence of bridge nodes.
        if self.emergence_metrics['bridge_confidence']:
            axes[1].plot(interactions[:len(self.emergence_metrics['bridge_confidence'])],
                         self.emergence_metrics['bridge_confidence'], 'g-')
            axes[1].set_title('Bridge Node Confidence')
            axes[1].set_xlabel('Interaction')
            axes[1].set_ylabel('Confidence')
        # Panel 3: identity coherence (clustering coefficient proxy).
        if self.emergence_metrics['identity_coherence']:
            axes[2].plot(interactions[:len(self.emergence_metrics['identity_coherence'])],
                         self.emergence_metrics['identity_coherence'], 'r-')
            axes[2].set_title('Identity Coherence')
            axes[2].set_xlabel('Interaction')
            axes[2].set_ylabel('Coherence')
        plt.tight_layout()
        if save_path:
            plt.savefig(save_path, bbox_inches='tight')
        plt.show()
# Demonstration function
def run_digital_twin_demo(num_interactions=20, seed=None):
    """
    Run a demonstration of the Digital Twin Graph system.

    This simulates a series of user interactions and shows how the digital twin
    identity emerges through the co-discovery process.

    Parameters
    ----------
    num_interactions : int
        Number of simulated interactions to feed into the twin.
    seed : int, optional
        Seed for the ``random`` module, making the simulated interaction
        sequence reproducible. ``None`` (default) preserves the original
        non-deterministic behavior.

    Returns
    -------
    DigitalTwinGraph
        The twin object after all interactions, for further exploration.
    """
    # Optional reproducibility: seed the shared RNG used throughout.
    if seed is not None:
        random.seed(seed)
    # Initialize the digital twin graph
    twin = DigitalTwinGraph()
    # Sample interaction types for simulation
    interaction_types = [
        {"type": "content_engagement", "actions": [
            "read article about AI ethics",
            "watched video on quantum computing",
            "explored virtual art gallery",
            "listened to classical music",
            "played strategy game",
        ]},
        {"type": "creation", "actions": [
            "wrote poem about nature",
            "designed 3D model of futuristic city",
            "composed electronic music track",
            "sketched portrait of friend",
            "built virtual garden simulation",
        ]},
        {"type": "communication", "actions": [
            "discussed philosophy with friend",
            "debated ethical implications of AI",
            "shared scientific article with colleagues",
            "asked question about programming",
            "gave feedback on creative project",
        ]},
        {"type": "environment_change", "actions": [
            "customized virtual workspace with plants",
            "changed color scheme to blue tones",
            "organized digital files by project",
            "added ambient background sounds",
            "adjusted lighting to evening mode",
        ]}
    ]
    # Sample contexts
    contexts = [
        "morning relaxation time",
        "focused work session",
        "creative exploration hour",
        "social connection period",
        "learning new skills time",
        "problem-solving session",
        "leisure browsing time",
        "virtual meeting",
        "collaborative project space",
        "contemplative reflection space"
    ]
    print(f"Running digital twin simulation with {num_interactions} interactions...")
    # Simulate interactions
    discoveries = []
    for i in range(num_interactions):
        # Generate a random interaction
        interaction_category = random.choice(interaction_types)
        action = random.choice(interaction_category["actions"])
        context = random.choice(contexts)
        duration = random.randint(5, 120)  # Duration in minutes
        # Create interaction data
        interaction = {
            "action": action,
            "context": context,
            "duration": duration,
            "category": interaction_category["type"]
        }
        print(f"\nInteraction {i+1}:")
        print(f" Action: {action}")
        print(f" Context: {context}")
        print(f" Duration: {duration} minutes")
        # Process the interaction
        twin.observe_interaction(interaction)
        # Every five interactions (and after the last one), check for
        # emergent properties.
        if (i+1) % 5 == 0 or i == num_interactions - 1:
            print(f"\nAnalyzing identity after {i+1} interactions...")
            discovery = twin.discover_identity_aspects()
            if discovery["status"] == "insufficient_data":
                print(" Not enough data yet for meaningful discovery.")
            else:
                discoveries.append(discovery)
                # Print new discoveries
                print(" New identity aspects discovered:")
                for key, value in discovery["discoveries"].items():
                    if key == "preferences":
                        print(" Top preferences:")
                        for pref in value:
                            print(f" - {pref['concept']} (confidence: {pref['confidence']:.2f})")
                    elif key == "contradictions":
                        print(" Potential contradictions:")
                        for contra in value:
                            print(f" - {contra['concept1']} vs {contra['concept2']}")
                    elif key.startswith("community"):
                        print(f" Community theme: {value['name']}")
                        if "related_concepts" in value and value["related_concepts"]:
                            print(f" Related concepts: {', '.join(value['related_concepts'])}")
                print(" Bridge nodes (connecting different aspects):")
                for node in discovery["bridge_nodes"]:
                    print(f" - {node}")
    # Final visualization
    print("\nGenerating final visualizations...")
    # Visualize the graph
    twin.visualize_identity_graph()
    # Plot emergence metrics
    twin.plot_emergence_metrics()
    print("\nDigital twin co-discovery complete!")
    print(f"Final graph has {twin.graph.number_of_nodes()} nodes and {twin.graph.number_of_edges()} edges")
    # Return the twin object for further exploration
    return twin
if __name__ == "__main__":
    # Demo entry point: simulate 30 interactions and build a twin.
    twin = run_digital_twin_demo(num_interactions=30)
def _identify_communities(self):
    """Identify communities in the graph using greedy modularity maximization.

    NOTE(review): this takes ``self`` and is called as
    ``self._identify_communities()`` by DigitalTwinGraph.discover_identity_aspects,
    yet it sits at module level after the ``__main__`` guard — it looks like a
    misplaced method; confirm intended placement.

    Returns
    -------
    list
        Communities (sets of nodes) found in the undirected projection of
        ``self.graph``, or an empty list if detection fails.
    """
    try:
        # Community detection requires an undirected graph.
        undirected = self.graph.to_undirected()
        communities = list(nx.community.greedy_modularity_communities(undirected))
        return communities
    except Exception:
        # Narrowed from a bare except (which also swallowed SystemExit /
        # KeyboardInterrupt); detection failures still degrade gracefully.
        return []
def _extract_community_theme(self, community):
    """Extract the central theme or concept from a community.

    NOTE(review): takes ``self`` but is defined at module level after the
    ``__main__`` guard — it looks like a misplaced DigitalTwinGraph method;
    confirm intended placement.

    Parameters
    ----------
    community : iterable
        Node collection forming one community of ``self.graph``.

    Returns
    -------
    dict
        {"name": "unknown", "confidence": 0} for an empty community;
        otherwise the highest-degree node as "name", up to five of its
        neighbors, and the community's average weight and confidence.
    """
    if not community:
        return {"name": "unknown", "confidence": 0}
    # Work on the induced subgraph of just this community.
    subgraph = self.graph.subgraph(community)
    # Highest-degree node serves as the community's central concept.
    central_node = max(subgraph.degree(), key=lambda x: x[1])[0]
    # Immediate neighbors are the related concepts.
    neighbors = list(subgraph.neighbors(central_node))
    # Community-level averages as rough importance/confidence proxies.
    avg_weight = np.mean([subgraph.nodes[node].get('weight', 0)
                          for node in subgraph.nodes()])
    avg_confidence = np.mean([subgraph.nodes[node].get('confidence', 0)
                              for node in subgraph.nodes()])
    return {
        "name": central_node,
        # Slicing already caps at five when fewer neighbors exist, so the
        # original "if len(neighbors) >= 5" conditional was redundant.
        "related_concepts": neighbors[:5],
        "weight": avg_weight,
        "confidence": avg_confidence
    }