|
1 | | -from flask import Flask, jsonify, request, send_file, url_for |
| 1 | +from flask import Flask, jsonify, request, send_file, url_for, Response
2 | 2 | from flask_cors import CORS |
3 | 3 | from services.topic_service import TopicService |
4 | 4 | from services.ai_service import AITopicProcessor |
5 | 5 | from services.gexf_node_service import GexfNodeGenerator |
6 | 6 | from services.edge_generation_service import EdgeGenerationService |
| 7 | +from services.graphrag_service import graphrag_service |
7 | 8 | import os |
8 | 9 | import asyncio |
9 | 10 | import re |
10 | 11 | import json |
| 12 | +import time |
11 | 13 |
|
12 | 14 | app = Flask(__name__, static_folder='gexf', static_url_path='/gexf') |
13 | 15 | CORS( |
|
26 | 28 | gexf_node_service = GexfNodeGenerator() |
27 | 29 | edge_generation_service = EdgeGenerationService() |
28 | 30 |
|
| 31 | +# Global progress tracking for GraphRAG setup |
| 32 | +graphrag_progress = { |
| 33 | + "current_step": "", |
| 34 | + "current": 0, |
| 35 | + "total": 0, |
| 36 | + "message": "", |
| 37 | + "status": "idle" # idle, running, completed, error |
| 38 | +} |
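| | +# NOTE: module-level mutable state like this is only safe with a single
| | +# worker process; a multi-worker deployment would need a shared store
| | +# (e.g. Redis) for progress tracking.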
| 39 | + |
| 40 | +# Whether the GraphRAG backend has completed setup
| 41 | +graphrag_ready = False |
| 42 | + |
| 43 | +@app.route("/api/graphrag-health", methods=["GET"]) |
| 44 | +def graphrag_health(): |
| 45 | + """Check if GraphRAG backend is ready and set up.""" |
| 46 | + global graphrag_ready |
| 47 | + try: |
| 48 | + if graphrag_ready: |
| 49 | + return jsonify({ |
| 50 | + "success": True, |
| 51 | + "ready": True, |
| 52 | + "message": "GraphRAG backend is ready" |
| 53 | + }) |
| 54 | + else: |
| 55 | + return jsonify({ |
| 56 | + "success": True, |
| 57 | + "ready": False, |
| 58 | + "message": "GraphRAG backend is not set up" |
| 59 | + }), 503 |
| 60 | + except Exception as e: |
| 61 | + return jsonify({ |
| 62 | + "success": False, |
| 63 | + "ready": False, |
| 64 | + "error": str(e), |
| 65 | + "message": "Error checking GraphRAG health" |
| 66 | + }), 500 |
| 67 | + |
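| | +# Usage sketch (assuming the default dev server on http://localhost:5000):
| | +#   import requests
| | +#   r = requests.get("http://localhost:5000/api/graphrag-health")
| | +#   r.status_code  # 503 until /api/graphrag-setup completes, then 200
| | +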
29 | 68 |
|
30 | 69 | @app.route("/api/process-topics", methods=["GET", "POST"]) |
31 | 70 | def process_topics(): |
@@ -524,15 +563,16 @@ def create_edges_on_graph(): |
524 | 563 | "shared_organization_enabled": false, |
525 | 564 | "common_stargazers_enabled": true, |
526 | 565 | "stargazer_threshold": 5, |
527 | | - "use_and_logic": false, |
528 | | - "strict_and_logic": true |
529 | | - } |
| 566 | + "use_and_logic": false |
| 567 | + }, |
| 568 | + "filtered_node_ids": ["node1", "node2", ...] // Optional: only consider these nodes for edge creation |
530 | 569 | } |
531 | 570 | """ |
532 | 571 | try: |
533 | 572 | data = request.get_json() |
534 | 573 | gexf_content = data.get("gexfContent", "") |
535 | 574 | criteria_config = data.get("criteria_config", {}) |
| 575 | +        filtered_node_ids = data.get("filtered_node_ids", None)  # Optional subset of node IDs to restrict edge creation
536 | 576 |
|
537 | 577 | if not gexf_content: |
538 | 578 | return jsonify({ |
@@ -582,7 +622,7 @@ def create_edges_on_graph(): |
582 | 622 | edge_service = EdgeGenerationService() |
583 | 623 |
|
584 | 624 | # Create edges based on the criteria |
585 | | - edges_created = edge_service.create_edges_on_existing_graph(G, criteria_config) |
| 625 | + edges_created = edge_service.create_edges_on_existing_graph(G, criteria_config, filtered_node_ids) |
586 | 626 |
|
587 | 627 | # Save the updated graph |
588 | 628 | import hashlib |
@@ -627,6 +667,153 @@ def create_edges_on_graph(): |
627 | 667 | }), 500 |
628 | 668 |
|
629 | 669 |
|
| 670 | +@app.route("/api/graphrag-reset-progress", methods=["POST", "OPTIONS"]) |
| 671 | +def graphrag_reset_progress_endpoint(): |
| 672 | + """Reset GraphRAG progress status to initial state.""" |
| 673 | + if request.method == "OPTIONS": |
| 674 | + return "", 200 |
| 675 | + |
| 676 | + global graphrag_progress |
| 677 | + graphrag_progress = { |
| 678 | + "current_step": "Initializing...", |
| 679 | + "current": 0, |
| 680 | + "total": 100, |
| 681 | + "message": "Preparing GraphRAG setup", |
| 682 | + "status": "running" |
| 683 | + } |
| 684 | + return jsonify({"success": True, "message": "Progress reset"}) |
| 685 | + |
| 686 | +@app.route("/api/graphrag-progress", methods=["GET"]) |
| 687 | +def graphrag_progress_endpoint(): |
| 688 | + """Server-Sent Events endpoint for GraphRAG progress updates.""" |
| 689 | + def generate(): |
| 690 | + while True: |
| 691 | + # Send current progress |
| 692 | + data = f"data: {json.dumps(graphrag_progress)}\n\n" |
| 693 | + yield data |
| 694 | + |
| 695 | + # If completed or error, stop streaming |
| 696 | + if graphrag_progress["status"] in ["completed", "error"]: |
| 697 | + break |
| 698 | + |
| 699 | +            time.sleep(0.5)  # poll twice per second so clients see timely updates
| 700 | + |
| 701 | + return Response(generate(), mimetype="text/event-stream") |
| 702 | + |
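| | +# Client sketch for consuming this stream (assumes the `requests` package
| | +# and a server on localhost:5000); note that buffering proxies may also
| | +# require a Cache-Control: no-cache header on the response:
| | +#   import requests, json
| | +#   with requests.get("http://localhost:5000/api/graphrag-progress",
| | +#                     stream=True) as resp:
| | +#       for line in resp.iter_lines():
| | +#           if line.startswith(b"data: "):
| | +#               print(json.loads(line[len(b"data: "):]))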
| 703 | + |
| 704 | +@app.route("/api/graphrag-setup", methods=["POST"]) |
| 705 | +def graphrag_setup_endpoint(): |
| 706 | + """GraphRAG setup endpoint with progress tracking.""" |
| 707 | + global graphrag_progress, graphrag_ready |
| 708 | + |
| 709 | + try: |
| 710 | +        data = request.get_json(silent=True) or {}  # tolerate missing/invalid JSON bodies
| 711 | + |
| 712 | + # Extract parameters |
| 713 | + provider = data.get("provider", "openai") |
| 714 | + api_keys = data.get("apiKeys", {}) |
| 715 | + graph_file = data.get("graphFile", "") |
| 716 | + |
| 717 | + if not graph_file: |
| 718 | + return jsonify({ |
| 719 | + "success": False, |
| 720 | + "error": "Graph file is required", |
| 721 | + "message": "Please provide a graph file" |
| 722 | + }), 400 |
| 723 | + |
| 724 | + github_token = api_keys.get("githubToken", "") |
| 725 | + if not github_token: |
| 726 | + return jsonify({ |
| 727 | + "success": False, |
| 728 | + "error": "GitHub token is required", |
| 729 | + "message": "Please provide a GitHub personal access token" |
| 730 | + }), 400 |
| 731 | + |
| 732 | + # Reset progress |
| 733 | + graphrag_progress = { |
| 734 | + "current_step": "Starting setup...", |
| 735 | + "current": 0, |
| 736 | + "total": 100, |
| 737 | + "message": "Initializing GraphRAG setup", |
| 738 | + "status": "running" |
| 739 | + } |
| 740 | + |
| 741 | + # Setup database from GEXF content with progress updates |
| 742 | + setup_result = graphrag_service.setup_database_from_gexf_with_progress(graph_file, github_token, graphrag_progress) |
| 743 | + if not setup_result["success"]: |
| 744 | + graphrag_progress["status"] = "error" |
| 745 | + graphrag_progress["message"] = setup_result.get("error", "Setup failed") |
| 746 | + return jsonify(setup_result), 500 |
| 747 | + |
| 748 | + # Initialize GraphRAG with the selected provider |
| 749 | + graphrag_progress["current_step"] = "Initializing AI system..." |
| 750 | + graphrag_progress["current"] = 90 |
| 751 | + graphrag_progress["message"] = "Setting up AI analysis system" |
| 752 | + |
| 753 | + init_result = graphrag_service.initialize_graphrag(provider, api_keys) |
| 754 | + if not init_result["success"]: |
| 755 | + graphrag_progress["status"] = "error" |
| 756 | + graphrag_progress["message"] = init_result.get("error", "AI initialization failed") |
| 757 | + return jsonify(init_result), 500 |
| 758 | + |
| 759 | + # Mark as completed and set ready flag |
| 760 | + graphrag_progress["status"] = "completed" |
| 761 | + graphrag_progress["current"] = 100 |
| 762 | + graphrag_progress["message"] = "GraphRAG setup completed successfully!" |
| 763 | + graphrag_ready = True |
| 764 | + |
| 765 | + return jsonify({ |
| 766 | + "success": True, |
| 767 | + "message": "GraphRAG setup completed successfully", |
| 768 | + "ready": True |
| 769 | + }) |
| 770 | + |
| 771 | + except Exception as e: |
| 772 | + graphrag_progress["status"] = "error" |
| 773 | + graphrag_progress["message"] = str(e) |
| 774 | + return jsonify({ |
| 775 | + "success": False, |
| 776 | + "error": str(e), |
| 777 | + "message": "An error occurred during GraphRAG setup" |
| 778 | + }), 500 |
| 779 | + |
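| | +# Typical client flow (sketch): POST /api/graphrag-reset-progress, subscribe
| | +# to the SSE stream at /api/graphrag-progress, then POST /api/graphrag-setup;
| | +# the stream terminates once status reaches "completed" or "error".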
| 780 | + |
| 781 | +@app.route("/api/graphrag", methods=["POST"]) |
| 782 | +def graphrag_endpoint(): |
| 783 | + """GraphRAG endpoint for AI-powered graph analysis.""" |
| 784 | + try: |
| 785 | +        data = request.get_json(silent=True) or {}  # tolerate missing/invalid JSON bodies
| 786 | + |
| 787 | +        # Extract parameters (provider/apiKeys are accepted but unused here;
| 787 | +        # the provider is fixed when /api/graphrag-setup runs)
| 788 | + query = data.get("query", "") |
| 789 | + provider = data.get("provider", "openai") |
| 790 | + api_keys = data.get("apiKeys", {}) |
| 791 | + |
| 792 | + if not query: |
| 793 | + return jsonify({ |
| 794 | + "success": False, |
| 795 | + "error": "Query is required", |
| 796 | + "message": "Please provide a query" |
| 797 | + }), 400 |
| 798 | + |
| 799 | + # Execute the query |
| 800 | + query_result = graphrag_service.query_graphrag(query) |
| 801 | + if not query_result["success"]: |
| 802 | + return jsonify(query_result), 500 |
| 803 | + |
| 804 | + return jsonify({ |
| 805 | + "success": True, |
| 806 | + "result": query_result["result"] |
| 807 | + }) |
| 808 | + |
| 809 | + except Exception as e: |
| 810 | + return jsonify({ |
| 811 | + "success": False, |
| 812 | + "error": str(e), |
| 813 | + "message": "An error occurred while processing the GraphRAG query" |
| 814 | + }), 500 |
| 815 | + |
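| | +# Usage sketch (assuming localhost:5000 and a completed setup):
| | +#   import requests
| | +#   resp = requests.post("http://localhost:5000/api/graphrag",
| | +#                        json={"query": "Which repositories are most central?"})
| | +#   resp.json()["result"]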
| 816 | + |
630 | 817 | @app.route("/") |
631 | 818 | def home(): |
632 | 819 | return "Hello World!" |
|