"""
Example usage of the refactored CodeBERT index components
"""
import logging
import torch
import numpy as np
from pathlib import Path

# Import refactored components
from models.code_models import CodeFile, CodeEmbedding
from parsers.code_parser import CodeParser
from indexers.codebert_indexer import CodeBERTIndexer
from indexers.vespa_embedding_store import VespaEmbeddingStore

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def example_usage():
    """Example showing how to use the refactored components"""
    # Initialize the indexer
    indexer = CodeBERTIndexer()

    # Example 1: Scan a codebase
    print("\n--- Example 1: Scanning a codebase ---")
    scan_path = "./utils"  # Adjust to a path that exists in your environment
    code_files = indexer.scan_codebase(scan_path)
    print(f"Found {len(code_files)} files in {scan_path}")

    if len(code_files) > 0:
        # Print details of the first file
        print(f"First file: {code_files[0].file_path}")
        print(f"Functions: {code_files[0].functions}")
        print(f"Classes: {code_files[0].classes}")

    # Example 2: Generate embeddings
    print("\n--- Example 2: Generating embeddings ---")
    if len(code_files) > 0:
        embeddings = indexer.generate_embeddings(code_files)
        print(f"Generated {len(embeddings)} embeddings")

    # Example 3: Use Vespa.ai for vector storage
    use_vespa = False  # Set to True to test Vespa integration
    if use_vespa and len(code_files) > 0:
        print("\n--- Example 3: Using Vespa.ai for vector storage ---")
        vespa_store = VespaEmbeddingStore()
        try:
            # Start a Vespa Docker container
            print("Starting Vespa Docker container (this may take some time)...")
            vespa_store.connect_to_docker()

            print("Storing embeddings in Vespa...")
            count = vespa_store.store_embeddings(code_files, embeddings)
            print(f"Stored {count} embeddings in Vespa")

            # Test search
            print("Testing search with Vespa...")
            query = "def process_data(input_file):"

            # Generate embedding for query
            inputs = indexer.tokenizer(
                query,
                padding=True,
                truncation=True,
                max_length=512,
                return_tensors='pt'
            )
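            # Move the tokenized query to the same device as the model before the forward pass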
            inputs = {k: v.to(indexer.device) for k, v in inputs.items()}
            with torch.no_grad():
                outputs = indexer.model(**inputs)
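            # Pool by taking the hidden state of the first token ([CLS]/<s>) as the query embedding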
            query_embedding = outputs.last_hidden_state[:, 0, :].cpu().numpy()

            results = vespa_store.search_similar_code(query_embedding, top_k=3)
            print(f"Top 3 Vespa results for query: {query}")
            for result in results:
                print(f"{result['score']:.4f}: {result['file_path']}")
        except Exception as e:
            print(f"Error with Vespa: {e}")
            print("Falling back to file-based storage...")

    # Example 4: Load an existing index from files
    print("\n--- Example 4: Loading an existing index ---")
    try:
        code_files, embeddings = indexer.load_index("./code_index")
        print(f"Loaded {len(code_files)} files and {len(embeddings)} embeddings")

        # Example 5: Search for similar code
        if len(embeddings) > 0:
            print("\n--- Example 5: Searching for similar code ---")
            query = "def process_data(input_file):"
            results = indexer.search_similar_code(query, embeddings, top_k=3)
            print(f"Top 3 results for query: {query}")
            for file_path, similarity in results:
                print(f"{similarity:.4f}: {file_path}")
    except FileNotFoundError:
        print("Index not found. Run 'python cli.py --scan ./your_code_path' to create an index.")


if __name__ == "__main__":
    example_usage()