|
| 1 | +from dotenv import load_dotenv |
| 2 | + |
| 3 | +import asyncio |
| 4 | +import cocoindex |
| 5 | +import datetime |
| 6 | +import os |
| 7 | + |
@cocoindex.flow_def(name="AmazonS3TextEmbedding")
def amazon_s3_text_embedding_flow(flow_builder: cocoindex.FlowBuilder, data_scope: cocoindex.DataScope):
    """
    Example flow: read documents from an Amazon S3 bucket, split each one
    into chunks, embed every chunk, and export the embeddings to Postgres.
    """
    # S3 location comes from the environment; the prefix is optional.
    s3_bucket = os.environ["AMAZON_S3_BUCKET_NAME"]
    s3_prefix = os.environ.get("AMAZON_S3_PREFIX", None)

    s3_source = cocoindex.sources.AmazonS3(
        bucket_name=s3_bucket,
        prefix=s3_prefix,
        included_patterns=["*.md", "*.txt", "*.docx"],
        binary=False)
    # Re-scan the bucket every minute so new/changed objects are picked up.
    data_scope["documents"] = flow_builder.add_source(
        s3_source,
        refresh_interval=datetime.timedelta(minutes=1))

    doc_embeddings = data_scope.add_collector()

    with data_scope["documents"].row() as doc:
        # Split each document into overlapping chunks.
        doc["chunks"] = doc["content"].transform(
            cocoindex.functions.SplitRecursively(),
            language="markdown",
            chunk_size=2000,
            chunk_overlap=500)

        with doc["chunks"].row() as chunk:
            # Embed every chunk with a sentence-transformers model.
            chunk["embedding"] = chunk["text"].transform(
                cocoindex.functions.SentenceTransformerEmbed(
                    model="sentence-transformers/all-MiniLM-L6-v2"))
            doc_embeddings.collect(
                filename=doc["filename"],
                location=chunk["location"],
                text=chunk["text"],
                embedding=chunk["embedding"])

    # Persist chunk text + embeddings to Postgres with a cosine vector index.
    doc_embeddings.export(
        "doc_embeddings",
        cocoindex.storages.Postgres(),
        primary_key_fields=["filename", "location"],
        vector_indexes=[
            cocoindex.VectorIndexDef(
                field_name="embedding",
                metric=cocoindex.VectorSimilarityMetric.COSINE_SIMILARITY)])
| 46 | + |
def _embed_query_text(text):
    """Embed incoming query text with the same model used to build the index."""
    return text.transform(
        cocoindex.functions.SentenceTransformerEmbed(
            model="sentence-transformers/all-MiniLM-L6-v2"))

# Query handler matching the flow's "doc_embeddings" target by cosine similarity.
query_handler = cocoindex.query.SimpleSemanticsQueryHandler(
    name="SemanticsSearch",
    flow=amazon_s3_text_embedding_flow,
    target_name="doc_embeddings",
    query_transform_flow=_embed_query_text,
    default_similarity_metric=cocoindex.VectorSimilarityMetric.COSINE_SIMILARITY)
| 55 | + |
@cocoindex.main_fn()
def _run():
    """
    Keep the flow's index continuously updated while serving interactive
    semantic-search queries from stdin.

    Exits on an empty query, Ctrl-C, or Ctrl-D (end-of-file).
    """
    # Use a `FlowLiveUpdater` to keep the flow data updated in the background.
    with cocoindex.FlowLiveUpdater(amazon_s3_text_embedding_flow):
        # Run queries in a loop to demonstrate the query capabilities.
        while True:
            try:
                query = input("Enter search query (or Enter to quit): ")
                if not query:
                    break
                # Top-10 results against the "doc_embeddings" target.
                results, _ = query_handler.search(query, 10)
                print("\nSearch results:")
                for result in results:
                    print(f"[{result.score:.3f}] {result.data['filename']}")
                    print(f" {result.data['text']}")
                    print("---")
                print()
            except (EOFError, KeyboardInterrupt):
                # input() raises EOFError on Ctrl-D / closed stdin; the original
                # only caught KeyboardInterrupt, so Ctrl-D crashed the loop.
                break
| 75 | + |
if __name__ == "__main__":
    # Load AWS/S3 and database settings from .env before the flow reads
    # os.environ; override=True lets .env values win over pre-set env vars.
    load_dotenv(override=True)
    _run()
0 commit comments