Replies: 1 comment 1 reply
-
Hi, do you have any files in |
Beta Was this translation helpful? Give feedback.
1 reply
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Uh oh!
There was an error while loading. Please reload this page.
-
Actually, I can run the workflow successfully. But when running create_final_entities, output_tokens was always 0, which confused me. I don't know if this will have any effect on the final result. Can someone tell me what the main purpose of this step is? Is output_tokens usually 0? If 0 is not normal, what might cause it?
The embedding model I used was GLM's embedding-3. I also tried the nomic-embed-text model provided by the local ollama service. They both resulted in output_tokens=0. The LLM I used is glm-4-0520.
Here is the detailed log (the bold part is where I'm confused):
20:57:59,749 graphrag.index.cli INFO Logging enabled at rag-glm\output\20240828-205759\reports\indexing-engine.log
20:57:59,753 graphrag.index.cli INFO Starting pipeline run for: 20240828-205759, dryrun=False
20:57:59,753 graphrag.index.cli INFO Using default configuration: {
"llm": {
"api_key": "==== REDACTED ====",
"type": "openai_chat",
"model": "glm-4-0520",
"max_tokens": 4000,
"temperature": 0.0,
"top_p": 1.0,
"n": 1,
"request_timeout": 180.0,
"api_base": "https://open.bigmodel.cn/api/paas/v4",
"api_version": null,
"proxy": null,
"cognitive_services_endpoint": null,
"deployment_name": null,
"model_supports_json": true,
"tokens_per_minute": 0,
"requests_per_minute": 0,
"max_retries": 10,
"max_retry_wait": 10.0,
"sleep_on_rate_limit_recommendation": true,
"concurrent_requests": 5
},
"parallelization": {
"stagger": 0.3,
"num_threads": 50
},
"async_mode": "threaded",
"root_dir": "rag-glm",
"reporting": {
"type": "file",
"base_dir": "output/${timestamp}/reports",
"storage_account_blob_url": null
},
"storage": {
"type": "file",
"base_dir": "output/${timestamp}/artifacts",
"storage_account_blob_url": null
},
"cache": {
"type": "file",
"base_dir": "cache",
"storage_account_blob_url": null
},
"input": {
"type": "file",
"file_type": "text",
"base_dir": "input",
"storage_account_blob_url": null,
"encoding": "utf-8",
"file_pattern": ".*\\.txt$",
"file_filter": null,
"source_column": null,
"timestamp_column": null,
"timestamp_format": null,
"text_column": "text",
"title_column": null,
"document_attribute_columns": []
},
"embed_graph": {
"enabled": false,
"num_walks": 10,
"walk_length": 40,
"window_size": 2,
"iterations": 3,
"random_seed": 597832,
"strategy": null
},
"embeddings": {
"llm": {
"api_key": "==== REDACTED ====",
"type": "openai_embedding",
"model": "embedding-3",
"max_tokens": 4000,
"temperature": 0,
"top_p": 1,
"n": 1,
"request_timeout": 180.0,
"api_base": "https://open.bigmodel.cn/api/paas/v4",
"api_version": null,
"proxy": null,
"cognitive_services_endpoint": null,
"deployment_name": null,
"model_supports_json": null,
"tokens_per_minute": 0,
"requests_per_minute": 0,
"max_retries": 10,
"max_retry_wait": 10.0,
"sleep_on_rate_limit_recommendation": true,
"concurrent_requests": 1
},
"parallelization": {
"stagger": 0.3,
"num_threads": 50
},
"async_mode": "threaded",
"batch_size": 16,
"batch_max_tokens": 8191,
"target": "required",
"skip": [],
"vector_store": null,
"strategy": null
},
"chunks": {
"size": 1200,
"overlap": 300,
"group_by_columns": [
"id"
],
"strategy": null,
"encoding_model": null
},
"snapshots": {
"graphml": false,
"raw_entities": false,
"top_level_nodes": false
},
"entity_extraction": {
"llm": {
"api_key": "==== REDACTED ====",
"type": "openai_chat",
"model": "glm-4-0520",
"max_tokens": 4000,
"temperature": 0.0,
"top_p": 1.0,
"n": 1,
"request_timeout": 180.0,
"api_base": "https://open.bigmodel.cn/api/paas/v4",
"api_version": null,
"proxy": null,
"cognitive_services_endpoint": null,
"deployment_name": null,
"model_supports_json": true,
"tokens_per_minute": 0,
"requests_per_minute": 0,
"max_retries": 10,
"max_retry_wait": 10.0,
"sleep_on_rate_limit_recommendation": true,
"concurrent_requests": 5
},
"parallelization": {
"stagger": 0.3,
"num_threads": 50
},
"async_mode": "threaded",
"prompt": "prompts/entity_extraction.txt",
"entity_types": [
"organization",
"person",
"geo",
"event"
],
"max_gleanings": 0,
"strategy": null,
"encoding_model": null
},
"summarize_descriptions": {
"llm": {
"api_key": "==== REDACTED ====",
"type": "openai_chat",
"model": "glm-4-0520",
"max_tokens": 4000,
"temperature": 0.0,
"top_p": 1.0,
"n": 1,
"request_timeout": 180.0,
"api_base": "https://open.bigmodel.cn/api/paas/v4",
"api_version": null,
"proxy": null,
"cognitive_services_endpoint": null,
"deployment_name": null,
"model_supports_json": true,
"tokens_per_minute": 0,
"requests_per_minute": 0,
"max_retries": 10,
"max_retry_wait": 10.0,
"sleep_on_rate_limit_recommendation": true,
"concurrent_requests": 5
},
"parallelization": {
"stagger": 0.3,
"num_threads": 50
},
"async_mode": "threaded",
"prompt": "prompts/summarize_descriptions.txt",
"max_length": 500,
"strategy": null
},
"community_reports": {
"llm": {
"api_key": "==== REDACTED ====",
"type": "openai_chat",
"model": "glm-4-0520",
"max_tokens": 4000,
"temperature": 0.0,
"top_p": 1.0,
"n": 1,
"request_timeout": 180.0,
"api_base": "https://open.bigmodel.cn/api/paas/v4",
"api_version": null,
"proxy": null,
"cognitive_services_endpoint": null,
"deployment_name": null,
"model_supports_json": true,
"tokens_per_minute": 0,
"requests_per_minute": 0,
"max_retries": 10,
"max_retry_wait": 10.0,
"sleep_on_rate_limit_recommendation": true,
"concurrent_requests": 5
},
"parallelization": {
"stagger": 0.3,
"num_threads": 50
},
"async_mode": "threaded",
"prompt": "prompts/community_report.txt",
"max_length": 2000,
"max_input_length": 8000,
"strategy": null
},
"claim_extraction": {
"llm": {
"api_key": "==== REDACTED ====",
"type": "openai_chat",
"model": "glm-4-0520",
"max_tokens": 4000,
"temperature": 0.0,
"top_p": 1.0,
"n": 1,
"request_timeout": 180.0,
"api_base": "https://open.bigmodel.cn/api/paas/v4",
"api_version": null,
"proxy": null,
"cognitive_services_endpoint": null,
"deployment_name": null,
"model_supports_json": true,
"tokens_per_minute": 0,
"requests_per_minute": 0,
"max_retries": 10,
"max_retry_wait": 10.0,
"sleep_on_rate_limit_recommendation": true,
"concurrent_requests": 5
},
"parallelization": {
"stagger": 0.3,
"num_threads": 50
},
"async_mode": "threaded",
"enabled": false,
"prompt": "prompts/claim_extraction.txt",
"description": "Any claims or facts that could be relevant to information discovery.",
"max_gleanings": 0,
"strategy": null,
"encoding_model": null
},
"cluster_graph": {
"max_cluster_size": 10,
"strategy": null
},
"umap": {
"enabled": false
},
"local_search": {
"text_unit_prop": 0.5,
"community_prop": 0.1,
"conversation_history_max_turns": 5,
"top_k_entities": 10,
"top_k_relationships": 10,
"temperature": 0.0,
"top_p": 1.0,
"n": 1,
"max_tokens": 12000,
"llm_max_tokens": 2000
},
"global_search": {
"temperature": 0.0,
"top_p": 1.0,
"n": 1,
"max_tokens": 12000,
"data_max_tokens": 12000,
"map_max_tokens": 1000,
"reduce_max_tokens": 2000,
"concurrency": 32
},
"encoding_model": "cl100k_base",
"skip_workflows": []
}
20:57:59,765 graphrag.index.create_pipeline_config INFO skipping workflows
20:57:59,765 graphrag.index.run INFO Running pipeline
20:57:59,765 graphrag.index.storage.file_pipeline_storage INFO Creating file storage at rag-glm\output\20240828-205759\artifacts
20:57:59,767 graphrag.index.input.load_input INFO loading input from root_dir=input
20:57:59,767 graphrag.index.input.load_input INFO using file storage for input
20:57:59,768 graphrag.index.storage.file_pipeline_storage INFO search rag-glm\input for files matching .*\.txt$
20:57:59,769 graphrag.index.input.text INFO found text files from input, found [(' ****.txt', {})]
20:57:59,771 graphrag.index.input.text INFO Found 1 files, loading 1
20:57:59,773 graphrag.index.workflows.load INFO Workflow Run Order: ['create_base_text_units', 'create_base_extracted_entities', 'create_summarized_entities', 'create_base_entity_graph', 'create_final_entities', 'create_final_nodes', 'create_final_communities', 'join_text_units_to_entity_ids', 'create_final_relationships', 'join_text_units_to_relationship_ids', 'create_final_community_reports', 'create_final_text_units', 'create_base_documents', 'create_final_documents']
20:57:59,773 graphrag.index.run INFO Final # of rows loaded: 1
20:57:59,922 graphrag.index.run INFO Running workflow: create_base_text_units...
20:57:59,922 graphrag.index.run INFO dependencies for create_base_text_units: []
20:57:59,927 datashaper.workflow.workflow INFO executing verb orderby
20:57:59,930 datashaper.workflow.workflow INFO executing verb zip
20:57:59,934 datashaper.workflow.workflow INFO executing verb aggregate_override
20:57:59,939 datashaper.workflow.workflow INFO executing verb chunk
20:58:00,129 datashaper.workflow.workflow INFO executing verb select
20:58:00,133 datashaper.workflow.workflow INFO executing verb unroll
20:58:00,138 datashaper.workflow.workflow INFO executing verb rename
20:58:00,143 datashaper.workflow.workflow INFO executing verb genid
20:58:00,148 datashaper.workflow.workflow INFO executing verb unzip
20:58:00,154 datashaper.workflow.workflow INFO executing verb copy
20:58:00,160 datashaper.workflow.workflow INFO executing verb filter
20:58:00,172 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table create_base_text_units.parquet
20:58:00,372 graphrag.index.run INFO Running workflow: create_base_extracted_entities...
20:58:00,373 graphrag.index.run INFO dependencies for create_base_extracted_entities: ['create_base_text_units']
20:58:00,373 graphrag.index.run INFO read table from storage: create_base_text_units.parquet
20:58:00,401 datashaper.workflow.workflow INFO executing verb entity_extract
20:58:00,406 graphrag.llm.openai.create_openai_client INFO Creating OpenAI client base_url=https://open.bigmodel.cn/api/paas/v4
20:58:01,148 graphrag.index.llm.load_llm INFO create TPM/RPM limiter for glm-4-0520: TPM=0, RPM=0
20:58:01,148 graphrag.index.llm.load_llm INFO create concurrency limiter for glm-4-0520: 5
20:58:09,613 httpx INFO HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions "HTTP/1.1 200 OK"
20:58:09,616 graphrag.llm.base.rate_limiting_llm INFO perf - llm.chat "Process" with 0 retries took 8.452999999979511. input_tokens=2936, output_tokens=117
20:58:21,412 httpx INFO HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions "HTTP/1.1 200 OK"
20:58:21,414 graphrag.llm.base.rate_limiting_llm INFO perf - llm.chat "Process" with 0 retries took 20.25. input_tokens=2936, output_tokens=564
20:58:27,334 httpx INFO HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions "HTTP/1.1 200 OK"
20:58:27,336 graphrag.llm.base.rate_limiting_llm INFO perf - llm.chat "Process" with 0 retries took 26.17099999997299. input_tokens=2408, output_tokens=757
20:58:27,362 datashaper.workflow.workflow INFO executing verb merge_graphs
20:58:27,367 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table create_base_extracted_entities.parquet
20:58:27,523 graphrag.index.run INFO Running workflow: create_summarized_entities...
20:58:27,523 graphrag.index.run INFO dependencies for create_summarized_entities: ['create_base_extracted_entities']
20:58:27,524 graphrag.index.run INFO read table from storage: create_base_extracted_entities.parquet
20:58:27,544 datashaper.workflow.workflow INFO executing verb summarize_descriptions
20:58:27,557 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table create_summarized_entities.parquet
20:58:27,719 graphrag.index.run INFO Running workflow: create_base_entity_graph...
20:58:27,719 graphrag.index.run INFO dependencies for create_base_entity_graph: ['create_summarized_entities']
20:58:27,720 graphrag.index.run INFO read table from storage: create_summarized_entities.parquet
20:58:27,736 datashaper.workflow.workflow INFO executing verb cluster_graph
20:58:27,755 datashaper.workflow.workflow INFO executing verb select
20:58:27,757 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table create_base_entity_graph.parquet
20:58:27,928 graphrag.index.run INFO Running workflow: create_final_entities...
20:58:27,929 graphrag.index.run INFO dependencies for create_final_entities: ['create_base_entity_graph']
20:58:27,929 graphrag.index.run INFO read table from storage: create_base_entity_graph.parquet
20:58:27,953 datashaper.workflow.workflow INFO executing verb unpack_graph
20:58:27,963 datashaper.workflow.workflow INFO executing verb rename
20:58:27,971 datashaper.workflow.workflow INFO executing verb select
20:58:27,980 datashaper.workflow.workflow INFO executing verb dedupe
20:58:27,989 datashaper.workflow.workflow INFO executing verb rename
20:58:27,998 datashaper.workflow.workflow INFO executing verb filter
20:58:28,21 datashaper.workflow.workflow INFO executing verb text_split
20:58:28,31 datashaper.workflow.workflow INFO executing verb drop
20:58:28,43 datashaper.workflow.workflow INFO executing verb merge
20:58:28,57 datashaper.workflow.workflow INFO executing verb text_embed
20:58:28,59 graphrag.llm.openai.create_openai_client INFO Creating OpenAI client base_url=https://open.bigmodel.cn/api/paas/v4
20:58:28,789 graphrag.index.llm.load_llm INFO create TPM/RPM limiter for embedding-3: TPM=0, RPM=0
20:58:28,789 graphrag.index.llm.load_llm INFO create concurrency limiter for embedding-3: 1
20:58:28,790 graphrag.index.verbs.text.embed.strategies.openai INFO embedding 17 inputs via 17 snippets using 2 batches. max_batch_size=16, max_tokens=8191
20:58:29,537 httpx INFO HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings "HTTP/1.1 200 OK"
20:58:29,912 graphrag.llm.base.rate_limiting_llm INFO perf - llm.embedding "Process" with 0 retries took 1.125. input_tokens=411, output_tokens=0
20:58:30,52 httpx INFO HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/embeddings "HTTP/1.1 200 OK"
20:58:30,66 graphrag.llm.base.rate_limiting_llm INFO perf - llm.embedding "Process" with 0 retries took 0.14100000000325963. input_tokens=6, output_tokens=0
20:58:30,88 datashaper.workflow.workflow INFO executing verb drop
20:58:30,98 datashaper.workflow.workflow INFO executing verb filter
20:58:30,114 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table create_final_entities.parquet
20:58:30,328 graphrag.index.run INFO Running workflow: create_final_nodes...
20:58:30,332 graphrag.index.run INFO dependencies for create_final_nodes: ['create_base_entity_graph']
20:58:30,332 graphrag.index.run INFO read table from storage: create_base_entity_graph.parquet
20:58:30,356 datashaper.workflow.workflow INFO executing verb layout_graph
20:58:30,374 datashaper.workflow.workflow INFO executing verb unpack_graph
20:58:30,387 datashaper.workflow.workflow INFO executing verb unpack_graph
20:58:30,400 datashaper.workflow.workflow INFO executing verb drop
20:58:30,413 datashaper.workflow.workflow INFO executing verb filter
20:58:30,440 datashaper.workflow.workflow INFO executing verb select
20:58:30,453 datashaper.workflow.workflow INFO executing verb rename
20:58:30,465 datashaper.workflow.workflow INFO executing verb join
20:58:30,484 datashaper.workflow.workflow INFO executing verb convert
20:58:30,531 datashaper.workflow.workflow INFO executing verb rename
20:58:30,534 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table create_final_nodes.parquet
20:58:30,723 graphrag.index.run INFO Running workflow: create_final_communities...
20:58:30,724 graphrag.index.run INFO dependencies for create_final_communities: ['create_base_entity_graph']
20:58:30,724 graphrag.index.run INFO read table from storage: create_base_entity_graph.parquet
20:58:30,754 datashaper.workflow.workflow INFO executing verb unpack_graph
20:58:30,771 datashaper.workflow.workflow INFO executing verb unpack_graph
20:58:30,787 datashaper.workflow.workflow INFO executing verb aggregate_override
20:58:30,804 datashaper.workflow.workflow INFO executing verb join
20:58:30,824 datashaper.workflow.workflow INFO executing verb join
20:58:30,845 datashaper.workflow.workflow INFO executing verb concat
20:58:30,860 datashaper.workflow.workflow INFO executing verb filter
20:58:30,897 datashaper.workflow.workflow INFO executing verb aggregate_override
20:58:30,914 datashaper.workflow.workflow INFO executing verb join
20:58:30,935 datashaper.workflow.workflow INFO executing verb filter
20:58:30,971 datashaper.workflow.workflow INFO executing verb fill
20:58:30,988 datashaper.workflow.workflow INFO executing verb merge
20:58:31,7 datashaper.workflow.workflow INFO executing verb copy
20:58:31,25 datashaper.workflow.workflow INFO executing verb select
20:58:31,27 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table create_final_communities.parquet
20:58:31,213 graphrag.index.run INFO Running workflow: join_text_units_to_entity_ids...
20:58:31,214 graphrag.index.run INFO dependencies for join_text_units_to_entity_ids: ['create_final_entities']
20:58:31,214 graphrag.index.run INFO read table from storage: create_final_entities.parquet
20:58:31,261 datashaper.workflow.workflow INFO executing verb select
20:58:31,280 datashaper.workflow.workflow INFO executing verb unroll
20:58:31,299 datashaper.workflow.workflow INFO executing verb aggregate_override
20:58:31,303 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table join_text_units_to_entity_ids.parquet
20:58:31,506 graphrag.index.run INFO Running workflow: create_final_relationships...
20:58:31,507 graphrag.index.run INFO dependencies for create_final_relationships: ['create_final_nodes', 'create_base_entity_graph']
20:58:31,508 graphrag.index.run INFO read table from storage: create_final_nodes.parquet
20:58:31,519 graphrag.index.run INFO read table from storage: create_base_entity_graph.parquet
20:58:31,557 datashaper.workflow.workflow INFO executing verb unpack_graph
20:58:31,577 datashaper.workflow.workflow INFO executing verb filter
20:58:31,618 datashaper.workflow.workflow INFO executing verb rename
20:58:31,637 datashaper.workflow.workflow INFO executing verb filter
20:58:31,679 datashaper.workflow.workflow INFO executing verb drop
20:58:31,700 datashaper.workflow.workflow INFO executing verb compute_edge_combined_degree
20:58:31,724 datashaper.workflow.workflow INFO executing verb convert
20:58:31,764 datashaper.workflow.workflow INFO executing verb convert
20:58:31,766 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table create_final_relationships.parquet
20:58:31,970 graphrag.index.run INFO Running workflow: join_text_units_to_relationship_ids...
20:58:31,971 graphrag.index.run INFO dependencies for join_text_units_to_relationship_ids: ['create_final_relationships']
20:58:31,971 graphrag.index.run INFO read table from storage: create_final_relationships.parquet
20:58:32,20 datashaper.workflow.workflow INFO executing verb select
20:58:32,41 datashaper.workflow.workflow INFO executing verb unroll
20:58:32,64 datashaper.workflow.workflow INFO executing verb aggregate_override
20:58:32,87 datashaper.workflow.workflow INFO executing verb select
20:58:32,89 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table join_text_units_to_relationship_ids.parquet
20:58:32,288 graphrag.index.run INFO Running workflow: create_final_community_reports...
20:58:32,288 graphrag.index.run INFO dependencies for create_final_community_reports: ['create_final_nodes', 'create_final_relationships']
20:58:32,289 graphrag.index.run INFO read table from storage: create_final_nodes.parquet
20:58:32,295 graphrag.index.run INFO read table from storage: create_final_relationships.parquet
20:58:32,356 datashaper.workflow.workflow INFO executing verb prepare_community_reports_nodes
20:58:32,380 datashaper.workflow.workflow INFO executing verb prepare_community_reports_edges
20:58:32,405 datashaper.workflow.workflow INFO executing verb restore_community_hierarchy
20:58:32,430 datashaper.workflow.workflow INFO executing verb prepare_community_reports
20:58:32,431 graphrag.index.verbs.graph.report.prepare_community_reports INFO Number of nodes at level=0 => 17
20:58:32,470 datashaper.workflow.workflow INFO executing verb create_community_reports
20:58:59,146 httpx INFO HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions "HTTP/1.1 200 OK"
20:58:59,148 graphrag.llm.base.rate_limiting_llm INFO perf - llm.chat "create_community_report" with 0 retries took 26.67200000002049. input_tokens=2341, output_tokens=694
20:58:59,195 datashaper.workflow.workflow INFO executing verb window
20:58:59,197 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table create_final_community_reports.parquet
20:58:59,424 graphrag.index.run INFO Running workflow: create_final_text_units...
20:58:59,435 graphrag.index.run INFO dependencies for create_final_text_units: ['join_text_units_to_relationship_ids', 'create_base_text_units', 'join_text_units_to_entity_ids']
20:58:59,436 graphrag.index.run INFO read table from storage: join_text_units_to_relationship_ids.parquet
20:58:59,445 graphrag.index.run INFO read table from storage: create_base_text_units.parquet
20:58:59,448 graphrag.index.run INFO read table from storage: join_text_units_to_entity_ids.parquet
20:58:59,499 datashaper.workflow.workflow INFO executing verb select
20:58:59,524 datashaper.workflow.workflow INFO executing verb rename
20:58:59,549 datashaper.workflow.workflow INFO executing verb join
20:58:59,580 datashaper.workflow.workflow INFO executing verb join
20:58:59,611 datashaper.workflow.workflow INFO executing verb aggregate_override
20:58:59,638 datashaper.workflow.workflow INFO executing verb select
20:58:59,640 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table create_final_text_units.parquet
20:58:59,849 graphrag.index.run INFO Running workflow: create_base_documents...
20:58:59,849 graphrag.index.run INFO dependencies for create_base_documents: ['create_final_text_units']
20:58:59,850 graphrag.index.run INFO read table from storage: create_final_text_units.parquet
20:58:59,911 datashaper.workflow.workflow INFO executing verb unroll
20:58:59,942 datashaper.workflow.workflow INFO executing verb select
20:58:59,969 datashaper.workflow.workflow INFO executing verb rename
20:58:59,995 datashaper.workflow.workflow INFO executing verb join
20:59:00,27 datashaper.workflow.workflow INFO executing verb aggregate_override
20:59:00,56 datashaper.workflow.workflow INFO executing verb join
20:59:00,87 datashaper.workflow.workflow INFO executing verb rename
20:59:00,114 datashaper.workflow.workflow INFO executing verb convert
20:59:00,146 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table create_base_documents.parquet
20:59:00,375 graphrag.index.run INFO Running workflow: create_final_documents...
20:59:00,376 graphrag.index.run INFO dependencies for create_final_documents: ['create_base_documents']
20:59:00,376 graphrag.index.run INFO read table from storage: create_base_documents.parquet
20:59:00,447 datashaper.workflow.workflow INFO executing verb rename
20:59:00,478 graphrag.index.emit.parquet_table_emitter INFO emitting parquet table create_final_documents.parquet
20:59:00,574 graphrag.index.cli INFO All workflows completed successfully.
Beta Was this translation helpful? Give feedback.
All reactions