236 changes: 150 additions & 86 deletions kg_microbe/transform_utils/bacdive/bacdive.py
@@ -105,7 +105,6 @@
MUREIN,
NAME_COLUMN,
NCBI_CATEGORY,
NCBI_TO_ENZYME_EDGE,
NCBI_TO_MEDIUM_EDGE,
NCBI_TO_METABOLITE_PRODUCTION_EDGE,
NCBI_TO_METABOLITE_UTILIZATION_EDGE,
@@ -158,6 +157,7 @@ def __init__(
source_name = "BacDive"
super().__init__(source_name, input_dir, output_dir)
self.ncbi_impl = get_adapter("sqlite:obo:ncbitaxon")
self.ncbitaxon_info = {} # To accumulate data for each NCBITaxon

def _flatten_to_dicts(self, obj):
if isinstance(obj, dict):
@@ -594,6 +594,85 @@ def run(self, data_file: Union[Optional[Path], Optional[str]] = None, show_statu
if not all(item is None for item in phys_and_meta_data[1:]):
writer_2.writerow(phys_and_meta_data)

if ncbitaxon_id:
if ncbitaxon_id not in self.ncbitaxon_info:
Copilot AI (Aug 13, 2025):

This data accumulation logic is duplicated later in the code (lines 604-656 and 767-814). The duplicate code should be consolidated into a single location or extracted into a helper method to improve maintainability.
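A minimal sketch of what such a helper could look like (the name `_accumulate_taxon_info` is hypothetical and not part of this PR); both call sites could then delegate to it:

    def _accumulate_taxon_info(self, ncbitaxon_id, medium_id=None, assay_items=None):
        """Accumulate media and assay tuples for a single NCBITaxon ID (illustrative only)."""
        entry = self.ncbitaxon_info.setdefault(
            ncbitaxon_id, {"media": set(), "assays": set()}
        )
        if medium_id:
            entry["media"].add(medium_id)
        for item in assay_items or []:
            entry["assays"].add(item)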

self.ncbitaxon_info[ncbitaxon_id] = {
"media": set(),
"assays": set(),
# Add other fields as necessary
}
if medium_id:
self.ncbitaxon_info[ncbitaxon_id]["media"].add(medium_id)
if phys_and_metabolism_metabolite_utilization:
positive_chebi_activity = None
if isinstance(phys_and_metabolism_metabolite_utilization, list):
positive_chebi_activity = []
for metabolite in phys_and_metabolism_metabolite_utilization:
if (
METABOLITE_CHEBI_KEY in metabolite
and metabolite.get(UTILIZATION_ACTIVITY) == PLUS_SIGN
):
chebi_key = (
f"{CHEBI_PREFIX}{metabolite[METABOLITE_CHEBI_KEY]}"
)
positive_chebi_activity.append(
(
chebi_key,
metabolite[METABOLITE_KEY],
metabolite.get(UTILIZATION_TYPE_TESTED),
)
)

elif isinstance(phys_and_metabolism_metabolite_utilization, dict):
utilization_activity = (
phys_and_metabolism_metabolite_utilization.get(
UTILIZATION_ACTIVITY
)
)
if (
utilization_activity == PLUS_SIGN
and phys_and_metabolism_metabolite_utilization.get(
METABOLITE_CHEBI_KEY
)
):
chebi_key = (
f"{CHEBI_PREFIX}"
f"{phys_and_metabolism_metabolite_utilization.get(METABOLITE_CHEBI_KEY)}"
)
metabolite_value = (
phys_and_metabolism_metabolite_utilization.get(
METABOLITE_KEY
)
)
positive_chebi_activity = [(chebi_key, metabolite_value)]

else:
print(
f"{phys_and_metabolism_metabolite_utilization} data not recorded."
)
if positive_chebi_activity:
for item in positive_chebi_activity:
self.ncbitaxon_info[ncbitaxon_id]["assays"].add(item)
# Repeat for other data types like assays, enzyme activities, etc.

# Uncomment and handle isolation_source code
if isolation and isinstance(isolation, str):
isolation_cleaned = isolation.replace(" ", "_").replace("-", "_")
isolation_source_curie = ISOLATION_SOURCE_COLUMN + isolation_cleaned
node_writer.writerow(
[isolation_source_curie, ISOLATION_SOURCE_COLUMN, isolation]
+ [None] * (len(self.node_header) - 3)
)
edge_writer.writerow(
[
ncbitaxon_id,
NCBI_TO_ISOLATION_SOURCE_EDGE,
isolation_source_curie,
LOCATION_OF,
BACDIVE_PREFIX + key,
]
)

if ncbitaxon_id and medium_id:
# Combine list creation and extension
nodes_data_to_write = [
Expand Down Expand Up @@ -667,7 +746,7 @@ def run(self, data_file: Union[Optional[Path], Optional[str]] = None, show_statu
postive_activity_enzymes = None
if isinstance(phys_and_metabolism_enzymes, list):
postive_activity_enzymes = [
{f"{EC_PREFIX}{enzyme.get(EC_KEY)}": f"{enzyme.get('value')}"}
(f"{EC_PREFIX}{enzyme.get(EC_KEY)}", f"{enzyme.get('value')}")
for enzyme in phys_and_metabolism_enzymes
if enzyme.get(ACTIVITY_KEY) == PLUS_SIGN and enzyme.get(EC_KEY)
]
@@ -676,64 +755,30 @@ def run(self, data_file: Union[Optional[Path], Optional[str]] = None, show_statu
if activity == PLUS_SIGN and phys_and_metabolism_enzymes.get(EC_KEY):
ec_value = f"{EC_PREFIX}{phys_and_metabolism_enzymes.get(EC_KEY)}"
value = phys_and_metabolism_enzymes.get("value")
postive_activity_enzymes = [{ec_value: value}]
postive_activity_enzymes = [(ec_value, value)]

else:
print(f"{phys_and_metabolism_enzymes} data not recorded.")
if postive_activity_enzymes:
enzyme_nodes_to_write = [
[k, PHENOTYPIC_CATEGORY, v] + [None] * (len(self.node_header) - 3)
for inner_dict in postive_activity_enzymes
for k, v in inner_dict.items()
]
enzyme_nodes_to_write.append(
[ncbitaxon_id, NCBI_CATEGORY, ncbi_label]
+ [None] * (len(self.node_header) - 3)
)
node_writer.writerows(enzyme_nodes_to_write)

for inner_dict in postive_activity_enzymes:
for k, _ in inner_dict.items():
enzyme_edges_to_write = [
ncbitaxon_id,
NCBI_TO_ENZYME_EDGE,
k,
HAS_PHENOTYPE,
BACDIVE_PREFIX + key,
]
edge_writer.writerow(enzyme_edges_to_write)
for item in postive_activity_enzymes:
self.ncbitaxon_info[ncbitaxon_id]["assays"].add(item)

# Replace this section inside the loop processing each strain:
if phys_and_metabolism_metabolite_utilization:
positive_chebi_activity = None
positive_chebi_activity = []
if isinstance(phys_and_metabolism_metabolite_utilization, list):
positive_chebi_activity = []
# no_chebi_activity = defaultdict(list)
for metabolite in phys_and_metabolism_metabolite_utilization:
# ! NO CURIE associated to metabolite.
# if (
# METABOLITE_CHEBI_KEY not in metabolite
# and metabolite.get(UTILIZATION_ACTIVITY) == PLUS_SIGN
# ):
# no_chebi_activity.setdefault("NO_CURIE", []).append(
# [
# metabolite[METABOLITE_KEY],
# metabolite.get(UTILIZATION_TYPE_TESTED),
# ]
# )
# positive_chebi_activity.append(no_chebi_activity)

if (
METABOLITE_CHEBI_KEY in metabolite
and metabolite.get(UTILIZATION_ACTIVITY) == PLUS_SIGN
):
chebi_key = f"{CHEBI_PREFIX}{metabolite[METABOLITE_CHEBI_KEY]}"
positive_chebi_activity.append(
{
chebi_key: [
metabolite[METABOLITE_KEY],
metabolite.get(UTILIZATION_TYPE_TESTED),
]
}
(
chebi_key,
metabolite[METABOLITE_KEY],
metabolite.get(UTILIZATION_TYPE_TESTED),
)
)

elif isinstance(phys_and_metabolism_metabolite_utilization, dict):
@@ -746,58 +791,59 @@ def run(self, data_file: Union[Optional[Path], Optional[str]] = None, show_statu
METABOLITE_CHEBI_KEY
)
):
chebi_key = (
f"{CHEBI_PREFIX}"
f"{phys_and_metabolism_metabolite_utilization.get(METABOLITE_CHEBI_KEY)}"
)
chebi_key = f"{CHEBI_PREFIX}{phys_and_metabolism_metabolite_utilization.get(METABOLITE_CHEBI_KEY)}"
metabolite_value = phys_and_metabolism_metabolite_utilization.get(
METABOLITE_KEY
)
positive_chebi_activity = [{chebi_key: metabolite_value}]
positive_chebi_activity = [
(
chebi_key,
metabolite_value,
phys_and_metabolism_metabolite_utilization.get(
UTILIZATION_TYPE_TESTED
),
)
]

else:
print(
f"{phys_and_metabolism_metabolite_utilization} data not recorded."
)
if positive_chebi_activity:
for item in positive_chebi_activity:
self.ncbitaxon_info[ncbitaxon_id]["assays"].add(item)

# Also modify the corresponding code for writing nodes and edges at the end of processing
if positive_chebi_activity:
meta_util_nodes_to_write = [
[k, METABOLITE_CATEGORY, v[0]]
+ [None] * (len(self.node_header) - 3)
for inner_dict in positive_chebi_activity
for k, v in inner_dict.items()
[k, METABOLITE_CATEGORY, v] + [None] * (len(self.node_header) - 3)
for k, v, _ in positive_chebi_activity
]
node_writer.writerows(meta_util_nodes_to_write)

for inner_dict in positive_chebi_activity:
for k, _ in inner_dict.items():
meta_util_edges_to_write = [
ncbitaxon_id,
NCBI_TO_METABOLITE_UTILIZATION_EDGE,
k,
HAS_PARTICIPANT,
BACDIVE_PREFIX + key,
]
edge_writer.writerow(meta_util_edges_to_write)
for k, _, _ in positive_chebi_activity:
Copilot AI (Aug 13, 2025):

This line is incorrectly indented and creates a syntax error. It should be aligned with the previous if statement or properly nested within it.
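For illustration, one way to keep the edge-writing loop nested under the preceding check (the indentation shown is an assumption about the intended structure, since the rendered diff strips leading whitespace):

    if positive_chebi_activity:
        meta_util_nodes_to_write = [
            [k, METABOLITE_CATEGORY, v] + [None] * (len(self.node_header) - 3)
            for k, v, _ in positive_chebi_activity
        ]
        node_writer.writerows(meta_util_nodes_to_write)

        # Edge loop kept inside the same conditional block
        for k, _, _ in positive_chebi_activity:
            edge_writer.writerow(
                [
                    ncbitaxon_id,
                    NCBI_TO_METABOLITE_UTILIZATION_EDGE,
                    k,
                    HAS_PARTICIPANT,
                    BACDIVE_PREFIX + key,
                ]
            )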

meta_util_edges_to_write = [
ncbitaxon_id,
NCBI_TO_METABOLITE_UTILIZATION_EDGE,
k,
HAS_PARTICIPANT,
BACDIVE_PREFIX + key,
]
edge_writer.writerow(meta_util_edges_to_write)

if phys_and_metabolism_metabolite_production:
positive_chebi_production = None
if isinstance(phys_and_metabolism_metabolite_production, list):
positive_chebi_production = []
# no_chebi_production = defaultdict(list)
for metabolite in phys_and_metabolism_metabolite_production:
if (
METABOLITE_CHEBI_KEY in metabolite
and metabolite.get(PRODUCTION_KEY) == "yes"
):
chebi_key = f"{CHEBI_PREFIX}{metabolite[METABOLITE_CHEBI_KEY]}"
positive_chebi_production.append(
{chebi_key: metabolite[METABOLITE_KEY]}
(chebi_key, metabolite[METABOLITE_KEY])
)
# ! NO CURIE associated to metabolite.
# if (
# METABOLITE_CHEBI_KEY not in metabolite and metabolite.get(PRODUCTION_KEY) == "yes"
# ):
# no_chebi_production.setdefault("NO_CURIE", []).append(metabolite[METABOLITE_KEY])
# positive_chebi_production.append(no_chebi_production)

elif isinstance(phys_and_metabolism_metabolite_production, dict):
production = phys_and_metabolism_metabolite_production.get(
@@ -816,29 +862,27 @@ def run(self, data_file: Union[Optional[Path], Optional[str]] = None, show_statu
metabolite_value = phys_and_metabolism_metabolite_production.get(
METABOLITE_KEY
)
positive_chebi_production = [{chebi_key: metabolite_value}]
positive_chebi_production = [(chebi_key, metabolite_value)]

else:
print(f"{phys_and_metabolism_metabolite_production} data not recorded.")

if positive_chebi_production:
metabolite_production_nodes_to_write = [
[k, METABOLITE_CATEGORY, v] + [None] * (len(self.node_header) - 3)
for inner_dict in positive_chebi_production
for k, v in inner_dict.items()
for k, v in positive_chebi_production
]
node_writer.writerows(metabolite_production_nodes_to_write)

for inner_dict in positive_chebi_production:
for k, _ in inner_dict.items():
metabolite_production_edges_to_write = [
ncbitaxon_id,
NCBI_TO_METABOLITE_PRODUCTION_EDGE,
k,
BIOLOGICAL_PROCESS,
BACDIVE_PREFIX + key,
]
edge_writer.writerow(metabolite_production_edges_to_write)
for k, _ in positive_chebi_production:
metabolite_production_edges_to_write = [
ncbitaxon_id,
NCBI_TO_METABOLITE_PRODUCTION_EDGE,
k,
BIOLOGICAL_PROCESS,
BACDIVE_PREFIX + key,
]
edge_writer.writerow(metabolite_production_edges_to_write)

if phys_and_metabolism_API:
values = self._flatten_to_dicts(list(phys_and_metabolism_API.values()))
@@ -881,5 +925,25 @@ def run(self, data_file: Union[Optional[Path], Optional[str]] = None, show_statu
# After each iteration, call the update method to advance the progress bar.
progress.update()

# At the end of the `run` method, inside the loop writing accumulated data for each NCBITAXON
for ncbitaxon_id, info in self.ncbitaxon_info.items():
for medium_id in info["media"]:
edge_writer.writerow(
[ncbitaxon_id, NCBI_TO_MEDIUM_EDGE, medium_id, IS_GROWN_IN, ""]
)
for assay_id in info["assays"]:
# Unpacking the assay information stored as tuples
assay_curie, assay_value, utilization_type = assay_id
Copilot AI (Aug 13, 2025):

The tuple unpacking assumes all assay items have exactly 3 elements, but enzyme activities are stored as 2-element tuples (lines 749, 758) while metabolite utilizations are stored as 3-element tuples. This will cause a ValueError when processing enzyme data.

Suggested change
assay_curie, assay_value, utilization_type = assay_id
# Unpacking the assay information stored as tuples (handle both 2- and 3-element tuples)
if len(assay_id) == 3:
assay_curie, assay_value, utilization_type = assay_id
elif len(assay_id) == 2:
assay_curie, assay_value = assay_id
utilization_type = None
else:
raise ValueError(f"Unexpected assay tuple length: {len(assay_id)} for {assay_id}")

edge_writer.writerow(
[
ncbitaxon_id,
NCBI_TO_METABOLITE_UTILIZATION_EDGE,
Copilot AI (Aug 13, 2025):

All assay data is being written with NCBI_TO_METABOLITE_UTILIZATION_EDGE edge type, but enzyme activities should use a different edge type since they represent different biological relationships.

Suggested change
NCBI_TO_METABOLITE_UTILIZATION_EDGE,
# Select edge type based on utilization_type
if utilization_type == "enzyme_activity":
edge_type = ENZYME_TO_ASSAY_EDGE
else:
edge_type = NCBI_TO_METABOLITE_UTILIZATION_EDGE
edge_writer.writerow(
[
ncbitaxon_id,
edge_type,

assay_curie,
HAS_PARTICIPANT,
"",
]
)
# Repeat for other accumulated data

drop_duplicates(self.output_node_file, consolidation_columns=[ID_COLUMN, NAME_COLUMN])
drop_duplicates(self.output_edge_file, consolidation_columns=[OBJECT_ID_COLUMN])