Skip to content

Commit 47789ea

Browse files
committed
Ingest donation and shifts restore
API for 360 returns donations and shifts
1 parent a7f6218 commit 47789ea

File tree

5 files changed

+37
-80
lines changed

5 files changed

+37
-80
lines changed

src/server/api/common_api.py

Lines changed: 14 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -37,24 +37,21 @@ def get_360(matching_id):
3737
query_result = connection.execute(query, matching_id=matching_id)
3838

3939
result["contact_details"] = [dict(row) for row in query_result]
40-
result["shifts"] = []
41-
result["donations"] = []
42-
result["adoptions"] = []
4340

44-
# todo: complete retrieving details for response
45-
for row in query_result:
41+
for row in result["contact_details"]:
42+
if row["source_type"] == "salesforcecontacts":
43+
donations_query = text("select * from salesforcedonations where contact_id like :salesforcecontacts_id")
44+
query_result = connection.execute(donations_query, salesforcecontacts_id=row["source_id"] + "%")
45+
salesforcedonations_results = [dict(row) for row in query_result]
46+
result['donations'] = salesforcedonations_results
47+
4648
if row["source_type"] == "volgistics":
47-
query = text("select * from volgisticsshifts where number = :volgistics_id")
48-
query_result = connection.execute(query, volgistics_id=row["source_id"])
49-
result["shifts"] += [dict(row) for row in query_result]
50-
51-
'''
52-
query = text("select * from salesforcedonations where contact_id = :salesforcecontacts_id")
53-
query_result = connection.execute(query, salesforcecontacts_id=salesforcecontacts_id)
54-
salesforcedonations_results = [dict(row) for row in query_result]
55-
56-
if salesforcedonations_results:
57-
result['salesforcedonations'] = salesforcedonations_results
58-
'''
49+
shifts_query = text("select * from volgisticsshifts where number = :volgistics_id")
50+
query_result = connection.execute(shifts_query, volgistics_id=row["source_id"])
51+
volgisticsshifts_results = [dict(row) for row in query_result]
52+
result['shifts'] = volgisticsshifts_results
53+
54+
#todo: add adoptions
55+
5956

6057
return jsonify({'result': result})

src/server/datasource_manager.py

Lines changed: 12 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -94,16 +94,16 @@ def volgistics_address(index, street):
9494
"state": "mailing_state_province",
9595
"zip": "mailing_zip_postal_code",
9696
"others": {
97-
"additional_sources": [{
98-
"salesforcedonations": {
99-
'should_drop_first_column': True
100-
}
101-
}
102-
],
10397
"should_drop_first_column": True
10498
}
10599

106100
},
101+
"salesforcedonations": {
102+
"parent": "salesforcecontacts",
103+
"others": {
104+
"should_drop_first_column": True
105+
}
106+
},
107107
"shelterluvpeople": {
108108
"source_id": "id",
109109
"first_name": "firstname",
@@ -131,19 +131,13 @@ def volgistics_address(index, street):
131131
"state": "state",
132132
"zip": "zip",
133133
"others": {
134-
"additional_sources": [{
135-
"volgisticsshifts": {
136-
'should_drop_first_column': True
137-
}
138-
}
139-
],
140134
"should_drop_first_column": True
141135
}
142-
136+
},
137+
"volgisticsshifts": {
138+
"parent": "volgistics",
139+
"others": {
140+
"should_drop_first_column": True
141+
}
143142
}
144143
}
145-
146-
147-
148-
149-

src/server/models.py

Lines changed: 1 addition & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import datetime
22

3-
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
3+
from sqlalchemy import Column, Integer, String, DateTime
44
from sqlalchemy.dialects.postgresql import JSONB
55
from sqlalchemy.ext.declarative import declarative_base
66

@@ -89,42 +89,4 @@ class Volgistics(Base):
8989
json = Column(JSONB)
9090

9191

92-
class SalesForceDonations(Base):
93-
__tablename__ = "salesforcedonations"
9492

95-
_id = Column(Integer, primary_key=True)
96-
recurring_donor = Column(String)
97-
opportunity_owner = Column(String)
98-
account_id = Column(String)
99-
account_name = Column(String)
100-
opportunity_id = Column(String)
101-
opportunity_name = Column(String)
102-
stage = Column(String)
103-
fiscal_period = Column(String)
104-
amount = Column(String)
105-
probability = Column(String)
106-
age = Column(String)
107-
close_date = Column(String)
108-
created_date = Column(String)
109-
next_step = Column(String)
110-
lead_source = Column(String)
111-
type = Column(String)
112-
source = Column(String)
113-
contact_id = Column(String)
114-
primary_campaign_source = Column(String)
115-
116-
117-
class Volgistics_Shifts(Base):
118-
__tablename__ = 'volgisticsshifts'
119-
120-
_id = Column(Integer, primary_key=True)
121-
number = Column(String)
122-
site = Column(String)
123-
place = Column(String)
124-
assignment = Column(String)
125-
role = Column(String)
126-
from_date = Column('from', DateTime)
127-
to = Column(DateTime)
128-
spare_date = Column(String)
129-
spare_chechbox = Column(String)
130-
coordinator = Column(String)

src/server/pipeline/clean_and_load_data.py

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
from config import CURRENT_SOURCE_FILES_PATH
1010

1111

12-
def start(pdp_contacts_df, file_path_list):
12+
def start(connection, pdp_contacts_df, file_path_list):
1313
result = pd.DataFrame(columns=pdp_contacts_df.columns)
1414

1515
for uploaded_file in file_path_list:
@@ -26,12 +26,16 @@ def start(pdp_contacts_df, file_path_list):
2626
normalization_without_others = copy.deepcopy(SOURCE_NORMALIZATION_MAPPING[table_name])
2727
normalization_without_others.pop("others") # copy avoids modifying the imported mapping
2828

29-
source_df = create_normalized_df(df, normalization_without_others, table_name)
29+
if "parent" not in normalization_without_others:
30+
source_df = create_normalized_df(df, normalization_without_others, table_name)
31+
32+
if result.empty:
33+
result = source_df
34+
else:
35+
result = pd.concat([result, source_df])
3036

31-
if result.empty:
32-
result = source_df
3337
else:
34-
result = pd.concat([result, source_df])
38+
df.to_sql(table_name, connection, index=False, if_exists='append')
3539

3640
current_app.logger.info(' - Finish load_paws_data on: ' + uploaded_file)
3741

src/server/pipeline/flow_script.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ def start_flow():
2323
# Clean the input data and normalize
2424
# input - existing files in path
2525
# output - normalized object of all entries
26-
normalized_data = clean_and_load_data.start(pdp_contacts_df, file_path_list)
26+
normalized_data = clean_and_load_data.start(connection, pdp_contacts_df, file_path_list)
2727

2828
# Standardize column data types
2929
# If additional inconsistencies are encountered, may need to enforce the schema of

0 commit comments

Comments (0)