Skip to content

Commit b88d784

Browse files
author
marwan37
committed
uncomment code and cleanup approve step
1 parent 9099a4f commit b88d784

File tree

3 files changed

+164
-73
lines changed

3 files changed

+164
-73
lines changed

credit-scorer/src/constants/config.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -69,7 +69,7 @@ def get_volume_metadata(cls) -> Dict[str, str]:
6969
class SlackConfig:
7070
"""Slack configuration parameters."""
7171

72-
CHANNEL_ID = "C03ES6D8X0X"
72+
CHANNEL_ID = os.getenv("SLACK_CHANNEL_ID")
7373
BOT_TOKEN = os.getenv("SLACK_BOT_TOKEN")
7474

7575

credit-scorer/src/steps/deployment/approve.py

Lines changed: 107 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -15,6 +15,7 @@
1515
# limitations under the License.
1616
#
1717

18+
import hashlib
1819
import time
1920
from datetime import datetime
2021
from typing import Annotated, Any, Dict, Tuple
@@ -70,6 +71,7 @@ def approve_deployment(
7071

7172
accuracy = metrics.get("accuracy", 0)
7273
f1_score = metrics.get("f1_score", 0)
74+
auc_roc = metrics.get("auc_roc", 0)
7375
risk_score = risk_scores.get("overall", 1)
7476
max_disparity = (
7577
max(
@@ -82,6 +84,9 @@ def approve_deployment(
8284
else 0
8385
)
8486

87+
# Generate model checksum for identification
88+
model_id = run_id[:8] # Use first 8 chars of run ID as model ID
89+
8590
# Approval criteria checks
8691
perf_ok = accuracy >= approval_thresholds.get("accuracy", 0.7)
8792
fairness_ok = not bias_flag and max_disparity <= approval_thresholds.get(
@@ -102,7 +107,7 @@ def approve_deployment(
102107
"type": "header",
103108
"text": {
104109
"type": "plain_text",
105-
"text": "🤖 Model Deployment Approval",
110+
"text": "💶 CreditScorer Model Approval",
106111
"emoji": True,
107112
},
108113
},
@@ -112,9 +117,9 @@ def approve_deployment(
112117
"type": "section",
113118
"fields": [
114119
{"type": "mrkdwn", "text": f"*Pipeline:* {pipeline_name}"},
115-
{"type": "mrkdwn", "text": f"*Step:* {step_name}"},
120+
{"type": "mrkdwn", "text": f"*Model ID:* {model_id}"},
116121
{"type": "mrkdwn", "text": f"*Stack:* {stack_name}"},
117-
{"type": "mrkdwn", "text": f"*Run ID:* {run_id[:8]}..."},
122+
{"type": "mrkdwn", "text": f"*Run ID:* {run_id[:12]}..."},
118123
],
119124
},
120125
{"type": "divider"},
@@ -123,19 +128,32 @@ def approve_deployment(
123128
"fields": [
124129
{
125130
"type": "mrkdwn",
126-
"text": f"*Performance:* {'✅' if perf_ok else '❌'} Acc={accuracy:.3f}",
131+
"text": f"{'✅' if perf_ok else '❌'} *Accuracy=* {accuracy:.3f}",
127132
},
128133
{
129134
"type": "mrkdwn",
130-
"text": f"*Fairness:* {'✅' if fairness_ok else '❌'} Bias={'No' if fairness_ok else f'{max_disparity:.3f}'}",
135+
"text": f"{'✅' if risk_ok else '❌'} *Risk Score=* {risk_score:.3f}",
131136
},
132137
{
133138
"type": "mrkdwn",
134-
"text": f"*Risk:* {'✅' if risk_ok else '❌'} Score={risk_score:.3f}",
139+
"text": f"{'✅' if fairness_ok else '❌'} *F1=* {f1_score:.3f}",
135140
},
136141
{
137142
"type": "mrkdwn",
138-
"text": f"*F1:* {f1_score:.3f} *Attributes:* {len(fairness_metrics)}",
143+
"text": f"*AUC=* {auc_roc:.3f}",
144+
},
145+
],
146+
},
147+
{
148+
"type": "section",
149+
"fields": [
150+
{
151+
"type": "mrkdwn",
152+
"text": f"*Bias Check:* {'✅ No bias detected' if fairness_ok else f'❌ Max disparity: {max_disparity:.3f}'}",
153+
},
154+
{
155+
"type": "mrkdwn",
156+
"text": f"*Protected Attributes:* {len(fairness_metrics)} checked",
139157
},
140158
],
141159
},
@@ -165,6 +183,7 @@ def approve_deployment(
165183
if alerter:
166184
try:
167185
alerter.post(message=header_text, params=params)
186+
print("✅ Slack notification sent successfully")
168187
except Exception as e:
169188
print(f"⚠️ Slack notification failed: {e}")
170189
print("Continuing without Slack notification...")
@@ -179,8 +198,10 @@ def approve_deployment(
179198
if alerter:
180199
try:
181200
# Enhanced question with pipeline context
182-
question = f":question: Override deployment for pipeline '{pipeline_name}'? Reply with 'yes' or 'no'"
201+
question = f"Should CreditScorer deploy model {model_id} from pipeline '{pipeline_name}'? Reply with 'yes' or 'no'"
202+
print("📱 Asking approval question in Slack...")
183203
response = alerter.ask(question)
204+
print(f"📱 Received Slack response: {response}")
184205

185206
# Handle various response formats
186207
if isinstance(response, str):
@@ -192,17 +213,29 @@ def approve_deployment(
192213
approved = override
193214
approver = "human_via_slack"
194215
rationale = (
195-
f"Human override via Slack for pipeline '{pipeline_name}'"
216+
f"Human override via Slack for model {model_id}"
196217
if override
197-
else f"Rejected via Slack for pipeline '{pipeline_name}'"
218+
else f"Rejected via Slack for model {model_id}"
198219
)
220+
print(
221+
f"📱 Slack approval result: {'APPROVED' if override else 'REJECTED'}"
222+
)
223+
199224
except Exception as e:
200-
print(f"⚠️ Slack interaction failed: {e}")
225+
print(f"❌ Slack interaction failed: {e}")
226+
if "not_in_channel" in str(e):
227+
print(
228+
"💡 Fix: Add your bot to the Slack channel using: /invite @your-bot-name"
229+
)
230+
elif "not_allowed_token_type" in str(e):
231+
print(
232+
"💡 Fix: Use a Bot User OAuth Token (starts with xoxb-)"
233+
)
201234
print("❌ Cannot get human approval - deployment blocked")
202235
approved, approver, rationale = (
203236
False,
204237
"system",
205-
"Slack integration failed - no human oversight possible",
238+
f"Slack integration failed: {str(e)}",
206239
)
207240
else:
208241
approved, approver, rationale = (
@@ -211,6 +244,65 @@ def approve_deployment(
211244
"No alerter configured - blocked",
212245
)
213246

247+
# Send confirmation message if approved
248+
if approved and alerter:
249+
try:
250+
confirmation_blocks = [
251+
{
252+
"type": "header",
253+
"text": {
254+
"type": "plain_text",
255+
"text": "💶 CreditScorer Deployment Confirmed",
256+
"emoji": True,
257+
},
258+
},
259+
{
260+
"type": "section",
261+
"text": {
262+
"type": "mrkdwn",
263+
"text": f"✅ *Model {model_id} has been approved for deployment*",
264+
},
265+
},
266+
{"type": "divider"},
267+
{
268+
"type": "section",
269+
"fields": [
270+
{"type": "mrkdwn", "text": f"*Model ID:* {model_id}"},
271+
{
272+
"type": "mrkdwn",
273+
"text": f"*Pipeline:* {pipeline_name}",
274+
},
275+
{
276+
"type": "mrkdwn",
277+
"text": f"*Approved by:* {approver}",
278+
},
279+
{
280+
"type": "mrkdwn",
281+
"text": f"*Accuracy:* {accuracy:.3f}",
282+
},
283+
],
284+
},
285+
{
286+
"type": "section",
287+
"text": {
288+
"type": "mrkdwn",
289+
"text": f"🚀 *Deployment will proceed automatically*\n_Model checksum: {run_id}_",
290+
},
291+
},
292+
]
293+
294+
confirmation_params = SlackAlerterParameters(
295+
blocks=confirmation_blocks
296+
)
297+
alerter.post(
298+
"✅ CreditScorer model approved for deployment",
299+
params=confirmation_params,
300+
)
301+
print("📱 Deployment confirmation sent to Slack")
302+
303+
except Exception as e:
304+
print(f"⚠️ Could not send confirmation message: {e}")
305+
214306
if not approved:
215307
raise RuntimeError(f"🚫 Deployment rejected: {rationale}")
216308

@@ -222,6 +314,7 @@ def approve_deployment(
222314
"approved": approved,
223315
"approver": approver,
224316
"rationale": rationale,
317+
"model_id": model_id,
225318
"decision_mode": "automated" if all_ok else "slack_approval",
226319
"criteria_met": all_ok,
227320
"failed_criteria": [
@@ -237,7 +330,7 @@ def approve_deployment(
237330
"key_metrics": {
238331
"accuracy": accuracy,
239332
"f1_score": f1_score,
240-
"auc_roc": metrics.get("auc_roc"),
333+
"auc_roc": auc_roc,
241334
"normalized_cost": metrics.get("normalized_cost"),
242335
"risk_score": risk_score,
243336
},
@@ -254,6 +347,6 @@ def approve_deployment(
254347
}
255348

256349
print(f"✅ APPROVED by {approver}: {rationale}")
257-
print(f"📋 Pipeline Context: {pipeline_name} -> {step_name}")
350+
print(f"📋 Model {model_id} from pipeline: {pipeline_name}")
258351

259352
return approved, approval_record

credit-scorer/src/steps/training/evaluate.py

Lines changed: 56 additions & 58 deletions
Original file line number | Diff line number | Diff line change
@@ -30,17 +30,15 @@
3030
)
3131
from zenml import get_step_context, log_metadata, step
3232
from zenml.client import Client
33-
34-
# from zenml.integrations.slack.alerters.slack_alerter import (
35-
# SlackAlerterParameters,
36-
# SlackAlerterPayload,
37-
# )
33+
from zenml.integrations.slack.alerters.slack_alerter import (
34+
SlackAlerterParameters,
35+
SlackAlerterPayload,
36+
)
3837
from zenml.logger import get_logger
3938
from zenml.types import HTMLString
4039

4140
from src.constants import Artifacts as A
42-
43-
# from src.constants.config import SlackConfig as SC
41+
from src.constants.config import SlackConfig as SC
4442
from src.utils import (
4543
analyze_fairness,
4644
generate_eval_visualization,
@@ -50,7 +48,7 @@
5048

5149

5250
@step(
53-
# settings={"alerter": {"slack_channel_id": SC.CHANNEL_ID}},
51+
settings={"alerter": {"slack_channel_id": SC.CHANNEL_ID}},
5452
)
5553
def evaluate_model(
5654
protected_attributes: List[str],
@@ -283,56 +281,56 @@ def evaluate_model(
283281
)
284282

285283
# ===== 9. Send Slack alert if bias detected =====
286-
# if bias_flag:
287-
# alerter = Client().active_stack.alerter
288-
# if alerter:
289-
# message = (
290-
# f"🚨 *BIAS DETECTED* in model evaluation for run {run_id}"
291-
# )
292-
293-
# # Create detailed blocks for bias alert
294-
# bias_blocks = [
295-
# {
296-
# "type": "section",
297-
# "text": {
298-
# "type": "mrkdwn",
299-
# "text": "🚨 *BIAS DETECTED* in model evaluation",
300-
# },
301-
# },
302-
# {"type": "divider"},
303-
# {
304-
# "type": "section",
305-
# "fields": [
306-
# {"type": "mrkdwn", "text": f"*Run ID:* {run_id}"},
307-
# {
308-
# "type": "mrkdwn",
309-
# "text": f"*Protected Attributes:* {', '.join(protected_attributes)}",
310-
# },
311-
# {"type": "mrkdwn", "text": f"*Model:* {A.MODEL}"},
312-
# {
313-
# "type": "mrkdwn",
314-
# "text": f"*Accuracy:* {performance_metrics['accuracy']:.3f}",
315-
# },
316-
# ],
317-
# },
318-
# {
319-
# "type": "section",
320-
# "text": {
321-
# "type": "mrkdwn",
322-
# "text": "*Fairness Issues:*\n"
323-
# + "\n".join(
324-
# [
325-
# f"• {attr}: {data.get('selection_rate_disparity', 'N/A'):.3f} disparity"
326-
# for attr, data in fairness_metrics.items()
327-
# ]
328-
# ),
329-
# },
330-
# },
331-
# ]
332-
333-
# params = SlackAlerterParameters(blocks=bias_blocks)
334-
# alerter.post(message=message, params=params)
335-
# logger.info("Bias alert sent to Slack")
284+
if bias_flag:
285+
alerter = Client().active_stack.alerter
286+
if alerter:
287+
message = (
288+
f"🚨 *BIAS DETECTED* in model evaluation for run {run_id}"
289+
)
290+
291+
# Create detailed blocks for bias alert
292+
bias_blocks = [
293+
{
294+
"type": "section",
295+
"text": {
296+
"type": "mrkdwn",
297+
"text": "🚨 *BIAS DETECTED* in model evaluation",
298+
},
299+
},
300+
{"type": "divider"},
301+
{
302+
"type": "section",
303+
"fields": [
304+
{"type": "mrkdwn", "text": f"*Run ID:* {run_id}"},
305+
{
306+
"type": "mrkdwn",
307+
"text": f"*Protected Attributes:* {', '.join(protected_attributes)}",
308+
},
309+
{"type": "mrkdwn", "text": f"*Model:* {A.MODEL}"},
310+
{
311+
"type": "mrkdwn",
312+
"text": f"*Accuracy:* {performance_metrics['accuracy']:.3f}",
313+
},
314+
],
315+
},
316+
{
317+
"type": "section",
318+
"text": {
319+
"type": "mrkdwn",
320+
"text": "*Fairness Issues:*\n"
321+
+ "\n".join(
322+
[
323+
f"• {attr}: {data.get('selection_rate_disparity', 'N/A'):.3f} disparity"
324+
for attr, data in fairness_metrics.items()
325+
]
326+
),
327+
},
328+
},
329+
]
330+
331+
params = SlackAlerterParameters(blocks=bias_blocks)
332+
alerter.post(message=message, params=params)
333+
logger.info("Bias alert sent to Slack")
336334

337335
eval_results = {
338336
"metrics": performance_metrics,

0 commit comments

Comments
 (0)