-
Notifications
You must be signed in to change notification settings - Fork 47
Expand file tree
/
Copy path: doctor.sh
More file actions
executable file
·2248 lines (1963 loc) · 85.7 KB
/
doctor.sh
File metadata and controls
executable file
·2248 lines (1963 loc) · 85.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/bin/bash
# COBOL Migration Tool - All-in-One Management Script
# ===================================================
# This script consolidates all functionality for setup, testing, running, and diagnostics
# Colors for output
# ANSI escape sequences; used with `echo -e` throughout. NC resets styling.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
MAGENTA='\033[0;35m'
BOLD='\033[1m'
NC='\033[0m' # No Color
# Resolve the sqlite3 command, handling Windows (Git Bash / MSYS2) paths
# Globals: SQLITE3_CMD — cached result; left empty when no sqlite3 is found.
# Returns: 0 when a sqlite3 binary was resolved, 1 otherwise.
SQLITE3_CMD=""
resolve_sqlite3() {
  # Reuse a previously resolved command.
  if [ -n "$SQLITE3_CMD" ]; then
    return 0
  fi
  local candidate
  for candidate in sqlite3 sqlite3.exe; do
    if command -v "$candidate" >/dev/null 2>&1; then
      SQLITE3_CMD="$candidate"
      return 0
    fi
  done
  # Last resort: search the WinGet package cache (Windows installs).
  local winget_hit
  winget_hit=$(find "${LOCALAPPDATA:-/dev/null}/Microsoft/WinGet/Packages" -name "sqlite3.exe" 2>/dev/null | head -1)
  if [ -n "$winget_hit" ]; then
    SQLITE3_CMD="$winget_hit"
  fi
  [ -n "$SQLITE3_CMD" ]
}
# Get repository root (directory containing this script)
# NOTE(review): symlinks are not resolved — when doctor.sh is invoked through
# a symlink, REPO_ROOT is the symlink's directory. Confirm that is intended.
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Determine the preferred dotnet CLI (favor .NET 10 installations when available)
# Outputs the CLI name on stdout. As written, every path reports "dotnet";
# the runtime probe only short-circuits when .NET 10 is present.
detect_dotnet_cli() {
  local preferred="dotnet"
  # Short-circuit when the default CLI already carries a .NET 10 runtime.
  if command -v "$preferred" >/dev/null 2>&1 \
    && "$preferred" --list-runtimes 2>/dev/null | grep -q "Microsoft.NETCore.App 10."; then
    echo "$preferred"
    return
  fi
  # Otherwise use whatever dotnet is on PATH.
  echo "$preferred"
}
DOTNET_CMD="$(detect_dotnet_cli)"
# Pick a Python interpreter: prefer python3, accept python, or emit an empty
# string when neither is installed (callers treat empty as "no Python").
detect_python() {
  local interp
  for interp in python3 python; do
    if command -v "$interp" >/dev/null 2>&1; then
      echo "$interp"
      return
    fi
  done
  echo ""
}
PYTHON_CMD="$(detect_python)"
# Defaults for the MCP web portal; override per-run with MCP_WEB_HOST / MCP_WEB_PORT.
DEFAULT_MCP_HOST="localhost"
DEFAULT_MCP_PORT=5028
# Function to show usage
# Prints the command reference, examples, and feature notes to stdout.
# Takes no arguments; relies on the color variables defined at the top.
show_usage() {
echo -e "${BOLD}${BLUE}🧠 COBOL to Java/C# Migration Tool${NC}"
echo -e "${BLUE}==========================================${NC}"
echo
echo -e "${BOLD}Usage:${NC} $0 [command]"
echo
echo -e "${BOLD}Available Commands:${NC}"
echo -e " ${GREEN}setup${NC} Interactive configuration setup"
echo -e " ${GREEN}test${NC} Full system validation and testing"
echo -e " ${GREEN}run${NC} Start full migration (auto-detects chunking needs)"
echo -e " ${GREEN}convert-only${NC} Convert COBOL only (skips RE; prompts to reuse persisted RE context)"
echo -e " ${GREEN}portal${NC} Start the web portal (documentation & monitoring)"
echo -e " ${GREEN}doctor${NC} Diagnose configuration issues (default)"
echo -e " ${GREEN}reverse-eng${NC} Run reverse engineering analysis only (no conversion) + Portal"
echo -e " ${GREEN}resume${NC} Resume interrupted migration"
echo -e " ${GREEN}monitor${NC} Monitor migration progress"
echo -e " ${GREEN}chunking-health${NC} Check smart chunking infrastructure"
echo -e " ${GREEN}validate${NC} Validate system requirements"
echo -e " ${GREEN}conversation${NC} Generate conversation log from migration data"
echo
echo -e "${BOLD}Examples:${NC}"
echo -e " $0 ${CYAN}# Run configuration doctor${NC}"
echo -e " $0 setup ${CYAN}# Interactive setup${NC}"
echo -e " $0 test ${CYAN}# Test configuration and dependencies${NC}"
echo -e " $0 reverse-eng ${CYAN}# Extract business logic only (no conversion) + Portal${NC}"
echo -e " $0 run ${CYAN}# Full migration (auto-chunks large files)${NC}"
echo -e " $0 portal ${CYAN}# Start portal to view docs & reports${NC}"
echo -e " $0 convert-only ${CYAN}# Conversion only (prompts to reuse cached RE results) + UI${NC}"
echo
echo -e "${BOLD}Business Logic Persistence (--reuse-re):${NC}"
echo -e " RE results are automatically persisted to the database after each run."
echo -e " ${GREEN}Mode 1${NC} — Full migration (default): RE runs and context is injected into prompts."
echo -e " ${GREEN}Mode 2${NC} — ${GREEN}--skip-reverse-engineering${NC}: Pure conversion, no RE context."
echo -e " ${GREEN}Mode 3${NC} — ${GREEN}--skip-reverse-engineering --reuse-re${NC}: Loads cached RE results from"
echo -e " the database and injects them into conversion prompts."
echo -e " The ${GREEN}convert-only${NC} command prompts interactively for the --reuse-re choice."
echo -e " To view or delete persisted RE results, open the Portal → 🔬 RE Results button."
echo
echo -e "${BOLD}Smart Chunking (v0.2):${NC}"
echo -e " Large files (>150K chars or >3000 lines) are automatically"
echo -e " routed through SmartMigrationOrchestrator for optimal processing."
echo -e " - Full Migration: Uses ChunkedMigrationProcess for conversion"
echo -e " - RE-Only Mode: Uses ChunkedReverseEngineeringProcess for analysis"
echo -e " No manual chunking flags required - detection is automatic."
echo
}
# Resolve the migration database path (absolute) from config or environment
# Precedence:
#   1. $MIGRATION_DB_PATH env var (normalized to absolute via Python when available)
#   2. ApplicationSettings.MigrationDatabasePath from Config/appsettings.json
#   3. Fallback "Data/migration.db"
# Outputs the path on stdout. When no Python interpreter exists, the env var is
# echoed verbatim (possibly relative), and the config file is never consulted —
# only the conventional Data/migration.db location is checked.
get_migration_db_path() {
local base_dir="$REPO_ROOT"
if [[ -n "$MIGRATION_DB_PATH" ]]; then
# Without Python we cannot normalize — return the env value as-is.
if [[ -z "$PYTHON_CMD" ]]; then
echo "$MIGRATION_DB_PATH"
return
fi
PY_BASE="$base_dir" PY_DB_PATH="$MIGRATION_DB_PATH" "$PYTHON_CMD" - <<'PY'
import os
base = os.environ["PY_BASE"]
path = os.environ["PY_DB_PATH"]
if not os.path.isabs(path):
path = os.path.abspath(os.path.join(base, path))
else:
path = os.path.abspath(path)
print(path)
PY
return
fi
# No Python: fall back to the conventional location, empty string if absent.
if [[ -z "$PYTHON_CMD" ]]; then
if [[ -f "$base_dir/Data/migration.db" ]]; then
echo "$base_dir/Data/migration.db"
else
echo ""
fi
return
fi
# Read the configured path out of appsettings.json via an inline Python program.
PY_BASE="$base_dir" "$PYTHON_CMD" - <<'PY'
import json
import os
base = os.environ["PY_BASE"]
config_path = os.path.join(base, "Config", "appsettings.json")
fallback = "Data/migration.db"
try:
with open(config_path, "r", encoding="utf-8") as f:
data = json.load(f)
path = data.get("ApplicationSettings", {}).get("MigrationDatabasePath") or fallback
except FileNotFoundError:
path = fallback
if not os.path.isabs(path):
path = os.path.abspath(os.path.join(base, path))
else:
path = os.path.abspath(path)
print(path)
PY
}
# Fetch the latest migration run summary from SQLite (if available)
# Arguments: $1 - path to the migration SQLite database
# Outputs:   "run_id|status|completed_at" for the newest row in `runs`
#            (ordered by started_at DESC); nothing when the table is empty.
# Returns:   1 when the database file or a Python interpreter is unavailable.
get_latest_run_summary() {
local db_path="$1"
if [[ -z "$db_path" || ! -f "$db_path" ]]; then
return 1
fi
if [[ -z "$PYTHON_CMD" ]]; then
return 1
fi
# Inline Python avoids a hard dependency on the sqlite3 CLI binary.
PY_DB_PATH="$db_path" "$PYTHON_CMD" - <<'PY'
import os
import sqlite3
db_path = os.environ["PY_DB_PATH"]
if not os.path.exists(db_path):
raise SystemExit
query = """
SELECT id, status, coalesce(completed_at, started_at)
FROM runs
ORDER BY started_at DESC
LIMIT 1
"""
with sqlite3.connect(db_path) as conn:
row = conn.execute(query).fetchone()
if row:
run_id, status, completed_at = row
completed_at = completed_at or ""
print(f"{run_id}|{status}|{completed_at}")
PY
}
# Best-effort: open the given URL in the platform's default browser.
# Arguments: $1 - URL to open
# Globals:   MCP_AUTO_OPEN - any value other than "1" disables opening (default 1)
# Silently does nothing when no opener is available for the platform.
open_url_in_browser() {
  local target="$1"
  if [[ "${MCP_AUTO_OPEN:-1}" != "1" ]]; then
    return
  fi
  local platform
  platform="$(uname -s)"
  case "$platform" in
    Darwin)
      if command -v open >/dev/null 2>&1; then
        open "$target" >/dev/null 2>&1 &
      fi
      ;;
    Linux)
      if command -v xdg-open >/dev/null 2>&1; then
        xdg-open "$target" >/dev/null 2>&1 &
      fi
      ;;
    CYGWIN*|MINGW*|MSYS*|Windows_NT)
      # Prefer PowerShell's Start-Process; fall back to cmd.exe's `start`.
      if command -v powershell.exe >/dev/null 2>&1; then
        powershell.exe -NoProfile -Command "Start-Process '$target'" >/dev/null 2>&1 &
      elif command -v cmd.exe >/dev/null 2>&1; then
        cmd.exe /c start "" "$target"
      fi
      ;;
  esac
}
# Launch the MCP web UI in the foreground (blocks until the dotnet app exits).
# Arguments: $1 - migration database path (exported as MIGRATION_DB_PATH)
# Globals:   MCP_WEB_HOST / MCP_WEB_PORT override the listen address; DOTNET_CMD
# Returns:   1 when the AI configuration cannot be loaded.
launch_mcp_web_ui() {
local db_path="$1"
local host="${MCP_WEB_HOST:-$DEFAULT_MCP_HOST}"
local port="${MCP_WEB_PORT:-$DEFAULT_MCP_PORT}"
local url="http://$host:$port"
# Ensure AI env is loaded (chat vs responses) before launching portal/MCP
if ! load_configuration || ! load_ai_config; then
echo -e "${RED}❌ Failed to load AI configuration. Portal launch aborted.${NC}"
return 1
fi
echo ""
echo -e "${BLUE}🌐 Launching MCP Web UI...${NC}"
echo "================================"
echo -e "Using database: ${BOLD}$db_path${NC}"
# Best-effort banner showing the latest run; silently skipped when unavailable.
# NOTE(review): `summary` is not declared local and leaks to the caller's scope.
if summary=$(get_latest_run_summary "$db_path" 2>/dev/null); then
IFS='|' read -r run_id status completed_at <<<"$summary"
echo -e "Latest migration run: ${GREEN}#${run_id}${NC} (${status})"
if [[ -n "$completed_at" ]]; then
echo -e "Completed at: $completed_at"
fi
echo ""
fi
echo -e "${BLUE}➡️ Starting web server at${NC} ${BOLD}$url${NC}"
# Check if port is already in use and clean up (only kill the LISTEN socket owner)
if lsof -Pi :$port -sTCP:LISTEN -t >/dev/null 2>&1; then
echo -e "${YELLOW}⚠️ Port $port is already in use. Cleaning up...${NC}"
local listen_pids
listen_pids=$(lsof -Pi :$port -sTCP:LISTEN -t 2>/dev/null)
if [[ -n "$listen_pids" ]]; then
echo "$listen_pids" | xargs kill -9 2>/dev/null && echo -e "${GREEN}✅ Killed existing process on port $port${NC}" || true
sleep 1
fi
fi
echo -e "${BLUE}➡️ Press Ctrl+C to stop the UI and exit.${NC}"
open_url_in_browser "$url"
export MIGRATION_DB_PATH="$db_path"
# Foreground run; ASPNETCORE_URLS pins the Kestrel listen address.
ASPNETCORE_URLS="$url" ASPNETCORE_HTTP_PORTS="$port" "$DOTNET_CMD" run --project "$REPO_ROOT/McpChatWeb"
}
# Function to launch portal in background for monitoring during migration
# Arguments: $1 - migration database path (default: $REPO_ROOT/Data/migration.db)
# Globals:   sets PORTAL_PID (background dotnet process); exports MIGRATION_DB_PATH
# Outputs:   progress messages to stdout; portal output goes to Logs/portal.log
launch_portal_background() {
  local db_path="${1:-$REPO_ROOT/Data/migration.db}"
  local host="${MCP_WEB_HOST:-$DEFAULT_MCP_HOST}"
  local port="${MCP_WEB_PORT:-$DEFAULT_MCP_PORT}"
  local url="http://$host:$port"
  echo ""
  echo -e "${BLUE}🌐 Launching Portal in Background for Monitoring...${NC}"
  echo "===================================================="
  # Check if port is already in use and clean up (only kill the LISTEN socket owner)
  if lsof -Pi :$port -sTCP:LISTEN -t >/dev/null 2>&1; then
    echo -e "${YELLOW}⚠️ Port $port is already in use. Cleaning up...${NC}"
    local listen_pids
    listen_pids=$(lsof -Pi :$port -sTCP:LISTEN -t 2>/dev/null)
    if [[ -n "$listen_pids" ]]; then
      echo "$listen_pids" | xargs kill -9 2>/dev/null && echo -e "${GREEN}✅ Killed existing process on port $port${NC}" || true
      sleep 1
    fi
  fi
  # Launch portal in background
  export MIGRATION_DB_PATH="$db_path"
  # FIX: the redirection below fails (and the portal never starts) when the
  # Logs directory is missing on a fresh checkout — create it first.
  mkdir -p "$REPO_ROOT/Logs"
  ASPNETCORE_URLS="$url" ASPNETCORE_HTTP_PORTS="$port" "$DOTNET_CMD" run --project "$REPO_ROOT/McpChatWeb" > "$REPO_ROOT/Logs/portal.log" 2>&1 &
  PORTAL_PID=$!
  # Poll the LISTEN socket until the portal is up (or max_wait seconds pass).
  echo -e "${BLUE}⏳ Waiting for portal to start...${NC}"
  local max_wait=15
  local waited=0
  while ! lsof -Pi :$port -sTCP:LISTEN -t >/dev/null 2>&1; do
    sleep 1
    waited=$((waited + 1))
    if [[ $waited -ge $max_wait ]]; then
      echo -e "${YELLOW}⚠️ Portal may not have started yet, continuing...${NC}"
      break
    fi
  done
  if lsof -Pi :$port -sTCP:LISTEN -t >/dev/null 2>&1; then
    echo -e "${GREEN}✅ Portal running at ${BOLD}$url${NC} (PID: $PORTAL_PID)"
    open_url_in_browser "$url"
  fi
  echo -e "${CYAN}📊 Monitor migration progress in portal: $url${NC}"
  echo -e "${CYAN}📄 Click 'Migration Monitor' button to see real-time progress${NC}"
  echo ""
}
# Function to stop background portal
# Globals: PORTAL_PID - PID recorded by launch_portal_background.
# No-op when PORTAL_PID is empty or the process is already gone.
stop_portal_background() {
  # Guard clause: only act on a live, tracked process.
  if [[ -z "$PORTAL_PID" ]] || ! kill -0 "$PORTAL_PID" 2>/dev/null; then
    return
  fi
  echo -e "${BLUE}🛑 Stopping background portal (PID: $PORTAL_PID)...${NC}"
  kill "$PORTAL_PID" 2>/dev/null
  wait "$PORTAL_PID" 2>/dev/null
  echo -e "${GREEN}✅ Portal stopped${NC}"
}
# Function to run portal standalone
# Prints a feature summary, resolves the database path (falling back to
# Data/migration.db), reports whether an RE report exists, then hands off to
# launch_mcp_web_ui (which blocks in the foreground).
run_portal() {
echo -e "${BLUE}🌐 Starting Migration Portal${NC}"
echo "============================="
echo ""
echo "The portal provides:"
echo " • 📊 Migration monitoring and progress"
echo " • 📄 Architecture documentation with Mermaid diagrams"
echo " • 📋 Reverse engineering reports with business logic"
echo " • 🔄 Real-time agent chat and chunk status"
echo ""
local db_path
# get_migration_db_path may fail or return empty; default to the repo DB.
if ! db_path="$(get_migration_db_path)" || [[ -z "$db_path" ]]; then
db_path="$REPO_ROOT/Data/migration.db"
echo -e "${YELLOW}ℹ️ Using default database path: $db_path${NC}"
fi
# Check for generated RE report
if [[ -f "$REPO_ROOT/output/reverse-engineering-details.md" ]]; then
echo -e "${GREEN}✅ Reverse engineering report available in portal${NC}"
else
echo -e "${YELLOW}ℹ️ No RE report yet - run './doctor.sh reverse-eng' first${NC}"
fi
echo ""
launch_mcp_web_ui "$db_path"
}
# Function to load configuration
# Sources Config/load-config.sh into the current shell so its exports are
# visible to callers. Returns the sourced script's exit status, or 1 with an
# error message when the loader file is missing.
load_configuration() {
  local loader="$REPO_ROOT/Config/load-config.sh"
  if [[ ! -f "$loader" ]]; then
    echo -e "${RED}❌ Configuration loader not found: Config/load-config.sh${NC}"
    return 1
  fi
  source "$loader"
}
# Verify that configured model deployments exist on the Azure OpenAI resource
# Auth: prefers an API key; falls back to an Azure AD token via `az`.
# Probes each deployment with a deliberately invalid chat-completions POST:
# HTTP 200/400/429 => deployment exists; 404 => missing.
# Returns 0 when all configured deployments exist (or the check is skipped for
# lack of auth/config); 1 when any deployment is missing.
check_model_deployments() {
echo ""
echo -e "${BLUE}🤖 Verifying Model Deployments${NC}"
echo "================================="
local endpoint="${AZURE_OPENAI_ENDPOINT}"
local api_key="${AZURE_OPENAI_API_KEY}"
local has_api_key=false
local token=""
# Determine auth method
# A key containing placeholder markers is treated as absent.
if [[ -n "$api_key" ]] && [[ "$api_key" != *"your-"* ]] && [[ "$api_key" != *"placeholder"* ]] && [[ "$api_key" != *"key-placeholder"* ]]; then
has_api_key=true
fi
if [[ "$has_api_key" == false ]]; then
if command -v az >/dev/null 2>&1 && az account show >/dev/null 2>&1; then
token=$(az account get-access-token --resource "https://cognitiveservices.azure.com" --query "accessToken" -o tsv 2>/dev/null)
if [[ -z "$token" ]]; then
echo -e " ${YELLOW}⚠️ Could not obtain Azure AD token, skipping deployment check${NC}"
return 0
fi
else
echo -e " ${YELLOW}⚠️ No auth available, skipping deployment check${NC}"
return 0
fi
fi
# Collect unique deployment names and their roles
local code_deploy="${AISETTINGS__DEPLOYMENTNAME}"
local chat_deploy="${AISETTINGS__CHATDEPLOYMENTNAME}"
# Build parallel arrays (bash 3 compatible, no associative arrays)
# Names and roles are '|'-delimited strings split below with IFS.
local deploy_names=""
local deploy_roles=""
if [[ -n "$code_deploy" ]]; then
deploy_names="$code_deploy"
deploy_roles="code model (migration agents)"
fi
if [[ -n "$chat_deploy" ]] && [[ "$chat_deploy" != "$code_deploy" ]]; then
deploy_names="${deploy_names}|${chat_deploy}"
deploy_roles="${deploy_roles}|chat model (portal & reports)"
elif [[ -n "$chat_deploy" ]] && [[ -n "$deploy_roles" ]]; then
deploy_roles="${deploy_roles} + chat model"
fi
if [[ -z "$deploy_names" ]]; then
echo -e " ${YELLOW}⚠️ No deployment names configured, skipping check${NC}"
return 0
fi
local all_ok=true
local api_version="2024-06-01"
local idx=0
IFS='|' read -ra _deploy_arr <<< "$deploy_names"
IFS='|' read -ra _role_arr <<< "$deploy_roles"
for deploy_name in "${_deploy_arr[@]}"; do
local role="${_role_arr[$idx]}"
idx=$((idx + 1))
# Use a lightweight inference probe: POST a minimal (invalid) chat completion.
# This only needs inference-level RBAC permissions.
# Expected: 400 = deployment exists, 404 = not found, 200 = also exists.
local test_url="${endpoint%/}/openai/deployments/${deploy_name}/chat/completions?api-version=${api_version}"
local tmp_resp
tmp_resp=$(mktemp)
local curl_args=("-s" "-o" "$tmp_resp" "-w" "%{http_code}" "--connect-timeout" "10" "--max-time" "15")
local http_status=""
local post_body='{"messages":[],"max_tokens":1}'
if [[ "$has_api_key" == true ]]; then
http_status=$(curl "${curl_args[@]}" -X POST -H "api-key: $api_key" -H "Content-Type: application/json" -d "$post_body" "$test_url" 2>/dev/null)
else
http_status=$(curl "${curl_args[@]}" -X POST -H "Authorization: Bearer $token" -H "Content-Type: application/json" -d "$post_body" "$test_url" 2>/dev/null)
fi
local body
body=$(cat "$tmp_resp" 2>/dev/null)
rm -f "$tmp_resp"
# 400 = deployment exists but our dummy request is invalid (expected)
# 200 = deployment exists and responded
# 404 = deployment truly not found
if [[ "$http_status" == "200" ]] || [[ "$http_status" == "400" ]]; then
echo -e " ${GREEN}✅ Deployment '${deploy_name}' exists${NC} (${role})"
elif [[ "$http_status" == "404" ]]; then
# Check error code to distinguish "deployment not found" from "endpoint not found"
local error_code=""
if command -v jq >/dev/null 2>&1; then
error_code=$(echo "$body" | jq -r '(.error.code // empty)' 2>/dev/null)
fi
if [[ "$error_code" == "DeploymentNotFound" ]]; then
echo -e " ${RED}❌ Deployment '${deploy_name}' NOT FOUND${NC} (${role})"
echo -e " ${YELLOW}Create this deployment in the Azure portal or update the name in Config/ai-config.local.env${NC}"
all_ok=false
else
echo -e " ${RED}❌ Deployment '${deploy_name}' NOT FOUND (HTTP 404)${NC} (${role})"
echo -e " ${YELLOW}Create this deployment in the Azure portal or update the name in Config/ai-config.local.env${NC}"
all_ok=false
fi
elif [[ "$http_status" == "429" ]]; then
# Rate limiting still proves the deployment exists.
echo -e " ${GREEN}✅ Deployment '${deploy_name}' exists${NC} (${role}) ${YELLOW}(rate limited)${NC}"
else
local error_msg=""
if command -v jq >/dev/null 2>&1; then
error_msg=$(echo "$body" | jq -r '(.error.message // .error.code // empty)' 2>/dev/null)
fi
echo -e " ${YELLOW}⚠️ Deployment '${deploy_name}' check returned HTTP ${http_status}${NC} (${role})"
if [[ -n "$error_msg" ]]; then
echo -e " ${YELLOW}Error: $error_msg${NC}"
fi
fi
done
echo ""
if [[ "$all_ok" == false ]]; then
echo -e " ${RED}❌ One or more model deployments are missing.${NC}"
echo -e " ${YELLOW}Fix: Verify deployment names in Config/ai-config.local.env match your Azure OpenAI resource.${NC}"
return 1
fi
return 0
}
# Pre-check: verify AI connectivity via API key or Azure AD (Entra ID) auth
# 1. Determines the auth method (valid API key and/or `az login` session).
# 2. Probes the endpoint's models list with curl; interprets HTTP status
#    (200/404 reachable, 401/403 auth rejected, 000 unreachable).
# 3. Delegates to check_model_deployments for per-deployment validation.
# Returns 0 only when connectivity and all deployments check out.
check_ai_connectivity() {
echo ""
echo -e "${BLUE}🔌 Pre-Check: AI Service Connectivity${NC}"
echo "======================================="
local endpoint="${AZURE_OPENAI_ENDPOINT}"
local api_key="${AZURE_OPENAI_API_KEY}"
local deployment="${AZURE_OPENAI_DEPLOYMENT_NAME}"
# Determine authentication method
local auth_method=""
local has_api_key=false
local has_azure_ad=false
# Check for valid API key
# Keys containing placeholder markers are treated as absent.
if [[ -n "$api_key" ]] && [[ "$api_key" != *"your-"* ]] && [[ "$api_key" != *"placeholder"* ]] && [[ "$api_key" != *"key-placeholder"* ]]; then
has_api_key=true
auth_method="API Key"
fi
# Check for Azure AD / Entra ID login
if command -v az >/dev/null 2>&1; then
if az account show >/dev/null 2>&1; then
has_azure_ad=true
local az_account
az_account=$(az account show --query "{name:name, user:user.name}" -o tsv 2>/dev/null)
if [[ "$has_api_key" == true ]]; then
auth_method="API Key (Azure AD also available)"
else
auth_method="Azure AD (Entra ID)"
fi
fi
fi
# Fail if neither auth method is available
if [[ "$has_api_key" == false ]] && [[ "$has_azure_ad" == false ]]; then
echo -e "${RED}❌ No valid authentication found!${NC}"
echo ""
echo " You must configure one of the following:"
echo " 1) Set a valid API key in Config/ai-config.local.env"
echo " 2) Log in via Azure CLI: az login"
echo ""
echo " For API key setup: ./doctor.sh setup"
echo " For Azure AD setup: az login && ./doctor.sh run"
return 1
fi
echo -e " Auth method: ${GREEN}$auth_method${NC}"
if [[ "$has_azure_ad" == true ]]; then
local az_user
az_user=$(az account show --query "user.name" -o tsv 2>/dev/null)
local az_sub
az_sub=$(az account show --query "name" -o tsv 2>/dev/null)
echo -e " Azure account: ${GREEN}$az_user${NC} (${az_sub})"
fi
# Connection check: attempt a lightweight request to the endpoint
if [[ -z "$endpoint" ]] || [[ "$endpoint" == *"your-"* ]]; then
echo -e "${RED}❌ Endpoint not configured. Update AZURE_OPENAI_ENDPOINT in Config/ai-config.local.env${NC}"
return 1
fi
echo -e " Endpoint: ${GREEN}$endpoint${NC}"
echo -ne " Connection: "
# Build the test URL — try to reach the endpoint root or models list
local test_url="${endpoint%/}/openai/models?api-version=2024-06-01"
local http_status=""
local response_body=""
local tmp_response
tmp_response=$(mktemp)
# -w %{http_code} makes curl print only the status; the body goes to the temp file.
local curl_args=("-s" "-o" "$tmp_response" "-w" "%{http_code}" "--connect-timeout" "10" "--max-time" "15")
if [[ "$has_api_key" == true ]]; then
http_status=$(curl "${curl_args[@]}" -H "api-key: $api_key" "$test_url" 2>/dev/null)
elif [[ "$has_azure_ad" == true ]]; then
local token
token=$(az account get-access-token --resource "https://cognitiveservices.azure.com" --query "accessToken" -o tsv 2>/dev/null)
if [[ -n "$token" ]]; then
http_status=$(curl "${curl_args[@]}" -H "Authorization: Bearer $token" "$test_url" 2>/dev/null)
else
rm -f "$tmp_response"
echo -e "${YELLOW}⚠️ Could not obtain Azure AD token for Cognitive Services${NC}"
echo -e " ${YELLOW}Try: az login --scope https://cognitiveservices.azure.com/.default${NC}"
return 1
fi
fi
response_body=$(cat "$tmp_response" 2>/dev/null)
rm -f "$tmp_response"
# Extract a human-readable error message from the JSON response (if any)
local error_msg=""
if [[ -n "$response_body" ]]; then
# Try jq first, fall back to grep/sed
if command -v jq >/dev/null 2>&1; then
error_msg=$(echo "$response_body" | jq -r '(.error.message // .error.code // .message // empty)' 2>/dev/null)
fi
if [[ -z "$error_msg" ]]; then
error_msg=$(echo "$response_body" | grep -o '"message":"[^"]*"' | head -1 | sed 's/"message":"//;s/"$//')
fi
fi
if [[ -z "$http_status" ]] || [[ "$http_status" == "000" ]]; then
echo -e "${RED}❌ FAILED (could not reach endpoint)${NC}"
echo -e " ${YELLOW}Check that the endpoint URL is correct and accessible from this network.${NC}"
return 1
elif [[ "$http_status" == "200" ]]; then
echo -e "${GREEN}✅ OK (HTTP $http_status)${NC}"
elif [[ "$http_status" == "401" ]] || [[ "$http_status" == "403" ]]; then
echo -e "${RED}❌ FAILED (HTTP $http_status - authentication rejected)${NC}"
if [[ -n "$error_msg" ]]; then
echo -e " ${YELLOW}Error: $error_msg${NC}"
elif [[ -n "$response_body" ]]; then
echo -e " ${YELLOW}Response: $response_body${NC}"
fi
if [[ "$has_api_key" == true ]]; then
echo -e " ${YELLOW}Your API key may be invalid or expired. Update Config/ai-config.local.env.${NC}"
else
echo -e " ${YELLOW}Your Azure AD token lacks permissions. Check RBAC role assignments.${NC}"
fi
return 1
elif [[ "$http_status" == "404" ]]; then
# 404 on models endpoint is acceptable — the endpoint is reachable
echo -e "${GREEN}✅ OK (endpoint reachable, HTTP $http_status on models list)${NC}"
else
# Other status codes (e.g. 400, 429, 500) — endpoint is reachable but request failed
echo -e "${RED}❌ FAILED (HTTP $http_status)${NC}"
if [[ -n "$error_msg" ]]; then
echo -e " ${YELLOW}Error: $error_msg${NC}"
elif [[ -n "$response_body" ]]; then
echo -e " ${YELLOW}Response: $response_body${NC}"
fi
return 1
fi
echo ""
# Verify model deployments exist
if ! check_model_deployments; then
return 1
fi
return 0
}
# Function for configuration doctor (original functionality)
# Interactive diagnostic: checks config files and RE components, offers to
# bootstrap the local config from the example file (prompts via `read`, so not
# safe for non-interactive runs), validates loaded configuration values, and
# prints command/troubleshooting guidance.
# NOTE(review): config_files_ok is assigned but never read in this function —
# confirm whether it is vestigial or consumed elsewhere before removing.
run_doctor() {
echo -e "${BLUE}🏥 Configuration Doctor - COBOL Migration Tool${NC}"
echo "=============================================="
echo
# Check if configuration files exist
echo -e "${BLUE}📋 Checking Configuration Files...${NC}"
echo
config_files_ok=true
# Check template configuration
if [[ -f "$REPO_ROOT/Config/ai-config.env" ]]; then
echo -e "${GREEN}✅ Template configuration found: Config/ai-config.env${NC}"
else
echo -e "${RED}❌ Missing template configuration: Config/ai-config.env${NC}"
config_files_ok=false
fi
# Check local configuration
if [[ -f "$REPO_ROOT/Config/ai-config.local.env" ]]; then
echo -e "${GREEN}✅ Local configuration found: Config/ai-config.local.env${NC}"
local_config_exists=true
else
echo -e "${YELLOW}⚠️ Missing local configuration: Config/ai-config.local.env${NC}"
local_config_exists=false
fi
# Check configuration loader
if [[ -f "$REPO_ROOT/Config/load-config.sh" ]]; then
echo -e "${GREEN}✅ Configuration loader found: Config/load-config.sh${NC}"
else
echo -e "${RED}❌ Missing configuration loader: Config/load-config.sh${NC}"
config_files_ok=false
fi
# Check appsettings.json
if [[ -f "$REPO_ROOT/Config/appsettings.json" ]]; then
echo -e "${GREEN}✅ Application settings found: Config/appsettings.json${NC}"
else
echo -e "${RED}❌ Missing application settings: Config/appsettings.json${NC}"
config_files_ok=false
fi
echo
# Check reverse engineering components
echo -e "${BLUE}🔍 Checking Reverse Engineering Components...${NC}"
echo
# Check models
if [[ -f "$REPO_ROOT/Models/BusinessLogic.cs" ]]; then
echo -e "${GREEN}✅ BusinessLogic model found${NC}"
else
echo -e "${YELLOW}⚠️ Missing BusinessLogic model (optional feature)${NC}"
fi
# Check agents
if [[ -f "$REPO_ROOT/Agents/BusinessLogicExtractorAgent.cs" ]]; then
echo -e "${GREEN}✅ BusinessLogicExtractorAgent found${NC}"
else
echo -e "${YELLOW}⚠️ Missing BusinessLogicExtractorAgent (optional feature)${NC}"
fi
# Check process
if [[ -f "$REPO_ROOT/Processes/ReverseEngineeringProcess.cs" ]]; then
echo -e "${GREEN}✅ ReverseEngineeringProcess found${NC}"
else
echo -e "${YELLOW}⚠️ Missing ReverseEngineeringProcess (optional feature)${NC}"
fi
# Check documentation
if [[ -f "$REPO_ROOT/docs/REVERSE_ENGINEERING_ARCHITECTURE.md" ]]; then
echo -e "${GREEN}✅ Reverse engineering architecture documentation found${NC}"
else
echo -e "${YELLOW}⚠️ Missing reverse engineering architecture documentation${NC}"
fi
# Check for generated RE report
if [[ -f "$REPO_ROOT/output/reverse-engineering-details.md" ]]; then
echo -e "${GREEN}✅ Generated reverse engineering report found${NC}"
else
echo -e "${YELLOW}ℹ️ No generated RE report yet (run reverse engineering first)${NC}"
fi
echo
# If local config doesn't exist, offer to create it
if [[ "$local_config_exists" == false ]]; then
echo -e "${YELLOW}🔧 Local Configuration Setup${NC}"
echo "----------------------------"
echo "You need a local configuration file with your AI service credentials."
echo
read -p "Would you like me to create Config/ai-config.local.env from the template? (y/n): " create_local
if [[ "$create_local" =~ ^[Yy]$ ]]; then
if [[ -f "$REPO_ROOT/Config/ai-config.local.env.example" ]]; then
cp "$REPO_ROOT/Config/ai-config.local.env.example" "$REPO_ROOT/Config/ai-config.local.env"
echo -e "${GREEN}✅ Created Config/ai-config.local.env from example${NC}"
echo -e "${YELLOW}⚠️ You must edit this file with your actual AI service credentials before running the migration tool.${NC}"
local_config_exists=true
else
echo -e "${RED}❌ Example file not found: Config/ai-config.local.env.example${NC}"
fi
fi
echo
fi
# Load and validate configuration if local config exists
if [[ "$local_config_exists" == true ]]; then
echo -e "${BLUE}🔍 Validating Configuration Content...${NC}"
echo
# Source the configuration loader
if load_configuration && load_ai_config 2>/dev/null; then
# Check required variables
config_valid=true
# --- Core: Endpoint (required) ---
echo -e "${CYAN}Endpoint:${NC}"
endpoint_val="${AZURE_OPENAI_ENDPOINT}"
if [[ -z "$endpoint_val" ]]; then
echo -e " ${RED}❌ AZURE_OPENAI_ENDPOINT is not set${NC}"
config_valid=false
elif [[ "$endpoint_val" == *"your-"* ]] || [[ "$endpoint_val" == *"placeholder"* ]]; then
echo -e " ${YELLOW}⚠️ AZURE_OPENAI_ENDPOINT contains placeholder: $endpoint_val${NC}"
config_valid=false
else
echo -e " ${GREEN}✅ AZURE_OPENAI_ENDPOINT: $endpoint_val${NC}"
fi
echo
# --- Authentication ---
echo -e "${CYAN}Authentication:${NC}"
api_key_val="${AZURE_OPENAI_API_KEY}"
if [[ -n "$api_key_val" ]] && [[ "$api_key_val" != *"your-"* ]] && [[ "$api_key_val" != *"placeholder"* ]] && [[ "$api_key_val" != *"key-placeholder"* ]]; then
# Show only the first/last 4 chars so the key never appears in logs.
masked_key="${api_key_val:0:4}...${api_key_val: -4}"
echo -e " ${GREEN}✅ API Key: $masked_key${NC}"
elif command -v az >/dev/null 2>&1 && az account show >/dev/null 2>&1; then
local az_user
az_user=$(az account show --query "user.name" -o tsv 2>/dev/null)
echo -e " ${GREEN}✅ Azure AD (Entra ID): $az_user${NC}"
else
echo -e " ${RED}❌ No valid auth: set API key in ai-config.local.env or run 'az login'${NC}"
config_valid=false
fi
echo
# --- Code Model (Responses API - used by migration agents) ---
echo -e "${CYAN}Code Model (migration agents):${NC}"
code_vars=("AISETTINGS__DEPLOYMENTNAME" "AISETTINGS__MODELID")
for var in "${code_vars[@]}"; do
# ${!var} = indirect expansion: the value of the variable named by $var.
value="${!var}"
if [[ -z "$value" ]]; then
echo -e " ${RED}❌ $var is not set${NC}"
config_valid=false
elif [[ "$value" == *"your-"* ]] || [[ "$value" == *"placeholder"* ]]; then
echo -e " ${YELLOW}⚠️ $var contains placeholder: $value${NC}"
config_valid=false
else
echo -e " ${GREEN}✅ $var: $value${NC}"
fi
done
echo
# --- Chat Model (Chat Completions API - used by portal & reports) ---
echo -e "${CYAN}Chat Model (portal & reports):${NC}"
chat_vars=("AISETTINGS__CHATDEPLOYMENTNAME" "AISETTINGS__CHATMODELID")
for var in "${chat_vars[@]}"; do
value="${!var}"
if [[ -z "$value" ]]; then
echo -e " ${YELLOW}⚠️ $var is not set (will fall back to code model)${NC}"
else
echo -e " ${GREEN}✅ $var: $value${NC}"
fi
done
# --- Agent-specific model overrides (optional) ---
echo
echo -e "${CYAN}Agent model overrides (optional):${NC}"
agent_vars=("AZURE_OPENAI_COBOL_ANALYZER_MODEL" "AZURE_OPENAI_JAVA_CONVERTER_MODEL" "AZURE_OPENAI_DEPENDENCY_MAPPER_MODEL" "AZURE_OPENAI_UNIT_TEST_MODEL")
for var in "${agent_vars[@]}"; do
value="${!var}"
if [[ -n "$value" ]]; then
echo -e " ${GREEN}✅ $var: $value${NC}"
else
echo -e " ${BLUE}ℹ️ $var: (defaults to code model)${NC}"
fi
done
echo
if [[ "$config_valid" == true ]]; then
echo -e "${GREEN}🎉 Configuration validation successful!${NC}"
echo
echo "Your configuration is ready to use. You can now run:"
echo " ./doctor.sh run"
echo " ./doctor.sh test"
echo " dotnet run"
else
echo -e "${YELLOW}⚠️ Configuration needs attention${NC}"
echo
echo "Next steps:"
echo "1. Edit Config/ai-config.local.env"
echo "2. Replace template placeholders with your actual AI service credentials"
echo "3. Run this doctor script again to validate"
echo
echo "Need help? Run: ./doctor.sh setup"
fi
else
echo -e "${RED}❌ Failed to load configuration${NC}"
fi
fi
echo
echo -e "${BLUE}🔧 Available Commands${NC}"
echo "===================="
echo "• ./doctor.sh setup - Interactive configuration setup"
echo "• ./doctor.sh test - Full system validation"
echo "• ./doctor.sh run - Start migration"
echo "• ./doctor.sh reverse-eng - Run reverse engineering only"
echo "• ./doctor.sh portal - Start the web portal"
echo ""
echo -e "${BLUE}📄 Documentation${NC}"
echo "=================="
echo "• output/reverse-engineering-details.md - Generated business logic report"
echo ""
echo -e "${BLUE}🌐 Portal Documentation${NC}"
echo "========================"
echo "• Start portal: ./doctor.sh portal (or cd McpChatWeb && dotnet run)"
echo
echo -e "${BLUE}💡 Troubleshooting Tips${NC}"
echo "======================"
echo "• Make sure your AI service endpoint is deployed and accessible"
echo "• Verify your model deployment names match your provider setup"
echo "• Check that your API key has proper permissions (or Azure AD login is active)"
echo "• Ensure your endpoint URL is correct (should end with /)"
echo
echo "Configuration doctor completed!"
}
# Function to generate migration report
generate_migration_report() {
echo -e "${BLUE}📝 Generating Migration Report...${NC}"
if ! resolve_sqlite3; then
echo -e "${RED}❌ sqlite3 is not installed. Install it to generate reports.${NC}"
echo -e "${YELLOW} macOS: brew install sqlite3${NC}"
echo -e "${YELLOW} Linux: sudo apt install sqlite3${NC}"
echo -e "${YELLOW} Windows: winget install SQLite.SQLite (then restart your terminal)${NC}"
return 1
fi
local db_path="$REPO_ROOT/Data/migration.db"
if [ ! -f "$db_path" ]; then
echo -e "${RED}❌ Migration database not found at: $db_path${NC}"
return 1
fi
# Get the latest run ID
local run_id=$($SQLITE3_CMD "$db_path" "SELECT MAX(run_id) FROM cobol_files;")
if [ -z "$run_id" ]; then
echo -e "${RED}❌ No migration runs found in database${NC}"
return 1
fi
echo -e "${GREEN}✅ Found run ID: $run_id${NC}"
echo "Generating comprehensive report..."
local output_dir="$REPO_ROOT/output"
local report_file="$output_dir/migration_report_run_${run_id}.md"
# Generate the report using SQLite queries
{
echo "# COBOL Migration Report - Run $run_id"
echo ""
echo "**Generated:** $(date '+%Y-%m-%d %H:%M:%S')"
echo ""
echo "---"
echo ""
echo "## 📊 Migration Summary"
echo ""
$SQLITE3_CMD "$db_path" <<SQL
.mode markdown
.headers off
SELECT '- **Total COBOL Files:** ' || COUNT(DISTINCT file_name) FROM cobol_files WHERE run_id = $run_id;
SELECT '- **Programs (.cbl):** ' || COUNT(DISTINCT file_name) FROM cobol_files WHERE run_id = $run_id AND file_name LIKE '%.cbl';
SELECT '- **Copybooks (.cpy):** ' || COUNT(DISTINCT file_name) FROM cobol_files WHERE run_id = $run_id AND file_name LIKE '%.cpy';
SQL
echo ""
$SQLITE3_CMD "$db_path" <<SQL
.mode markdown
.headers off
SELECT '- **Total Dependencies:** ' || COUNT(*) FROM dependencies WHERE run_id = $run_id;
SELECT ' - CALL: ' || COUNT(*) FROM dependencies WHERE run_id = $run_id AND dependency_type = 'CALL';
SELECT ' - COPY: ' || COUNT(*) FROM dependencies WHERE run_id = $run_id AND dependency_type = 'COPY';
SELECT ' - PERFORM: ' || COUNT(*) FROM dependencies WHERE run_id = $run_id AND dependency_type = 'PERFORM';
SELECT ' - EXEC: ' || COUNT(*) FROM dependencies WHERE run_id = $run_id AND dependency_type = 'EXEC';
SELECT ' - READ: ' || COUNT(*) FROM dependencies WHERE run_id = $run_id AND dependency_type = 'READ';
SELECT ' - WRITE: ' || COUNT(*) FROM dependencies WHERE run_id = $run_id AND dependency_type = 'WRITE';
SELECT ' - OPEN: ' || COUNT(*) FROM dependencies WHERE run_id = $run_id AND dependency_type = 'OPEN';
SELECT ' - CLOSE: ' || COUNT(*) FROM dependencies WHERE run_id = $run_id AND dependency_type = 'CLOSE';
SQL
echo ""
echo "---"
echo ""
echo "## 📁 File Inventory"
echo ""
$SQLITE3_CMD "$db_path" <<SQL
.mode markdown
.headers on
SELECT file_name AS 'File Name', file_path AS 'Path', is_copybook AS 'Is Copybook'
FROM cobol_files
WHERE run_id = $run_id
ORDER BY file_name;
SQL
echo ""
echo "---"
echo ""
echo "## 🔗 Dependency Relationships"
echo ""
$SQLITE3_CMD "$db_path" <<SQL