-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathdeploy.rs
More file actions
2177 lines (1920 loc) · 84.3 KB
/
deploy.rs
File metadata and controls
2177 lines (1920 loc) · 84.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
use std::convert::TryFrom;
use std::path::{Path, PathBuf};
use std::time::Duration;
use crate::cli::ai_client::{
build_prompt, create_provider, ollama_complete_streaming, AiTask, PromptContext,
};
use crate::cli::config_parser::{
AiProviderType, CloudConfig, CloudOrchestrator, CloudProvider, DeployTarget, ServerConfig,
StackerConfig,
};
use crate::cli::credentials::CredentialsManager;
use crate::cli::deployment_lock::DeploymentLock;
use crate::cli::error::CliError;
use crate::cli::generator::compose::ComposeDefinition;
use crate::cli::generator::dockerfile::DockerfileBuilder;
use crate::cli::install_runner::{
strategy_for, CommandExecutor, DeployContext, DeployResult, ShellExecutor,
};
use crate::cli::progress;
use crate::cli::stacker_client::{self, StackerClient};
use crate::helpers::ssh_client;
use crate::console::commands::CallableTrait;
/// Default config filename looked up in the project directory when no
/// `--file` override is supplied.
const DEFAULT_CONFIG_FILE: &str = "stacker.yml";
/// Output directory for generated artifacts (Dockerfile, docker-compose.yml,
/// deployment lockfile).
const OUTPUT_DIR: &str = ".stacker";
/// Parse a user-supplied AI provider name into an `AiProviderType`.
///
/// Matching is case-insensitive and ignores surrounding whitespace. The
/// string is round-tripped through serde so the accepted spellings stay in
/// sync with the enum's serialized form. Unknown names produce a
/// `CliError::ConfigValidation`.
fn parse_ai_provider(s: &str) -> Result<AiProviderType, CliError> {
    // Quote the normalized name so it deserializes as a JSON string literal.
    let quoted = format!("\"{}\"", s.trim().to_lowercase());
    match serde_json::from_str::<AiProviderType>(&quoted) {
        Ok(provider) => Ok(provider),
        Err(_) => Err(CliError::ConfigValidation(
            "Unknown AI provider. Use: openai, anthropic, ollama, custom".to_string(),
        )),
    }
}
/// Resolve the effective AI configuration for troubleshooting.
///
/// Starts from the `ai` section of stacker.yml (or defaults when the file is
/// absent), then layers `STACKER_AI_*` environment overrides on top; any
/// override also flips `enabled` on. When no API key was configured, the
/// provider-specific env vars (`OPENAI_API_KEY` / `ANTHROPIC_API_KEY`) are
/// consulted as a last resort.
fn resolve_ai_from_env_or_config(project_dir: &Path, config_file: Option<&str>) -> Result<crate::cli::config_parser::AiConfig, CliError> {
    let config_path = project_dir.join(config_file.unwrap_or(DEFAULT_CONFIG_FILE));
    let mut ai = if config_path.exists() {
        StackerConfig::from_file(&config_path)?.ai
    } else {
        Default::default()
    };

    // Read an env var, treating unset or blank values as absent.
    let non_empty = |name: &str| -> Option<String> {
        std::env::var(name).ok().filter(|v| !v.trim().is_empty())
    };

    if let Ok(provider) = std::env::var("STACKER_AI_PROVIDER") {
        ai.provider = parse_ai_provider(&provider)?;
        ai.enabled = true;
    }
    if let Some(model) = non_empty("STACKER_AI_MODEL") {
        ai.model = Some(model);
        ai.enabled = true;
    }
    if let Some(endpoint) = non_empty("STACKER_AI_ENDPOINT") {
        ai.endpoint = Some(endpoint);
        ai.enabled = true;
    }
    // An unparsable timeout is silently ignored and the configured value kept.
    if let Some(seconds) = std::env::var("STACKER_AI_TIMEOUT")
        .ok()
        .and_then(|v| v.parse::<u64>().ok())
    {
        ai.timeout = seconds;
        ai.enabled = true;
    }
    if let Some(key) = non_empty("STACKER_AI_API_KEY") {
        ai.api_key = Some(key);
        ai.enabled = true;
    }
    // Provider-specific key env vars apply only when no key was set above.
    if ai.api_key.is_none() {
        let provider_var = match ai.provider {
            AiProviderType::Openai => Some("OPENAI_API_KEY"),
            AiProviderType::Anthropic => Some("ANTHROPIC_API_KEY"),
            _ => None,
        };
        if let Some(var) = provider_var {
            if let Some(key) = non_empty(var) {
                ai.api_key = Some(key);
                ai.enabled = true;
            }
        }
    }
    Ok(ai)
}
/// Build a list of static troubleshooting hints for a deploy failure.
///
/// Scans the lower-cased failure text for known error signatures and
/// collects the matching guidance; when nothing matches, generic debugging
/// steps are returned instead. Used whenever AI troubleshooting is disabled
/// or unavailable.
fn fallback_troubleshooting_hints(reason: &str) -> Vec<String> {
    let needle = reason.to_lowercase();
    let has = |pattern: &str| needle.contains(pattern);
    let mut hints: Vec<String> = Vec::new();
    if has("npm ci") {
        hints.extend([
            "npm ci failed: ensure package-lock.json exists and is in sync with package.json".to_string(),
            "Try locally: npm ci --production (or npm ci) to see the full dependency error".to_string(),
        ]);
    }
    if has("the attribute `version` is obsolete") || has("attribute `version` is obsolete") {
        hints.push("docker-compose version warning: remove top-level 'version:' from .stacker/docker-compose.yml".to_string());
    }
    if has("failed to solve") {
        hints.push("Docker build step failed: inspect the failing Dockerfile line and run docker build manually for verbose output".to_string());
    }
    if has("permission denied") || has("eacces") {
        hints.push("Permission issue detected: verify file ownership and executable bits for scripts copied into the image".to_string());
    }
    if has("no such file") || has("not found") {
        hints.push("Missing file in build context: confirm COPY paths and .dockerignore rules".to_string());
    }
    if has("network") || has("timed out") {
        hints.push("Network/timeout issue: retry build and verify registry connectivity".to_string());
    }
    if has("port is already allocated")
        || has("bind for 0.0.0.0")
        || has("failed programming external connectivity")
    {
        hints.extend([
            "Port conflict: another process/container already uses this host port (for example 3000).".to_string(),
            "Find the owner with: lsof -nP -iTCP:3000 -sTCP:LISTEN".to_string(),
            "Then stop it (docker compose down / docker rm -f <container>) or change ports in stacker.yml".to_string(),
        ]);
    }
    // A 404 whose body is HTML usually means the orchestrator base URL points
    // at the frontend instead of the User Service API.
    if has("remote orchestrator request failed")
        && has("http error")
        && has("404")
        && (has("<!doctype html") || has("<html"))
    {
        hints.extend([
            "Remote orchestrator URL looks incorrect (received frontend 404 HTML instead of User Service JSON).".to_string(),
            "If you logged in with /server/user/auth/login, deploy expects User Service base URL ending with /server/user.".to_string(),
            "Try re-login with: stacker-cli login --auth-url https://dev.try.direct/server/user/auth/login".to_string(),
        ]);
    }
    if has("orphan containers") {
        hints.push("Orphan containers detected: run docker compose -f .stacker/docker-compose.yml down --remove-orphans".to_string());
    }
    if has("manifest unknown") || has("pull access denied") {
        hints.push("Image pull failed: the configured image tag is not available in the registry".to_string());
        // When the image reference can be parsed out, give exact commands.
        match extract_missing_image(reason) {
            Some(image) => {
                hints.push(format!("Missing image detected: {}", image));
                hints.push(format!("Build and tag locally: docker build -t {} .", image));
                hints.push(format!("If using a remote registry, push it first: docker push {}", image));
            }
            None => {
                hints.push("Build locally first (docker build -t <image:tag> .) or use an existing published tag".to_string());
            }
        }
        hints.push("Alternative: remove app.image in stacker.yml so Stacker generates/uses a local build context".to_string());
    }
    if hints.is_empty() {
        hints.extend([
            "Run docker compose -f .stacker/docker-compose.yml build --no-cache for detailed build logs".to_string(),
            "Inspect .stacker/Dockerfile and .stacker/docker-compose.yml for invalid paths and commands".to_string(),
            "If the issue is dependency-related, run the failing install command locally first".to_string(),
        ]);
    }
    hints
}
/// Pull the image reference out of a Docker "missing image" error message.
///
/// Searches for the text after either `"manifest for "` or
/// `"pull access denied for "` and returns the first token delimited by
/// whitespace or a comma, with surrounding double quotes stripped. Returns
/// `None` when neither marker is present or the extracted token is empty.
fn extract_missing_image(reason: &str) -> Option<String> {
    ["manifest for ", "pull access denied for "]
        .iter()
        .copied()
        .filter_map(|marker| {
            let tail = &reason[reason.find(marker)? + marker.len()..];
            let token = tail
                .split(|c: char| c.is_whitespace() || c == ',' || c == '\n')
                .next()
                .unwrap_or("")
                .trim_matches('"');
            if token.is_empty() {
                None
            } else {
                Some(token.to_string())
            }
        })
        .next()
}
/// Make sure the env file referenced by `config.env_file` exists on disk.
///
/// No-op when the config declares no env file or the file is already
/// present. Otherwise creates any missing parent directories and writes a
/// stub file seeded with the key/value pairs from `config.env`, sorted by
/// key for a deterministic result. Relative paths are resolved against
/// `project_dir`.
fn ensure_env_file_if_needed(config: &StackerConfig, project_dir: &Path) -> Result<(), CliError> {
    let env_file = match &config.env_file {
        Some(path) => path,
        None => return Ok(()),
    };
    let env_path = if env_file.is_absolute() {
        env_file.clone()
    } else {
        project_dir.join(env_file)
    };
    if env_path.exists() {
        return Ok(());
    }
    if let Some(parent) = env_path.parent() {
        std::fs::create_dir_all(parent)?;
    }
    let mut body = String::from("# Auto-created by Stacker because env_file was configured\n");
    // Emit configured variables in sorted order so reruns are reproducible.
    let mut keys: Vec<&String> = config.env.keys().collect();
    keys.sort();
    for key in keys {
        body.push_str(&format!("{}={}\n", key, config.env[key]));
    }
    std::fs::write(&env_path, body)?;
    eprintln!(" Created missing env file: {}", env_path.display());
    Ok(())
}
/// SSH connection timeout for the server pre-check, in seconds
/// (passed to `ssh_client::check_server` as a `Duration`).
const SSH_CHECK_TIMEOUT_SECS: u64 = 15;
/// Resolve an SSH key path, expanding a leading `~/` to `$HOME`.
///
/// Paths without the `~/` prefix — and any path when `HOME` is unset — are
/// returned unchanged.
fn resolve_ssh_key_path(key_path: &Path) -> PathBuf {
    let as_text = key_path.to_string_lossy();
    match as_text.strip_prefix("~/") {
        Some(rest) => match std::env::var("HOME") {
            Ok(home) => PathBuf::from(home).join(rest),
            // No HOME to expand against: hand back the original path.
            Err(_) => key_path.to_path_buf(),
        },
        None => key_path.to_path_buf(),
    }
}
/// Run an SSH-based system check against the server from `deploy.server`.
///
/// Returns `None` (after printing a diagnostic to stderr) when the SSH key
/// cannot be located or read, or when the async runtime cannot be built;
/// otherwise returns the result of `ssh_client::check_server`.
fn try_ssh_server_check(server: &ServerConfig) -> Option<ssh_client::SystemCheckResult> {
    // Resolve the key path: the explicit config entry wins; otherwise fall
    // back to the first default key found under ~/.ssh.
    let ssh_key_path = if let Some(key) = &server.ssh_key {
        resolve_ssh_key_path(key)
    } else {
        let home = match std::env::var("HOME") {
            Ok(h) => PathBuf::from(h),
            Err(_) => {
                eprintln!(" Cannot determine home directory for SSH key lookup");
                return None;
            }
        };
        let defaults = [
            home.join(".ssh/id_ed25519"),
            home.join(".ssh/id_rsa"),
        ];
        match defaults.iter().find(|p| p.exists()) {
            Some(found) => found.clone(),
            None => {
                eprintln!(" No SSH key specified and no default key found (~/.ssh/id_ed25519 or ~/.ssh/id_rsa)");
                return None;
            }
        }
    };
    let key_content = match std::fs::read_to_string(&ssh_key_path) {
        Ok(content) => content,
        Err(e) => {
            eprintln!(" Cannot read SSH key {}: {}", ssh_key_path.display(), e);
            return None;
        }
    };
    // check_server is async; drive it on a throwaway current-thread runtime.
    let runtime = match tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
    {
        Ok(rt) => rt,
        Err(e) => {
            eprintln!(" Failed to initialize async runtime for SSH check: {}", e);
            return None;
        }
    };
    Some(runtime.block_on(ssh_client::check_server(
        &server.host,
        server.port,
        &server.user,
        &key_content,
        Duration::from_secs(SSH_CHECK_TIMEOUT_SECS),
    )))
}
/// Print a helpful message when the existing server is not reachable,
/// suggesting how to fix or proceed with a new cloud server.
///
/// Output goes to stderr as a boxed diagnostic: server coordinates, the
/// underlying SSH error (when present), a ready-to-copy ssh command, and the
/// alternative of removing the `server` section to provision a fresh cloud
/// server.
fn print_server_unreachable_hint(server: &ServerConfig, check: &ssh_client::SystemCheckResult) {
    eprintln!();
    eprintln!(" ╭─ Existing server check failed ──────────────────────────────────╮");
    eprintln!(" │ Host: {}:{}", server.host, server.port);
    eprintln!(" │ User: {}", server.user);
    // The error line is shown only when the check captured one.
    if let Some(ref err) = check.error {
        eprintln!(" │ Error: {}", err);
    }
    eprintln!(" ├─────────────────────────────────────────────────────────────────┤");
    eprintln!(" │ To deploy to this server, fix the connection issue and retry: │");
    eprintln!(" │ │");
    // Include -i only when a specific key is configured.
    if let Some(ref key) = server.ssh_key {
        eprintln!(" │ ssh -i {} -p {} {}@{}", key.display(), server.port, server.user, server.host);
    } else {
        eprintln!(" │ ssh -p {} {}@{}", server.port, server.user, server.host);
    }
    eprintln!(" │ │");
    eprintln!(" │ Or, to provision a new cloud server instead, remove the │");
    eprintln!(" │ 'server' section from stacker.yml and re-run: │");
    eprintln!(" │ │");
    eprintln!(" │ stacker deploy --target cloud │");
    eprintln!(" ╰─────────────────────────────────────────────────────────────────╯");
    eprintln!();
}
/// Post-process a generated compose file under `.stacker/` so its build
/// paths resolve relative to the project root.
///
/// Only acts on compose files whose path contains the `.stacker` output
/// directory; other paths (or a missing file) are left untouched. Applies
/// two rewrites:
/// - drops the obsolete top-level `version:` key;
/// - for services whose dockerfile points into `.stacker/`, and for the
///   `app` service, rewrites a default build context of `.`/`./` to `..`
///   (the project root as seen from `.stacker/`); the app's dockerfile is
///   also redirected to `.stacker/Dockerfile` when it still names a default
///   location.
///
/// The file is rewritten on disk only when something actually changed.
fn normalize_generated_compose_paths(compose_path: &Path) -> Result<(), CliError> {
    // Only normalize compose files inside the .stacker output directory.
    let is_stacker_compose = compose_path
        .components()
        .any(|c| c.as_os_str() == OUTPUT_DIR);
    if !is_stacker_compose || !compose_path.exists() {
        return Ok(());
    }
    let raw = std::fs::read_to_string(compose_path)?;
    let mut doc: serde_yaml::Value = serde_yaml::from_str(&raw)
        .map_err(|e| CliError::ConfigValidation(format!("Failed to parse compose file: {e}")))?;
    // Track whether any rewrite happened so we only write back when needed.
    let mut changed = false;
    if let serde_yaml::Value::Mapping(ref mut root) = doc {
        // Remove obsolete compose version key.
        if root.remove(serde_yaml::Value::String("version".to_string())).is_some() {
            changed = true;
        }
        let services_key = serde_yaml::Value::String("services".to_string());
        if let Some(serde_yaml::Value::Mapping(services)) = root.get_mut(&services_key) {
            for (service_key, service_value) in services.iter_mut() {
                let service_name = service_key.as_str().unwrap_or("");
                let service_map = match service_value {
                    serde_yaml::Value::Mapping(m) => m,
                    _ => continue,
                };
                // Services without a mapping-style build section need no fix.
                let build_key = serde_yaml::Value::String("build".to_string());
                let build_val = match service_map.get_mut(&build_key) {
                    Some(v) => v,
                    None => continue,
                };
                let build_map = match build_val {
                    serde_yaml::Value::Mapping(m) => m,
                    _ => continue,
                };
                let context_key = serde_yaml::Value::String("context".to_string());
                let dockerfile_key = serde_yaml::Value::String("dockerfile".to_string());
                // A missing context defaults to "." per compose semantics.
                let current_context = build_map
                    .get(&context_key)
                    .and_then(|v| v.as_str())
                    .unwrap_or(".")
                    .to_string();
                let dockerfile = build_map
                    .get(&dockerfile_key)
                    .and_then(|v| v.as_str())
                    .map(|s| s.to_string());
                let dockerfile_points_to_stacker = dockerfile
                    .as_deref()
                    .map(|d| d.starts_with(".stacker/"))
                    .unwrap_or(false);
                // A dockerfile under .stacker/ with a "." context would make
                // docker resolve paths inside .stacker — point it at the root.
                if dockerfile_points_to_stacker && (current_context == "." || current_context == "./") {
                    build_map.insert(
                        context_key.clone(),
                        serde_yaml::Value::String("..".to_string()),
                    );
                    changed = true;
                }
                // The app service always builds from the project root, using
                // the generated .stacker/Dockerfile unless a custom one is set.
                if service_name == "app" && (current_context == "." || current_context == "./") {
                    build_map.insert(
                        context_key,
                        serde_yaml::Value::String("..".to_string()),
                    );
                    let dockerfile_needs_rewrite = match dockerfile.as_deref() {
                        None => true,
                        Some("Dockerfile") | Some("./Dockerfile") => true,
                        _ => false,
                    };
                    if dockerfile_needs_rewrite {
                        build_map.insert(
                            dockerfile_key,
                            serde_yaml::Value::String(".stacker/Dockerfile".to_string()),
                        );
                    }
                    changed = true;
                }
            }
        }
    }
    if changed {
        let updated = serde_yaml::to_string(&doc)
            .map_err(|e| CliError::ConfigValidation(format!("Failed to serialize compose file: {e}")))?;
        std::fs::write(compose_path, updated)?;
        eprintln!(" Normalized {}/docker-compose.yml paths", OUTPUT_DIR);
    }
    Ok(())
}
/// Describe where the `app` service is built from, per an existing compose
/// file.
///
/// Returns `"context=<abs>, dockerfile=<abs>"` with both paths resolved
/// against the compose file's directory, or `None` when the file cannot be
/// read/parsed or the `services.app.build` section is absent or malformed.
/// Handles both the string form (`build: <context>`, implying a `Dockerfile`
/// inside the context) and the mapping form (`context` defaulting to `.`,
/// `dockerfile` defaulting to `Dockerfile`, resolved against the context).
fn compose_app_build_source(compose_path: &Path) -> Option<String> {
    let text = std::fs::read_to_string(compose_path).ok()?;
    let doc: serde_yaml::Value = serde_yaml::from_str(&text).ok()?;
    let compose_dir = compose_path.parent().unwrap_or_else(|| Path::new("."));
    // Resolve a possibly-relative path string against a base directory.
    let absolutize = |base: &Path, raw: &str| -> PathBuf {
        let p = PathBuf::from(raw);
        if p.is_absolute() {
            p
        } else {
            base.join(p)
        }
    };
    let root = match doc {
        serde_yaml::Value::Mapping(m) => m,
        _ => return None,
    };
    let services = match root.get(&serde_yaml::Value::String("services".to_string())) {
        Some(serde_yaml::Value::Mapping(m)) => m,
        _ => return None,
    };
    let app = match services.get(&serde_yaml::Value::String("app".to_string())) {
        Some(serde_yaml::Value::Mapping(m)) => m,
        _ => return None,
    };
    let build = app.get(&serde_yaml::Value::String("build".to_string()))?;
    match build {
        // Short form: `build: <context>` — dockerfile lives in the context.
        serde_yaml::Value::String(context_str) => {
            let context_abs = absolutize(compose_dir, context_str);
            let dockerfile_abs = context_abs.join("Dockerfile");
            Some(format!(
                "context={}, dockerfile={}",
                context_abs.display(),
                dockerfile_abs.display()
            ))
        }
        // Long form: explicit context/dockerfile keys with compose defaults.
        serde_yaml::Value::Mapping(build_map) => {
            let fetch = |key: &str| {
                build_map
                    .get(&serde_yaml::Value::String(key.to_string()))
                    .and_then(|v| v.as_str())
            };
            let context_abs = absolutize(compose_dir, fetch("context").unwrap_or("."));
            let dockerfile_abs = absolutize(&context_abs, fetch("dockerfile").unwrap_or("Dockerfile"));
            Some(format!(
                "context={}, dockerfile={}",
                context_abs.display(),
                dockerfile_abs.display()
            ))
        }
        _ => None,
    }
}
fn build_troubleshoot_error_log(project_dir: &Path, reason: &str) -> String {
let dockerfile_path = project_dir.join(OUTPUT_DIR).join("Dockerfile");
let compose_path = project_dir.join(OUTPUT_DIR).join("docker-compose.yml");
let dockerfile = std::fs::read_to_string(&dockerfile_path).unwrap_or_default();
let compose = std::fs::read_to_string(&compose_path).unwrap_or_default();
let dockerfile_snippet = if dockerfile.is_empty() {
"(not found)".to_string()
} else {
dockerfile.chars().take(4000).collect()
};
let compose_snippet = if compose.is_empty() {
"(not found)".to_string()
} else {
compose.chars().take(4000).collect()
};
format!(
"Deploy error:\n{}\n\nGenerated Dockerfile (.stacker/Dockerfile):\n{}\n\nGenerated Compose (.stacker/docker-compose.yml):\n{}",
reason, dockerfile_snippet, compose_snippet
)
}
/// Print troubleshooting help to stderr after a failed deploy.
///
/// Only `CliError::DeployFailed` errors get help; any other error returns
/// silently. When an AI provider is configured and enabled, the failure
/// (plus generated Dockerfile/compose snippets) is sent for analysis —
/// streamed for Ollama, one-shot for other providers. Whenever AI is
/// disabled, misconfigured, or errors out, the static hints from
/// `fallback_troubleshooting_hints` are printed instead.
fn print_ai_deploy_help(project_dir: &Path, config_file: Option<&str>, err: &CliError) {
    // Only deploy failures carry a reason we can analyze.
    let reason = match err {
        CliError::DeployFailed { reason, .. } => reason,
        _ => return,
    };
    eprintln!("\nTroubleshooting help:");
    let ai_config = match resolve_ai_from_env_or_config(project_dir, config_file) {
        Ok(cfg) => cfg,
        Err(load_err) => {
            // Config unreadable: fall back to static hints.
            eprintln!(" Could not load AI config for troubleshooting: {}", load_err);
            for hint in fallback_troubleshooting_hints(reason) {
                eprintln!(" - {}", hint);
            }
            eprintln!(" Tip: enable AI with stacker init --with-ai or set STACKER_AI_PROVIDER=ollama");
            return;
        }
    };
    if !ai_config.enabled {
        eprintln!(" AI troubleshooting disabled (ai.enabled=false).");
        for hint in fallback_troubleshooting_hints(reason) {
            eprintln!(" - {}", hint);
        }
        eprintln!(" Tip: enable AI in stacker.yml if you want AI troubleshooting suggestions");
        return;
    }
    // Build the prompt from the failure plus the generated artifacts.
    let error_log = build_troubleshoot_error_log(project_dir, reason);
    let ctx = PromptContext {
        project_type: None,
        files: vec![".stacker/Dockerfile".to_string(), ".stacker/docker-compose.yml".to_string()],
        error_log: Some(error_log),
        current_config: None,
    };
    let (system, prompt) = build_prompt(AiTask::Troubleshoot, &ctx);
    // Ollama gets the streaming path (output is printed as it arrives).
    if ai_config.provider == AiProviderType::Ollama {
        eprintln!(" AI suggestion (streaming from Ollama):");
        match ollama_complete_streaming(&ai_config, &prompt, &system) {
            Ok(answer) => {
                if answer.trim().is_empty() {
                    eprintln!(" (empty AI response)");
                }
                eprintln!();
            }
            Err(ai_err) => {
                eprintln!(" AI troubleshooting unavailable: {}", ai_err);
                for hint in fallback_troubleshooting_hints(reason) {
                    eprintln!(" - {}", hint);
                }
                eprintln!(" Tip: set STACKER_AI_PROVIDER=ollama and ensure Ollama is running");
            }
        }
        return;
    }
    // Non-streaming providers: single completion, trimmed to 20 lines.
    eprintln!(" AI request in progress...");
    match create_provider(&ai_config).and_then(|provider| provider.complete(&prompt, &system)) {
        Ok(answer) => {
            eprintln!(" AI suggestion:");
            for line in answer.lines().take(20) {
                eprintln!(" {}", line);
            }
        }
        Err(ai_err) => {
            eprintln!(" AI troubleshooting unavailable: {}", ai_err);
            for hint in fallback_troubleshooting_hints(reason) {
                eprintln!(" - {}", hint);
            }
            eprintln!(" Tip: set STACKER_AI_PROVIDER=ollama and ensure Ollama is running");
        }
    }
}
/// Translate a stored provider code (as kept in `CloudInfo.provider`) into
/// a `CloudProvider`.
///
/// Both abbreviated codes ("htz", "do", "aws", "lo", "vu") and full names
/// ("hetzner", "digitalocean", "aws", "linode", "vultr") are accepted,
/// case-insensitively; unknown codes yield `None`.
fn cloud_provider_from_code(code: &str) -> Option<CloudProvider> {
    let normalized = code.to_lowercase();
    let provider = match normalized.as_str() {
        "htz" | "hetzner" => CloudProvider::Hetzner,
        "do" | "digitalocean" => CloudProvider::Digitalocean,
        "aws" => CloudProvider::Aws,
        "lo" | "linode" => CloudProvider::Linode,
        "vu" | "vultr" => CloudProvider::Vultr,
        _ => return None,
    };
    Some(provider)
}
/// Interactively prompt the user to select a saved cloud credential when
/// no `deploy.cloud` section is present in stacker.yml.
///
/// - Fetches the list of saved clouds from the Stacker server.
/// - Presents an interactive `Select` menu with each cloud plus a
///   "Connect a new cloud provider" option at the end.
/// - Returns:
///   - `Ok(Some(cloud_info))` when the user picks an existing credential.
///   - `Ok(None)` when the user picks "Connect a new cloud provider".
///   - `Err(...)` on I/O or network errors (and `CloudProviderMissing` when
///     no credentials are saved at all).
fn prompt_select_cloud(
    access_token: &str,
) -> Result<Option<stacker_client::CloudInfo>, CliError> {
    let base_url = crate::cli::install_runner::normalize_stacker_server_url(
        stacker_client::DEFAULT_STACKER_URL,
    );
    // list_clouds is async; drive it with a throwaway current-thread runtime.
    let rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .map_err(|e| CliError::ConfigValidation(format!("Failed to create async runtime: {}", e)))?;
    let clouds = rt.block_on(async {
        let client = StackerClient::new(&base_url, access_token);
        client.list_clouds().await
    })?;
    const CONNECT_NEW: &str = "→ Connect a new cloud provider";
    // Nothing saved: explain how to register credentials, then bail out.
    if clouds.is_empty() {
        eprintln!();
        eprintln!(" No saved cloud credentials found.");
        eprintln!(" To add cloud credentials, export your provider token and redeploy:");
        eprintln!(" HCLOUD_TOKEN=<token> stacker deploy --target cloud # Hetzner");
        eprintln!(" DO_API_TOKEN=<token> stacker deploy --target cloud # DigitalOcean");
        eprintln!(" AWS_ACCESS_KEY_ID=<key> AWS_SECRET_ACCESS_KEY=<secret> stacker deploy --target cloud # AWS");
        eprintln!();
        return Err(CliError::CloudProviderMissing);
    }
    // Column widths for the interactive cloud selection menu.
    const CLOUD_ID_WIDTH: usize = 6;
    const CLOUD_NAME_WIDTH: usize = 24;
    let mut items: Vec<String> = clouds
        .iter()
        .map(|c| format!("{:<width_id$} {:<width_name$} ({})", c.id, c.name, c.provider,
            width_id = CLOUD_ID_WIDTH, width_name = CLOUD_NAME_WIDTH))
        .collect();
    // The "connect new" entry is appended last, at index == clouds.len().
    items.push(CONNECT_NEW.to_string());
    eprintln!();
    eprintln!(" No cloud provider configured in stacker.yml.");
    eprintln!(" Select a saved cloud credential to use for this deployment:");
    eprintln!();
    let selection = dialoguer::Select::new()
        .with_prompt("Cloud credential")
        .items(&items)
        .default(0)
        .interact()
        .map_err(|e| CliError::ConfigValidation(format!("Selection error: {}", e)))?;
    if selection == clouds.len() {
        // User chose "Connect a new cloud provider"
        return Ok(None);
    }
    Ok(Some(
        clouds
            .into_iter()
            .nth(selection)
            .expect("selection index should be within bounds of clouds vector"),
    ))
}
/// `stacker deploy [--target local|cloud|server] [--file stacker.yml] [--dry-run] [--force-rebuild]`
/// `stacker deploy --project=myapp --target cloud --key devops --server bastion`
///
/// Generates Dockerfile + docker-compose from stacker.yml, then
/// deploys using the appropriate strategy (local, cloud, or server).
///
/// For remote cloud deploys, the CLI now goes through the Stacker server API
/// instead of calling User Service directly:
/// 1. Resolves (or auto-creates) the project on the Stacker server
/// 2. Looks up saved cloud credentials by provider (or passes env-var creds)
/// 3. Looks up saved server by name (optional)
/// 4. Calls `POST /project/{id}/deploy[/{cloud_id}]`
pub struct DeployCommand {
    /// Deploy target override (--target); `None` uses the config's target.
    pub target: Option<String>,
    /// Config file override (--file); `None` uses the default stacker.yml.
    pub file: Option<String>,
    /// Generate artifacts without deploying (--dry-run).
    pub dry_run: bool,
    /// Force a rebuild of images even when unchanged (--force-rebuild).
    pub force_rebuild: bool,
    /// Override project name (--project flag)
    pub project_name: Option<String>,
    /// Override cloud key name (--key flag)
    pub key_name: Option<String>,
    /// Override cloud key by ID (--key-id flag)
    pub key_id: Option<i32>,
    /// Override server name (--server flag)
    pub server_name: Option<String>,
    /// Watch deployment progress until complete (--watch / --no-watch).
    /// `None` means "auto" (watch for cloud, health-check for local).
    pub watch: Option<bool>,
    /// Persist server details into stacker.yml after deploy (--lock).
    pub lock: bool,
    /// Skip smart server pre-check and lockfile hints; force fresh cloud provision (--force-new).
    pub force_new: bool,
}
impl DeployCommand {
    /// Create a deploy command with only the core flags set; every remote
    /// override field starts out unset and can be layered on through the
    /// `with_*` builder methods below.
    pub fn new(
        target: Option<String>,
        file: Option<String>,
        dry_run: bool,
        force_rebuild: bool,
    ) -> Self {
        Self {
            target,
            file,
            dry_run,
            force_rebuild,
            project_name: None,
            key_name: None,
            key_id: None,
            server_name: None,
            watch: None,
            lock: false,
            force_new: false,
        }
    }
    /// Builder method to set remote override flags from CLI args
    /// (--project, --key, --server).
    pub fn with_remote_overrides(
        mut self,
        project: Option<String>,
        key: Option<String>,
        server: Option<String>,
    ) -> Self {
        self.project_name = project;
        self.key_name = key;
        self.server_name = server;
        self
    }
    /// Builder method to set cloud key ID from CLI `--key-id` flag.
    pub fn with_key_id(mut self, key_id: Option<i32>) -> Self {
        self.key_id = key_id;
        self
    }
    /// Builder method to set watch behaviour.
    /// `--no-watch` forces watch off and wins over `--watch`, which forces
    /// it on. Neither flag leaves the field untouched → auto
    /// (cloud=watch, local=health-check).
    pub fn with_watch(mut self, watch: bool, no_watch: bool) -> Self {
        match (no_watch, watch) {
            (true, _) => self.watch = Some(false),
            (false, true) => self.watch = Some(true),
            // Neither flag: keep the existing value (auto).
            (false, false) => {}
        }
        self
    }
    /// Builder method to set lock behaviour (--lock flag).
    pub fn with_lock(mut self, lock: bool) -> Self {
        self.lock = lock;
        self
    }
    /// Builder method to set force-new behaviour (--force-new flag).
    pub fn with_force_new(mut self, force_new: bool) -> Self {
        self.force_new = force_new;
        self
    }
}
/// Parse a deploy target string into `DeployTarget`.
///
/// Matching is case-insensitive; the string is round-tripped through serde
/// so accepted spellings stay in sync with the enum's serialized form.
/// Unknown values produce a `CliError::ConfigValidation` naming the valid
/// targets.
fn parse_deploy_target(s: &str) -> Result<DeployTarget, CliError> {
    // Quote the lower-cased value so it deserializes as a JSON string.
    let quoted = format!("\"{}\"", s.to_lowercase());
    match serde_json::from_str::<DeployTarget>(&quoted) {
        Ok(target) => Ok(target),
        Err(_) => Err(CliError::ConfigValidation(format!(
            "Unknown deploy target '{}'. Valid targets: local, cloud, server",
            s
        ))),
    }
}
/// Override values from CLI flags for remote cloud deploys.
///
/// All fields default to `None` (no override); populated fields take
/// precedence over the corresponding stacker.yml settings.
#[derive(Debug, Clone, Default)]
pub struct RemoteDeployOverrides {
    /// Project name override (--project).
    pub project_name: Option<String>,
    /// Cloud credential name override (--key).
    pub key_name: Option<String>,
    /// Cloud credential ID override (--key-id).
    pub key_id: Option<i32>,
    /// Saved server name override (--server).
    pub server_name: Option<String>,
}
/// Core deploy logic, extracted for testability.
///
/// Takes injectable `CommandExecutor` so tests can mock shell calls.
pub fn run_deploy(
project_dir: &Path,
config_file: Option<&str>,
target_override: Option<&str>,
dry_run: bool,
force_rebuild: bool,
force_new: bool,
executor: &dyn CommandExecutor,
remote_overrides: &RemoteDeployOverrides,
) -> Result<DeployResult, CliError> {
// 1. Load config
let config_path = match config_file {
Some(f) => project_dir.join(f),
None => project_dir.join(DEFAULT_CONFIG_FILE),
};
let mut config = StackerConfig::from_file(&config_path)?;
ensure_env_file_if_needed(&config, project_dir)?;
// 2. Resolve deploy target (flag > config)
let mut deploy_target = match target_override {
Some(t) => parse_deploy_target(t)?,
None => config.deploy.target,
};
// 2b. Server pre-check: when target is Cloud but deploy.server section
// is defined with a host, try SSH connectivity first.
// If the server is reachable, automatically switch to Server target.
// If not, show diagnostics and abort so the user can fix or remove the section.
// Skipped when --force-new is set (user explicitly wants a fresh cloud provision).
// When a lockfile exists, auto-inject the server name so the API reuses the server.
let mut lock_server_name: Option<String> = None;
if deploy_target == DeployTarget::Cloud && !force_new {
if let Some(ref server_cfg) = config.deploy.server {
eprintln!(" Found deploy.server section (host={}). Checking SSH connectivity...", server_cfg.host);
match try_ssh_server_check(server_cfg) {
Some(check) if check.connected && check.authenticated => {
eprintln!(" ✓ Server {} is reachable ({})", server_cfg.host, check.summary());
if !check.docker_installed {
eprintln!(" ⚠ Docker is NOT installed on the server.");
eprintln!(" Install Docker first: ssh {}@{} 'curl -fsSL https://get.docker.com | sh'",
server_cfg.user, server_cfg.host);
return Err(CliError::DeployFailed {
target: DeployTarget::Server,
reason: format!(
"Server {} is reachable but Docker is not installed. \
Install Docker and retry, or remove the 'server' section from stacker.yml \
to provision a new cloud server.",
server_cfg.host
),
});
}
eprintln!(" Switching deploy target from 'cloud' → 'server' (using existing server)");
deploy_target = DeployTarget::Server;
}
Some(check) => {
// Server defined but not reachable — abort with helpful hints
print_server_unreachable_hint(server_cfg, &check);
return Err(CliError::DeployFailed {
target: DeployTarget::Cloud,
reason: format!(
"deploy.server section defines host {} but the server is not reachable: {}. \
Fix the connection or remove the 'server' section to provision a new cloud server.",
server_cfg.host,
check.error.as_deref().unwrap_or("unknown error")
),
});
}
None => {
// Could not perform SSH check (missing key, etc.) — warn and abort
eprintln!(" ⚠ Could not verify server connectivity (see above).");
eprintln!(" Remove the 'server' section from stacker.yml to provision a new cloud server,");
eprintln!(" or fix the SSH key configuration and retry.");
return Err(CliError::DeployFailed {
target: DeployTarget::Cloud,
reason: format!(
"deploy.server section defines host {} but SSH connectivity check could not be performed. \
Fix the SSH key or remove the 'server' section to provision a new cloud server.",
server_cfg.host
),
});
}
}
} else if DeploymentLock::exists(project_dir) {
// No deploy.server in config, but a lockfile exists from a prior deploy.
// Auto-inject the server name so the cloud deploy API reuses the same server.
if let Ok(Some(lock)) = DeploymentLock::load(project_dir) {
if let Some(ref name) = lock.server_name {
eprintln!(" ℹ Found previous deployment (server='{}') — reusing server", name);
eprintln!(" To provision a new server instead: stacker deploy --force-new");
lock_server_name = Some(name.clone());
} else if let Some(ref ip) = lock.server_ip {
if ip != "127.0.0.1" {
eprintln!(" ℹ Found previous deployment to {} (from .stacker/deployment.lock)", ip);
eprintln!(" Server name unknown — cannot auto-reuse. Run: stacker config lock");
eprintln!(" To provision a new server instead: stacker deploy --force-new");
}
}
}
}
}
// 3. Cloud/server prerequisites — verify login and keep credentials for later use.
let cloud_creds = if deploy_target == DeployTarget::Cloud {
let cred_manager = CredentialsManager::with_default_store();
Some(cred_manager.require_valid_token("cloud deploy")?)
} else {
None
};
// 3b. If cloud target but no cloud section in stacker.yml, prompt to select a saved credential.
if deploy_target == DeployTarget::Cloud && config.deploy.cloud.is_none() {
let access_token = &cloud_creds
.as_ref()
.expect("cloud_creds should be set when deploy_target is Cloud (verified in step 3)")
.access_token;
match prompt_select_cloud(access_token)? {
Some(cloud_info) => {
// Map the provider code to a CloudProvider enum value.
let provider = cloud_provider_from_code(&cloud_info.provider)
.ok_or_else(|| CliError::ConfigValidation(format!(
"Unrecognised cloud provider '{}' for credential '{}'. \
Supported providers: hetzner (htz), digitalocean (do), aws, linode (lo), vultr (vu).",
cloud_info.provider, cloud_info.name
)))?;
eprintln!(
" Selected cloud credential: {} (id={}, provider={})",
cloud_info.name, cloud_info.id, cloud_info.provider
);
// Apply the selected cloud to the in-memory config.
config.deploy.target = DeployTarget::Cloud;
config.deploy.cloud = Some(CloudConfig {
provider,
orchestrator: CloudOrchestrator::Remote,
region: None,
size: None,
install_image: None,
remote_payload_file: None,
ssh_key: None,
key: Some(cloud_info.name.clone()),
server: None,
});
// Persist the selection to stacker.yml so subsequent deploys
// do not prompt again.
if config_path.exists() {
let yaml = serde_yaml::to_string(&config).map_err(|e| {
CliError::ConfigValidation(format!(
"Failed to serialize updated config: {}",
e
))
})?;
std::fs::write(&config_path, yaml)?;
eprintln!(
" ✓ Updated {} with deploy.cloud.key={}",
config_path.display(),
cloud_info.name
);
}
}
None => {
// User chose "Connect a new cloud provider"
eprintln!();
eprintln!(" To connect a new cloud provider, export your API token and redeploy:");
eprintln!(" Hetzner: HCLOUD_TOKEN=<token> stacker deploy --target cloud");
eprintln!(" DigitalOcean: DO_API_TOKEN=<token> stacker deploy --target cloud");
eprintln!(" Linode: LINODE_TOKEN=<token> stacker deploy --target cloud");
eprintln!(" Vultr: VULTR_API_KEY=<key> stacker deploy --target cloud");
eprintln!(" AWS: AWS_ACCESS_KEY_ID=<key> AWS_SECRET_ACCESS_KEY=<secret> stacker deploy --target cloud");
eprintln!();
eprintln!(" Or configure manually with: stacker config setup cloud");
eprintln!();
return Err(CliError::CloudProviderMissing);