from oslo_log import log as logging
from oslo_utils import encodeutils
+from oslo_utils import uuidutils

from magnum.api import utils as api_utils
from magnum.common import clients
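
For context, the new import is used throughout the added helpers to tell resource names and UUIDs apart when building Neutron filters. A standalone sketch (not part of the patch) of how `uuidutils.is_uuid_like` behaves:

```python
from oslo_utils import uuidutils

# UUIDs are recognised structurally; arbitrary names are not.
print(uuidutils.is_uuid_like("4b8060a8-6e0d-4fa4-98a1-bf4f952c5e1b"))  # True
print(uuidutils.is_uuid_like("my-fixed-network"))                      # False
```
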
@@ -456,6 +457,145 @@ def _get_app_cred_name(self, cluster):
            self._get_chart_release_name(cluster), "cloud-credentials"
        )

+    def _get_network(self, context, network, external):
+        # NOTE(mkjpryor) inspired by magnum.common.neutron
+
+        n_client = clients.OpenStackClients(context).neutron()
+        filters = {"router:external": external}
+        if network:
+            if uuidutils.is_uuid_like(network):
+                filters["id"] = network
+            else:
+                filters["name"] = network
+        networks = n_client.list_networks(**filters).get("networks", [])
+
+        if len(networks) > 1:
+            if network:
+                raise exception.Conflict(
+                    f"Multiple networks exist with name '{network}'. "
+                    "Please use the network ID instead."
+                )
+            elif external:
+                raise exception.Conflict(
+                    "Multiple external networks found. "
+                    "Please specify one using the network ID."
+                )
+            else:
+                raise exception.Conflict(
+                    "Multiple networks found. "
+                    "Please specify one using the network ID."
+                )
+
+        return next(iter(networks), None)
+
+    def _get_subnet(self, context, subnet):
+        # NOTE(mkjpryor) inspired by magnum.common.neutron
+
+        n_client = clients.OpenStackClients(context).neutron()
+        filters = {}
+        if uuidutils.is_uuid_like(subnet):
+            filters["id"] = subnet
+        else:
+            filters["name"] = subnet
+        subnets = n_client.list_subnets(**filters).get("subnets", [])
+
+        if len(subnets) > 1:
+            raise exception.Conflict(
+                f"Multiple subnets exist with name '{subnet}'. "
+                "Please use the subnet ID instead."
+            )
+
+        return next(iter(subnets), None)
+
+    def _get_external_network_id(self, context, cluster):
+        # NOTE(mkjpryor)
+        # Even if no external network is specified, we still run the search
+        # without an ID or name filter
+        # This will make sure that we correctly identify _an_ external network
+        # and will also fail if there is more than one
+        # This is the same as CAPO but will explicitly report failures
+        external_network = self._get_network(
+            context,
+            cluster.cluster_template.external_network_id,
+            True
+        )
+        if external_network:
+            return external_network["id"]
+        else:
+            raise exception.ExternalNetworkNotFound(
+                network=cluster.cluster_template.external_network_id
+            )
+
+    def _get_cluster_fixed_network(self, context, cluster):
+        network = self._get_network(
+            context,
+            cluster.fixed_network,
+            False
+        )
+        if network:
+            return network
+        else:
+            raise exception.FixedNetworkNotFound(
+                network=cluster.fixed_network
+            )
+
+    def _get_cluster_fixed_subnet(self, context, cluster, network):
+        subnet = self._get_subnet(context, cluster.fixed_subnet)
+        # network is None when only a fixed subnet was requested, in which
+        # case any owning network is acceptable
+        if subnet and (not network or subnet["network_id"] == network["id"]):
+            return subnet
+        elif subnet:
+            raise exception.Conflict(
+                f"Subnet {subnet['id']} does not "
+                f"belong to network {network['id']}."
+            )
+        else:
+            raise exception.FixedSubnetNotFound(
+                subnet=cluster.fixed_subnet
+            )
+
+    def _get_cluster_network(self, context, cluster):
+        network = None
+        subnet = None
+
+        if cluster.fixed_network:
+            network = self._get_cluster_fixed_network(
+                context,
+                cluster
+            )
+
+        if cluster.fixed_subnet:
+            subnet = self._get_cluster_fixed_subnet(
+                context,
+                cluster,
+                network
+            )
+
+        if network and subnet:
+            return (network["id"], subnet["id"])
+        elif network:
+            subnets = network.get("subnets", [])
+            if len(subnets) > 1:
+                raise exception.Conflict(
+                    f"Network {network['id']} has multiple subnets. "
+                    "Please specify one using the subnet ID."
+                )
+            if len(subnets) < 1:
+                raise exception.Conflict(
+                    f"Network {network['id']} has no subnets."
+                )
+            return (network["id"], subnets[0])
+        elif subnet:
+            return (subnet["network_id"], subnet["id"])
+        else:
+            return (None, None)
+
+    def _get_dns_nameservers(self, cluster):
+        dns_nameserver = cluster.cluster_template.dns_nameserver
+        if dns_nameserver:
+            return dns_nameserver.split(",")
+        else:
+            return None
+

    def _get_monitoring_enabled(self, cluster):
        mon_label = self._label(cluster, "monitoring_enabled", "")
        # NOTE(mkjpryor) default off, like heat driver,
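
To make the resolution rules in `_get_cluster_network` easier to follow, here is a standalone sketch of the four `(network_id, subnet_id)` outcomes. It is not part of the patch; the dicts only mimic the shape of Neutron responses and all IDs are invented:

```python
# Invented data shaped like Neutron network/subnet API responses.
net = {"id": "net-1", "subnets": ["subnet-1"]}
sub = {"id": "subnet-1", "network_id": "net-1"}

def resolve(network, subnet):
    # Mirrors the final if/elif chain of _get_cluster_network.
    if network and subnet:
        return (network["id"], subnet["id"])
    elif network:
        subnets = network.get("subnets", [])
        if len(subnets) != 1:
            raise ValueError("need exactly one subnet on the network")
        return (network["id"], subnets[0])
    elif subnet:
        return (subnet["network_id"], subnet["id"])
    return (None, None)

assert resolve(net, sub) == ("net-1", "subnet-1")   # both specified
assert resolve(net, None) == ("net-1", "subnet-1")  # single-subnet network
assert resolve(None, sub) == ("net-1", "subnet-1")  # subnet implies network
assert resolve(None, None) == (None, None)          # chart falls back to nodeCidr
```
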
@@ -470,32 +610,65 @@ def _get_kube_dash_enabled(self, cluster):
    def _update_helm_release(self, context, cluster, nodegroups=None):
        if nodegroups is None:
            nodegroups = cluster.nodegroups
-        cluster_template = cluster.cluster_template
+
        image_id, kube_version = self._get_image_details(
-            context, cluster_template.image_id
+            context,
+            cluster.cluster_template.image_id
        )
+
+        network_id, subnet_id = self._get_cluster_network(context, cluster)
+
        values = {
            "kubernetesVersion": kube_version,
            "machineImageId": image_id,
+            "machineSSHKeyName": cluster.keypair or None,
            "cloudCredentialsSecretName": self._get_app_cred_name(cluster),
-            # TODO(johngarbutt): need to respect requested networks
-            "clusterNetworking": {
-                "internalNetwork": {
-                    "nodeCidr": self._label(
-                        cluster, "fixed_subnet_cidr", "10.0.0.0/24"
-                    ),
-                }
-            },
            "apiServer": {
                "enableLoadBalancer": True,
                "loadBalancerProvider": self._label(
-                    cluster, "octavia_provider", "amphora"
+                    cluster,
+                    "octavia_provider",
+                    "amphora"
                ),
            },
+            "clusterNetworking": {
+                "dnsNameservers": self._get_dns_nameservers(cluster),
+                "externalNetworkId": self._get_external_network_id(
+                    context,
+                    cluster
+                ),
+                "internalNetwork": {
+                    "networkFilter": (
+                        {"id": network_id}
+                        if network_id
+                        else None
+                    ),
+                    "subnetFilter": (
+                        {"id": subnet_id}
+                        if subnet_id
+                        else None
+                    ),
+                    # This is only used if a fixed network is not specified
+                    "nodeCidr": self._label(
+                        cluster,
+                        "fixed_subnet_cidr",
+                        "10.0.0.0/24"
+                    ),
+                },
+            },
            "controlPlane": {
                "machineFlavor": cluster.master_flavor_id,
                "machineCount": cluster.master_count,
            },
+            "nodeGroups": [
+                {
+                    "name": self._sanitised_name(ng.name),
+                    "machineFlavor": ng.flavor_id,
+                    "machineCount": ng.node_count,
+                }
+                for ng in nodegroups
+                if ng.role != NODE_GROUP_ROLE_CONTROLLER
+            ],
            "addons": {
                "monitoring": {
                    "enabled": self._get_monitoring_enabled(cluster)
@@ -507,32 +680,14 @@ def _update_helm_release(self, context, cluster, nodegroups=None):
                # remove the load balancer
                "ingress": {"enabled": False},
            },
-            "nodeGroups": [
-                {
-                    "name": self._sanitised_name(ng.name),
-                    "machineFlavor": ng.flavor_id,
-                    "machineCount": ng.node_count,
-                }
-                for ng in nodegroups
-                if ng.role != NODE_GROUP_ROLE_CONTROLLER
-            ],
        }

-        if cluster_template.dns_nameserver:
-            dns_nameservers = cluster_template.dns_nameserver.split(",")
-            values["clusterNetworking"]["dnsNameservers"] = dns_nameservers
-
-        if cluster.keypair:
-            values["machineSSHKeyName"] = cluster.keypair
-
-        chart_version = self._get_chart_version(cluster)
-
        self._helm_client.install_or_upgrade(
            self._get_chart_release_name(cluster),
            CONF.capi_driver.helm_chart_name,
            values,
            repo=CONF.capi_driver.helm_chart_repo,
-            version=chart_version,
+            version=self._get_chart_version(cluster),
            namespace=self._namespace(cluster),
        )
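
For reference, the `values` payload assembled by `_update_helm_release` ends up shaped roughly as below. This is a hand-written sketch with invented IDs and names, mirroring only the keys visible in this diff (addon keys elided between the hunks are left out); it is not an authoritative schema for the chart:

```python
values = {
    "kubernetesVersion": "v1.26.3",
    "machineImageId": "4b8060a8-6e0d-4fa4-98a1-bf4f952c5e1b",
    "machineSSHKeyName": "mykeypair",  # None when the cluster has no keypair
    "cloudCredentialsSecretName": "kube-abc123-cloud-credentials",
    "apiServer": {
        "enableLoadBalancer": True,
        "loadBalancerProvider": "amphora",
    },
    "clusterNetworking": {
        "dnsNameservers": ["8.8.8.8"],  # None when the template defines none
        "externalNetworkId": "ext-net-uuid",
        "internalNetwork": {
            "networkFilter": {"id": "net-uuid"},    # None without fixed_network
            "subnetFilter": {"id": "subnet-uuid"},  # None without fixed_subnet
            "nodeCidr": "10.0.0.0/24",  # used only when no filter is given
        },
    },
    "controlPlane": {"machineFlavor": "m1.large", "machineCount": 3},
    "nodeGroups": [
        {"name": "default-worker", "machineFlavor": "m1.medium",
         "machineCount": 2},
    ],
    "addons": {
        "monitoring": {"enabled": False},
        # ... other addon settings elided in the diff ...
        "ingress": {"enabled": False},
    },
}
```
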