@@ -367,7 +367,7 @@ def mds_fail_restart(self, mds_id=None):
367367 """
368368 def _fail_restart (id_ ):
369369 self .mds_daemons [id_ ].stop ()
370- self .get_ceph_cmd_stdout ("mds" , "fail" , id_ )
370+ self .run_ceph_cmd ("mds" , "fail" , id_ )
371371 self .mds_daemons [id_ ].restart ()
372372
373373 self ._one_or_all (mds_id , _fail_restart )
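Note on the pattern above, repeated in every hunk below: each changed call site invoked `get_ceph_cmd_stdout()` purely for its side effect and threw the captured stdout away, so it now calls `run_ceph_cmd()` instead. A minimal sketch of the contract the two helpers are assumed to follow (the real helpers dispatch to a teuthology remote, not a local subprocess; the class name and bodies here are illustrative only):

```python
import subprocess

class CephCmdHelpersSketch:
    # Illustrative stand-ins for the qa framework helpers; the real ones
    # run "ceph" on a teuthology remote rather than via subprocess.
    def run_ceph_cmd(self, *args):
        # Run "ceph <args>" for its side effect; raise on nonzero exit,
        # do not capture stdout.
        subprocess.run(('ceph',) + args, check=True)

    def get_ceph_cmd_stdout(self, *args):
        # Run "ceph <args>" and return captured stdout, for callers that
        # actually parse the command's output.
        return subprocess.run(('ceph',) + args, check=True,
                              capture_output=True, text=True).stdout
```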
@@ -578,21 +578,21 @@ def reach_max_mds(self):
         assert(mds_map['in'] == list(range(0, mds_map['max_mds'])))
 
     def reset(self):
-        self.get_ceph_cmd_stdout("fs", "reset", str(self.name), '--yes-i-really-mean-it')
+        self.run_ceph_cmd("fs", "reset", str(self.name), '--yes-i-really-mean-it')
 
     def fail(self):
-        self.get_ceph_cmd_stdout("fs", "fail", str(self.name))
+        self.run_ceph_cmd("fs", "fail", str(self.name))
 
     def set_flag(self, var, *args):
         a = map(lambda x: str(x).lower(), args)
-        self.get_ceph_cmd_stdout("fs", "flag", "set", var, *a)
+        self.run_ceph_cmd("fs", "flag", "set", var, *a)
 
     def set_allow_multifs(self, yes=True):
         self.set_flag("enable_multiple", yes)
 
     def set_var(self, var, *args):
         a = map(lambda x: str(x).lower(), args)
-        self.get_ceph_cmd_stdout("fs", "set", self.name, var, *a)
+        self.run_ceph_cmd("fs", "set", self.name, var, *a)
 
     def set_down(self, down=True):
         self.set_var("down", str(down).lower())
@@ -620,7 +620,7 @@ def set_refuse_client_session(self, yes):
 
     def compat(self, *args):
         a = map(lambda x: str(x).lower(), args)
-        self.get_ceph_cmd_stdout("fs", "compat", self.name, *a)
+        self.run_ceph_cmd("fs", "compat", self.name, *a)
 
     def add_compat(self, *args):
         self.compat("add_compat", *args)
@@ -665,24 +665,23 @@ def create(self, recover=False, metadata_overlay=False):
         log.debug("Creating filesystem '{0}'".format(self.name))
 
         try:
-            self.get_ceph_cmd_stdout('osd', 'pool', 'create',
-                                     self.metadata_pool_name,
-                                     '--pg_num_min', str(self.pg_num_min))
-
-            self.get_ceph_cmd_stdout('osd', 'pool', 'create',
-                                     data_pool_name, str(self.pg_num),
-                                     '--pg_num_min', str(self.pg_num_min),
-                                     '--target_size_ratio',
-                                     str(self.target_size_ratio))
+            self.run_ceph_cmd('osd', 'pool', 'create', self.metadata_pool_name,
+                              '--pg_num_min', str(self.pg_num_min))
+
+            self.run_ceph_cmd('osd', 'pool', 'create', data_pool_name,
+                              str(self.pg_num),
+                              '--pg_num_min', str(self.pg_num_min),
+                              '--target_size_ratio',
+                              str(self.target_size_ratio))
         except CommandFailedError as e:
             if e.exitstatus == 22: # nautilus couldn't specify --pg_num_min option
-                self.get_ceph_cmd_stdout('osd', 'pool', 'create',
-                                         self.metadata_pool_name,
-                                         str(self.pg_num_min))
+                self.run_ceph_cmd('osd', 'pool', 'create',
+                                  self.metadata_pool_name,
+                                  str(self.pg_num_min))
 
-                self.get_ceph_cmd_stdout('osd', 'pool', 'create',
-                                         data_pool_name, str(self.pg_num),
-                                         str(self.pg_num_min))
+                self.run_ceph_cmd('osd', 'pool', 'create',
+                                  data_pool_name, str(self.pg_num),
+                                  str(self.pg_num_min))
             else:
                 raise
 
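The surviving `e.exitstatus == 22` branches are a back-compat fallback: Nautilus-era monitors reject `--pg_num_min` with EINVAL (22), so pool creation is retried without the flag. A hedged, standalone sketch of the same retry pattern (`CommandFailedError` is teuthology's; `create_pool_compat` is a hypothetical name for illustration):

```python
import errno

from teuthology.exceptions import CommandFailedError

def create_pool_compat(fs, pool_name, pg_num_min):
    # Hypothetical helper: try the modern invocation first, then fall
    # back for monitors that predate --pg_num_min support.
    try:
        fs.run_ceph_cmd('osd', 'pool', 'create', pool_name,
                        '--pg_num_min', str(pg_num_min))
    except CommandFailedError as e:
        if e.exitstatus != errno.EINVAL:  # EINVAL == 22
            raise
        fs.run_ceph_cmd('osd', 'pool', 'create', pool_name,
                        str(pg_num_min))
```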
@@ -691,31 +690,30 @@ def create(self, recover=False, metadata_overlay=False):
             args.append('--recover')
         if metadata_overlay:
             args.append('--allow-dangerous-metadata-overlay')
-        self.get_ceph_cmd_stdout(*args)
+        self.run_ceph_cmd(*args)
 
         if not recover:
             if self.ec_profile and 'disabled' not in self.ec_profile:
                 ec_data_pool_name = data_pool_name + "_ec"
                 log.debug("EC profile is %s", self.ec_profile)
                 cmd = ['osd', 'erasure-code-profile', 'set', ec_data_pool_name]
                 cmd.extend(self.ec_profile)
-                self.get_ceph_cmd_stdout(*cmd)
+                self.run_ceph_cmd(*cmd)
                 try:
-                    self.get_ceph_cmd_stdout(
+                    self.run_ceph_cmd(
                         'osd', 'pool', 'create', ec_data_pool_name,
                         'erasure', ec_data_pool_name,
                         '--pg_num_min', str(self.pg_num_min),
                         '--target_size_ratio', str(self.target_size_ratio_ec))
                 except CommandFailedError as e:
                     if e.exitstatus == 22: # nautilus couldn't specify --pg_num_min option
-                        self.get_ceph_cmd_stdout(
+                        self.run_ceph_cmd(
                             'osd', 'pool', 'create', ec_data_pool_name,
                             str(self.pg_num_min), 'erasure', ec_data_pool_name)
                     else:
                         raise
-                self.get_ceph_cmd_stdout(
-                    'osd', 'pool', 'set',
-                    ec_data_pool_name, 'allow_ec_overwrites', 'true')
+                self.run_ceph_cmd('osd', 'pool', 'set', ec_data_pool_name,
+                                  'allow_ec_overwrites', 'true')
                 self.add_data_pool(ec_data_pool_name, create=False)
                 self.check_pool_application(ec_data_pool_name)
 
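The EC branch above ends by setting `allow_ec_overwrites`, which an erasure-coded pool requires before CephFS can use it as a data pool. Condensed into one hypothetical helper (same steps, same order as the hunk):

```python
def make_ec_data_pool(fs, pool_name, profile_args):
    # Hypothetical condensation of the EC setup above: profile, pool,
    # overwrites, then attach -- overwrites must be enabled before
    # CephFS can write to an erasure-coded data pool.
    fs.run_ceph_cmd('osd', 'erasure-code-profile', 'set', pool_name,
                    *profile_args)
    fs.run_ceph_cmd('osd', 'pool', 'create', pool_name, 'erasure', pool_name)
    fs.run_ceph_cmd('osd', 'pool', 'set', pool_name,
                    'allow_ec_overwrites', 'true')
    fs.add_data_pool(pool_name, create=False)
```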
@@ -726,7 +724,8 @@ def create(self, recover=False, metadata_overlay=False):
 
         # Turn off spurious standby count warnings from modifying max_mds in tests.
         try:
-            self.get_ceph_cmd_stdout('fs', 'set', self.name, 'standby_count_wanted', '0')
+            self.run_ceph_cmd('fs', 'set', self.name, 'standby_count_wanted',
+                              '0')
         except CommandFailedError as e:
             if e.exitstatus == 22:
                 # standby_count_wanted not available prior to luminous (upgrade tests would fail otherwise)
@@ -761,9 +760,9 @@ def create(self, recover=False, metadata_overlay=False):
 
             for sv in range(0, subvols['create']):
                 sv_name = f'sv_{sv}'
-                self.get_ceph_cmd_stdout(
-                    'fs', 'subvolume', 'create', self.name, sv_name,
-                    self.fs_config.get('subvol_options', ''))
+                self.run_ceph_cmd('fs', 'subvolume', 'create', self.name,
+                                  sv_name,
+                                  self.fs_config.get('subvol_options', ''))
 
             if self.name not in self._ctx.created_subvols:
                 self._ctx.created_subvols[self.name] = []
@@ -904,15 +903,15 @@ def set_dir_layout(self, mount, path, layout):
     def add_data_pool(self, name, create=True):
         if create:
             try:
-                self.get_ceph_cmd_stdout('osd', 'pool', 'create', name,
-                                         '--pg_num_min', str(self.pg_num_min))
+                self.run_ceph_cmd('osd', 'pool', 'create', name,
+                                  '--pg_num_min', str(self.pg_num_min))
             except CommandFailedError as e:
                 if e.exitstatus == 22: # nautilus couldn't specify --pg_num_min option
-                    self.get_ceph_cmd_stdout('osd', 'pool', 'create', name,
-                                             str(self.pg_num_min))
+                    self.run_ceph_cmd('osd', 'pool', 'create', name,
+                                      str(self.pg_num_min))
                 else:
                     raise
-        self.get_ceph_cmd_stdout('fs', 'add_data_pool', self.name, name)
+        self.run_ceph_cmd('fs', 'add_data_pool', self.name, name)
         self.get_pool_names(refresh=True)
         for poolid, fs_name in self.data_pools.items():
             if name == fs_name:
@@ -1098,13 +1097,13 @@ def rank_signal(self, signal, rank=0, status=None):
         self.mds_signal(name, signal)
 
     def rank_freeze(self, yes, rank=0):
-        self.get_ceph_cmd_stdout("mds", "freeze", "{}:{}".format(self.id, rank), str(yes).lower())
+        self.run_ceph_cmd("mds", "freeze", "{}:{}".format(self.id, rank), str(yes).lower())
 
     def rank_repaired(self, rank):
-        self.get_ceph_cmd_stdout("mds", "repaired", "{}:{}".format(self.id, rank))
+        self.run_ceph_cmd("mds", "repaired", "{}:{}".format(self.id, rank))
 
     def rank_fail(self, rank=0):
-        self.get_ceph_cmd_stdout("mds", "fail", "{}:{}".format(self.id, rank))
+        self.run_ceph_cmd("mds", "fail", "{}:{}".format(self.id, rank))
 
     def rank_is_running(self, rank=0, status=None):
         name = self.get_rank(rank=rank, status=status)['name']
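The `rank_*` hunks follow the same rule as the rest of the commit: `mds freeze`, `mds repaired`, and `mds fail` are imperative commands whose output is never inspected. The stdout-capturing helper remains the right choice where output is parsed; a hedged example of that other case (`get_mds_rank_state` is a hypothetical name; the JSON layout matches `ceph fs dump --format=json` in recent releases):

```python
import json

def get_mds_rank_state(fs, rank=0):
    # Hypothetical example of the case this commit does NOT change:
    # here stdout is parsed, so get_ceph_cmd_stdout() is appropriate.
    dump = json.loads(fs.get_ceph_cmd_stdout('fs', 'dump', '--format=json'))
    for info in dump['filesystems'][0]['mdsmap']['info'].values():
        if info['rank'] == rank:
            return info['state']
    return None
```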