@@ -2796,6 +2796,59 @@ def _get_num_peered(self, pgs):
                 num += 1
         return num
 
+    def _print_not_active_clean_pg(self, pgs):
+        """
+        Print the PGs that are not active+clean.
+        """
+        for pg in pgs:
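+            # a PG counts as healthy here only if its state string contains
+            # both 'active' and 'clean' and does not also contain 'stale'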
+            if not (pg['state'].count('active') and
+                    pg['state'].count('clean') and
+                    not pg['state'].count('stale')):
+                log.debug(
+                    "PG %s is not active+clean, but %s",
+                    pg['pgid'], pg['state']
+                )
+
+    def pg_all_active_clean(self):
+        """
+        Check if all PGs are active+clean.
+        :returns: True if all PGs are active+clean, False otherwise
+        """
+        pgs = self.get_pg_stats()
+        result = self._get_num_active_clean(pgs) == len(pgs)
+        if result:
+            log.debug("All PGs are active+clean")
+        else:
+            log.debug("Not all PGs are active+clean")
+            self._print_not_active_clean_pg(pgs)
+        return result
+
+    def _print_not_active_pg(self, pgs):
+        """
+        Print the PGs that are not active.
+        """
+        for pg in pgs:
+            if not (pg['state'].count('active')
+                    and not pg['state'].count('stale')):
+                log.debug(
+                    "PG %s is not active, but %s",
+                    pg['pgid'], pg['state']
+                )
+
+    def pg_all_active(self):
+        """
+        Check if all PGs are active.
+        :returns: True if all PGs are active, False otherwise
+        """
+        pgs = self.get_pg_stats()
+        result = self._get_num_active(pgs) == len(pgs)
+        if result:
+            log.debug("All PGs are active")
+        else:
+            log.debug("Not all PGs are active")
+            self._print_not_active_pg(pgs)
+        return result
+
     def is_clean(self):
         """
         True if all pgs are clean
@@ -3237,6 +3290,26 @@ def revive_mgr(self, mgr):
         self.make_admin_daemon_dir(remote)
         self.ctx.daemons.get_daemon('mgr', mgr, self.cluster).restart()
 
+    def get_crush_rule_id(self, crush_rule_name):
+        """
+        Get crush rule id by name
+        :returns: int -- crush rule id
+        """
+        out = self.raw_cluster_cmd('osd', 'crush', 'rule', 'dump', '--format=json')
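+        # the first line of output may be a human-readable status message
+        # rather than JSON, so skip it before parsing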
+        j = json.loads('\n'.join(out.split('\n')[1:]))
+        for rule in j:
+            if rule['rule_name'] == crush_rule_name:
+                return rule['rule_id']
+        assert False, 'rule %s not found' % crush_rule_name
+
+    def get_mon_dump_json(self):
+        """
+        mon dump --format=json converted to a python object
+        :returns: the python object
+        """
+        out = self.raw_cluster_cmd('mon', 'dump', '--format=json')
+        return json.loads('\n'.join(out.split('\n')[1:]))
+
     def get_mon_status(self, mon):
         """
         Extract all the monitor status information from the cluster
@@ -3340,6 +3413,23 @@ def get_service_task_status(self, service, status_key):
         self.log(task_status)
         return task_status
 
+    # Stretch mode related functions
+    def is_degraded_stretch_mode(self):
+        """
+        Return whether the cluster is in degraded stretch mode
+        """
+        try:
+            osdmap = self.get_osd_dump_json()
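+            # 'stretch_mode' may be absent from the osdmap (for example when
+            # stretch mode has never been enabled), so default to an empty dict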
+            stretch_mode = osdmap.get('stretch_mode', {})
+            degraded_stretch_mode = stretch_mode.get('degraded_stretch_mode', 0)
+            self.log("is_degraded_stretch_mode: {0}".format(degraded_stretch_mode))
+            return degraded_stretch_mode == 1
+        except (TypeError, AttributeError) as e:
+            # treat an unexpected osdmap structure as "not degraded", but log it
+            self.log("Error accessing degraded_stretch_mode: {0}".format(e))
+            return False
+
+
 def utility_task(name):
     """
     Generate ceph_manager subtask corresponding to ceph_manager