
Commit ff3f343

Include Notebook for Intermediary CESM Output Diagnostics (NCAR#340)
Include Cecile's spaghetti plot nb, pull utils out of nb, include intermediary outputs example
1 parent af0900c commit ff3f343

File tree

4 files changed: +817 −0 lines changed

Lines changed: 132 additions & 0 deletions
@@ -0,0 +1,132 @@
from __future__ import annotations

import xarray as xr
import numpy as np


def global_mean(variable):
    """
    Description: compute the global mean (weighted by cosine of latitude)
    of a variable
    Argument:
        variable: xarray DataArray to average
    Return:
        variable_mean: the weighted global mean of the variable
    """
    weights = np.cos(np.deg2rad(variable.lat))
    variable_weighted = variable.weighted(weights)
    variable_mean = variable_weighted.mean(("lon", "lat"))
    return variable_mean


def compute_ann_mean(filepath, case, var, lat1=None, lat2=None, lon1=None, lon2=None):
    """
    Description: Compute the annual mean within a specified latitude (lat1-lat2)
    and longitude (lon1-lon2) range of the variable "var".

    Arguments:
        filepath = path to the directory containing the case output
        case = casename
        var = variable name
        lat1 = starting latitude
        lat2 = ending latitude
        lon1 = starting longitude
        lon2 = ending longitude
    """
    filename = filepath + case + "/" + case + "." + var + ".nc"
    ds = xr.open_dataset(filename)

    # Select the subset of data within the specified latitude and longitude
    # ranges before calculating the annual mean
    ds_subset = ds.sel(lat=slice(lat1, lat2), lon=slice(lon1, lon2))

    var_ann = ds_subset[var].groupby("time.year").mean()
    var_lat_lon_ann = lat_lon_mean(var_ann, lat1, lat2, lon1, lon2)

    return var_lat_lon_ann


def compute_var_g_ann(filepath, case, var):
    """
    Compute the weighted annual global mean of
    a given variable.
    Arguments:
        filepath = path to the directory containing the case output
        case = casename
        var = variable name
    """
    filename = filepath + case + "/" + case + "." + var + ".nc"
    ds = xr.open_dataset(filename)
    return global_mean(ds[var].groupby("time.year").mean())


def lat_lon_mean(variable, lat1, lat2, lon1, lon2):
    """
    Description: Compute the mean (weighted by cosine of latitude) of a variable
    within a specified latitude (lat1-lat2) and longitude (lon1-lon2) range.

    Arguments:
        variable: xarray DataArray representing the variable to be averaged.
        lat1: Starting latitude for the range.
        lat2: Ending latitude for the range.
        lon1: Starting longitude for the range.
        lon2: Ending longitude for the range.

    Return:
        variable_mean: The mean value of the variable within the specified lat/lon range.
    """
    # Select the subset of data within the specified latitude and longitude ranges
    variable_subset = variable.sel(lat=slice(lat1, lat2), lon=slice(lon1, lon2))

    # Compute weights as the cosine of the latitude, adjusted for the selected range
    weights = np.cos(np.deg2rad(variable_subset.lat))

    # Apply weighted mean over the subset
    variable_weighted = variable_subset.weighted(weights)
    variable_mean = variable_weighted.mean(("lon", "lat"))

    return variable_mean


def lat_lon_mean_norm(variable, lat1, lat2, lon1, lon2):
    # TODO: This could be combined with lat_lon_mean by adding a normalize_weights
    # argument. Note that xarray's weighted(...).mean() already divides by the sum
    # of the weights, so the explicit normalization below should not change the result.
    """
    Description: Compute the mean (weighted by cosine of latitude) of a variable
    within a specified latitude (lat1-lat2) and longitude (lon1-lon2) range.

    Arguments:
        variable: xarray DataArray representing the variable to be averaged.
        lat1: Starting latitude for the range.
        lat2: Ending latitude for the range.
        lon1: Starting longitude for the range.
        lon2: Ending longitude for the range.

    Return:
        variable_mean: The mean value of the variable within the specified lat/lon range.
    """
    # Select the data subset within the specified latitude and longitude ranges
    variable_subset = variable.sel(lat=slice(lat1, lat2), lon=slice(lon1, lon2))

    # Compute weights as the cosine of the latitude, adjusted for the selected range
    # Ensure weights are normalized (sum to 1) over the selected latitude range
    latitudes = variable_subset.lat
    weights = np.cos(np.deg2rad(latitudes))
    weights /= weights.sum(dim="lat")

    # Apply weighted mean over the subset
    variable_weighted = variable_subset.weighted(weights)
    variable_mean = variable_weighted.mean(("lon", "lat"))

    return variable_mean


def compute_var_zonal_ann(filepath, case, var):
    """
    Description: compute the annual zonal mean of a variable var
    (annual mean for each year, then averaged over longitude)
    Arguments:
        filepath = path to the directory containing the case output
        case = casename
        var = variable name
    """
    filename = filepath + case + "/" + case + "." + var + ".nc"
    ds = xr.open_dataset(filename)
    var_zonal_ann = ds[var].groupby("time.year").mean().mean(["lon"])
    return var_zonal_ann
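
For orientation, a minimal usage sketch of the helpers above. The filepath, case name, and region bounds are hypothetical placeholders, not values from this commit; note that filepath must end with "/", since the functions build paths as filepath + case + "/" + case + "." + var + ".nc", and the box coordinates assume a 0-360 longitude convention:

# Hypothetical usage sketch -- path, case, and region below are placeholders.
filepath = "/path/to/timeseries/"  # must end with "/"
case = "b.e30_example_case"

# Weighted annual global mean of surface temperature
ts_g_ann = compute_var_g_ann(filepath, case, "TS")

# Annual mean ICEFRAC over an approximate Labrador Sea box
icefrac_ann = compute_ann_mean(
    filepath, case, "ICEFRAC", lat1=48, lat2=66, lon1=290, lon2=315
)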
Lines changed: 169 additions & 0 deletions
@@ -0,0 +1,169 @@
################## SETUP ##################

################
# Data Sources #
################
data_sources:
  # run_dir is the path to the folder you want
  ### all the files associated with this configuration
  ### to be created in
  run_dir: .

  # nb_path_root is the path to the folder that cupid will
  ### look for your template notebooks in. It doesn't have to
  ### be inside run_dir, or be specific to this project, as
  ### long as the notebooks are there
  nb_path_root: ../../nblibrary

######################
# Computation Config #
######################

computation_config:

  # default_kernel_name is the name of the environment that
  ### the notebooks in this configuration will be run in by default.
  ### It must already be installed on your machine. You can also
  ### specify a different environment than the default for any
  ### notebook in NOTEBOOK CONFIG
  default_kernel_name: cupid-analysis

  # log_level sets how verbose logging will be.
  # options include: debug, info, warning, error
  log_level: 'info'

############# NOTEBOOK CONFIG #############

############################
# Notebooks and Parameters #
############################

# All parameters under global_params get passed to all the notebooks

global_params: # TODO: a lot of these params may be irrelevant/misleading for this notebook
  case_name: 'b.e30_alpha07c_cesm.B1850C_LTso.ne30_t232_wgx3.232'
  base_case_name: 'b.e30_alpha07c_cesm.B1850C_LTso.ne30_t232_wgx3.228'
  case_nickname: 'BLT1850_232'
  base_case_nickname: 'BLT1850_228'
  CESM_output_dir: /glade/campaign/cesm/development/cross-wg/diagnostic_framework/CESM_output_for_testing
  ts_dir: null # If this is set to null, it will default to CESM_output_dir; if you don't have permissions to write to CESM_output_dir, you can specify a directory such as your scratch archive directory
  lc_kwargs:
    threads_per_worker: 1

timeseries: # TODO: will need to update timeseries portion of this in next PR
  num_procs: 8
  ts_done: [False, False]
  overwrite_ts: [False, False]
  case_name: ['b.e30_alpha07c_cesm.B1850C_LTso.ne30_t232_wgx3.232', 'b.e30_alpha07c_cesm.B1850C_LTso.ne30_t232_wgx3.228']

  atm:
    vars: ['RESTOM','TS']
    derive_vars: []
    hist_str: 'cam.h0a'
    start_years: [1,1]
    end_years: [21,45]
    level: 'lev'
    mapping_file: [Null, '/glade/campaign/cesm/cesmdata/inputdata/cpl/gridmaps/ne30pg3/map_ne30pg3_TO_fv0.9x1.25_blin.240826.nc']

  lnd:
    vars: []
    derive_vars: []
    hist_str: 'clm2.h0a'
    start_years: [1,1]
    end_years: [21,45]
    level: 'lev'

  ocn:
    vars: []
    derive_vars: []
    hist_str: 'h.z'
    start_years: [1,1]
    end_years: [21,45]
    level: 'lev'

  ice:
    vars: ['ICEFRAC']
    derive_vars: []
    hist_str: 'cice.h'
    start_years: [1,1]
    end_years: [21,45]
    level: 'lev'

  glc:
    vars: []
    derive_vars: []
    hist_str: 'initial_hist'
    start_years: [1,1]
    end_years: [21,45]
    level: 'lev'

  rof:
    vars: []
    derive_vars: []
    hist_str: 'h0'
    start_years: [1,1]
    end_years: [21,45]
    level: 'lev'

compute_notebooks:

  # This is where all the notebooks you want run and their
  # parameters are specified. Several examples of different
  # types of notebooks are provided.

  # The first key (here infrastructure) is the name of the
  # notebook from nb_path_root, minus the .ipynb

  infrastructure:
    index:
      parameter_groups:
        none: {}

  atm:
    Global_TS_RESTOM_tseries:
      parameter_groups:
        none:
          vars:
            {'TS': 'Global',
             'RESTOM': 'Global'}
  ice:
    Lab_Sea_ICEFRAC_tseries:
      parameter_groups:
        none:
          vars:
            {'ICEFRAC': 'Lab Sea'}

########### JUPYTER BOOK CONFIG ###########

##################################
# Jupyter Book Table of Contents #
##################################
book_toc:

  # See https://jupyterbook.org/en/stable/structure/configure.html for
  # complete documentation of Jupyter book construction options

  format: jb-book

  # All filenames are notebook filename without the .ipynb, similar to above

  root: infrastructure/index # root is the notebook that will be the homepage for the book
  parts:

    # Parts group notebooks into different sections in the Jupyter book
    # table of contents, so you can organize different parts of your project.
    # Each chapter is the name of one of the notebooks that you executed
    # in compute_notebooks above, also without .ipynb

    - caption: Diagnostic Output from Multiple Components
      chapters:
        - file: atm/Global_TS_RESTOM_tseries
        - file: ice/Lab_Sea_ICEFRAC_tseries

#####################################
# Keys for Jupyter Book _config.yml #
#####################################
book_config_keys:

  title: CESM Intermediate Diagnostic Output # Title of your jupyter book
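
As a quick sanity check, the configuration above can be loaded with PyYAML and the executed notebooks listed; a minimal sketch, assuming the config is saved as config.yml (the filename is a placeholder, not part of this commit):

# Hypothetical check -- "config.yml" is a placeholder filename.
import yaml

with open("config.yml") as f:
    cfg = yaml.safe_load(f)

print(cfg["computation_config"]["default_kernel_name"])  # cupid-analysis
# List every notebook this config would execute, relative to nb_path_root
for component, notebooks in cfg["compute_notebooks"].items():
    for nb_name in notebooks:
        print(f"{component}/{nb_name}.ipynb")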
