|
| 1 | +import numpy as np |
| 2 | + |
| 3 | + |
def local_prediction_rigidity(X_train, X_test, alpha):
    r"""Computes the local prediction rigidity (LPR) of a linear or kernel model
    trained on a training dataset provided as input, on the local environments
    in the test set provided as a separate input. LPR is defined as follows:

    .. math::
        LPR_{i} = \frac{1}{X_i (X^{T} X + \lambda I)^{-1} X_i^{T}}

    The function assumes that the model training is undertaken in a manner where
    the global prediction targets are averaged over the number of atoms
    appearing in each training structure, and the average feature vector of each
    structure is hence used in the regression. This ensures that (1)
    regularization strength across structures with different number of atoms is
    kept constant per structure during model training, and (2) range of
    resulting LPR values are loosely kept between 0 and 1 for the ease of
    interpretation. This requires the user to provide the regularizer value that
    results from such training procedure. To guarantee valid comparison in the
    LPR across different models, feature vectors are scaled by a global factor
    based on standard deviation across atomic envs.

    If the model is a kernel model, K_train and K_test can be provided in lieu
    of X_train and X_test, along with the appropriate regularizer for the
    trained model.

    Parameters
    ----------
    X_train : list of ndarray of shape (n_atoms, n_features)
        Training dataset where each training set structure is stored as a
        separate ndarray.

    X_test : list of ndarray of shape (n_atoms, n_features)
        Test dataset where each training set structure is stored as a separate
        ndarray.

    alpha : float
        Regularizer value that the linear/kernel model has been optimized to.

    Returns
    -------
    LPR : list of array of shape (n_atoms)
        Local prediction rigidity (LPR) of the test set structures. LPR is
        separately stored for each test structure, and hence list length =
        n_test_strucs.
    rank_diff : int
        integer value of the difference between cov matrix dimension and rank
    """
    # global scaling factor: RMS norm over all training atom envs, so that
    # LPR values are comparable across models built on different features
    X_atom = np.vstack(X_train)
    sfactor = np.sqrt(np.mean(X_atom**2, axis=0).sum())

    # average (scaled) feature vector per training structure, used to build
    # the covariance matrix XX
    X_struc = np.vstack([np.mean(X_i, axis=0) for X_i in X_train]) / sfactor

    # regularized covariance; pinv guards against rank deficiency, and
    # rank_diff reports by how much Xprime falls short of full rank
    XX = X_struc.T @ X_struc
    Xprime = XX + alpha * np.eye(XX.shape[0])
    rank_diff = X_struc.shape[1] - np.linalg.matrix_rank(Xprime)
    Xinv = np.linalg.pinv(Xprime)

    # boundaries of each test structure in the stacked atom array, used to
    # split the flat LPR vector back into per-structure arrays
    test_idxs = np.cumsum([0] + [len(X) for X in X_test])

    num_test = len(X_test)
    X_all = np.vstack(X_test) / sfactor

    # LPR_i = 1 / (x_i Xinv x_i^T), computed for all atoms at once
    LPR_np = 1.0 / np.einsum("ij,jk,ik->i", X_all, Xinv, X_all)

    # separately store LPR by test struc
    LPR = [LPR_np[test_idxs[i] : test_idxs[i + 1]] for i in range(num_test)]

    return LPR, rank_diff
| 90 | + |
| 91 | + |
def componentwise_prediction_rigidity(X_train, X_test, alpha, comp_dims):
    r"""Computes the component-wise prediction rigidity (CPR) and the local CPR
    (LCPR) of a linear or kernel model trained on a training dataset provided as
    input, on the local environments in the test set provided as a separate
    input. CPR and LCPR are defined as follows:

    .. math::
        CPR_{A,c} = \frac{1}{X_{A,c} (X^{T} X + \lambda I)^{-1} X_{A,c}^{T}}

    .. math::
        LCPR_{i,c} = \frac{1}{X_{i,c} (X^{T} X + \lambda I)^{-1} X_{i,c}^{T}}

    The function assumes that the feature vectors for the local environments and
    structures are built by concatenating the descriptors of different
    prediction components together. It also assumes, like the case of LPR, that
    model training is undertaken in a manner where the global prediction targets
    are averaged over the number of atoms appearing in each training structure,
    and the average feature vector of each structure is hence used in the
    regression. Likewise, to guarantee valid comparison in the (L)CPR across
    different models, feature vectors are scaled by a global factor based on
    standard deviation across atomic envs.

    If the model is a kernel model, K_train and K_test can be provided in lieu
    of X_train and X_test, along with the appropriate regularizer for the
    trained model. However, in computing the kernels, one must strictly keep the
    different components separate, and compute separate kernel blocks for
    different prediction components.

    Parameters
    ----------
    X_train : list of ndarray of shape (n_atoms, n_features)
        Training dataset where each training set structure is stored as a
        separate ndarray.

    X_test : list of ndarray of shape (n_atoms, n_features)
        Test dataset where each training set structure is stored as a separate
        ndarray.

    alpha : float
        Regularizer value that the linear/kernel model has been optimized to.

    comp_dims : array-like of int values
        Dimensions of the feature vectors pertaining to each prediction
        component.

    Returns
    -------
    CPR : ndarray of shape (n_test_strucs, n_comps)
        Component-wise prediction rigidity computed for each prediction
        component, pertaining to the entire test structure.
    LCPR : list of ndarrays of shape (n_atoms, n_comps)
        Local component-wise prediction rigidity of the test set structures.
        Values are separately stored for each test structure, and hence list
        length = n_test_strucs
    rank_diff : int
        value of the difference between cov matrix dimension and rank
    """
    # global scaling factor: RMS norm over all training atom envs, so that
    # (L)CPR values are comparable across models
    X_atom = np.vstack(X_train)
    sfactor = np.sqrt(np.mean(X_atom**2, axis=0).sum())

    # average (scaled) feature vector per training structure for covariance
    X_struc = np.vstack([np.mean(X_i, axis=0) for X_i in X_train]) / sfactor

    # regularized covariance; pinv guards against rank deficiency, and
    # rank_diff reports by how much Xprime falls short of full rank
    XX = X_struc.T @ X_struc
    Xprime = XX + alpha * np.eye(XX.shape[0])
    rank_diff = X_struc.shape[1] - np.linalg.matrix_rank(Xprime)
    Xinv = np.linalg.pinv(Xprime)

    # boundaries of each test structure in the stacked atom array
    test_idxs = np.cumsum([0] + [len(X) for X in X_test])

    # average (scaled) feature vector per test structure, for CPR
    X_struc_test = np.vstack([np.mean(X_i, axis=0) for X_i in X_test]) / sfactor

    # prep and compute CPR and LCPR
    num_test = len(X_test)
    comp_dims = np.asarray(comp_dims)  # accept plain lists as well as ndarrays
    num_comp = len(comp_dims)
    comp_idxs = np.cumsum([0] + comp_dims.tolist())
    X_all = np.vstack(X_test) / sfactor
    atom_count = X_all.shape[0]
    CPR = np.zeros((num_test, num_comp))
    LCPR_np = np.zeros((atom_count, num_comp))

    for ci in range(num_comp):
        lo, hi = comp_idxs[ci], comp_idxs[ci + 1]
        # restricting the feature vector to component c zeroes everything
        # outside [lo, hi), so only the diagonal block of Xinv contributes:
        # X_c Xinv X_c^T == X[:, lo:hi] Xinv[lo:hi, lo:hi] X[:, lo:hi]^T
        Xinv_c = Xinv[lo:hi, lo:hi]
        Xc = X_all[:, lo:hi]
        LCPR_np[:, ci] = 1.0 / np.einsum("ij,jk,ik->i", Xc, Xinv_c, Xc)
        Sc = X_struc_test[:, lo:hi]
        CPR[:, ci] = 1.0 / np.einsum("ij,jk,ik->i", Sc, Xinv_c, Sc)

    # separately store LCPR by test struc
    LCPR = [LCPR_np[test_idxs[i] : test_idxs[i + 1]] for i in range(num_test)]

    return CPR, LCPR, rank_diff
0 commit comments