"""
Reproducible Parallelized Reduction is difficult
================================================

A reduction is a frequent operation in neural networks. It appears in layer normalization,
softmax... Because of the finite float precision, the result of the computation
changes with the order of the elements. The following examples show this variation
under different hypotheses on the vector distribution.
We consider a vector :math:`X = (x_1, ..., x_n)` and compute its average:

.. math::

    mean(X) = \\frac{\\sum_{i=1}^n x_i}{n}

Or the normalization of the vector:

.. math::

    norm(X)_i = \\frac{ X_i - \\mathbb{E}X}{ \\sqrt{ \\mathbb{V}X}}

With :math:`\\mathbb{E}X = mean(X)`,
:math:`\\mathbb{V}X = mean\\left(\\left(X - mean(X)\\right)^2\\right)`.
We draw 128 random permutations of X. The average should not change,
and the normalized vector should contain the same values. In the first case, we report
the difference between the highest and the lowest values obtained for the average.
In the second case, we report the maximum difference between the original normalized
vector and the permuted one, both sorted.
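In other words, writing :math:`X_\\sigma` for a permuted copy of :math:`X`
(:math:`\\sigma` ranging over the drawn permutations), the two reported quantities are:

.. math::

    \\Delta_{mean} = \\max_\\sigma mean(X_\\sigma) - \\min_\\sigma mean(X_\\sigma),
    \\qquad
    \\Delta_{norm} = \\max_\\sigma \\left\\| sort(norm(X_\\sigma)) - sort(norm(X)) \\right\\|_\\infty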

The computation code
++++++++++++++++++++
"""

import itertools
from tqdm import tqdm
import numpy as np
import pandas

DATA = []

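# %%
# A small aside before the experiment: float32 addition is not associative, so two
# groupings of the same terms, as a parallel reduction might produce, can return
# different values. This is the mechanism behind all the differences measured below.
x, y, z = np.float32(1e8), np.float32(-1e8), np.float32(1)
print((x + y) + z)  # 1.0
print(x + (y + z))  # 0.0 because y + z rounds back to -1e8 in float32

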
def str_dtype(dtype):
    """Displays numpy dtype in a nicer way."""
    if dtype == np.float64:
        return "fp64"
    if dtype == np.float32:
        return "fp32"
    if dtype == np.float16:
        return "fp16"
    raise ValueError(f"Unexpected value {dtype}")


def layer_norm(a, eps=1e-6):
    """
    Normalizes the vector ``a`` over its last axis.
    The computation is done in float32 for float16 inputs, otherwise in the input
    precision, and the result is cast back to the input dtype.
    """
    ctype = np.float32 if a.dtype == np.float16 else a.dtype
    a32 = a.astype(ctype)
    m = a32.mean(axis=-1, keepdims=True)
    c = a32 - m
    va = np.sqrt((c * c).mean(axis=-1, keepdims=True))
    va += eps
    return (c / va).astype(a.dtype)


def compute(values, fct):
    """
    Compares the results of function ``fct`` on reorderings of a sample.
    Loops over multiple sizes and dtypes; for each combination, ``fct`` is evaluated
    on 128 reorderings of the sample (sorted, reverse-sorted, random permutations).
    """

    def make_value(base, value):
        # For vector outputs, compare the sorted values; for scalar outputs,
        # return the value itself (the spread is computed later).
        if value.size > 1:
            return np.abs(np.sort(base) - np.sort(value)).max()
        return value

    sizes = [2, 4, 8, 16, 512, 1024, 2048, 4096, 8192]
    dtypes = [np.float64, np.float32, np.float16]
    N = list(range(128))
    exps = list(itertools.product(sizes, dtypes, N))
    data = []
    ech = None
    for size, dtype, n in tqdm(exps):
        if n == 0:
            # Baseline: the sample in its original order.
            ech = values[:size].astype(dtype)
            base = fct(ech)
            assert base.dtype == ech.dtype
            obs = dict(
                n=n, size=size, dtype=str_dtype(ech.dtype), value=make_value(base, fct(ech))
            )
            data.append(obs)

        if n == 1:
            # Sorted order.
            new_ech = np.sort(ech)
        elif n == 2:
            # Reverse-sorted order.
            new_ech = np.sort(ech)[::-1]
        else:
            # Random permutation.
            new_ech = np.random.permutation(ech)
        assert new_ech.dtype == ech.dtype
        assert new_ech.shape == ech.shape
        obs = dict(
            n=n + 1,
            size=size,
            dtype=str_dtype(new_ech.dtype),
            value=make_value(base, fct(new_ech)),
        )
        data.append(obs)

    df = pandas.DataFrame(data)
    # Spread (max - min) of the observed values for every (dtype, size) pair.
    agg = df.drop("n", axis=1).groupby(["dtype", "size"], as_index=False).agg(["min", "max"])
    agg["value", "delta"] = agg["value", "max"] - agg["value", "min"]
    piv = agg.pivot(index="size", columns="dtype", values=("value", "delta"))
    return piv


# %%
# Normal Law
# ++++++++++
#
# Let's see what it returns on a random sample drawn from a normal law.
# First the average.

values = np.random.randn(4096)
mean = compute(values, lambda x: np.mean(x).astype(x.dtype))
mean["name"] = "normal"
print(mean)

# %%
# Then the layer normalization.

ln = compute(values, layer_norm)
ln["name"] = "normal"
DATA.append(ln.reset_index(drop=True).max(axis=0))
print(ln)

# %%
# Fixed values
# ++++++++++++
#
# We try a fixed vector where a few entries are very high and all the others are small.

values[:] = -1e-4
values[::128] = 100
mean = compute(values, lambda x: np.mean(x).astype(x.dtype))
mean["name"] = "fixed"
print(mean)

# %%
# And the normalized vector.
ln = compute(values, layer_norm)
ln["name"] = "fixed"
DATA.append(ln.reset_index(drop=True).max(axis=0))
print(ln)

# %%
# Pareto Distribution
# +++++++++++++++++++
#
# A distribution with a long tail.

values = np.random.pareto(1, (4096,))
print(values)

mean = compute(values, lambda x: np.mean(x).astype(x.dtype))
mean["name"] = "pareto"
print(mean)

# %%
# And the normalized vector.
ln = compute(values, layer_norm)
ln["name"] = "pareto"
DATA.append(ln.reset_index(drop=True).max(axis=0))
print(ln)

# %%
# Summary
# +++++++
#
# We consider the maximum difference obtained for any sample size.

df = pandas.DataFrame(DATA).set_index("name")
print(df)

# %%
# Visually.

ax = df.plot.bar(logy=True)
fig = ax.get_figure()
fig.savefig("plot_parallelized_reduction.png")

# %%
# In a deep neural network
# ++++++++++++++++++++++++
#
# Some tensors easily hold about 500 million values, for instance with shape 16x32x1024x1024.
# A layer normalization over the last axis then performs 16x32x1024, roughly 500,000,
# reductions, and this is repeated in each of, say, 20 layers.
# When the same deep neural network is executed by different code using a different
# parallelization (on GPU and on CPU for example), the order of the terms in each
# reduction may change; small errors appear and propagate through the network.
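
# %%
# A minimal sketch of that effect (a plain numpy simulation, not an actual GPU or CPU
# kernel): summing the same float32 values sequentially or chunk by chunk, as a parallel
# reduction would, usually produces slightly different results.
v = np.random.randn(2**16).astype(np.float32)
sequential = np.float32(0)
for value in v:
    sequential += value
# Chunked reduction: partial sums per block of 256 values, then a sum of the partial sums.
chunked = v.reshape(256, 256).sum(axis=1, dtype=np.float32).sum(dtype=np.float32)
print(sequential, chunked, abs(float(sequential) - float(chunked)))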