From 6dba100a2cf4bd239c74875624e5a17316c70fb2 Mon Sep 17 00:00:00 2001
From: Jarrod Millman
Date: Wed, 19 Jun 2024 05:07:12 -0700
Subject: [PATCH 1/4] Update numpy version (2.0)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 64df91746..3c2a56e27 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-numpy==1.26.4
+numpy==2.0.0
 scipy==1.13.1
 matplotlib==3.9.0
 pandas==2.2.2

From ae76a6be81fe31dc97028e54f37ffde58a1dbaff Mon Sep 17 00:00:00 2001
From: Jarrod Millman
Date: Wed, 19 Jun 2024 05:10:37 -0700
Subject: [PATCH 2/4] Fix numpy 2 doctests

---
 advanced/advanced_numpy/index.rst             | 28 +++++------
 advanced/image_processing/index.rst           | 12 ++---
 advanced/mathematical_optimization/index.rst  |  8 ++--
 advanced/scipy_sparse/dia_array.rst           | 18 ++++----
 advanced/scipy_sparse/dok_array.rst           |  2 +-
 intro/numpy/advanced_operations.rst           |  4 +-
 intro/numpy/array_object.rst                  |  4 +-
 intro/numpy/elaborate_arrays.rst              | 12 ++---
 intro/numpy/operations.rst                    | 44 +++++++++----------
 .../image_processing/image_processing.rst     |  2 +-
 intro/scipy/index.rst                         | 28 ++++++------
 .../answers_image_processing.rst              |  2 +-
 packages/scikit-image/index.rst               |  6 +--
 packages/scikit-learn/index.rst               | 10 ++---
 packages/statistics/index.rst                 | 24 +++++-----
 15 files changed, 100 insertions(+), 104 deletions(-)

diff --git a/advanced/advanced_numpy/index.rst b/advanced/advanced_numpy/index.rst
index 9d0b552db..d7e1d11ff 100644
--- a/advanced/advanced_numpy/index.rst
+++ b/advanced/advanced_numpy/index.rst
@@ -312,7 +312,9 @@ Casting
 >>> y + 1
 array([2, 3, 4, 5], dtype=int8)
 >>> y + 256
- array([257, 258, 259, 260], dtype=int16)
+ Traceback (most recent call last):
+   File "<stdin>", line 1, in <module>
+ OverflowError: Python integer 256 out of bounds for int8
 >>> y + 256.0
 array([257., 258., 259., 260.])
 >>> y + np.array([256], dtype=np.int32)
@@ -507,9 +509,9 @@ Main point
 (3, 1)
 >>> byte_offset = 3 * 1 + 1 * 2  # to find x[1, 2]
 >>> x.flat[byte_offset]
- 6
+ np.int8(6)
 >>> x[1, 2]
- 6
+ np.int8(6)

 simple, **flexible**

@@ -1343,18 +1345,12 @@ Array siblings: :class:`chararray`, :class:`maskedarray`

 :class:`chararray`: vectorized string operations
 --------------------------------------------------

->>> x = np.array(['a', ' bbb', ' ccc']).view(np.chararray)
->>> x.lstrip(' ')
-chararray(['a', 'bbb', 'ccc'],
-    dtype='...')
+>>> x = np.char.asarray(['a', ' bbb', ' ccc'])
+>>> x
+chararray(['a', ' bbb', ' ccc'], dtype='<U4')

 >>> x.upper()
-chararray(['A', ' BBB', ' CCC'],
-    dtype='...')
+chararray(['A', ' BBB', ' CCC'], dtype='<U4')

 :class:`masked_array` missing data
 ----------------------------------

 >>> mx.mean()
- 2.75
+ np.float64(2.75)
 >>> np.mean(mx)
- 2.75
+ np.float64(2.75)

 .. warning:: Not all NumPy functions respect masks, for instance ``np.dot``, so check the return types.

@@ -1588,7 +1584,7 @@ Good bug report
 3. Version of NumPy/SciPy

    >>> print(np.__version__)
-   1...
+   2...

 **Check that the following is what you expect**

diff --git a/advanced/image_processing/index.rst b/advanced/image_processing/index.rst
index 8dd0b6868..25afce1bf 100644
--- a/advanced/image_processing/index.rst
+++ b/advanced/image_processing/index.rst
@@ -137,7 +137,7 @@ Increase contrast by setting min and max values::

 >>> # Remove axes and ticks
 >>> plt.axis('off')
- (-0.5, 1023.5, 767.5, -0.5)
+ (np.float64(-0.5), np.float64(1023.5), np.float64(767.5), np.float64(-0.5))

 Draw contour lines::

@@ -190,7 +190,7 @@ Images are arrays: use the whole ``numpy`` machinery.
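The doctest churn throughout this patch comes from two NumPy 2.0 changes: NEP 51 gives scalars an explicit repr, so ``6`` now prints as ``np.int8(6)``, and NEP 50 makes a Python-int operand that does not fit the array dtype an error instead of a silent upcast. A minimal sketch of both behaviours, assuming NumPy >= 2.0 is installed::

    import numpy as np

    y = np.array([1, 2, 3, 4], dtype=np.int8)
    print(repr(y[0]))    # NumPy 2 prints np.int8(1); NumPy 1.x printed just 1
    print(y[0].item())   # .item() still recovers a plain Python int: 1
    try:
        y + 256          # 256 is not representable in int8
    except OverflowError as err:
        print(err)       # NumPy 2 raises; NumPy 1.x silently upcast to int16

Values compare and convert exactly as before; only the printed form changed, which is why these hunks touch doctest output rather than the surrounding code.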
>>> face = sp.datasets.face(gray=True) >>> face[0, 40] - 127 + np.uint8(127) >>> # Slicing >>> face[10:13, 20:23] array([[141, 153, 145], @@ -222,9 +222,9 @@ Statistical information >>> face = sp.datasets.face(gray=True) >>> face.mean() - 113.48026784261067 + np.float64(113.48026784261067) >>> face.max(), face.min() - (250, 0) + (np.uint8(250), np.uint8(0)) ``np.histogram`` @@ -653,9 +653,9 @@ Use mathematical morphology to clean up the result:: >>> eroded_tmp = sp.ndimage.binary_erosion(tmp) >>> reconstruct_final = np.logical_not(sp.ndimage.binary_propagation(eroded_tmp, mask=tmp)) >>> np.abs(mask - close_img).mean() - 0.00640699... + np.float64(0.00640699...) >>> np.abs(mask - reconstruct_final).mean() - 0.00082232... + np.float64(0.00082232...) .. topic:: **Exercise** :class: green diff --git a/advanced/mathematical_optimization/index.rst b/advanced/mathematical_optimization/index.rst index b16130dad..b59801aaa 100644 --- a/advanced/mathematical_optimization/index.rst +++ b/advanced/mathematical_optimization/index.rst @@ -180,9 +180,9 @@ Brent's method to find the minimum of a function: True >>> x_min = result.x >>> x_min - 0.50... + np.float64(0.50...) >>> x_min - 0.5 - 5.8...e-09 + np.float64(5.8...e-09) .. |1d_optim_1| image:: auto_examples/images/sphx_glr_plot_1d_optim_001.png @@ -824,7 +824,7 @@ handy. given, and a gradient computed numerically: >>> sp.optimize.check_grad(f, jacobian, [2, -1]) - 2.384185791015625e-07 + np.float64(2.384185791015625e-07) See also :func:`scipy.optimize.approx_fprime` to find your errors. @@ -897,7 +897,7 @@ if we compute the norm ourselves and use a good generic optimizer ... return np.sum(f(x)**2) >>> result = sp.optimize.minimize(g, x0, method="BFGS") >>> result.fun - 2.6940...e-11 + np.float64(2.6940...e-11) BFGS needs more function calls, and gives a less precise result. diff --git a/advanced/scipy_sparse/dia_array.rst b/advanced/scipy_sparse/dia_array.rst index a70da01a1..d927abc77 100644 --- a/advanced/scipy_sparse/dia_array.rst +++ b/advanced/scipy_sparse/dia_array.rst @@ -63,15 +63,15 @@ Examples >>> mtx.offsets array([ 0, -1, 2], dtype=int32) >>> print(mtx) - (0, 0) 1 - (1, 1) 2 - (2, 2) 3 - (3, 3) 4 - (1, 0) 5 - (2, 1) 6 - (3, 2) 7 - (0, 2) 11 - (1, 3) 12 + (np.int32(0), np.int32(0)) 1 + (np.int32(1), np.int32(1)) 2 + (np.int32(2), np.int32(2)) 3 + (np.int32(3), np.int32(3)) 4 + (np.int32(1), np.int32(0)) 5 + (np.int32(2), np.int32(1)) 6 + (np.int32(3), np.int32(2)) 7 + (np.int32(0), np.int32(2)) 11 + (np.int32(1), np.int32(3)) 12 >>> mtx.toarray() array([[ 1, 0, 11, 0], [ 5, 2, 0, 12], diff --git a/advanced/scipy_sparse/dok_array.rst b/advanced/scipy_sparse/dok_array.rst index 2ab6106ae..66b12cc9e 100644 --- a/advanced/scipy_sparse/dok_array.rst +++ b/advanced/scipy_sparse/dok_array.rst @@ -46,7 +46,7 @@ Examples * slicing and indexing:: >>> mtx[1, 1] - 0.0 + np.float64(0.0) >>> mtx[[1], 1:3] <1x2 sparse array of type '<... 'numpy.float64'>' with 1 stored elements in Dictionary Of Keys format> diff --git a/intro/numpy/advanced_operations.rst b/intro/numpy/advanced_operations.rst index 5f0fa3c9c..3263a94eb 100644 --- a/intro/numpy/advanced_operations.rst +++ b/intro/numpy/advanced_operations.rst @@ -25,7 +25,7 @@ For example, :math:`3x^2 + 2x - 1`:: >>> p = np.poly1d([3, 2, -1]) >>> p(0) - -1 + np.int64(-1) >>> p.roots array([-1. , 0.33333333]) >>> p.order @@ -60,7 +60,7 @@ e.g. the Chebyshev basis. >>> p = np.polynomial.Polynomial([-1, 2, 3]) # coefs in different order! >>> p(0) - -1.0 + np.float64(-1.0) >>> p.roots() array([-1. 
, 0.33333333])
 >>> p.degree()  # In general polynomials do not always expose 'order'

diff --git a/intro/numpy/array_object.rst b/intro/numpy/array_object.rst
index 60cb535e8..85ff838c2 100644
--- a/intro/numpy/array_object.rst
+++ b/intro/numpy/array_object.rst
@@ -463,7 +463,7 @@ other Python sequences (e.g. lists):
 >>> a
 array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
 >>> a[0], a[2], a[-1]
- (0, 2, 9)
+ (np.int64(0), np.int64(2), np.int64(9))

 .. warning::

@@ -487,7 +487,7 @@ For multidimensional arrays, indices are tuples of integers:
 [0, 1, 0],
 [0, 0, 2]])
 >>> a[1, 1]
- 1
+ np.int64(1)
 >>> a[2, 1] = 10  # third line, second column
 >>> a
 array([[ 0, 0, 0],

diff --git a/intro/numpy/elaborate_arrays.rst b/intro/numpy/elaborate_arrays.rst
index 0b1e94d2e..d35230c13 100644
--- a/intro/numpy/elaborate_arrays.rst
+++ b/intro/numpy/elaborate_arrays.rst
@@ -97,14 +97,14 @@ Floating-point numbers:

 ::

 >>> np.finfo(np.float32).eps
- 1.1920929e-07
+ np.float32(1.1920929e-07)
 >>> np.finfo(np.float64).eps
- 2.2204460492503131e-16
+ np.float64(2.220446049250313e-16)

 >>> np.float32(1e-8) + np.float32(1) == 1
- True
+ np.True_
 >>> np.float64(1e-8) + np.float64(1) == 1
- False
+ np.False_

 Complex floating-point numbers:

@@ -173,11 +173,11 @@ Field access works by indexing with field names::
 >>> samples['value']
 array([0.37, 0.11, 0.13, 0.37, 0.11, 0.13])
 >>> samples[0]
- (b'ALFA', 1., 0.37)
+ np.void((b'ALFA', 1.0, 0.37), dtype=[('sensor_code', 'S4'), ('position', '<f8'), ('value', '<f8')])
 >>> samples[0]['sensor_code'] = 'TAU'
 >>> samples[0]
- (b'TAU', 1., 0.37)
+ np.void((b'TAU', 1.0, 0.37), dtype=[('sensor_code', 'S4'), ('position', '<f8'), ('value', '<f8')])

diff --git a/intro/numpy/operations.rst b/intro/numpy/operations.rst

 >>> x = np.array([1, 2, 3, 4])
 >>> np.sum(x)
- 10
+ np.int64(10)
 >>> x.sum()
- 10
+ np.int64(10)

 .. image:: images/reductions.png
    :align: right

@@ -226,11 +226,11 @@ Sum by rows and by columns:

 >>> x.sum(axis=0)  # columns (first dimension)
 array([3, 3])
 >>> x[:, 0].sum(), x[:, 1].sum()
- (3, 3)
+ (np.int64(3), np.int64(3))
 >>> x.sum(axis=1)  # rows (second dimension)
 array([2, 4])
 >>> x[0, :].sum(), x[1, :].sum()
- (2, 4)
+ (np.int64(2), np.int64(4))

 .. tip::

 >>> rng = np.random.default_rng(27446968)
 >>> x = rng.random((2, 2, 2))
 >>> x.sum(axis=2)[0, 1]
- 0.73415...
+ np.float64(0.73415...)
 >>> x[0, 1, :].sum()
- 0.73415...
+ np.float64(0.73415...)

 Other reductions
 ................

@@ -256,23 +256,23 @@ Other reductions

 >>> x = np.array([1, 3, 2])
 >>> x.min()
- 1
+ np.int64(1)
 >>> x.max()
- 3
+ np.int64(3)

 >>> x.argmin()  # index of minimum
- 0
+ np.int64(0)
 >>> x.argmax()  # index of maximum
- 1
+ np.int64(1)

 **Logical operations:**

 .. sourcecode:: pycon

 >>> np.all([True, True, False])
- False
+ np.False_
 >>> np.any([True, True, False])
- True
+ np.True_

 .. note::

@@ -282,15 +282,15 @@ Other reductions

 >>> a = np.zeros((100, 100))
 >>> np.any(a != 0)
- False
+ np.False_
 >>> np.all(a == a)
- True
+ np.True_

 >>> a = np.array([1, 2, 3, 2])
 >>> b = np.array([2, 2, 3, 2])
 >>> c = np.array([6, 4, 4, 5])
 >>> ((a <= b) & (b <= c)).all()
- True
+ np.True_

 **Statistics:**

@@ -299,14 +299,14 @@

 >>> x = np.array([1, 2, 3, 1])
 >>> y = np.array([[1, 2, 3], [5, 6, 1]])
 >>> x.mean()
- 1.75
+ np.float64(1.75)
 >>> np.median(x)
- 1.5
+ np.float64(1.5)
 >>> np.median(y, axis=-1)  # last axis
 array([2., 5.])

 >>> x.std()  # full population standard dev.
- 0.82915619758884995
+ np.float64(0.82915619758884995)

 ... and many more (best to learn as you go).
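The reduction hunks above share one more wrinkle worth spelling out: a reduction along an axis still returns an array, while a full reduction returns a NumPy scalar whose repr changed under NEP 51. A small sketch (the ``int64`` repr assumes a platform whose default integer is 64-bit)::

    import numpy as np

    x = np.array([[1, 1], [2, 2]])
    print(x.sum(axis=0))           # array([3, 3]) -- axis reductions stay arrays
    total = x.sum()                # reducing over all axes yields a NumPy scalar
    print(repr(total))             # np.int64(6) under NumPy 2; just 6 under 1.x
    print(total == 6, int(total))  # True 6 -- comparison and conversion unchanged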
@@ -709,12 +709,12 @@ Dimension shuffling >>> a.shape (4, 3, 2) >>> a[0, 2, 1] - 5 + np.int64(5) >>> b = a.transpose(1, 2, 0) >>> b.shape (3, 2, 4) >>> b[2, 1, 0] - 5 + np.int64(5) Also creates a view: @@ -722,7 +722,7 @@ Also creates a view: >>> b[2, 1, 0] = -1 >>> a[0, 2, 1] - -1 + np.int64(-1) Resizing ........ @@ -817,7 +817,7 @@ Finding minima and maxima: >>> j_max = np.argmax(a) >>> j_min = np.argmin(a) >>> j_max, j_min - (0, 2) + (np.int64(0), np.int64(2)) .. XXX: need a frame for summaries diff --git a/intro/scipy/image_processing/image_processing.rst b/intro/scipy/image_processing/image_processing.rst index e9850927f..a8af7ffbd 100644 --- a/intro/scipy/image_processing/image_processing.rst +++ b/intro/scipy/image_processing/image_processing.rst @@ -40,7 +40,7 @@ Changing orientation, resolution, .. :: >>> plt.axis('off') - (-0.5, 1023.5, 767.5, -0.5) + (np.float64(-0.5), np.float64(1023.5), np.float64(767.5), np.float64(-0.5)) >>> # etc. diff --git a/intro/scipy/index.rst b/intro/scipy/index.rst index e2e673e57..4b802437c 100644 --- a/intro/scipy/index.rst +++ b/intro/scipy/index.rst @@ -167,9 +167,9 @@ point truncation error. >>> x = 2.5 >>> y = 1e-18 >>> x * np.log(1 + y) - 0.0 + np.float64(0.0) >>> sp.special.xlog1py(x, y) - 2.5e-18 + np.float64(2.5e-18) Many special functions also have "logarithmized" variants. For instance, the gamma function :math:`\Gamma(\cdot)` is related to the factorial @@ -180,7 +180,7 @@ positive integers to the complex plane. >>> np.allclose(sp.special.gamma(x + 1), sp.special.factorial(x)) True >>> sp.special.gamma(5) < sp.special.gamma(5.5) < sp.special.gamma(6) - True + np.True_ The factorial function grows quickly, and so the gamma function overflows for moderate values of the argument. However, sometimes only the logarithm @@ -201,7 +201,7 @@ For example, suppose we wish to compute the ratio >>> a = sp.special.gamma(500) >>> b = sp.special.gamma(499) >>> a, b - (inf, inf) + (np.float64(inf), np.float64(inf)) Both the numerator and denominator overflow, so performing :math:`a / b` will not return the result we seek. However, the magnitude of the result should @@ -214,7 +214,7 @@ we get: >>> log_res = log_a - log_b >>> res = np.exp(log_res) >>> res - 499.0000000... + np.float64(499.0000000...) Similarly, suppose we wish to compute the difference :math:`\log(\Gamma(500) - \Gamma(499))`. For this, we use @@ -224,7 +224,7 @@ Similarly, suppose we wish to compute the difference >>> res = sp.special.logsumexp([log_a, log_b], ... b=[1, -1]) # weights the terms of the sum >>> res - 2605.113844343... + np.float64(2605.113844343...) For more information about these and many other special functions, see the documentation of :mod:`scipy.special`. @@ -245,7 +245,7 @@ of a square matrix:: >>> arr = np.array([[1, 2], ... [3, 4]]) >>> sp.linalg.det(arr) - -2.0 + np.float64(-2.0) Mathematically, the solution of a linear system :math:`Ax = b` is :math:`x = A^{-1}b`, but explicit inversion of a matrix is numerically unstable and should be avoided. @@ -552,7 +552,7 @@ bounds that restrict the search to the vicinity of the global minimum. nit: 8 nfev: 8 >>> res.fun == f(res.x) - True + np.True_ If we did not already know the approximate location of the global minimum, we could use one of SciPy's global minimizers, such as @@ -688,9 +688,9 @@ distribution family's ``fit`` method:: >>> loc, scale = sp.stats.norm.fit(sample) >>> loc - 0.0015767005... + np.float64(0.0015767005...) >>> scale - 0.9973396878... + np.float64(0.9973396878...) 
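A self-contained version of this fit, for readers replaying the doctests outside the tutorial (the seed and sample size here are illustrative assumptions, not the tutorial's, so the digits will differ)::

    import numpy as np
    import scipy.stats

    rng = np.random.default_rng(12345)             # any seed will do
    sample = rng.normal(loc=0.0, scale=1.0, size=100_000)
    loc, scale = scipy.stats.norm.fit(sample)      # maximum-likelihood estimates
    print(loc, scale)                              # both close to the true 0 and 1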
Since we know the true parameters of the distribution from which the sample was drawn, we are not surprised that these estimates are similar. @@ -715,7 +715,7 @@ The sample mean is an estimator of the mean of the distribution from which the sample was drawn:: >>> np.mean(sample) - 0.001576700508... + np.float64(0.001576700508...) NumPy includes some of the most fundamental sample statistics (e.g. :func:`numpy.mean`, :func:`numpy.var`, :func:`numpy.percentile`); @@ -724,7 +724,7 @@ is a common measure of central tendency for data that tends to be distributed over many orders of magnitude. >>> sp.stats.gmean(2**sample) - 1.0010934829... + np.float64(1.0010934829...) SciPy also includes a variety of hypothesis tests that produce a sample statistic and a p-value. For instance, suppose we wish to @@ -733,9 +733,9 @@ distribution:: >>> res = sp.stats.normaltest(sample) >>> res.statistic - 5.20841759... + np.float64(5.20841759...) >>> res.pvalue - 0.07396163283... + np.float64(0.07396163283...) Here, ``statistic`` is a sample statistic that tends to be high for samples that are drawn from non-normal distributions. ``pvalue`` is diff --git a/intro/scipy/summary-exercises/answers_image_processing.rst b/intro/scipy/summary-exercises/answers_image_processing.rst index 735ccaaa0..f95a440d7 100644 --- a/intro/scipy/summary-exercises/answers_image_processing.rst +++ b/intro/scipy/summary-exercises/answers_image_processing.rst @@ -76,4 +76,4 @@ Example of solution for the image processing exercise: unmolten grains in glass >>> mean_bubble_size = bubbles_areas.mean() >>> median_bubble_size = np.median(bubbles_areas) >>> mean_bubble_size, median_bubble_size - (1699.875, 65.0) + (np.float64(1699.875), np.float64(65.0)) diff --git a/packages/scikit-image/index.rst b/packages/scikit-image/index.rst index 99d3680d5..d7b5e7a3e 100644 --- a/packages/scikit-image/index.rst +++ b/packages/scikit-image/index.rst @@ -281,7 +281,7 @@ unsigned. >>> camera_float = ski.util.img_as_float(camera) >>> camera.max(), camera_float.max() - (255, 1.0) + (np.uint8(255), np.float64(1.0)) Some image processing routines need to work with float arrays, and may hence output an array with a different type and the data range from the @@ -289,7 +289,7 @@ input array :: >>> camera_sobel = ski.filters.sobel(camera) >>> camera_sobel.max() - 0.644... + np.float64(0.644...) Utility functions are provided in :mod:`skimage` to convert both the @@ -670,7 +670,7 @@ Example: compute the size and perimeter of the two segmented regions:: >>> [float(prop.area) for prop in properties] [770.0, 1168.0] >>> [prop.perimeter for prop in properties] - [100.91..., 126.81...] + [np.float64(100.91...), np.float64(126.81...)] .. seealso:: diff --git a/packages/scikit-learn/index.rst b/packages/scikit-learn/index.rst index 447cee5a8..879408166 100644 --- a/packages/scikit-learn/index.rst +++ b/packages/scikit-learn/index.rst @@ -612,7 +612,7 @@ the number of matches:: >>> print(len(matches)) 450 >>> matches.sum() / float(len(matches)) - 0.8555... + np.float64(0.8555...) We see that more than 80% of the 450 predictions match the input. But there are other more sophisticated metrics that can be used to judge the @@ -999,13 +999,13 @@ overall performance of an algorithm. It appears in the bottom row of the classification report; it can also be accessed directly:: >>> metrics.f1_score(y_test, y_pred, average="macro") - 0.991367... + np.float64(0.991367...) 
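For reference, the macro average here is just the unweighted mean of the per-class F1 scores. A toy check with made-up labels (not the digits data used above)::

    import numpy as np
    from sklearn import metrics

    y_true = [0, 0, 1, 1, 2, 2]
    y_pred = [0, 0, 1, 0, 2, 2]
    per_class = metrics.f1_score(y_true, y_pred, average=None)  # one F1 per class
    macro = metrics.f1_score(y_true, y_pred, average="macro")
    print(per_class, macro)
    print(np.isclose(per_class.mean(), macro))  # True: macro = mean of per-class F1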
The over-fitting we saw previously can be quantified by computing the f1-score on the training data itself:: >>> metrics.f1_score(y_train, clf.predict(X_train), average="macro") - 1.0 + np.float64(1.0) .. note:: @@ -1164,8 +1164,8 @@ We can find the optimal parameters this way:: >>> for Model in [Ridge, Lasso]: ... gscv = GridSearchCV(Model(), dict(alpha=alphas), cv=3).fit(X, y) ... print('%s: %s' % (Model.__name__, gscv.best_params_)) - Ridge: {'alpha': 0.062101694189156162} - Lasso: {'alpha': 0.01268961003167922} + Ridge: {'alpha': np.float64(0.06210169418915616)} + Lasso: {'alpha': np.float64(0.01268961003167922)} Built-in Hyperparameter Search ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/packages/statistics/index.rst b/packages/statistics/index.rst index 70cd32646..41966c2e7 100644 --- a/packages/statistics/index.rst +++ b/packages/statistics/index.rst @@ -193,7 +193,7 @@ Manipulating data >>> # Simpler selector >>> data[data['Gender'] == 'Female']['VIQ'].mean() - 109.45 + np.float64(109.45) .. note:: For a quick view on a large dataframe, use its `describe` method: :meth:`pandas.DataFrame.describe`. @@ -205,8 +205,8 @@ Manipulating data >>> groupby_gender = data.groupby('Gender') >>> for gender, value in groupby_gender['VIQ']: ... print((gender, value.mean())) - ('Female', 109.45) - ('Male', 115.25) + ('Female', np.float64(109.45)) + ('Male', np.float64(115.25)) `groupby_gender` is a powerful object that exposes many @@ -345,7 +345,7 @@ and the `p-value `_ (see the function's help):: >>> sp.stats.ttest_1samp(data['VIQ'], 0) - TtestResult(statistic=30.088099970..., pvalue=1.32891964...e-28, df=39) + TtestResult(statistic=np.float64(30.088099970...), pvalue=np.float64(1.32891964...e-28), df=np.int64(39)) The p-value of :math:`10^-28` indicates that such an extreme value of the statistic is unlikely to be observed under the null hypothesis. This may be taken as @@ -365,7 +365,7 @@ will affect the conclusions of the test, we can use a `Wilcoxon signed-rank test this assumption at the expense of test power:: >>> sp.stats.wilcoxon(data['VIQ']) - WilcoxonResult(statistic=0.0, pvalue=1.8189894...e-12) + WilcoxonResult(statistic=np.float64(0.0), pvalue=np.float64(1.8189894...e-12)) Two-sample t-test: testing for difference across populations ............................................................ @@ -378,14 +378,14 @@ a two-sample t-test using :func:`scipy.stats.ttest_ind`:: >>> female_viq = data[data['Gender'] == 'Female']['VIQ'] >>> male_viq = data[data['Gender'] == 'Male']['VIQ'] >>> sp.stats.ttest_ind(female_viq, male_viq) - TtestResult(statistic=-0.77261617232..., pvalue=0.4445287677858..., df=38.0) + TtestResult(statistic=np.float64(-0.77261617232...), pvalue=np.float64(0.4445287677858...), df=np.float64(38.0)) The corresponding non-parametric test is the `Mann–Whitney U test `_, :func:`scipy.stats.mannwhitneyu`. >>> sp.stats.mannwhitneyu(female_viq, male_viq) - MannwhitneyuResult(statistic=164.5, pvalue=0.34228868687...) + MannwhitneyuResult(statistic=np.float64(164.5), pvalue=np.float64(0.34228868687...)) Paired tests: repeated measurements on the same individuals ----------------------------------------------------------- @@ -399,7 +399,7 @@ PIQ, VIQ, and FSIQ give three measures of IQ. Let us test whether FISQ and PIQ are significantly different. 
We can use an "independent sample" test:: >>> sp.stats.ttest_ind(data['FSIQ'], data['PIQ']) - TtestResult(statistic=0.46563759638..., pvalue=0.64277250..., df=78.0) + TtestResult(statistic=np.float64(0.46563759638...), pvalue=np.float64(0.64277250...), df=np.float64(78.0)) The problem with this approach is that it ignores an important relationship between observations: FSIQ and PIQ are measured on the same individuals. @@ -409,7 +409,7 @@ or `"repeated measures test" `_:: >>> sp.stats.ttest_rel(data['FSIQ'], data['PIQ']) - TtestResult(statistic=1.784201940..., pvalue=0.082172638183..., df=39) + TtestResult(statistic=np.float64(1.784201940...), pvalue=np.float64(0.082172638183...), df=np.int64(39)) .. image:: auto_examples/images/sphx_glr_plot_paired_boxplots_002.png :target: auto_examples/plot_pandas.html @@ -420,13 +420,13 @@ This is equivalent to a one-sample test on the differences between paired observations:: >>> sp.stats.ttest_1samp(data['FSIQ'] - data['PIQ'], 0) - TtestResult(statistic=1.784201940..., pvalue=0.082172638..., df=39) + TtestResult(statistic=np.float64(1.784201940...), pvalue=np.float64(0.082172638...), df=np.int64(39)) Accordingly, we can perform a nonparametric version of the test with ``wilcoxon``. >>> sp.stats.wilcoxon(data['FSIQ'], data['PIQ'], method="approx") - WilcoxonResult(statistic=274.5, pvalue=0.106594927135...) + WilcoxonResult(statistic=np.float64(274.5), pvalue=np.float64(0.106594927135...)) .. topic:: **Exercise** :class: green @@ -646,7 +646,7 @@ model:: previous t-test:: >>> sp.stats.ttest_ind(data['FSIQ'], data['PIQ']) - TtestResult(statistic=0.46563759638..., pvalue=0.64277250..., df=78.0) + TtestResult(statistic=np.float64(0.46563759638...), pvalue=np.float64(0.64277250...), df=np.float64(78.0)) Multiple Regression: including multiple factors From abe43dfc8347d44e5dbf6b9a8d7c00ee95318813 Mon Sep 17 00:00:00 2001 From: Jarrod Millman Date: Wed, 19 Jun 2024 06:40:38 -0700 Subject: [PATCH 3/4] Remove np.lookfor discussion --- intro/help/help.rst | 9 --------- intro/numpy/array_object.rst | 10 ---------- intro/numpy/operations.rst | 2 +- 3 files changed, 1 insertion(+), 20 deletions(-) diff --git a/intro/help/help.rst b/intro/help/help.rst index 1fd558bc0..e3cdb2146 100644 --- a/intro/help/help.rst +++ b/intro/help/help.rst @@ -51,8 +51,6 @@ just to display help and docstrings... learning by example. More standard documentation is also available. -Finally, two more "technical" possibilities are useful as well: - * In Ipython, the magical function ``%psearch`` search for objects matching patterns. This is useful if, for example, one does not know the exact name of a function. @@ -63,13 +61,6 @@ Finally, two more "technical" possibilities are useful as well: In [3]: import numpy as np In [4]: %psearch np.diag* -* numpy.lookfor looks for keywords inside the docstrings of specified modules. - - .. ipython:: - :okwarning: - - In [45]: np.lookfor('convolution') - * If everything listed above fails (and Google doesn't have the answer)... don't despair! There is a vibrant Scientific Python community. Scientific Python is present on various platform. diff --git a/intro/numpy/array_object.rst b/intro/numpy/array_object.rst index 85ff838c2..6212640f5 100644 --- a/intro/numpy/array_object.rst +++ b/intro/numpy/array_object.rst @@ -113,16 +113,6 @@ NumPy Reference documentation - Looking for something: - .. 
sourcecode:: pycon - - >>> np.lookfor('create array') # doctest: +SKIP - Search results for 'create array' - --------------------------------- - numpy.array - Create an array. - numpy.memmap - Create a memory-map to an array stored in a *binary* file on disk. - .. ipython:: In [6]: np.con*? diff --git a/intro/numpy/operations.rst b/intro/numpy/operations.rst index 4e82b7fc2..4e1853692 100644 --- a/intro/numpy/operations.rst +++ b/intro/numpy/operations.rst @@ -864,7 +864,7 @@ Summary * Know miscellaneous operations on arrays, such as finding the mean or max (``array.max()``, ``array.mean()``). No need to retain everything, but have the reflex to search in the documentation (online docs, - ``help()``, ``lookfor()``)!! + ``help()``)!! * For advanced use: master the indexing with arrays of integers, as well as broadcasting. Know more NumPy functions to handle various array From 4da5c2761da8085eb76e91de68544b70831a695b Mon Sep 17 00:00:00 2001 From: Jarrod Millman Date: Wed, 19 Jun 2024 06:49:51 -0700 Subject: [PATCH 4/4] Use np.ptp --- .../mathematical_optimization/examples/plot_gradient_descent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/advanced/mathematical_optimization/examples/plot_gradient_descent.py b/advanced/mathematical_optimization/examples/plot_gradient_descent.py index 812215b9a..b508af989 100644 --- a/advanced/mathematical_optimization/examples/plot_gradient_descent.py +++ b/advanced/mathematical_optimization/examples/plot_gradient_descent.py @@ -242,7 +242,7 @@ def store(X): extent=[x_min, x_max, y_min, y_max], cmap=plt.cm.gray_r, origin="lower", - vmax=log_z.min() + 1.5 * log_z.ptp(), + vmax=log_z.min() + 1.5 * np.ptp(log_z), ) contours = plt.contour( log_z,
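The final patch tracks an API removal rather than a repr change: as part of the NumPy 2.0 namespace cleanup (NEP 52), the ``ndarray.ptp`` method (peak-to-peak, i.e. ``max - min``) was removed, just as ``np.lookfor`` was removed in the previous patch; the ``np.ptp`` function remains. A quick sketch of the portable spelling::

    import numpy as np

    log_z = np.array([0.5, 2.0, -1.0])
    print(np.ptp(log_z))  # 3.0 == log_z.max() - log_z.min(); works on 1.x and 2.x
    # log_z.ptp() worked on NumPy 1.x but raises AttributeError on NumPy 2.0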