Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- .venv/Lib/site-packages/scipy/signal/_spectral.cp39-win_amd64.pyd +3 -0
- .venv/Lib/site-packages/scipy/special/__init__.py +863 -0
- .venv/Lib/site-packages/scipy/special/__pycache__/_basic.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/special/__pycache__/_ellip_harm.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/special/__pycache__/_lambertw.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/special/__pycache__/_logsumexp.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/special/__pycache__/add_newdocs.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/special/__pycache__/basic.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/special/__pycache__/orthogonal.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/special/__pycache__/sf_error.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/special/__pycache__/specfun.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/special/__pycache__/spfun_stats.cpython-39.pyc +0 -0
- .venv/Lib/site-packages/scipy/special/_precompute/__init__.py +0 -0
- .venv/Lib/site-packages/scipy/special/_precompute/cosine_cdf.py +17 -0
- .venv/Lib/site-packages/scipy/special/_precompute/expn_asy.py +54 -0
- .venv/Lib/site-packages/scipy/special/_precompute/gammainc_asy.py +116 -0
- .venv/Lib/site-packages/scipy/special/_precompute/gammainc_data.py +124 -0
- .venv/Lib/site-packages/scipy/special/_precompute/lambertw.py +68 -0
- .venv/Lib/site-packages/scipy/special/_precompute/loggamma.py +43 -0
- .venv/Lib/site-packages/scipy/special/_precompute/struve_convergence.py +131 -0
- .venv/Lib/site-packages/scipy/special/_precompute/utils.py +38 -0
- .venv/Lib/site-packages/scipy/special/_precompute/wright_bessel.py +342 -0
- .venv/Lib/site-packages/scipy/special/_precompute/wright_bessel_data.py +152 -0
- .venv/Lib/site-packages/scipy/special/_precompute/wrightomega.py +41 -0
- .venv/Lib/site-packages/scipy/special/_precompute/zetac.py +27 -0
- .venv/Lib/site-packages/scipy/special/_ufuncs_cxx.pyx +181 -0
- .venv/Lib/site-packages/scipy/special/_ufuncs_cxx_defs.h +68 -0
- .venv/Lib/site-packages/scipy/special/_ufuncs_defs.h +185 -0
- .venv/Lib/site-packages/scipy/special/special/binom.h +85 -0
- .venv/Lib/site-packages/scipy/special/special/config.h +158 -0
- .venv/Lib/site-packages/scipy/special/special/digamma.h +198 -0
- .venv/Lib/site-packages/scipy/special/special/error.h +42 -0
- .venv/Lib/site-packages/scipy/special/tests/__init__.py +0 -0
- .venv/Lib/site-packages/scipy/special/tests/data/__init__.py +0 -0
- .venv/Lib/site-packages/scipy/special/tests/test_wright_bessel.py +115 -0
- .venv/Lib/site-packages/scipy/special/tests/test_zeta.py +49 -0
- .venv/Lib/site-packages/scipy/stats/morestats.py +34 -0
- .venv/Lib/site-packages/scipy/stats/mstats.py +140 -0
- .venv/Lib/site-packages/scipy/stats/mstats_basic.py +50 -0
- .venv/Lib/site-packages/scipy/stats/mstats_extras.py +26 -0
- .venv/Lib/site-packages/scipy/stats/tests/test_axis_nan_policy.py +1188 -0
- .venv/Lib/site-packages/scipy/stats/tests/test_binned_statistic.py +568 -0
- .venv/Lib/site-packages/scipy/stats/tests/test_boost_ufuncs.py +47 -0
- .venv/Lib/site-packages/scipy/stats/tests/test_censored_data.py +152 -0
- .venv/Lib/site-packages/scipy/stats/tests/test_contingency.py +241 -0
- .venv/Lib/site-packages/scipy/stats/tests/test_continuous_basic.py +1016 -0
- .venv/Lib/site-packages/scipy/stats/tests/test_continuous_fit_censored.py +683 -0
- .venv/Lib/site-packages/scipy/stats/tests/test_crosstab.py +115 -0
- .venv/Lib/site-packages/scipy/stats/tests/test_discrete_basic.py +548 -0
.gitattributes
CHANGED
@@ -232,3 +232,4 @@ Data/Tsukuyomi/wavs/VOICEACTRESS100_100.wav filter=lfs diff=lfs merge=lfs -text
|
|
232 |
.venv/Lib/site-packages/torch/lib/cudnn_engines_precompiled64_9.dll filter=lfs diff=lfs merge=lfs -text
|
233 |
.venv/Lib/site-packages/torch/lib/dnnl.lib filter=lfs diff=lfs merge=lfs -text
|
234 |
.venv/Lib/site-packages/torch/lib/torch_cuda.dll filter=lfs diff=lfs merge=lfs -text
|
|
|
|
232 |
.venv/Lib/site-packages/torch/lib/cudnn_engines_precompiled64_9.dll filter=lfs diff=lfs merge=lfs -text
|
233 |
.venv/Lib/site-packages/torch/lib/dnnl.lib filter=lfs diff=lfs merge=lfs -text
|
234 |
.venv/Lib/site-packages/torch/lib/torch_cuda.dll filter=lfs diff=lfs merge=lfs -text
|
235 |
+
.venv/Lib/site-packages/scipy/signal/_spectral.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
.venv/Lib/site-packages/scipy/signal/_spectral.cp39-win_amd64.pyd
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:34c12e9119432a42d10bb2fde95094848e3810fc5b47c12c09bbdaa7b821ee32
|
3 |
+
size 1009152
|
.venv/Lib/site-packages/scipy/special/__init__.py
ADDED
@@ -0,0 +1,863 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
========================================
|
3 |
+
Special functions (:mod:`scipy.special`)
|
4 |
+
========================================
|
5 |
+
|
6 |
+
.. currentmodule:: scipy.special
|
7 |
+
|
8 |
+
Almost all of the functions below accept NumPy arrays as input
|
9 |
+
arguments as well as single numbers. This means they follow
|
10 |
+
broadcasting and automatic array-looping rules. Technically,
|
11 |
+
they are `NumPy universal functions
|
12 |
+
<https://numpy.org/doc/stable/user/basics.ufuncs.html#ufuncs-basics>`_.
|
13 |
+
Functions which do not accept NumPy arrays are marked by a warning
|
14 |
+
in the section description.
|
15 |
+
|
16 |
+
.. seealso::
|
17 |
+
|
18 |
+
`scipy.special.cython_special` -- Typed Cython versions of special functions
|
19 |
+
|
20 |
+
|
21 |
+
Error handling
|
22 |
+
==============
|
23 |
+
|
24 |
+
Errors are handled by returning NaNs or other appropriate values.
|
25 |
+
Some of the special function routines can emit warnings or raise
|
26 |
+
exceptions when an error occurs. By default this is disabled; to
|
27 |
+
query and control the current error handling state the following
|
28 |
+
functions are provided.
|
29 |
+
|
30 |
+
.. autosummary::
|
31 |
+
:toctree: generated/
|
32 |
+
|
33 |
+
geterr -- Get the current way of handling special-function errors.
|
34 |
+
seterr -- Set how special-function errors are handled.
|
35 |
+
errstate -- Context manager for special-function error handling.
|
36 |
+
SpecialFunctionWarning -- Warning that can be emitted by special functions.
|
37 |
+
SpecialFunctionError -- Exception that can be raised by special functions.
|
38 |
+
|
39 |
+
Available functions
|
40 |
+
===================
|
41 |
+
|
42 |
+
Airy functions
|
43 |
+
--------------
|
44 |
+
|
45 |
+
.. autosummary::
|
46 |
+
:toctree: generated/
|
47 |
+
|
48 |
+
airy -- Airy functions and their derivatives.
|
49 |
+
airye -- Exponentially scaled Airy functions and their derivatives.
|
50 |
+
ai_zeros -- Compute `nt` zeros and values of the Airy function Ai and its derivative.
|
51 |
+
bi_zeros -- Compute `nt` zeros and values of the Airy function Bi and its derivative.
|
52 |
+
itairy -- Integrals of Airy functions
|
53 |
+
|
54 |
+
|
55 |
+
Elliptic functions and integrals
|
56 |
+
--------------------------------
|
57 |
+
|
58 |
+
.. autosummary::
|
59 |
+
:toctree: generated/
|
60 |
+
|
61 |
+
ellipj -- Jacobian elliptic functions.
|
62 |
+
ellipk -- Complete elliptic integral of the first kind.
|
63 |
+
ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1.
|
64 |
+
ellipkinc -- Incomplete elliptic integral of the first kind.
|
65 |
+
ellipe -- Complete elliptic integral of the second kind.
|
66 |
+
ellipeinc -- Incomplete elliptic integral of the second kind.
|
67 |
+
elliprc -- Degenerate symmetric integral RC.
|
68 |
+
elliprd -- Symmetric elliptic integral of the second kind.
|
69 |
+
elliprf -- Completely-symmetric elliptic integral of the first kind.
|
70 |
+
elliprg -- Completely-symmetric elliptic integral of the second kind.
|
71 |
+
elliprj -- Symmetric elliptic integral of the third kind.
|
72 |
+
|
73 |
+
Bessel functions
|
74 |
+
----------------
|
75 |
+
|
76 |
+
.. autosummary::
|
77 |
+
:toctree: generated/
|
78 |
+
|
79 |
+
jv -- Bessel function of the first kind of real order and \
|
80 |
+
complex argument.
|
81 |
+
jve -- Exponentially scaled Bessel function of order `v`.
|
82 |
+
yn -- Bessel function of the second kind of integer order and \
|
83 |
+
real argument.
|
84 |
+
yv -- Bessel function of the second kind of real order and \
|
85 |
+
complex argument.
|
86 |
+
yve -- Exponentially scaled Bessel function of the second kind \
|
87 |
+
of real order.
|
88 |
+
kn -- Modified Bessel function of the second kind of integer \
|
89 |
+
order `n`
|
90 |
+
kv -- Modified Bessel function of the second kind of real order \
|
91 |
+
`v`
|
92 |
+
kve -- Exponentially scaled modified Bessel function of the \
|
93 |
+
second kind.
|
94 |
+
iv -- Modified Bessel function of the first kind of real order.
|
95 |
+
ive -- Exponentially scaled modified Bessel function of the \
|
96 |
+
first kind.
|
97 |
+
hankel1 -- Hankel function of the first kind.
|
98 |
+
hankel1e -- Exponentially scaled Hankel function of the first kind.
|
99 |
+
hankel2 -- Hankel function of the second kind.
|
100 |
+
hankel2e -- Exponentially scaled Hankel function of the second kind.
|
101 |
+
wright_bessel -- Wright's generalized Bessel function.
|
102 |
+
|
103 |
+
The following function does not accept NumPy arrays (it is not a
|
104 |
+
universal function):
|
105 |
+
|
106 |
+
.. autosummary::
|
107 |
+
:toctree: generated/
|
108 |
+
|
109 |
+
lmbda -- Jahnke-Emden Lambda function, Lambdav(x).
|
110 |
+
|
111 |
+
Zeros of Bessel functions
|
112 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^
|
113 |
+
|
114 |
+
The following functions do not accept NumPy arrays (they are not
|
115 |
+
universal functions):
|
116 |
+
|
117 |
+
.. autosummary::
|
118 |
+
:toctree: generated/
|
119 |
+
|
120 |
+
jnjnp_zeros -- Compute zeros of integer-order Bessel functions Jn and Jn'.
|
121 |
+
jnyn_zeros -- Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
|
122 |
+
jn_zeros -- Compute zeros of integer-order Bessel function Jn(x).
|
123 |
+
jnp_zeros -- Compute zeros of integer-order Bessel function derivative Jn'(x).
|
124 |
+
yn_zeros -- Compute zeros of integer-order Bessel function Yn(x).
|
125 |
+
ynp_zeros -- Compute zeros of integer-order Bessel function derivative Yn'(x).
|
126 |
+
y0_zeros -- Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
|
127 |
+
y1_zeros -- Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
|
128 |
+
y1p_zeros -- Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
|
129 |
+
|
130 |
+
Faster versions of common Bessel functions
|
131 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
132 |
+
|
133 |
+
.. autosummary::
|
134 |
+
:toctree: generated/
|
135 |
+
|
136 |
+
j0 -- Bessel function of the first kind of order 0.
|
137 |
+
j1 -- Bessel function of the first kind of order 1.
|
138 |
+
y0 -- Bessel function of the second kind of order 0.
|
139 |
+
y1 -- Bessel function of the second kind of order 1.
|
140 |
+
i0 -- Modified Bessel function of order 0.
|
141 |
+
i0e -- Exponentially scaled modified Bessel function of order 0.
|
142 |
+
i1 -- Modified Bessel function of order 1.
|
143 |
+
i1e -- Exponentially scaled modified Bessel function of order 1.
|
144 |
+
k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`.
|
145 |
+
k0e -- Exponentially scaled modified Bessel function K of order 0
|
146 |
+
k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
|
147 |
+
k1e -- Exponentially scaled modified Bessel function K of order 1.
|
148 |
+
|
149 |
+
Integrals of Bessel functions
|
150 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
151 |
+
|
152 |
+
.. autosummary::
|
153 |
+
:toctree: generated/
|
154 |
+
|
155 |
+
itj0y0 -- Integrals of Bessel functions of order 0.
|
156 |
+
it2j0y0 -- Integrals related to Bessel functions of order 0.
|
157 |
+
iti0k0 -- Integrals of modified Bessel functions of order 0.
|
158 |
+
it2i0k0 -- Integrals related to modified Bessel functions of order 0.
|
159 |
+
besselpoly -- Weighted integral of a Bessel function.
|
160 |
+
|
161 |
+
Derivatives of Bessel functions
|
162 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
163 |
+
|
164 |
+
.. autosummary::
|
165 |
+
:toctree: generated/
|
166 |
+
|
167 |
+
jvp -- Compute nth derivative of Bessel function Jv(z) with respect to `z`.
|
168 |
+
yvp -- Compute nth derivative of Bessel function Yv(z) with respect to `z`.
|
169 |
+
kvp -- Compute nth derivative of real-order modified Bessel function Kv(z)
|
170 |
+
ivp -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`.
|
171 |
+
h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`.
|
172 |
+
h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`.
|
173 |
+
|
174 |
+
Spherical Bessel functions
|
175 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^
|
176 |
+
|
177 |
+
.. autosummary::
|
178 |
+
:toctree: generated/
|
179 |
+
|
180 |
+
spherical_jn -- Spherical Bessel function of the first kind or its derivative.
|
181 |
+
spherical_yn -- Spherical Bessel function of the second kind or its derivative.
|
182 |
+
spherical_in -- Modified spherical Bessel function of the first kind or its derivative.
|
183 |
+
spherical_kn -- Modified spherical Bessel function of the second kind or its derivative.
|
184 |
+
|
185 |
+
Riccati-Bessel functions
|
186 |
+
^^^^^^^^^^^^^^^^^^^^^^^^
|
187 |
+
|
188 |
+
The following functions do not accept NumPy arrays (they are not
|
189 |
+
universal functions):
|
190 |
+
|
191 |
+
.. autosummary::
|
192 |
+
:toctree: generated/
|
193 |
+
|
194 |
+
riccati_jn -- Compute Ricatti-Bessel function of the first kind and its derivative.
|
195 |
+
riccati_yn -- Compute Ricatti-Bessel function of the second kind and its derivative.
|
196 |
+
|
197 |
+
Struve functions
|
198 |
+
----------------
|
199 |
+
|
200 |
+
.. autosummary::
|
201 |
+
:toctree: generated/
|
202 |
+
|
203 |
+
struve -- Struve function.
|
204 |
+
modstruve -- Modified Struve function.
|
205 |
+
itstruve0 -- Integral of the Struve function of order 0.
|
206 |
+
it2struve0 -- Integral related to the Struve function of order 0.
|
207 |
+
itmodstruve0 -- Integral of the modified Struve function of order 0.
|
208 |
+
|
209 |
+
|
210 |
+
Raw statistical functions
|
211 |
+
-------------------------
|
212 |
+
|
213 |
+
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
|
214 |
+
|
215 |
+
Binomial distribution
|
216 |
+
^^^^^^^^^^^^^^^^^^^^^
|
217 |
+
|
218 |
+
.. autosummary::
|
219 |
+
:toctree: generated/
|
220 |
+
|
221 |
+
bdtr -- Binomial distribution cumulative distribution function.
|
222 |
+
bdtrc -- Binomial distribution survival function.
|
223 |
+
bdtri -- Inverse function to `bdtr` with respect to `p`.
|
224 |
+
bdtrik -- Inverse function to `bdtr` with respect to `k`.
|
225 |
+
bdtrin -- Inverse function to `bdtr` with respect to `n`.
|
226 |
+
|
227 |
+
Beta distribution
|
228 |
+
^^^^^^^^^^^^^^^^^
|
229 |
+
|
230 |
+
.. autosummary::
|
231 |
+
:toctree: generated/
|
232 |
+
|
233 |
+
btdtr -- Cumulative distribution function of the beta distribution.
|
234 |
+
btdtri -- The `p`-th quantile of the beta distribution.
|
235 |
+
btdtria -- Inverse of `btdtr` with respect to `a`.
|
236 |
+
btdtrib -- btdtria(a, p, x).
|
237 |
+
|
238 |
+
F distribution
|
239 |
+
^^^^^^^^^^^^^^
|
240 |
+
|
241 |
+
.. autosummary::
|
242 |
+
:toctree: generated/
|
243 |
+
|
244 |
+
fdtr -- F cumulative distribution function.
|
245 |
+
fdtrc -- F survival function.
|
246 |
+
fdtri -- The `p`-th quantile of the F-distribution.
|
247 |
+
fdtridfd -- Inverse to `fdtr` vs dfd.
|
248 |
+
|
249 |
+
Gamma distribution
|
250 |
+
^^^^^^^^^^^^^^^^^^
|
251 |
+
|
252 |
+
.. autosummary::
|
253 |
+
:toctree: generated/
|
254 |
+
|
255 |
+
gdtr -- Gamma distribution cumulative distribution function.
|
256 |
+
gdtrc -- Gamma distribution survival function.
|
257 |
+
gdtria -- Inverse of `gdtr` vs a.
|
258 |
+
gdtrib -- Inverse of `gdtr` vs b.
|
259 |
+
gdtrix -- Inverse of `gdtr` vs x.
|
260 |
+
|
261 |
+
Negative binomial distribution
|
262 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
263 |
+
|
264 |
+
.. autosummary::
|
265 |
+
:toctree: generated/
|
266 |
+
|
267 |
+
nbdtr -- Negative binomial cumulative distribution function.
|
268 |
+
nbdtrc -- Negative binomial survival function.
|
269 |
+
nbdtri -- Inverse of `nbdtr` vs `p`.
|
270 |
+
nbdtrik -- Inverse of `nbdtr` vs `k`.
|
271 |
+
nbdtrin -- Inverse of `nbdtr` vs `n`.
|
272 |
+
|
273 |
+
Noncentral F distribution
|
274 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^
|
275 |
+
|
276 |
+
.. autosummary::
|
277 |
+
:toctree: generated/
|
278 |
+
|
279 |
+
ncfdtr -- Cumulative distribution function of the non-central F distribution.
|
280 |
+
ncfdtridfd -- Calculate degrees of freedom (denominator) for the noncentral F-distribution.
|
281 |
+
ncfdtridfn -- Calculate degrees of freedom (numerator) for the noncentral F-distribution.
|
282 |
+
ncfdtri -- Inverse cumulative distribution function of the non-central F distribution.
|
283 |
+
ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution.
|
284 |
+
|
285 |
+
Noncentral t distribution
|
286 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^
|
287 |
+
|
288 |
+
.. autosummary::
|
289 |
+
:toctree: generated/
|
290 |
+
|
291 |
+
nctdtr -- Cumulative distribution function of the non-central `t` distribution.
|
292 |
+
nctdtridf -- Calculate degrees of freedom for non-central t distribution.
|
293 |
+
nctdtrit -- Inverse cumulative distribution function of the non-central t distribution.
|
294 |
+
nctdtrinc -- Calculate non-centrality parameter for non-central t distribution.
|
295 |
+
|
296 |
+
Normal distribution
|
297 |
+
^^^^^^^^^^^^^^^^^^^
|
298 |
+
|
299 |
+
.. autosummary::
|
300 |
+
:toctree: generated/
|
301 |
+
|
302 |
+
nrdtrimn -- Calculate mean of normal distribution given other params.
|
303 |
+
nrdtrisd -- Calculate standard deviation of normal distribution given other params.
|
304 |
+
ndtr -- Normal cumulative distribution function.
|
305 |
+
log_ndtr -- Logarithm of normal cumulative distribution function.
|
306 |
+
ndtri -- Inverse of `ndtr` vs x.
|
307 |
+
ndtri_exp -- Inverse of `log_ndtr` vs x.
|
308 |
+
|
309 |
+
Poisson distribution
|
310 |
+
^^^^^^^^^^^^^^^^^^^^
|
311 |
+
|
312 |
+
.. autosummary::
|
313 |
+
:toctree: generated/
|
314 |
+
|
315 |
+
pdtr -- Poisson cumulative distribution function.
|
316 |
+
pdtrc -- Poisson survival function.
|
317 |
+
pdtri -- Inverse to `pdtr` vs m.
|
318 |
+
pdtrik -- Inverse to `pdtr` vs k.
|
319 |
+
|
320 |
+
Student t distribution
|
321 |
+
^^^^^^^^^^^^^^^^^^^^^^
|
322 |
+
|
323 |
+
.. autosummary::
|
324 |
+
:toctree: generated/
|
325 |
+
|
326 |
+
stdtr -- Student t distribution cumulative distribution function.
|
327 |
+
stdtridf -- Inverse of `stdtr` vs df.
|
328 |
+
stdtrit -- Inverse of `stdtr` vs `t`.
|
329 |
+
|
330 |
+
Chi square distribution
|
331 |
+
^^^^^^^^^^^^^^^^^^^^^^^
|
332 |
+
|
333 |
+
.. autosummary::
|
334 |
+
:toctree: generated/
|
335 |
+
|
336 |
+
chdtr -- Chi square cumulative distribution function.
|
337 |
+
chdtrc -- Chi square survival function.
|
338 |
+
chdtri -- Inverse to `chdtrc`.
|
339 |
+
chdtriv -- Inverse to `chdtr` vs `v`.
|
340 |
+
|
341 |
+
Non-central chi square distribution
|
342 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
343 |
+
|
344 |
+
.. autosummary::
|
345 |
+
:toctree: generated/
|
346 |
+
|
347 |
+
chndtr -- Non-central chi square cumulative distribution function.
|
348 |
+
chndtridf -- Inverse to `chndtr` vs `df`.
|
349 |
+
chndtrinc -- Inverse to `chndtr` vs `nc`.
|
350 |
+
chndtrix -- Inverse to `chndtr` vs `x`.
|
351 |
+
|
352 |
+
Kolmogorov distribution
|
353 |
+
^^^^^^^^^^^^^^^^^^^^^^^
|
354 |
+
|
355 |
+
.. autosummary::
|
356 |
+
:toctree: generated/
|
357 |
+
|
358 |
+
smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function.
|
359 |
+
smirnovi -- Inverse to `smirnov`.
|
360 |
+
kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution.
|
361 |
+
kolmogi -- Inverse function to `kolmogorov`.
|
362 |
+
|
363 |
+
Box-Cox transformation
|
364 |
+
^^^^^^^^^^^^^^^^^^^^^^
|
365 |
+
|
366 |
+
.. autosummary::
|
367 |
+
:toctree: generated/
|
368 |
+
|
369 |
+
boxcox -- Compute the Box-Cox transformation.
|
370 |
+
boxcox1p -- Compute the Box-Cox transformation of 1 + `x`.
|
371 |
+
inv_boxcox -- Compute the inverse of the Box-Cox transformation.
|
372 |
+
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation.
|
373 |
+
|
374 |
+
|
375 |
+
Sigmoidal functions
|
376 |
+
^^^^^^^^^^^^^^^^^^^
|
377 |
+
|
378 |
+
.. autosummary::
|
379 |
+
:toctree: generated/
|
380 |
+
|
381 |
+
logit -- Logit ufunc for ndarrays.
|
382 |
+
expit -- Logistic sigmoid function.
|
383 |
+
log_expit -- Logarithm of the logistic sigmoid function.
|
384 |
+
|
385 |
+
Miscellaneous
|
386 |
+
^^^^^^^^^^^^^
|
387 |
+
|
388 |
+
.. autosummary::
|
389 |
+
:toctree: generated/
|
390 |
+
|
391 |
+
tklmbda -- Tukey-Lambda cumulative distribution function.
|
392 |
+
owens_t -- Owen's T Function.
|
393 |
+
|
394 |
+
|
395 |
+
Information Theory functions
|
396 |
+
----------------------------
|
397 |
+
|
398 |
+
.. autosummary::
|
399 |
+
:toctree: generated/
|
400 |
+
|
401 |
+
entr -- Elementwise function for computing entropy.
|
402 |
+
rel_entr -- Elementwise function for computing relative entropy.
|
403 |
+
kl_div -- Elementwise function for computing Kullback-Leibler divergence.
|
404 |
+
huber -- Huber loss function.
|
405 |
+
pseudo_huber -- Pseudo-Huber loss function.
|
406 |
+
|
407 |
+
|
408 |
+
Gamma and related functions
|
409 |
+
---------------------------
|
410 |
+
|
411 |
+
.. autosummary::
|
412 |
+
:toctree: generated/
|
413 |
+
|
414 |
+
gamma -- Gamma function.
|
415 |
+
gammaln -- Logarithm of the absolute value of the Gamma function for real inputs.
|
416 |
+
loggamma -- Principal branch of the logarithm of the Gamma function.
|
417 |
+
gammasgn -- Sign of the gamma function.
|
418 |
+
gammainc -- Regularized lower incomplete gamma function.
|
419 |
+
gammaincinv -- Inverse to `gammainc`.
|
420 |
+
gammaincc -- Regularized upper incomplete gamma function.
|
421 |
+
gammainccinv -- Inverse to `gammaincc`.
|
422 |
+
beta -- Beta function.
|
423 |
+
betaln -- Natural logarithm of absolute value of beta function.
|
424 |
+
betainc -- Incomplete beta integral.
|
425 |
+
betaincc -- Complemented incomplete beta integral.
|
426 |
+
betaincinv -- Inverse function to beta integral.
|
427 |
+
betainccinv -- Inverse of the complemented incomplete beta integral.
|
428 |
+
psi -- The digamma function.
|
429 |
+
rgamma -- Gamma function inverted.
|
430 |
+
polygamma -- Polygamma function n.
|
431 |
+
multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma.
|
432 |
+
digamma -- psi(x[, out]).
|
433 |
+
poch -- Rising factorial (z)_m.
|
434 |
+
|
435 |
+
|
436 |
+
Error function and Fresnel integrals
|
437 |
+
------------------------------------
|
438 |
+
|
439 |
+
.. autosummary::
|
440 |
+
:toctree: generated/
|
441 |
+
|
442 |
+
erf -- Returns the error function of complex argument.
|
443 |
+
erfc -- Complementary error function, ``1 - erf(x)``.
|
444 |
+
erfcx -- Scaled complementary error function, ``exp(x**2) * erfc(x)``.
|
445 |
+
erfi -- Imaginary error function, ``-i erf(i z)``.
|
446 |
+
erfinv -- Inverse function for erf.
|
447 |
+
erfcinv -- Inverse function for erfc.
|
448 |
+
wofz -- Faddeeva function.
|
449 |
+
dawsn -- Dawson's integral.
|
450 |
+
fresnel -- Fresnel sin and cos integrals.
|
451 |
+
fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
|
452 |
+
modfresnelp -- Modified Fresnel positive integrals.
|
453 |
+
modfresnelm -- Modified Fresnel negative integrals.
|
454 |
+
voigt_profile -- Voigt profile.
|
455 |
+
|
456 |
+
The following functions do not accept NumPy arrays (they are not
|
457 |
+
universal functions):
|
458 |
+
|
459 |
+
.. autosummary::
|
460 |
+
:toctree: generated/
|
461 |
+
|
462 |
+
erf_zeros -- Compute nt complex zeros of error function erf(z).
|
463 |
+
fresnelc_zeros -- Compute nt complex zeros of cosine Fresnel integral C(z).
|
464 |
+
fresnels_zeros -- Compute nt complex zeros of sine Fresnel integral S(z).
|
465 |
+
|
466 |
+
Legendre functions
|
467 |
+
------------------
|
468 |
+
|
469 |
+
.. autosummary::
|
470 |
+
:toctree: generated/
|
471 |
+
|
472 |
+
lpmv -- Associated Legendre function of integer order and real degree.
|
473 |
+
sph_harm -- Compute spherical harmonics.
|
474 |
+
|
475 |
+
The following functions do not accept NumPy arrays (they are not
|
476 |
+
universal functions):
|
477 |
+
|
478 |
+
.. autosummary::
|
479 |
+
:toctree: generated/
|
480 |
+
|
481 |
+
clpmn -- Associated Legendre function of the first kind for complex arguments.
|
482 |
+
lpn -- Legendre function of the first kind.
|
483 |
+
lqn -- Legendre function of the second kind.
|
484 |
+
lpmn -- Sequence of associated Legendre functions of the first kind.
|
485 |
+
lqmn -- Sequence of associated Legendre functions of the second kind.
|
486 |
+
|
487 |
+
Ellipsoidal harmonics
|
488 |
+
---------------------
|
489 |
+
|
490 |
+
.. autosummary::
|
491 |
+
:toctree: generated/
|
492 |
+
|
493 |
+
ellip_harm -- Ellipsoidal harmonic functions E^p_n(l).
|
494 |
+
ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l).
|
495 |
+
ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n.
|
496 |
+
|
497 |
+
Orthogonal polynomials
|
498 |
+
----------------------
|
499 |
+
|
500 |
+
The following functions evaluate values of orthogonal polynomials:
|
501 |
+
|
502 |
+
.. autosummary::
|
503 |
+
:toctree: generated/
|
504 |
+
|
505 |
+
assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k.
|
506 |
+
eval_legendre -- Evaluate Legendre polynomial at a point.
|
507 |
+
eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point.
|
508 |
+
eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point.
|
509 |
+
eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point.
|
510 |
+
eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point.
|
511 |
+
eval_jacobi -- Evaluate Jacobi polynomial at a point.
|
512 |
+
eval_laguerre -- Evaluate Laguerre polynomial at a point.
|
513 |
+
eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point.
|
514 |
+
eval_hermite -- Evaluate physicist's Hermite polynomial at a point.
|
515 |
+
eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point.
|
516 |
+
eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point.
|
517 |
+
eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point.
|
518 |
+
eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point.
|
519 |
+
eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point.
|
520 |
+
eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point.
|
521 |
+
|
522 |
+
The following functions compute roots and quadrature weights for
|
523 |
+
orthogonal polynomials:
|
524 |
+
|
525 |
+
.. autosummary::
|
526 |
+
:toctree: generated/
|
527 |
+
|
528 |
+
roots_legendre -- Gauss-Legendre quadrature.
|
529 |
+
roots_chebyt -- Gauss-Chebyshev (first kind) quadrature.
|
530 |
+
roots_chebyu -- Gauss-Chebyshev (second kind) quadrature.
|
531 |
+
roots_chebyc -- Gauss-Chebyshev (first kind) quadrature.
|
532 |
+
roots_chebys -- Gauss-Chebyshev (second kind) quadrature.
|
533 |
+
roots_jacobi -- Gauss-Jacobi quadrature.
|
534 |
+
roots_laguerre -- Gauss-Laguerre quadrature.
|
535 |
+
roots_genlaguerre -- Gauss-generalized Laguerre quadrature.
|
536 |
+
roots_hermite -- Gauss-Hermite (physicst's) quadrature.
|
537 |
+
roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature.
|
538 |
+
roots_gegenbauer -- Gauss-Gegenbauer quadrature.
|
539 |
+
roots_sh_legendre -- Gauss-Legendre (shifted) quadrature.
|
540 |
+
roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature.
|
541 |
+
roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature.
|
542 |
+
roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature.
|
543 |
+
|
544 |
+
The functions below, in turn, return the polynomial coefficients in
|
545 |
+
``orthopoly1d`` objects, which function similarly as `numpy.poly1d`.
|
546 |
+
The ``orthopoly1d`` class also has an attribute ``weights``, which returns
|
547 |
+
the roots, weights, and total weights for the appropriate form of Gaussian
|
548 |
+
quadrature. These are returned in an ``n x 3`` array with roots in the first
|
549 |
+
column, weights in the second column, and total weights in the final column.
|
550 |
+
Note that ``orthopoly1d`` objects are converted to `~numpy.poly1d` when doing
|
551 |
+
arithmetic, and lose information of the original orthogonal polynomial.
|
552 |
+
|
553 |
+
.. autosummary::
|
554 |
+
:toctree: generated/
|
555 |
+
|
556 |
+
legendre -- Legendre polynomial.
|
557 |
+
chebyt -- Chebyshev polynomial of the first kind.
|
558 |
+
chebyu -- Chebyshev polynomial of the second kind.
|
559 |
+
chebyc -- Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
|
560 |
+
chebys -- Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
|
561 |
+
jacobi -- Jacobi polynomial.
|
562 |
+
laguerre -- Laguerre polynomial.
|
563 |
+
genlaguerre -- Generalized (associated) Laguerre polynomial.
|
564 |
+
hermite -- Physicist's Hermite polynomial.
|
565 |
+
hermitenorm -- Normalized (probabilist's) Hermite polynomial.
|
566 |
+
gegenbauer -- Gegenbauer (ultraspherical) polynomial.
|
567 |
+
sh_legendre -- Shifted Legendre polynomial.
|
568 |
+
sh_chebyt -- Shifted Chebyshev polynomial of the first kind.
|
569 |
+
sh_chebyu -- Shifted Chebyshev polynomial of the second kind.
|
570 |
+
sh_jacobi -- Shifted Jacobi polynomial.
|
571 |
+
|
572 |
+
.. warning::
|
573 |
+
|
574 |
+
Computing values of high-order polynomials (around ``order > 20``) using
|
575 |
+
polynomial coefficients is numerically unstable. To evaluate polynomial
|
576 |
+
values, the ``eval_*`` functions should be used instead.
|
577 |
+
|
578 |
+
|
579 |
+
Hypergeometric functions
|
580 |
+
------------------------
|
581 |
+
|
582 |
+
.. autosummary::
|
583 |
+
:toctree: generated/
|
584 |
+
|
585 |
+
hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z).
|
586 |
+
hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x).
|
587 |
+
hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind.
|
588 |
+
hyp0f1 -- Confluent hypergeometric limit function 0F1.
|
589 |
+
|
590 |
+
|
591 |
+
Parabolic cylinder functions
|
592 |
+
----------------------------
|
593 |
+
|
594 |
+
.. autosummary::
|
595 |
+
:toctree: generated/
|
596 |
+
|
597 |
+
pbdv -- Parabolic cylinder function D.
|
598 |
+
pbvv -- Parabolic cylinder function V.
|
599 |
+
pbwa -- Parabolic cylinder function W.
|
600 |
+
|
601 |
+
The following functions do not accept NumPy arrays (they are not
|
602 |
+
universal functions):
|
603 |
+
|
604 |
+
.. autosummary::
|
605 |
+
:toctree: generated/
|
606 |
+
|
607 |
+
pbdv_seq -- Parabolic cylinder functions Dv(x) and derivatives.
|
608 |
+
pbvv_seq -- Parabolic cylinder functions Vv(x) and derivatives.
|
609 |
+
pbdn_seq -- Parabolic cylinder functions Dn(z) and derivatives.
|
610 |
+
|
611 |
+
Mathieu and related functions
|
612 |
+
-----------------------------
|
613 |
+
|
614 |
+
.. autosummary::
|
615 |
+
:toctree: generated/
|
616 |
+
|
617 |
+
mathieu_a -- Characteristic value of even Mathieu functions.
|
618 |
+
mathieu_b -- Characteristic value of odd Mathieu functions.
|
619 |
+
|
620 |
+
The following functions do not accept NumPy arrays (they are not
|
621 |
+
universal functions):
|
622 |
+
|
623 |
+
.. autosummary::
|
624 |
+
:toctree: generated/
|
625 |
+
|
626 |
+
mathieu_even_coef -- Fourier coefficients for even Mathieu and modified Mathieu functions.
|
627 |
+
mathieu_odd_coef -- Fourier coefficients for odd Mathieu and modified Mathieu functions.
|
628 |
+
|
629 |
+
The following return both function and first derivative:
|
630 |
+
|
631 |
+
.. autosummary::
|
632 |
+
:toctree: generated/
|
633 |
+
|
634 |
+
mathieu_cem -- Even Mathieu function and its derivative.
|
635 |
+
mathieu_sem -- Odd Mathieu function and its derivative.
|
636 |
+
mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative.
|
637 |
+
mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative.
|
638 |
+
mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative.
|
639 |
+
mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative.
|
640 |
+
|
641 |
+
Spheroidal wave functions
|
642 |
+
-------------------------
|
643 |
+
|
644 |
+
.. autosummary::
|
645 |
+
:toctree: generated/
|
646 |
+
|
647 |
+
pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative.
|
648 |
+
pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative.
|
649 |
+
pro_rad2 -- Prolate spheroidal radial function of the second kind and its derivative.
|
650 |
+
obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative.
|
651 |
+
obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative.
|
652 |
+
obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative.
|
653 |
+
pro_cv -- Characteristic value of prolate spheroidal function.
|
654 |
+
obl_cv -- Characteristic value of oblate spheroidal function.
|
655 |
+
pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
|
656 |
+
obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.
|
657 |
+
|
658 |
+
The following functions require pre-computed characteristic value:
|
659 |
+
|
660 |
+
.. autosummary::
|
661 |
+
:toctree: generated/
|
662 |
+
|
663 |
+
pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value.
|
664 |
+
pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value.
|
665 |
+
pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value.
|
666 |
+
obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value.
|
667 |
+
obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value.
|
668 |
+
obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value.
|
669 |
+
|
670 |
+
Kelvin functions
|
671 |
+
----------------
|
672 |
+
|
673 |
+
.. autosummary::
|
674 |
+
:toctree: generated/
|
675 |
+
|
676 |
+
kelvin -- Kelvin functions as complex numbers.
|
677 |
+
kelvin_zeros -- Compute nt zeros of all Kelvin functions.
|
678 |
+
ber -- Kelvin function ber.
|
679 |
+
bei -- Kelvin function bei.
|
680 |
+
berp -- Derivative of the Kelvin function `ber`.
|
681 |
+
beip -- Derivative of the Kelvin function `bei`.
|
682 |
+
ker -- Kelvin function ker.
|
683 |
+
kei -- Kelvin function kei.
|
684 |
+
kerp -- Derivative of the Kelvin function ker.
|
685 |
+
keip -- Derivative of the Kelvin function kei.
|
686 |
+
|
687 |
+
The following functions do not accept NumPy arrays (they are not
|
688 |
+
universal functions):
|
689 |
+
|
690 |
+
.. autosummary::
|
691 |
+
:toctree: generated/
|
692 |
+
|
693 |
+
ber_zeros -- Compute nt zeros of the Kelvin function ber(x).
|
694 |
+
bei_zeros -- Compute nt zeros of the Kelvin function bei(x).
|
695 |
+
berp_zeros -- Compute nt zeros of the Kelvin function ber'(x).
|
696 |
+
beip_zeros -- Compute nt zeros of the Kelvin function bei'(x).
|
697 |
+
ker_zeros -- Compute nt zeros of the Kelvin function ker(x).
|
698 |
+
kei_zeros -- Compute nt zeros of the Kelvin function kei(x).
|
699 |
+
kerp_zeros -- Compute nt zeros of the Kelvin function ker'(x).
|
700 |
+
keip_zeros -- Compute nt zeros of the Kelvin function kei'(x).
|
701 |
+
|
702 |
+
Combinatorics
|
703 |
+
-------------
|
704 |
+
|
705 |
+
.. autosummary::
|
706 |
+
:toctree: generated/
|
707 |
+
|
708 |
+
comb -- The number of combinations of N things taken k at a time.
|
709 |
+
perm -- Permutations of N things taken k at a time, i.e., k-permutations of N.
|
710 |
+
stirling2 -- Stirling numbers of the second kind.
|
711 |
+
|
712 |
+
Lambert W and related functions
|
713 |
+
-------------------------------
|
714 |
+
|
715 |
+
.. autosummary::
|
716 |
+
:toctree: generated/
|
717 |
+
|
718 |
+
lambertw -- Lambert W function.
|
719 |
+
wrightomega -- Wright Omega function.
|
720 |
+
|
721 |
+
Other special functions
|
722 |
+
-----------------------
|
723 |
+
|
724 |
+
.. autosummary::
|
725 |
+
:toctree: generated/
|
726 |
+
|
727 |
+
agm -- Arithmetic, Geometric Mean.
|
728 |
+
bernoulli -- Bernoulli numbers B0..Bn (inclusive).
|
729 |
+
binom -- Binomial coefficient
|
730 |
+
diric -- Periodic sinc function, also called the Dirichlet function.
|
731 |
+
euler -- Euler numbers E0..En (inclusive).
|
732 |
+
expn -- Exponential integral E_n.
|
733 |
+
exp1 -- Exponential integral E_1 of complex argument z.
|
734 |
+
expi -- Exponential integral Ei.
|
735 |
+
factorial -- The factorial of a number or array of numbers.
|
736 |
+
factorial2 -- Double factorial.
|
737 |
+
factorialk -- Multifactorial of n of order k, n(!!...!).
|
738 |
+
shichi -- Hyperbolic sine and cosine integrals.
|
739 |
+
sici -- Sine and cosine integrals.
|
740 |
+
softmax -- Softmax function.
|
741 |
+
log_softmax -- Logarithm of softmax function.
|
742 |
+
spence -- Spence's function, also known as the dilogarithm.
|
743 |
+
zeta -- Riemann zeta function.
|
744 |
+
zetac -- Riemann zeta function minus 1.
|
745 |
+
|
746 |
+
Convenience functions
|
747 |
+
---------------------
|
748 |
+
|
749 |
+
.. autosummary::
|
750 |
+
:toctree: generated/
|
751 |
+
|
752 |
+
cbrt -- Cube root of `x`.
|
753 |
+
exp10 -- 10**x.
|
754 |
+
exp2 -- 2**x.
|
755 |
+
radian -- Convert from degrees to radians.
|
756 |
+
cosdg -- Cosine of the angle `x` given in degrees.
|
757 |
+
sindg -- Sine of angle given in degrees.
|
758 |
+
tandg -- Tangent of angle x given in degrees.
|
759 |
+
cotdg -- Cotangent of the angle `x` given in degrees.
|
760 |
+
log1p -- Calculates log(1+x) for use when `x` is near zero.
|
761 |
+
expm1 -- ``exp(x) - 1`` for use when `x` is near zero.
|
762 |
+
cosm1 -- ``cos(x) - 1`` for use when `x` is near zero.
|
763 |
+
powm1 -- ``x**y - 1`` for use when `y` is near zero or `x` is near 1.
|
764 |
+
round -- Round to nearest integer.
|
765 |
+
xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
|
766 |
+
xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
|
767 |
+
logsumexp -- Compute the log of the sum of exponentials of input elements.
|
768 |
+
exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
|
769 |
+
sinc -- Return the sinc function.
|
770 |
+
|
771 |
+
""" # noqa: E501
|
772 |
+
|
773 |
+
import warnings
|
774 |
+
|
775 |
+
from ._sf_error import SpecialFunctionWarning, SpecialFunctionError
|
776 |
+
|
777 |
+
from . import _ufuncs
|
778 |
+
from ._ufuncs import *
|
779 |
+
|
780 |
+
# Replace some function definitions from _ufuncs to add Array API support
|
781 |
+
from ._support_alternative_backends import (
|
782 |
+
log_ndtr, ndtr, ndtri, erf, erfc, i0, i0e, i1, i1e,
|
783 |
+
gammaln, gammainc, gammaincc, logit, expit)
|
784 |
+
|
785 |
+
from . import _basic
|
786 |
+
from ._basic import *
|
787 |
+
|
788 |
+
from ._logsumexp import logsumexp, softmax, log_softmax
|
789 |
+
|
790 |
+
from . import _orthogonal
|
791 |
+
from ._orthogonal import *
|
792 |
+
|
793 |
+
from ._spfun_stats import multigammaln
|
794 |
+
from ._ellip_harm import (
|
795 |
+
ellip_harm,
|
796 |
+
ellip_harm_2,
|
797 |
+
ellip_normal
|
798 |
+
)
|
799 |
+
from ._lambertw import lambertw
|
800 |
+
from ._spherical_bessel import (
|
801 |
+
spherical_jn,
|
802 |
+
spherical_yn,
|
803 |
+
spherical_in,
|
804 |
+
spherical_kn
|
805 |
+
)
|
806 |
+
|
807 |
+
# Deprecated namespaces, to be removed in v2.0.0
|
808 |
+
from . import add_newdocs, basic, orthogonal, specfun, sf_error, spfun_stats
|
809 |
+
|
810 |
+
# We replace some function definitions from _ufuncs with those from
|
811 |
+
# _support_alternative_backends above, but those are all listed in _ufuncs.__all__,
|
812 |
+
# so there is no need to consider _support_alternative_backends.__all__ here.
|
813 |
+
__all__ = _ufuncs.__all__ + _basic.__all__ + _orthogonal.__all__
|
814 |
+
__all__ += [
|
815 |
+
'SpecialFunctionWarning',
|
816 |
+
'SpecialFunctionError',
|
817 |
+
'logsumexp',
|
818 |
+
'softmax',
|
819 |
+
'log_softmax',
|
820 |
+
'multigammaln',
|
821 |
+
'ellip_harm',
|
822 |
+
'ellip_harm_2',
|
823 |
+
'ellip_normal',
|
824 |
+
'lambertw',
|
825 |
+
'spherical_jn',
|
826 |
+
'spherical_yn',
|
827 |
+
'spherical_in',
|
828 |
+
'spherical_kn',
|
829 |
+
]
|
830 |
+
|
831 |
+
from scipy._lib._testutils import PytestTester
|
832 |
+
test = PytestTester(__name__)
|
833 |
+
del PytestTester
|
834 |
+
|
835 |
+
_depr_msg = ('\nThis function was deprecated in SciPy 1.12.0, and will be '
|
836 |
+
'removed in SciPy 1.14.0. Use scipy.special.{} instead.')
|
837 |
+
|
838 |
+
|
839 |
+
def btdtr(*args, **kwargs):  # type: ignore [no-redef]
    # Deprecated alias: emit the standard deprecation notice pointing at
    # `betainc`, then delegate unchanged to the private ufunc.
    message = _depr_msg.format('betainc')
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return _ufuncs.btdtr(*args, **kwargs)
|
843 |
+
|
844 |
+
|
845 |
+
btdtr.__doc__ = _ufuncs.btdtr.__doc__ # type: ignore [misc]
|
846 |
+
|
847 |
+
|
848 |
+
def btdtri(*args, **kwargs):  # type: ignore [no-redef]
    # Deprecated alias: emit the standard deprecation notice pointing at
    # `betaincinv`, then delegate unchanged to the private ufunc.
    message = _depr_msg.format('betaincinv')
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return _ufuncs.btdtri(*args, **kwargs)
|
852 |
+
|
853 |
+
|
854 |
+
btdtri.__doc__ = _ufuncs.btdtri.__doc__ # type: ignore [misc]
|
855 |
+
|
856 |
+
|
857 |
+
def _get_include():
|
858 |
+
"""This function is for development purposes only.
|
859 |
+
|
860 |
+
This function could disappear or its behavior could change at any time.
|
861 |
+
"""
|
862 |
+
import os
|
863 |
+
return os.path.dirname(__file__)
|
.venv/Lib/site-packages/scipy/special/__pycache__/_basic.cpython-39.pyc
ADDED
Binary file (96.9 kB). View file
|
|
.venv/Lib/site-packages/scipy/special/__pycache__/_ellip_harm.cpython-39.pyc
ADDED
Binary file (5.81 kB). View file
|
|
.venv/Lib/site-packages/scipy/special/__pycache__/_lambertw.cpython-39.pyc
ADDED
Binary file (4.16 kB). View file
|
|
.venv/Lib/site-packages/scipy/special/__pycache__/_logsumexp.cpython-39.pyc
ADDED
Binary file (8.64 kB). View file
|
|
.venv/Lib/site-packages/scipy/special/__pycache__/add_newdocs.cpython-39.pyc
ADDED
Binary file (639 Bytes). View file
|
|
.venv/Lib/site-packages/scipy/special/__pycache__/basic.cpython-39.pyc
ADDED
Binary file (1.26 kB). View file
|
|
.venv/Lib/site-packages/scipy/special/__pycache__/orthogonal.cpython-39.pyc
ADDED
Binary file (1.44 kB). View file
|
|
.venv/Lib/site-packages/scipy/special/__pycache__/sf_error.cpython-39.pyc
ADDED
Binary file (652 Bytes). View file
|
|
.venv/Lib/site-packages/scipy/special/__pycache__/specfun.cpython-39.pyc
ADDED
Binary file (764 Bytes). View file
|
|
.venv/Lib/site-packages/scipy/special/__pycache__/spfun_stats.cpython-39.pyc
ADDED
Binary file (633 Bytes). View file
|
|
.venv/Lib/site-packages/scipy/special/_precompute/__init__.py
ADDED
File without changes
|
.venv/Lib/site-packages/scipy/special/_precompute/cosine_cdf.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import mpmath
|
2 |
+
|
3 |
+
|
4 |
+
def f(x):
    """Return (pi + x + sin(x)) / (2*pi), the function approximated below."""
    numerator = mpmath.pi + x + mpmath.sin(x)
    return numerator / (2*mpmath.pi)
|
6 |
+
|
7 |
+
|
8 |
+
# Note: 40 digits might be overkill; a few more digits than the default
# might be sufficient.
mpmath.mp.dps = 40
# Taylor-expand f about -pi to degree 20 ...
ts = mpmath.taylor(f, -mpmath.pi, 20)
# ... and convert the series into a (9, 10) Pade approximant.
p, q = mpmath.pade(ts, 9, 10)

# Emit the numerator/denominator coefficients as ordinary floats so they
# can be pasted into the implementation.
p = [float(c) for c in p]
q = [float(c) for c in q]
print('p =', p)
print('q =', q)
|
.venv/Lib/site-packages/scipy/special/_precompute/expn_asy.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Precompute the polynomials for the asymptotic expansion of the
|
2 |
+
generalized exponential integral.
|
3 |
+
|
4 |
+
Sources
|
5 |
+
-------
|
6 |
+
[1] NIST, Digital Library of Mathematical Functions,
|
7 |
+
https://dlmf.nist.gov/8.20#ii
|
8 |
+
|
9 |
+
"""
|
10 |
+
import os
|
11 |
+
|
12 |
+
try:
|
13 |
+
import sympy
|
14 |
+
from sympy import Poly
|
15 |
+
x = sympy.symbols('x')
|
16 |
+
except ImportError:
|
17 |
+
pass
|
18 |
+
|
19 |
+
|
20 |
+
def generate_A(K):
    """Return the polynomials A_0, ..., A_K via their recurrence.

    Starting from A_0 = 1, each successor is built as
    A_{k+1}(x) = (1 - 2*k*x)*A_k(x) + x*(x + 1)*A_k'(x)
    (the recurrence for the asymptotic expansion in DLMF 8.20(ii)).
    """
    A = [Poly(1, x)]
    for k in range(K):
        A.append(Poly(1 - 2*k*x, x)*A[k] + Poly(x*(x + 1))*A[k].diff())
    return A
|
25 |
+
|
26 |
+
|
27 |
+
WARNING = """\
|
28 |
+
/* This file was automatically generated by _precompute/expn_asy.py.
|
29 |
+
* Do not edit it manually!
|
30 |
+
*/
|
31 |
+
"""
|
32 |
+
|
33 |
+
|
34 |
+
def main():
    """Generate ../cephes/expn.h containing the A_k polynomial tables.

    Bug fixed: the joined coefficient strings were computed but discarded,
    and the subsequent writes emitted the literal text ``{tmp}`` (two of
    them were not even f-strings), so the generated header contained
    placeholders instead of data.  The joins are now assigned to ``tmp``
    and interpolated into f-strings.
    """
    print(__doc__)
    fn = os.path.join('..', 'cephes', 'expn.h')

    K = 12
    A = generate_A(K)
    with open(fn + '.new', 'w') as f:
        f.write(WARNING)
        f.write(f"#define nA {len(A)}\n")
        for k, Ak in enumerate(A):
            # One C array per polynomial, coefficients at 18 significant digits.
            tmp = ', '.join([str(x.evalf(18)) for x in Ak.coeffs()])
            f.write(f"static const double A{k}[] = {{{tmp}}};\n")
        # Pointer table over the per-polynomial arrays ...
        tmp = ", ".join([f"A{k}" for k in range(K + 1)])
        f.write(f"static const double *A[] = {{{tmp}}};\n")
        # ... and each polynomial's degree, needed to evaluate them.
        tmp = ", ".join([str(Ak.degree()) for Ak in A])
        f.write(f"static const int Adegs[] = {{{tmp}}};\n")
    # Atomically replace the old header only after a complete write.
    os.rename(fn + '.new', fn)
|
51 |
+
|
52 |
+
|
53 |
+
if __name__ == "__main__":
|
54 |
+
main()
|
.venv/Lib/site-packages/scipy/special/_precompute/gammainc_asy.py
ADDED
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Precompute coefficients of Temme's asymptotic expansion for gammainc.
|
3 |
+
|
4 |
+
This takes about 8 hours to run on a 2.3 GHz Macbook Pro with 4GB ram.
|
5 |
+
|
6 |
+
Sources:
|
7 |
+
[1] NIST, "Digital Library of Mathematical Functions",
|
8 |
+
https://dlmf.nist.gov/
|
9 |
+
|
10 |
+
"""
|
11 |
+
import os
|
12 |
+
from scipy.special._precompute.utils import lagrange_inversion
|
13 |
+
|
14 |
+
try:
|
15 |
+
import mpmath as mp
|
16 |
+
except ImportError:
|
17 |
+
pass
|
18 |
+
|
19 |
+
|
20 |
+
def compute_a(n):
    """a_k from DLMF 5.11.6.

    Returns the first *n* coefficients; each a_k is obtained from a
    recurrence combining products of the earlier coefficients.
    """
    a = [mp.sqrt(2)/2]
    for k in range(1, n):
        ak = a[-1]/k
        # Subtract the convolution of previously computed coefficients.
        for j in range(1, len(a)):
            ak -= a[j]*a[-j]/(j + 1)
        ak /= a[0]*(1 + mp.mpf(1)/(k + 1))
        a.append(ak)
    return a
|
30 |
+
|
31 |
+
|
32 |
+
def compute_g(n):
    """g_k from DLMF 5.11.3/5.11.5.

    Built from the a_k of compute_a; mp.rf is the rising factorial
    (Pochhammer symbol).
    """
    a = compute_a(2*n)
    g = [mp.sqrt(2)*mp.rf(0.5, k)*a[2*k] for k in range(n)]
    return g
|
37 |
+
|
38 |
+
|
39 |
+
def eta(lam):
    """Function from DLMF 8.12.1 shifted to be centered at 0."""
    # The magnitude is the same on both sides of 0; only the sign differs.
    if lam > 0:
        sign = 1
    elif lam < 0:
        sign = -1
    else:
        return 0
    return sign*mp.sqrt(2*(lam - mp.log(lam + 1)))
|
47 |
+
|
48 |
+
|
49 |
+
def compute_alpha(n):
    """alpha_n from DLMF 8.12.13.

    Obtained by reverting the Taylor series of eta at 0 via Lagrange
    inversion.
    """
    coeffs = mp.taylor(eta, 0, n - 1)
    return lagrange_inversion(coeffs)
|
53 |
+
|
54 |
+
|
55 |
+
def compute_d(K, N):
    """d_{k, n} from DLMF 8.12.12.

    Returns a list of K rows, each truncated to N entries.  Extra terms
    (M = N + 2*K) are computed up front because the recurrence for row k
    consumes entries n+2 of row k-1.
    """
    M = N + 2*K
    # Row 0 comes directly from the alpha coefficients.
    d0 = [-mp.mpf(1)/3]
    alpha = compute_alpha(M + 2)
    for n in range(1, M):
        d0.append((n + 2)*alpha[n+2])
    d = [d0]
    g = compute_g(K)
    for k in range(1, K):
        dk = []
        # Each row shrinks by 2 entries: the recurrence uses d[k-1][n+2].
        for n in range(M - 2*k):
            dk.append((-1)**k*g[k]*d[0][n] + (n + 2)*d[k-1][n+2])
        d.append(dk)
    # Discard the guard terms, keeping exactly N entries per row.
    for k in range(K):
        d[k] = d[k][:N]
    return d
|
72 |
+
|
73 |
+
|
74 |
+
# C-header template written by main(); the two {} slots receive K and N.
# Double braces produce literal braces in the generated C source.
header = \
r"""/* This file was automatically generated by _precomp/gammainc.py.
 * Do not edit it manually!
 */

#ifndef IGAM_H
#define IGAM_H

#define K {}
#define N {}

static const double d[K][N] =
{{"""

# Closing lines of the generated header (include-guard terminator).
footer = \
r"""
#endif
"""
|
92 |
+
|
93 |
+
|
94 |
+
def main():
    """Generate ../cephes/igam.h containing the K x N table of d_{k,n}."""
    print(__doc__)
    K = 25
    N = 25
    # 50 working digits for the recurrence; entries are later rounded to
    # 17 significant digits (enough to round-trip a double).
    with mp.workdps(50):
        d = compute_d(K, N)
    fn = os.path.join(os.path.dirname(__file__), '..', 'cephes', 'igam.h')
    with open(fn + '.new', 'w') as f:
        f.write(header.format(K, N))
        for k, row in enumerate(d):
            row = [mp.nstr(x, 17, min_fixed=0, max_fixed=0) for x in row]
            f.write('{')
            f.write(", ".join(row))
            # Last row closes both the row and the outer array initializer.
            if k < K - 1:
                f.write('},\n')
            else:
                f.write('}};\n')
        f.write(footer)
    # Replace the old header only after the new one is fully written.
    os.rename(fn + '.new', fn)
|
113 |
+
|
114 |
+
|
115 |
+
if __name__ == "__main__":
|
116 |
+
main()
|
.venv/Lib/site-packages/scipy/special/_precompute/gammainc_data.py
ADDED
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Compute gammainc and gammaincc for large arguments and parameters
|
2 |
+
and save the values to data files for use in tests. We can't just
|
3 |
+
compare to mpmath's gammainc in test_mpmath.TestSystematic because it
|
4 |
+
would take too long.
|
5 |
+
|
6 |
+
Note that mpmath's gammainc is computed using hypercomb, but since it
|
7 |
+
doesn't allow the user to increase the maximum number of terms used in
|
8 |
+
the series it doesn't converge for many arguments. To get around this
|
9 |
+
we copy the mpmath implementation but use more terms.
|
10 |
+
|
11 |
+
This takes about 17 minutes to run on a 2.3 GHz Macbook Pro with 4GB
|
12 |
+
ram.
|
13 |
+
|
14 |
+
Sources:
|
15 |
+
[1] Fredrik Johansson and others. mpmath: a Python library for
|
16 |
+
arbitrary-precision floating-point arithmetic (version 0.19),
|
17 |
+
December 2013. http://mpmath.org/.
|
18 |
+
|
19 |
+
"""
|
20 |
+
import os
|
21 |
+
from time import time
|
22 |
+
import numpy as np
|
23 |
+
from numpy import pi
|
24 |
+
|
25 |
+
from scipy.special._mptestutils import mpf2float
|
26 |
+
|
27 |
+
try:
|
28 |
+
import mpmath as mp
|
29 |
+
except ImportError:
|
30 |
+
pass
|
31 |
+
|
32 |
+
|
33 |
+
def gammainc(a, x, dps=50, maxterms=10**8):
    """Compute gammainc exactly like mpmath does but allow for more
    summands in hypercomb. See

    mpmath/functions/expintegrals.py#L134

    in the mpmath github repository.

    """
    with mp.workdps(dps):
        z, a, b = mp.mpf(a), mp.mpf(x), mp.mpf(x)
        G = [z]
        # Negate exactly so no rounding occurs before exponentiation.
        negb = mp.fneg(b, exact=True)

        def h(z):
            # Single hypercomb term encoding the lower-incomplete-gamma
            # series, mirroring mpmath's internal representation.
            T1 = [mp.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
            return (T1,)

        res = mp.hypercomb(h, [z], maxterms=maxterms)
        return mpf2float(res)
|
53 |
+
|
54 |
+
|
55 |
+
def gammaincc(a, x, dps=50, maxterms=10**8):
    """Compute gammaincc exactly like mpmath does but allow for more
    terms in hypercomb. See

    mpmath/functions/expintegrals.py#L187

    in the mpmath github repository.

    """
    with mp.workdps(dps):
        # Rename to mpmath's internal convention: z is the parameter,
        # a the lower integration limit.
        z, a = a, x

        if mp.isint(z):
            try:
                # mpmath has a fast integer path
                return mpf2float(mp.gammainc(z, a=a, regularized=True))
            except mp.libmp.NoConvergence:
                pass
        # Negate exactly so no rounding occurs before exponentiation.
        nega = mp.fneg(a, exact=True)
        G = [z]
        # Use 2F0 series when possible; fall back to lower gamma representation
        try:
            def h(z):
                r = z-1
                return [([mp.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
            # force_series makes hypercomb raise NoConvergence instead of
            # silently switching methods, triggering the fallback below.
            return mpf2float(mp.hypercomb(h, [z], force_series=True))
        except mp.libmp.NoConvergence:
            def h(z):
                # Two-term representation: Gamma(z) minus the lower
                # incomplete gamma series.
                T1 = [], [1, z-1], [z], G, [], [], 0
                T2 = [-mp.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
                return T1, T2
            return mpf2float(mp.hypercomb(h, [z], maxterms=maxterms))
|
87 |
+
|
88 |
+
|
89 |
+
def main():
    """Sample (a, x) over polar wedges and write reference data files."""
    t0 = time()
    # It would be nice to have data for larger values, but either this
    # requires prohibitively large precision (dps > 800) or mpmath has
    # a bug. For example, gammainc(1e20, 1e20, dps=800) returns a
    # value around 0.03, while the true value should be close to 0.5
    # (DLMF 8.12.15).
    print(__doc__)
    pwd = os.path.dirname(__file__)
    # Radii and angular ranges: gammainc is sampled below the diagonal
    # a = x (theta <= pi/4 side), gammaincc above it.
    r = np.logspace(4, 14, 30)
    ltheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(0.6)), 30)
    utheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(1.4)), 30)

    regimes = [(gammainc, ltheta), (gammaincc, utheta)]
    for func, theta in regimes:
        rg, thetag = np.meshgrid(r, theta)
        # Polar-to-Cartesian: a along cos(theta), x along sin(theta).
        a, x = rg*np.cos(thetag), rg*np.sin(thetag)
        a, x = a.flatten(), x.flatten()
        dataset = []
        for i, (a0, x0) in enumerate(zip(a, x)):
            if func == gammaincc:
                # Exploit the fast integer path in gammaincc whenever
                # possible so that the computation doesn't take too
                # long
                a0, x0 = np.floor(a0), np.floor(x0)
            dataset.append((a0, x0, func(a0, x0)))
        dataset = np.array(dataset)
        filename = os.path.join(pwd, '..', 'tests', 'data', 'local',
                                f'{func.__name__}.txt')
        np.savetxt(filename, dataset)

    print(f"{(time() - t0)/60} minutes elapsed")
|
121 |
+
|
122 |
+
|
123 |
+
if __name__ == "__main__":
|
124 |
+
main()
|
.venv/Lib/site-packages/scipy/special/_precompute/lambertw.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Compute a Pade approximation for the principal branch of the
|
2 |
+
Lambert W function around 0 and compare it to various other
|
3 |
+
approximations.
|
4 |
+
|
5 |
+
"""
|
6 |
+
import numpy as np
|
7 |
+
|
8 |
+
try:
|
9 |
+
import mpmath
|
10 |
+
import matplotlib.pyplot as plt
|
11 |
+
except ImportError:
|
12 |
+
pass
|
13 |
+
|
14 |
+
|
15 |
+
def lambertw_pade():
    """Return the (3, 2) Pade approximant of lambertw around 0 as (p, q)."""
    # Derivatives of order 0..5 at the origin supply the Taylor data.
    taylor_data = [mpmath.diff(mpmath.lambertw, 0, n=order)
                   for order in range(6)]
    return mpmath.pade(taylor_data, 3, 2)
|
19 |
+
|
20 |
+
|
21 |
+
def main():
    """Plot the error of three lambertw approximations over the complex plane."""
    print(__doc__)
    with mpmath.workdps(50):
        p, q = lambertw_pade()
        # Reverse to highest-degree-first order, as np.polyval expects.
        p, q = p[::-1], q[::-1]
        print(f"p = {p}")
        print(f"q = {q}")

    # Reference values on a grid in the complex plane via mpmath.
    x, y = np.linspace(-1.5, 1.5, 75), np.linspace(-1.5, 1.5, 75)
    x, y = np.meshgrid(x, y)
    z = x + 1j*y
    lambertw_std = []
    for z0 in z.flatten():
        lambertw_std.append(complex(mpmath.lambertw(z0)))
    lambertw_std = np.array(lambertw_std).reshape(x.shape)

    fig, axes = plt.subplots(nrows=3, ncols=1)
    # Compare Pade approximation to true result
    p = np.array([float(p0) for p0 in p])
    q = np.array([float(q0) for q0 in q])
    pade_approx = np.polyval(p, z)/np.polyval(q, z)
    pade_err = abs(pade_approx - lambertw_std)
    axes[0].pcolormesh(x, y, pade_err)
    # Compare two terms of asymptotic series to true result
    asy_approx = np.log(z) - np.log(np.log(z))
    asy_err = abs(asy_approx - lambertw_std)
    axes[1].pcolormesh(x, y, asy_err)
    # Compare two terms of the series around the branch point to the
    # true result
    p = np.sqrt(2*(np.exp(1)*z + 1))
    series_approx = -1 + p - p**2/3
    series_err = abs(series_approx - lambertw_std)
    im = axes[2].pcolormesh(x, y, series_err)

    fig.colorbar(im, ax=axes.ravel().tolist())
    plt.show()

    # Second figure: where does the Pade approximant beat the asymptotic
    # series?  The red line sketches a candidate switch-over boundary.
    fig, ax = plt.subplots(nrows=1, ncols=1)
    pade_better = pade_err < asy_err
    im = ax.pcolormesh(x, y, pade_better)
    t = np.linspace(-0.3, 0.3)
    ax.plot(-2.5*abs(t) - 0.2, t, 'r')
    fig.colorbar(im, ax=ax)
    plt.show()
|
65 |
+
|
66 |
+
|
67 |
+
if __name__ == '__main__':
|
68 |
+
main()
|
.venv/Lib/site-packages/scipy/special/_precompute/loggamma.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Precompute series coefficients for log-Gamma."""
|
2 |
+
|
3 |
+
try:
|
4 |
+
import mpmath
|
5 |
+
except ImportError:
|
6 |
+
pass
|
7 |
+
|
8 |
+
|
9 |
+
def stirling_series(N):
    """Return the first N Stirling-series coefficients B_{2n}/(2n(2n-1))."""
    terms = []
    # 100 working digits so the printed 20-digit values are fully accurate.
    with mpmath.workdps(100):
        for n in range(1, N + 1):
            terms.append(mpmath.bernoulli(2*n)/(2*n*(2*n - 1)))
    return terms
|
14 |
+
|
15 |
+
|
16 |
+
def taylor_series_at_1(N):
    """Return the first N Taylor coefficients of log-Gamma about 1."""
    # 100 working digits so the printed 20-digit values are fully accurate.
    with mpmath.workdps(100):
        coeffs = [-mpmath.euler]
        coeffs.extend((-1)**n*mpmath.zeta(n)/n for n in range(2, N + 1))
    return coeffs
|
23 |
+
|
24 |
+
|
25 |
+
def main():
    """Print Stirling and Taylor coefficient tables, highest order first."""
    print(__doc__)
    print()
    # Coefficients are reversed so the output lists highest order first,
    # matching the order the C implementation stores them in.
    stirling_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
                       for x in stirling_series(8)[::-1]]
    taylor_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
                     for x in taylor_series_at_1(23)[::-1]]
    print("Stirling series coefficients")
    print("----------------------------")
    print("\n".join(stirling_coeffs))
    print()
    print("Taylor series coefficients")
    print("--------------------------")
    print("\n".join(taylor_coeffs))
    print()
|
40 |
+
|
41 |
+
|
42 |
+
if __name__ == '__main__':
|
43 |
+
main()
|
.venv/Lib/site-packages/scipy/special/_precompute/struve_convergence.py
ADDED
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Convergence regions of the expansions used in ``struve.c``
|
3 |
+
|
4 |
+
Note that for v >> z both functions tend rapidly to 0,
|
5 |
+
and for v << -z, they tend to infinity.
|
6 |
+
|
7 |
+
The floating-point functions over/underflow in the lower left and right
|
8 |
+
corners of the figure.
|
9 |
+
|
10 |
+
|
11 |
+
Figure legend
|
12 |
+
=============
|
13 |
+
|
14 |
+
Red region
|
15 |
+
Power series is close (1e-12) to the mpmath result
|
16 |
+
|
17 |
+
Blue region
|
18 |
+
Asymptotic series is close to the mpmath result
|
19 |
+
|
20 |
+
Green region
|
21 |
+
Bessel series is close to the mpmath result
|
22 |
+
|
23 |
+
Dotted colored lines
|
24 |
+
Boundaries of the regions
|
25 |
+
|
26 |
+
Solid colored lines
|
27 |
+
Boundaries estimated by the routine itself. These will be used
|
28 |
+
for determining which of the results to use.
|
29 |
+
|
30 |
+
Black dashed line
|
31 |
+
The line z = 0.7*|v| + 12
|
32 |
+
|
33 |
+
"""
|
34 |
+
import numpy as np
|
35 |
+
import matplotlib.pyplot as plt
|
36 |
+
|
37 |
+
import mpmath
|
38 |
+
|
39 |
+
|
40 |
+
def err_metric(a, b, atol=1e-290):
    """Elementwise relative error of *a* against reference *b*.

    The tiny ``atol`` in the denominator regularizes entries where the
    reference underflows to (near) zero.  Entries where both values are
    the same infinity are counted as exact agreement (error 0).
    """
    relative = np.abs(a - b) / (atol + np.abs(b))
    # inf == inf is a match, not an infinite error
    relative[np.isinf(b) & (a == b)] = 0
    return relative
|
44 |
+
|
45 |
+
|
46 |
+
def do_plot(is_h=True):
    """Plot convergence regions of the three Struve expansions.

    Parameters
    ----------
    is_h : bool, optional
        If True plot Struve H, otherwise Struve L.

    Compares the power series, asymptotic series and Bessel series
    implementations against a 50-digit mpmath reference on a (v, z) grid
    and draws contours of the actual and self-estimated errors.
    """
    from scipy.special._ufuncs import (_struve_power_series,
                                       _struve_asymp_large_z,
                                       _struve_bessel_series)

    vs = np.linspace(-1000, 1000, 91)
    zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])

    # each routine returns (value, error estimate)
    rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
    ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
    rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)

    mpmath.mp.dps = 50
    if is_h:
        def sh(v, z):
            return float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
    else:
        def sh(v, z):
            return float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
    ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])

    # actual error vs. reference; +1e-300 keeps log10 finite
    err_a = err_metric(ra[0], ex) + 1e-300
    err_p = err_metric(rp[0], ex) + 1e-300
    err_b = err_metric(rb[0], ex) + 1e-300

    # each routine's own relative error estimate
    err_est_a = abs(ra[1]/ra[0])
    err_est_p = abs(rp[1]/rp[0])
    err_est_b = abs(rb[1]/rb[0])

    z_cutoff = 0.7*abs(vs) + 12

    levels = [-1000, -12]

    plt.cla()

    # NOTE: the original called plt.hold(1); Axes.hold was deprecated in
    # matplotlib 2.0 and removed in 3.0 (overlaying is now the default),
    # so the call is dropped.
    plt.contourf(vs, zs, np.log10(err_p).T,
                 levels=levels, colors=['r', 'r'], alpha=0.1)
    plt.contourf(vs, zs, np.log10(err_a).T,
                 levels=levels, colors=['b', 'b'], alpha=0.1)
    plt.contourf(vs, zs, np.log10(err_b).T,
                 levels=levels, colors=['g', 'g'], alpha=0.1)

    plt.contour(vs, zs, np.log10(err_p).T,
                levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
    plt.contour(vs, zs, np.log10(err_a).T,
                levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
    plt.contour(vs, zs, np.log10(err_b).T,
                levels=levels, colors=['g', 'g'], linestyles=[':', ':'])

    lp = plt.contour(vs, zs, np.log10(err_est_p).T,
                     levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
    la = plt.contour(vs, zs, np.log10(err_est_a).T,
                     levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
    lb = plt.contour(vs, zs, np.log10(err_est_b).T,
                     levels=levels, colors=['g', 'g'], linestyles=['-', '-'])

    plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
    plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
    plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})

    # heuristic boundary z = 0.7*|v| + 12 used by the implementation
    plt.plot(vs, z_cutoff, 'k--')

    plt.xlim(vs.min(), vs.max())
    plt.ylim(zs.min(), zs.max())

    plt.xlabel('v')
    plt.ylabel('z')
114 |
+
|
115 |
+
|
116 |
+
def main():
    """Draw both Struve H and Struve L convergence figures side by side."""
    plt.clf()
    panels = [(121, True, 'Struve H'),
              (122, False, 'Struve L')]
    for subplot_spec, is_h, title in panels:
        plt.subplot(subplot_spec)
        do_plot(is_h)
        plt.title(title)
    plt.savefig('struve_convergence.png')
    plt.show()


if __name__ == "__main__":
    main()
|
.venv/Lib/site-packages/scipy/special/_precompute/utils.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
try:
|
2 |
+
import mpmath as mp
|
3 |
+
except ImportError:
|
4 |
+
pass
|
5 |
+
|
6 |
+
try:
|
7 |
+
from sympy.abc import x
|
8 |
+
except ImportError:
|
9 |
+
pass
|
10 |
+
|
11 |
+
|
12 |
+
def lagrange_inversion(a):
    """Given a series

    f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1),

    use the Lagrange inversion formula to compute a series

    g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1)

    so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so
    necessarily b[0] = 0 too.

    The algorithm is naive and could be improved, but speed isn't an
    issue here and it's easy to read.

    """
    n = len(a)
    # Assemble f(x) symbolically from the given coefficients.
    f = sum(coeff * x**i for i, coeff in enumerate(a))
    # h(x) = x/f(x), truncated at order n; its powers drive the inversion.
    h = (x/f).series(x, 0, n).removeO()
    powers_of_h = [h**0]
    for _ in range(n):
        powers_of_h.append((powers_of_h[-1] * h).expand())
    # Lagrange inversion: b[k] = [x**(k-1)] h(x)**k / k, with b[0] = 0.
    coeffs = [mp.mpf(0)]
    coeffs.extend(powers_of_h[k].coeff(x, k - 1) / k for k in range(1, n))
    return [mp.mpf(c) for c in coeffs]
|
.venv/Lib/site-packages/scipy/special/_precompute/wright_bessel.py
ADDED
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Precompute coefficients of several series expansions
|
2 |
+
of Wright's generalized Bessel function Phi(a, b, x).
|
3 |
+
|
4 |
+
See https://dlmf.nist.gov/10.46.E1 with rho=a, beta=b, z=x.
|
5 |
+
"""
|
6 |
+
from argparse import ArgumentParser, RawTextHelpFormatter
|
7 |
+
import numpy as np
|
8 |
+
from scipy.integrate import quad
|
9 |
+
from scipy.optimize import minimize_scalar, curve_fit
|
10 |
+
from time import time
|
11 |
+
|
12 |
+
try:
|
13 |
+
import sympy
|
14 |
+
from sympy import EulerGamma, Rational, S, Sum, \
|
15 |
+
factorial, gamma, gammasimp, pi, polygamma, symbols, zeta
|
16 |
+
from sympy.polys.polyfuncs import horner
|
17 |
+
except ImportError:
|
18 |
+
pass
|
19 |
+
|
20 |
+
|
21 |
+
def series_small_a():
    """Taylor series expansion of Phi(a, b, x) in a=0 up to order 5.

    Returns a human-readable string listing the factors A[i] (powers of a),
    X[i] (polynomials in x) and B[i] (polygamma brackets in b) such that
    Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i]).
    """
    order = 5
    a, b, x, k = symbols("a b x k")
    A = []  # terms with a
    X = []  # terms with x
    B = []  # terms with b (polygammas)
    # Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i])
    expression = Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
    expression = gamma(b)/sympy.exp(x) * expression

    # nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
    for n in range(0, order+1):
        term = expression.diff(a, n).subs(a, 0).simplify().doit()
        # set the whole bracket involving polygammas to 1
        x_part = (term.subs(polygamma(0, b), 1)
                  .replace(polygamma, lambda *args: 0))
        # sign convention: x part always positive
        x_part *= (-1)**n

        A.append(a**n/factorial(n))
        X.append(horner(x_part))
        B.append(horner((term/x_part).simplify()))

    # Typo fix: the emitted header previously read "Tylor".
    s = "Taylor series expansion of Phi(a, b, x) in a=0 up to order 5.\n"
    s += "Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i], i=0..5)\n"
    for name, c in zip(['A', 'X', 'B'], [A, X, B]):
        for i in range(len(c)):
            s += f"\n{name}[{i}] = " + str(c[i])
    return s
52 |
+
|
53 |
+
|
54 |
+
# expansion of digamma
|
55 |
+
def dg_series(z, n):
    """Symbolic expansion of digamma(z) in z=0 to order n.

    See https://dlmf.nist.gov/5.7.E4 and with https://dlmf.nist.gov/5.5.E2
    """
    k = symbols("k")
    # zeta-series tail plus the pole and Euler-gamma terms
    tail = sympy.summation((-1)**k * zeta(k) * z**(k-1), (k, 2, n+1))
    return tail - EulerGamma - 1/z
63 |
+
|
64 |
+
|
65 |
+
def pg_series(k, z, n):
    """Symbolic expansion of polygamma(k, z) in z=0 to order n."""
    # polygamma(k, z) is the k-th derivative of digamma(z)
    digamma_expansion = dg_series(z, n + k)
    return sympy.diff(digamma_expansion, z, k)
68 |
+
|
69 |
+
|
70 |
+
def series_small_a_small_b():
    """Taylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5.

    Be aware of cancellation of poles in b=0 of digamma(b)/Gamma(b) and
    polygamma functions.

    digamma(b)/Gamma(b) = -1 - 2*M_EG*b + O(b^2)
    digamma(b)^2/Gamma(b) = 1/b + 3*M_EG + b*(-5/12*PI^2+7/2*M_EG^2) + O(b^2)
    polygamma(1, b)/Gamma(b) = 1/b + M_EG + b*(1/12*PI^2 + 1/2*M_EG^2) + O(b^2)
    and so on.

    Returns a human-readable string of the coefficients A[i], X[i] and C[k],
    followed by symbolic consistency checks on the structure of B[i].
    """
    order = 5
    a, b, x, k = symbols("a b x k")
    # placeholder symbols so constants survive Poly() coefficient extraction
    M_PI, M_EG, M_Z3 = symbols("M_PI M_EG M_Z3")
    c_subs = {pi: M_PI, EulerGamma: M_EG, zeta(3): M_Z3}
    A = []  # terms with a
    X = []  # terms with x
    B = []  # terms with b (polygammas expanded)
    C = []  # terms that generate B
    # Phi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i])
    # B[0] = 1
    # B[k] = sum(C[k] * b**k/k!, k=0..)
    # Note: C[k] can be obtained from a series expansion of 1/gamma(b).
    expression = gamma(b)/sympy.exp(x) * \
        Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))

    # nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
    for n in range(0, order+1):
        term = expression.diff(a, n).subs(a, 0).simplify().doit()
        # set the whole bracket involving polygammas to 1
        x_part = (term.subs(polygamma(0, b), 1)
                  .replace(polygamma, lambda *args: 0))
        # sign convention: x part always positive
        x_part *= (-1)**n
        # expansion of polygamma part with 1/gamma(b)
        pg_part = term/x_part/gamma(b)
        if n >= 1:
            # Note: highest term is digamma^n
            pg_part = pg_part.replace(polygamma,
                                      lambda k, x: pg_series(k, x, order+1+n))
            # expand in b around 0, then eliminate polygamma(2, 1)
            pg_part = (pg_part.series(b, 0, n=order+1-n)
                       .removeO()
                       .subs(polygamma(2, 1), -2*zeta(3))
                       .simplify()
                       )

        A.append(a**n/factorial(n))
        X.append(horner(x_part))
        B.append(pg_part)

    # Calculate C and put in the k!
    C = sympy.Poly(B[1].subs(c_subs), b).coeffs()
    C.reverse()
    for i in range(len(C)):
        C[i] = (C[i] * factorial(i)).simplify()

    s = "Tylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5."
    s += "\nPhi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i], i=0..5)\n"
    s += "B[0] = 1\n"
    s += "B[i] = sum(C[k+i-1] * b**k/k!, k=0..)\n"
    s += "\nM_PI = pi"
    s += "\nM_EG = EulerGamma"
    s += "\nM_Z3 = zeta(3)"
    for name, c in zip(['A', 'X'], [A, X]):
        for i in range(len(c)):
            s += f"\n{name}[{i}] = "
            s += str(c[i])
    # For C, do also compute the values numerically
    for i in range(len(C)):
        s += f"\n# C[{i}] = "
        s += str(C[i])
        s += f"\nC[{i}] = "
        s += str(C[i].subs({M_EG: EulerGamma, M_PI: pi, M_Z3: zeta(3)})
                 .evalf(17))

    # Does B have the assumed structure?
    s += "\n\nTest if B[i] does have the assumed structure."
    s += "\nC[i] are derived from B[1] alone."
    s += "\nTest B[2] == C[1] + b*C[2] + b^2/2*C[3] + b^3/6*C[4] + .."
    test = sum([b**k/factorial(k) * C[k+1] for k in range(order-1)])
    test = (test - B[2].subs(c_subs)).simplify()
    s += f"\ntest successful = {test==S(0)}"
    s += "\nTest B[3] == C[2] + b*C[3] + b^2/2*C[4] + .."
    test = sum([b**k/factorial(k) * C[k+2] for k in range(order-2)])
    test = (test - B[3].subs(c_subs)).simplify()
    s += f"\ntest successful = {test==S(0)}"
    return s
157 |
+
|
158 |
+
|
159 |
+
def asymptotic_series():
    """Asymptotic expansion for large x.

    Phi(a, b, x) ~ Z^(1/2-b) * exp((1+a)/a * Z) * sum_k (-1)^k * C_k / Z^k
    Z = (a*x)^(1/(1+a))

    Wright (1935) lists the coefficients C_0 and C_1 (he calls them a_0 and
    a_1). With slightly different notation, Paris (2017) lists coefficients
    c_k up to order k=3.
    Paris (2017) uses ZP = (1+a)/a * Z (ZP = Z of Paris) and
    C_k = C_0 * (-a/(1+a))^k * c_k

    Returns C-ready source text for the coefficients C[1]..C[order].
    """
    order = 8

    class g(sympy.Function):
        """Helper function g according to Wright (1935)

        g(n, rho, v) = (1 + (rho+2)/3 * v + (rho+2)*(rho+3)/(2*3) * v^2 + ...)

        Note: Wright (1935) uses square root of above definition.
        """
        nargs = 3

        @classmethod
        def eval(cls, n, rho, v):
            # recursive partial sum of the series, truncated after v**n
            if not n >= 0:
                raise ValueError("must have n >= 0")
            elif n == 0:
                return 1
            else:
                return g(n-1, rho, v) \
                    + gammasimp(gamma(rho+2+n)/gamma(rho+2)) \
                    / gammasimp(gamma(3+n)/gamma(3))*v**n

    class coef_C(sympy.Function):
        """Calculate coefficients C_m for integer m.

        C_m is the coefficient of v^(2*m) in the Taylor expansion in v=0 of
        Gamma(m+1/2)/(2*pi) * (2/(rho+1))^(m+1/2) * (1-v)^(-b)
        * g(rho, v)^(-m-1/2)
        """
        nargs = 3

        @classmethod
        def eval(cls, m, rho, beta):
            if not m >= 0:
                raise ValueError("must have m >= 0")

            v = symbols("v")
            expression = (1-v)**(-beta) * g(2*m, rho, v)**(-m-Rational(1, 2))
            # Taylor coefficient of v**(2*m) at v=0
            res = expression.diff(v, 2*m).subs(v, 0) / factorial(2*m)
            res = res * (gamma(m + Rational(1, 2)) / (2*pi)
                         * (2/(rho+1))**(m + Rational(1, 2)))
            return res

    # in order to have nice ordering/sorting of expressions, we set a = xa.
    xa, b, xap1 = symbols("xa b xap1")
    C0 = coef_C(0, xa, b)
    # a1 = a(1, rho, beta)
    s = "Asymptotic expansion for large x\n"
    s += "Phi(a, b, x) = Z**(1/2-b) * exp((1+a)/a * Z) \n"
    s += "             * sum((-1)**k * C[k]/Z**k, k=0..6)\n\n"
    s += "Z      = pow(a * x, 1/(1+a))\n"
    s += "A[k]   = pow(a, k)\n"
    s += "B[k]   = pow(b, k)\n"
    s += "Ap1[k] = pow(1+a, k)\n\n"
    s += "C[0] = 1./sqrt(2. * M_PI * Ap1[1])\n"
    for i in range(1, order+1):
        # normalize C_i by C_0/(1+a)^i, then clear denominators for C output
        expr = (coef_C(i, xa, b) / (C0/(1+xa)**i)).simplify()
        factor = [x.denominator() for x in sympy.Poly(expr).coeffs()]
        factor = sympy.lcm(factor)
        expr = (expr * factor).simplify().collect(b, sympy.factor)
        expr = expr.xreplace({xa+1: xap1})
        s += f"C[{i}] = C[0] / ({factor} * Ap1[{i}])\n"
        s += f"C[{i}] *= {str(expr)}\n\n"
    import re
    # rewrite python powers as C array lookups
    re_a = re.compile(r'xa\*\*(\d+)')
    s = re_a.sub(r'A[\1]', s)
    re_b = re.compile(r'b\*\*(\d+)')
    s = re_b.sub(r'B[\1]', s)
    s = s.replace('xap1', 'Ap1[1]')
    s = s.replace('xa', 'a')
    # max integer = 2^31-1 = 2,147,483,647. Solution: Put a point after 10
    # or more digits.
    re_digits = re.compile(r'(\d{10,})')
    s = re_digits.sub(r'\1.', s)
    return s
246 |
+
|
247 |
+
|
248 |
+
def optimal_epsilon_integral():
    """Fit optimal choice of epsilon for integral representation.

    The integrand of
        int_0^pi P(eps, a, b, x, phi) * dphi
    can exhibit oscillatory behaviour. It stems from the cosine of P and can be
    minimized by minimizing the arc length of the argument
        f(phi) = eps * sin(phi) - x * eps^(-a) * sin(a * phi) + (1 - b) * phi
    of cos(f(phi)).
    We minimize the arc length in eps for a grid of values (a, b, x) and fit a
    parametric function to it.

    Returns a string with the fitted parameters A0..A5.
    """
    def fp(eps, a, b, x, phi):
        """Derivative of f w.r.t. phi."""
        eps_a = np.power(1. * eps, -a)
        return eps * np.cos(phi) - a * x * eps_a * np.cos(a * phi) + 1 - b

    def arclength(eps, a, b, x, epsrel=1e-2, limit=100):
        """Compute arc length of f over [0, pi].

        Note that the arc length of a function f from t0 to t1 is given by
            int_t0^t1 sqrt(1 + f'(t)^2) dt
        """
        # Bug fix: forward the caller's ``limit``; it used to be
        # hard-coded to 100, silently ignoring the parameter.
        return quad(lambda phi: np.sqrt(1 + fp(eps, a, b, x, phi)**2),
                    0, np.pi,
                    epsrel=epsrel, limit=limit)[0]

    # grid of minimal arc length values
    data_a = [1e-3, 0.1, 0.5, 0.9, 1, 2, 4, 5, 6, 8]
    data_b = [0, 1, 4, 7, 10]
    data_x = [1, 1.5, 2, 4, 10, 20, 50, 100, 200, 500, 1e3, 5e3, 1e4]
    data_a, data_b, data_x = np.meshgrid(data_a, data_b, data_x)
    data_a, data_b, data_x = (data_a.flatten(), data_b.flatten(),
                              data_x.flatten())
    best_eps = []
    for i in range(data_x.size):
        # bounded 1-d minimization of the arc length in eps per grid point
        best_eps.append(
            minimize_scalar(lambda eps: arclength(eps, data_a[i], data_b[i],
                                                  data_x[i]),
                            bounds=(1e-3, 1000),
                            method='Bounded', options={'xatol': 1e-3}).x
        )
    best_eps = np.array(best_eps)
    # pandas would be nice, but here a dictionary is enough
    df = {'a': data_a,
          'b': data_b,
          'x': data_x,
          'eps': best_eps,
          }

    def func(data, A0, A1, A2, A3, A4, A5):
        """Compute parametric function to fit."""
        a = data['a']
        b = data['b']
        x = data['x']
        return (A0 * b * np.exp(-0.5 * a)
                + np.exp(A1 + 1 / (1 + a) * np.log(x) - A2 * np.exp(-A3 * a)
                         + A4 / (1 + np.exp(A5 * a))))

    func_params = list(curve_fit(func, df, df['eps'], method='trf')[0])

    s = "Fit optimal eps for integrand P via minimal arc length\n"
    s += "with parametric function:\n"
    s += "optimal_eps = (A0 * b * exp(-a/2) + exp(A1 + 1 / (1 + a) * log(x)\n"
    s += "               - A2 * exp(-A3 * a) + A4 / (1 + exp(A5 * a)))\n\n"
    s += "Fitted parameters A0 to A5 are:\n"
    s += ', '.join([f'{x:.5g}' for x in func_params])
    return s
316 |
+
|
317 |
+
|
318 |
+
def main():
    """CLI entry point: run one of the four precomputations and time it."""
    t0 = time()
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument('action', type=int, choices=[1, 2, 3, 4],
                        help='chose what expansion to precompute\n'
                             '1 : Series for small a\n'
                             '2 : Series for small a and small b\n'
                             '3 : Asymptotic series for large x\n'
                             '    This may take some time (>4h).\n'
                             '4 : Fit optimal eps for integral representation.'
                        )
    args = parser.parse_args()

    # dispatch table: action number -> precompute routine
    actions = {1: series_small_a,
               2: series_small_a_small_b,
               3: asymptotic_series,
               4: optimal_epsilon_integral,
               }
    chosen = actions.get(args.action)
    if chosen is None:
        print("Invalid input.")
    else:
        print(chosen())
    print(f"\n{(time() - t0)/60:.1f} minutes elapsed.\n")


if __name__ == '__main__':
    main()
|
.venv/Lib/site-packages/scipy/special/_precompute/wright_bessel_data.py
ADDED
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Compute a grid of values for Wright's generalized Bessel function
|
2 |
+
and save the values to data files for use in tests. Using mpmath directly in
|
3 |
+
tests would take too long.
|
4 |
+
|
5 |
+
This takes about 10 minutes to run on a 2.7 GHz i7 Macbook Pro.
|
6 |
+
"""
|
7 |
+
from functools import lru_cache
|
8 |
+
import os
|
9 |
+
from time import time
|
10 |
+
|
11 |
+
import numpy as np
|
12 |
+
from scipy.special._mptestutils import mpf2float
|
13 |
+
|
14 |
+
try:
|
15 |
+
import mpmath as mp
|
16 |
+
except ImportError:
|
17 |
+
pass
|
18 |
+
|
19 |
+
# exp_inf: smallest value x for which exp(x) == inf
|
20 |
+
exp_inf = 709.78271289338403
|
21 |
+
|
22 |
+
|
23 |
+
# 64 Byte per value
|
24 |
+
@lru_cache(maxsize=100_000)
def rgamma_cached(x, dps):
    """Memoized reciprocal gamma 1/Gamma(x) at ``dps`` decimal digits."""
    with mp.workdps(dps):
        value = mp.rgamma(x)
    return value
28 |
+
|
29 |
+
|
30 |
+
def mp_wright_bessel(a, b, x, dps=50, maxterms=2000):
    """Compute Wright's generalized Bessel function as Series with mpmath.

    Sums the defining series sum_k x**k / (k! * Gamma(a*k + b)) at ``dps``
    working decimal digits and rounds the result to a float.

    Parameters
    ----------
    a, b, x : float
        Arguments of Phi(a, b, x); converted to mpf internally.
    dps : int, optional
        Working decimal precision for the summation.
    maxterms : int, optional
        Cap on the number of series terms, passed to ``mp.nsum`` via ``steps``.
    """
    with mp.workdps(dps):
        a, b, x = mp.mpf(a), mp.mpf(b), mp.mpf(x)
        # NOTE(review): ``tol=dps`` passes the digit count (e.g. 50) as the
        # tolerance argument; mpmath tolerances are normally small floats --
        # confirm intent against the mpmath.nsum documentation.
        res = mp.nsum(lambda k: x**k / mp.fac(k)
                      * rgamma_cached(a * k + b, dps=dps),
                      [0, mp.inf],
                      tol=dps, method='s', steps=[maxterms]
                      )
    return mpf2float(res)
41 |
+
|
42 |
+
|
43 |
+
def main():
    """Compute the wright_bessel test data grid and write it to disk.

    Builds a meshgrid over (a, b, x), filters out regions known to
    over/underflow or converge too slowly, evaluates the mpmath reference
    for every remaining point and saves the (a, b, x, value) rows to
    ``../tests/data/local/wright_bessel.txt``.
    """
    t0 = time()
    print(__doc__)
    pwd = os.path.dirname(__file__)
    eps = np.finfo(float).eps * 100

    # sample points bracketing interesting thresholds with (1 +/- eps)
    a_range = np.array([eps,
                        1e-4 * (1 - eps), 1e-4, 1e-4 * (1 + eps),
                        1e-3 * (1 - eps), 1e-3, 1e-3 * (1 + eps),
                        0.1, 0.5,
                        1 * (1 - eps), 1, 1 * (1 + eps),
                        1.5, 2, 4.999, 5, 10])
    b_range = np.array([0, eps, 1e-10, 1e-5, 0.1, 1, 2, 10, 20, 100])
    x_range = np.array([0, eps, 1 - eps, 1, 1 + eps,
                        1.5,
                        2 - eps, 2, 2 + eps,
                        9 - eps, 9, 9 + eps,
                        10 * (1 - eps), 10, 10 * (1 + eps),
                        100 * (1 - eps), 100, 100 * (1 + eps),
                        500, exp_inf, 1e3, 1e5, 1e10, 1e20])

    a_range, b_range, x_range = np.meshgrid(a_range, b_range, x_range,
                                            indexing='ij')
    a_range = a_range.flatten()
    b_range = b_range.flatten()
    x_range = x_range.flatten()

    # filter out some values, especially too large x
    # (bug fix: the duplicated (a < 6.2, x > 1e18) condition was removed)
    bool_filter = ~((a_range < 5e-3) & (x_range >= exp_inf))
    bool_filter = bool_filter & ~((a_range < 0.2) & (x_range > exp_inf))
    bool_filter = bool_filter & ~((a_range < 0.5) & (x_range > 1e3))
    bool_filter = bool_filter & ~((a_range < 0.56) & (x_range > 5e3))
    bool_filter = bool_filter & ~((a_range < 1) & (x_range > 1e4))
    bool_filter = bool_filter & ~((a_range < 1.4) & (x_range > 1e5))
    bool_filter = bool_filter & ~((a_range < 1.8) & (x_range > 1e6))
    bool_filter = bool_filter & ~((a_range < 2.2) & (x_range > 1e7))
    bool_filter = bool_filter & ~((a_range < 2.5) & (x_range > 1e8))
    bool_filter = bool_filter & ~((a_range < 2.9) & (x_range > 1e9))
    bool_filter = bool_filter & ~((a_range < 3.3) & (x_range > 1e10))
    bool_filter = bool_filter & ~((a_range < 3.7) & (x_range > 1e11))
    bool_filter = bool_filter & ~((a_range < 4) & (x_range > 1e12))
    bool_filter = bool_filter & ~((a_range < 4.4) & (x_range > 1e13))
    bool_filter = bool_filter & ~((a_range < 4.7) & (x_range > 1e14))
    bool_filter = bool_filter & ~((a_range < 5.1) & (x_range > 1e15))
    bool_filter = bool_filter & ~((a_range < 5.4) & (x_range > 1e16))
    bool_filter = bool_filter & ~((a_range < 5.8) & (x_range > 1e17))
    bool_filter = bool_filter & ~((a_range < 6.2) & (x_range > 1e18))
    bool_filter = bool_filter & ~((a_range < 6.5) & (x_range > 1e19))
    bool_filter = bool_filter & ~((a_range < 6.9) & (x_range > 1e20))

    # filter out known values that do not meet the required numerical accuracy
    # see test test_wright_data_grid_failures
    failing = np.array([
        [0.1, 100, 709.7827128933841],
        [0.5, 10, 709.7827128933841],
        [0.5, 10, 1000],
        [0.5, 100, 1000],
        [1, 20, 100000],
        [1, 100, 100000],
        [1.0000000000000222, 20, 100000],
        [1.0000000000000222, 100, 100000],
        [1.5, 0, 500],
        [1.5, 2.220446049250313e-14, 500],
        [1.5, 1.e-10, 500],
        [1.5, 1.e-05, 500],
        [1.5, 0.1, 500],
        [1.5, 20, 100000],
        [1.5, 100, 100000],
        ]).tolist()

    does_fail = np.full_like(a_range, False, dtype=bool)
    for i in range(x_range.size):
        if [a_range[i], b_range[i], x_range[i]] in failing:
            does_fail[i] = True

    # filter and flatten
    a_range = a_range[bool_filter]
    b_range = b_range[bool_filter]
    x_range = x_range[bool_filter]
    does_fail = does_fail[bool_filter]

    dataset = []
    print(f"Computing {x_range.size} single points.")
    print("Tests will fail for the following data points:")
    for i in range(x_range.size):
        a = a_range[i]
        b = b_range[i]
        x = x_range[i]
        # take care of difficult corner cases
        maxterms = 1000
        if a < 1e-6 and x >= exp_inf/10:
            maxterms = 2000
        f = mp_wright_bessel(a, b, x, maxterms=maxterms)
        if does_fail[i]:
            print("failing data point a, b, x, value = "
                  f"[{a}, {b}, {x}, {f}]")
        else:
            dataset.append((a, b, x, f))
    dataset = np.array(dataset)

    filename = os.path.join(pwd, '..', 'tests', 'data', 'local',
                            'wright_bessel.txt')
    np.savetxt(filename, dataset)

    print(f"{(time() - t0)/60:.1f} minutes elapsed")


if __name__ == "__main__":
    main()
|
.venv/Lib/site-packages/scipy/special/_precompute/wrightomega.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
|
3 |
+
try:
|
4 |
+
import mpmath
|
5 |
+
except ImportError:
|
6 |
+
pass
|
7 |
+
|
8 |
+
|
9 |
+
def mpmath_wrightomega(x):
    """Reference Wright omega via the -0.5 branch of mpmath's Lambert W."""
    branch = mpmath.mpf('-0.5')
    return mpmath.lambertw(mpmath.exp(x), branch)
11 |
+
|
12 |
+
|
13 |
+
def wrightomega_series_error(x):
    """Relative error of the leading-order approximation omega(x) ~ x."""
    desired = mpmath_wrightomega(x)
    return abs(x - desired) / desired
17 |
+
|
18 |
+
|
19 |
+
def wrightomega_exp_error(x):
    """Relative error of the approximation omega(x) ~ exp(x)."""
    desired = mpmath_wrightomega(x)
    return abs(mpmath.exp(x) - desired) / desired
23 |
+
|
24 |
+
|
25 |
+
def main():
    """Print both approximation errors and whether they meet 2*eps."""
    desired_error = 2 * np.finfo(float).eps
    checks = [('Series Error', wrightomega_series_error,
               [1e5, 1e10, 1e15, 1e20]),
              ('Exp error', wrightomega_exp_error,
               [-10, -25, -50, -100, -200, -400, -700, -740])]
    for label, error_fn, points in checks:
        print(label)
        for x in points:
            with mpmath.workdps(100):
                error = error_fn(x)
            print(x, error, error < desired_error)


if __name__ == '__main__':
    main()
|
.venv/Lib/site-packages/scipy/special/_precompute/zetac.py
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Compute the Taylor series for zeta(x) - 1 around x = 0."""
|
2 |
+
try:
|
3 |
+
import mpmath
|
4 |
+
except ImportError:
|
5 |
+
pass
|
6 |
+
|
7 |
+
|
8 |
+
def zetac_series(N):
    """First N Taylor coefficients of zeta(x) - 1 around x = 0.

    The constant term is zeta(0) - 1 = -1.5; higher coefficients come from
    derivatives of zeta computed at 100 decimal digits.
    """
    with mpmath.workdps(100):
        coeffs = [-1.5]
        for n in range(1, N):
            coeffs.append(mpmath.diff(mpmath.zeta, 0, n)
                          / mpmath.factorial(n))
    return coeffs
16 |
+
|
17 |
+
|
18 |
+
def main():
    """Print the first 10 series coefficients, highest order first."""
    print(__doc__)
    series = zetac_series(10)
    formatted = [mpmath.nstr(c, 20, min_fixed=0, max_fixed=0)
                 for c in series]
    print("\n".join(reversed(formatted)))


if __name__ == '__main__':
    main()
|
.venv/Lib/site-packages/scipy/special/_ufuncs_cxx.pyx
ADDED
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is automatically generated by _generate_pyx.py.
|
2 |
+
# Do not edit manually!
|
3 |
+
|
4 |
+
from libc.math cimport NAN
|
5 |
+
|
6 |
+
include "_ufuncs_extra_code_common.pxi"
|
7 |
+
|
8 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
9 |
+
cdef double complex _func_ccospi "ccospi"(double complex) noexcept nogil
|
10 |
+
cdef void *_export_ccospi = <void*>_func_ccospi
|
11 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
12 |
+
cdef double complex _func_lambertw_scalar "lambertw_scalar"(double complex, long, double) noexcept nogil
|
13 |
+
cdef void *_export_lambertw_scalar = <void*>_func_lambertw_scalar
|
14 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
15 |
+
cdef double complex _func_csinpi "csinpi"(double complex) noexcept nogil
|
16 |
+
cdef void *_export_csinpi = <void*>_func_csinpi
|
17 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
18 |
+
cdef double _func__stirling2_inexact "_stirling2_inexact"(double, double) noexcept nogil
|
19 |
+
cdef void *_export__stirling2_inexact = <void*>_func__stirling2_inexact
|
20 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
21 |
+
cdef float _func_ibeta_float "ibeta_float"(float, float, float) noexcept nogil
|
22 |
+
cdef void *_export_ibeta_float = <void*>_func_ibeta_float
|
23 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
24 |
+
cdef double _func_ibeta_double "ibeta_double"(double, double, double) noexcept nogil
|
25 |
+
cdef void *_export_ibeta_double = <void*>_func_ibeta_double
|
26 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
27 |
+
cdef float _func_ibetac_float "ibetac_float"(float, float, float) noexcept nogil
|
28 |
+
cdef void *_export_ibetac_float = <void*>_func_ibetac_float
|
29 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
30 |
+
cdef double _func_ibetac_double "ibetac_double"(double, double, double) noexcept nogil
|
31 |
+
cdef void *_export_ibetac_double = <void*>_func_ibetac_double
|
32 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
33 |
+
cdef float _func_ibetac_inv_float "ibetac_inv_float"(float, float, float) noexcept nogil
|
34 |
+
cdef void *_export_ibetac_inv_float = <void*>_func_ibetac_inv_float
|
35 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
36 |
+
cdef double _func_ibetac_inv_double "ibetac_inv_double"(double, double, double) noexcept nogil
|
37 |
+
cdef void *_export_ibetac_inv_double = <void*>_func_ibetac_inv_double
|
38 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
39 |
+
cdef float _func_ibeta_inv_float "ibeta_inv_float"(float, float, float) noexcept nogil
|
40 |
+
cdef void *_export_ibeta_inv_float = <void*>_func_ibeta_inv_float
|
41 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
42 |
+
cdef double _func_ibeta_inv_double "ibeta_inv_double"(double, double, double) noexcept nogil
|
43 |
+
cdef void *_export_ibeta_inv_double = <void*>_func_ibeta_inv_double
|
44 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
45 |
+
cdef double _func_binom "binom"(double, double) noexcept nogil
|
46 |
+
cdef void *_export_binom = <void*>_func_binom
|
47 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
48 |
+
cdef double _func_faddeeva_dawsn "faddeeva_dawsn"(double) noexcept nogil
|
49 |
+
cdef void *_export_faddeeva_dawsn = <void*>_func_faddeeva_dawsn
|
50 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
51 |
+
cdef double complex _func_faddeeva_dawsn_complex "faddeeva_dawsn_complex"(double complex) noexcept nogil
|
52 |
+
cdef void *_export_faddeeva_dawsn_complex = <void*>_func_faddeeva_dawsn_complex
|
53 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
54 |
+
cdef double _func_fellint_RC "fellint_RC"(double, double) noexcept nogil
|
55 |
+
cdef void *_export_fellint_RC = <void*>_func_fellint_RC
|
56 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
57 |
+
cdef double complex _func_cellint_RC "cellint_RC"(double complex, double complex) noexcept nogil
|
58 |
+
cdef void *_export_cellint_RC = <void*>_func_cellint_RC
|
59 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
60 |
+
cdef double _func_fellint_RD "fellint_RD"(double, double, double) noexcept nogil
|
61 |
+
cdef void *_export_fellint_RD = <void*>_func_fellint_RD
|
62 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
63 |
+
cdef double complex _func_cellint_RD "cellint_RD"(double complex, double complex, double complex) noexcept nogil
|
64 |
+
cdef void *_export_cellint_RD = <void*>_func_cellint_RD
|
65 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
66 |
+
cdef double _func_fellint_RF "fellint_RF"(double, double, double) noexcept nogil
|
67 |
+
cdef void *_export_fellint_RF = <void*>_func_fellint_RF
|
68 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
69 |
+
cdef double complex _func_cellint_RF "cellint_RF"(double complex, double complex, double complex) noexcept nogil
|
70 |
+
cdef void *_export_cellint_RF = <void*>_func_cellint_RF
|
71 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
72 |
+
cdef double _func_fellint_RG "fellint_RG"(double, double, double) noexcept nogil
|
73 |
+
cdef void *_export_fellint_RG = <void*>_func_fellint_RG
|
74 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
75 |
+
cdef double complex _func_cellint_RG "cellint_RG"(double complex, double complex, double complex) noexcept nogil
|
76 |
+
cdef void *_export_cellint_RG = <void*>_func_cellint_RG
|
77 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
78 |
+
cdef double _func_fellint_RJ "fellint_RJ"(double, double, double, double) noexcept nogil
|
79 |
+
cdef void *_export_fellint_RJ = <void*>_func_fellint_RJ
|
80 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
81 |
+
cdef double complex _func_cellint_RJ "cellint_RJ"(double complex, double complex, double complex, double complex) noexcept nogil
|
82 |
+
cdef void *_export_cellint_RJ = <void*>_func_cellint_RJ
|
83 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
84 |
+
cdef double complex _func_faddeeva_erf "faddeeva_erf"(double complex) noexcept nogil
|
85 |
+
cdef void *_export_faddeeva_erf = <void*>_func_faddeeva_erf
|
86 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
87 |
+
cdef double complex _func_faddeeva_erfc_complex "faddeeva_erfc_complex"(double complex) noexcept nogil
|
88 |
+
cdef void *_export_faddeeva_erfc_complex = <void*>_func_faddeeva_erfc_complex
|
89 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
90 |
+
cdef double _func_faddeeva_erfcx "faddeeva_erfcx"(double) noexcept nogil
|
91 |
+
cdef void *_export_faddeeva_erfcx = <void*>_func_faddeeva_erfcx
|
92 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
93 |
+
cdef double complex _func_faddeeva_erfcx_complex "faddeeva_erfcx_complex"(double complex) noexcept nogil
|
94 |
+
cdef void *_export_faddeeva_erfcx_complex = <void*>_func_faddeeva_erfcx_complex
|
95 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
96 |
+
cdef double _func_faddeeva_erfi "faddeeva_erfi"(double) noexcept nogil
|
97 |
+
cdef void *_export_faddeeva_erfi = <void*>_func_faddeeva_erfi
|
98 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
99 |
+
cdef double complex _func_faddeeva_erfi_complex "faddeeva_erfi_complex"(double complex) noexcept nogil
|
100 |
+
cdef void *_export_faddeeva_erfi_complex = <void*>_func_faddeeva_erfi_complex
|
101 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
102 |
+
cdef float _func_erfinv_float "erfinv_float"(float) noexcept nogil
|
103 |
+
cdef void *_export_erfinv_float = <void*>_func_erfinv_float
|
104 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
105 |
+
cdef double _func_erfinv_double "erfinv_double"(double) noexcept nogil
|
106 |
+
cdef void *_export_erfinv_double = <void*>_func_erfinv_double
|
107 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
108 |
+
cdef double _func_expit "expit"(double) noexcept nogil
|
109 |
+
cdef void *_export_expit = <void*>_func_expit
|
110 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
111 |
+
cdef float _func_expitf "expitf"(float) noexcept nogil
|
112 |
+
cdef void *_export_expitf = <void*>_func_expitf
|
113 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
114 |
+
cdef long double _func_expitl "expitl"(long double) noexcept nogil
|
115 |
+
cdef void *_export_expitl = <void*>_func_expitl
|
116 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
117 |
+
cdef double complex _func_cgamma "cgamma"(double complex) noexcept nogil
|
118 |
+
cdef void *_export_cgamma = <void*>_func_cgamma
|
119 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
120 |
+
cdef double _func_hyp1f1_double "hyp1f1_double"(double, double, double) noexcept nogil
|
121 |
+
cdef void *_export_hyp1f1_double = <void*>_func_hyp1f1_double
|
122 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
123 |
+
cdef double _func_log_expit "log_expit"(double) noexcept nogil
|
124 |
+
cdef void *_export_log_expit = <void*>_func_log_expit
|
125 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
126 |
+
cdef float _func_log_expitf "log_expitf"(float) noexcept nogil
|
127 |
+
cdef void *_export_log_expitf = <void*>_func_log_expitf
|
128 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
129 |
+
cdef long double _func_log_expitl "log_expitl"(long double) noexcept nogil
|
130 |
+
cdef void *_export_log_expitl = <void*>_func_log_expitl
|
131 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
132 |
+
cdef double _func_faddeeva_log_ndtr "faddeeva_log_ndtr"(double) noexcept nogil
|
133 |
+
cdef void *_export_faddeeva_log_ndtr = <void*>_func_faddeeva_log_ndtr
|
134 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
135 |
+
cdef double complex _func_faddeeva_log_ndtr_complex "faddeeva_log_ndtr_complex"(double complex) noexcept nogil
|
136 |
+
cdef void *_export_faddeeva_log_ndtr_complex = <void*>_func_faddeeva_log_ndtr_complex
|
137 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
138 |
+
cdef double _func_loggamma_real "loggamma_real"(double) noexcept nogil
|
139 |
+
cdef void *_export_loggamma_real = <void*>_func_loggamma_real
|
140 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
141 |
+
cdef double complex _func_loggamma "loggamma"(double complex) noexcept nogil
|
142 |
+
cdef void *_export_loggamma = <void*>_func_loggamma
|
143 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
144 |
+
cdef double _func_logit "logit"(double) noexcept nogil
|
145 |
+
cdef void *_export_logit = <void*>_func_logit
|
146 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
147 |
+
cdef float _func_logitf "logitf"(float) noexcept nogil
|
148 |
+
cdef void *_export_logitf = <void*>_func_logitf
|
149 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
150 |
+
cdef long double _func_logitl "logitl"(long double) noexcept nogil
|
151 |
+
cdef void *_export_logitl = <void*>_func_logitl
|
152 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
153 |
+
cdef double complex _func_faddeeva_ndtr "faddeeva_ndtr"(double complex) noexcept nogil
|
154 |
+
cdef void *_export_faddeeva_ndtr = <void*>_func_faddeeva_ndtr
|
155 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
156 |
+
cdef float _func_powm1_float "powm1_float"(float, float) noexcept nogil
|
157 |
+
cdef void *_export_powm1_float = <void*>_func_powm1_float
|
158 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
159 |
+
cdef double _func_powm1_double "powm1_double"(double, double) noexcept nogil
|
160 |
+
cdef void *_export_powm1_double = <void*>_func_powm1_double
|
161 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
162 |
+
cdef double complex _func_cdigamma "cdigamma"(double complex) noexcept nogil
|
163 |
+
cdef void *_export_cdigamma = <void*>_func_cdigamma
|
164 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
165 |
+
cdef double _func_digamma "digamma"(double) noexcept nogil
|
166 |
+
cdef void *_export_digamma = <void*>_func_digamma
|
167 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
168 |
+
cdef double complex _func_crgamma "crgamma"(double complex) noexcept nogil
|
169 |
+
cdef void *_export_crgamma = <void*>_func_crgamma
|
170 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
171 |
+
cdef double _func_faddeeva_voigt_profile "faddeeva_voigt_profile"(double, double, double) noexcept nogil
|
172 |
+
cdef void *_export_faddeeva_voigt_profile = <void*>_func_faddeeva_voigt_profile
|
173 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
174 |
+
cdef double complex _func_faddeeva_w "faddeeva_w"(double complex) noexcept nogil
|
175 |
+
cdef void *_export_faddeeva_w = <void*>_func_faddeeva_w
|
176 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
177 |
+
cdef double complex _func_wrightomega "wrightomega"(double complex) noexcept nogil
|
178 |
+
cdef void *_export_wrightomega = <void*>_func_wrightomega
|
179 |
+
cdef extern from r"_ufuncs_cxx_defs.h":
|
180 |
+
cdef double _func_wrightomega_real "wrightomega_real"(double) noexcept nogil
|
181 |
+
cdef void *_export_wrightomega_real = <void*>_func_wrightomega_real
|
.venv/Lib/site-packages/scipy/special/_ufuncs_cxx_defs.h
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#ifndef UFUNCS_PROTO_H
|
2 |
+
#define UFUNCS_PROTO_H 1
|
3 |
+
#include "_special.h"
|
4 |
+
npy_cdouble ccospi(npy_cdouble);
|
5 |
+
npy_cdouble lambertw_scalar(npy_cdouble, npy_long, npy_double);
|
6 |
+
npy_cdouble csinpi(npy_cdouble);
|
7 |
+
#include "stirling2.h"
|
8 |
+
npy_double _stirling2_inexact(npy_double, npy_double);
|
9 |
+
#include "boost_special_functions.h"
|
10 |
+
npy_float ibeta_float(npy_float, npy_float, npy_float);
|
11 |
+
npy_double ibeta_double(npy_double, npy_double, npy_double);
|
12 |
+
npy_float ibetac_float(npy_float, npy_float, npy_float);
|
13 |
+
npy_double ibetac_double(npy_double, npy_double, npy_double);
|
14 |
+
npy_float ibetac_inv_float(npy_float, npy_float, npy_float);
|
15 |
+
npy_double ibetac_inv_double(npy_double, npy_double, npy_double);
|
16 |
+
npy_float ibeta_inv_float(npy_float, npy_float, npy_float);
|
17 |
+
npy_double ibeta_inv_double(npy_double, npy_double, npy_double);
|
18 |
+
npy_double binom(npy_double, npy_double);
|
19 |
+
#include "_faddeeva.h"
|
20 |
+
npy_double faddeeva_dawsn(npy_double);
|
21 |
+
npy_cdouble faddeeva_dawsn_complex(npy_cdouble);
|
22 |
+
#include "ellint_carlson_wrap.hh"
|
23 |
+
npy_double fellint_RC(npy_double, npy_double);
|
24 |
+
npy_cdouble cellint_RC(npy_cdouble, npy_cdouble);
|
25 |
+
npy_double fellint_RD(npy_double, npy_double, npy_double);
|
26 |
+
npy_cdouble cellint_RD(npy_cdouble, npy_cdouble, npy_cdouble);
|
27 |
+
npy_double fellint_RF(npy_double, npy_double, npy_double);
|
28 |
+
npy_cdouble cellint_RF(npy_cdouble, npy_cdouble, npy_cdouble);
|
29 |
+
npy_double fellint_RG(npy_double, npy_double, npy_double);
|
30 |
+
npy_cdouble cellint_RG(npy_cdouble, npy_cdouble, npy_cdouble);
|
31 |
+
npy_double fellint_RJ(npy_double, npy_double, npy_double, npy_double);
|
32 |
+
npy_cdouble cellint_RJ(npy_cdouble, npy_cdouble, npy_cdouble, npy_cdouble);
|
33 |
+
npy_cdouble faddeeva_erf(npy_cdouble);
|
34 |
+
npy_cdouble faddeeva_erfc_complex(npy_cdouble);
|
35 |
+
npy_double faddeeva_erfcx(npy_double);
|
36 |
+
npy_cdouble faddeeva_erfcx_complex(npy_cdouble);
|
37 |
+
npy_double faddeeva_erfi(npy_double);
|
38 |
+
npy_cdouble faddeeva_erfi_complex(npy_cdouble);
|
39 |
+
npy_float erfinv_float(npy_float);
|
40 |
+
npy_double erfinv_double(npy_double);
|
41 |
+
#include "_logit.h"
|
42 |
+
npy_double expit(npy_double);
|
43 |
+
npy_float expitf(npy_float);
|
44 |
+
npy_longdouble expitl(npy_longdouble);
|
45 |
+
npy_cdouble cgamma(npy_cdouble);
|
46 |
+
npy_double hyp1f1_double(npy_double, npy_double, npy_double);
|
47 |
+
npy_double log_expit(npy_double);
|
48 |
+
npy_float log_expitf(npy_float);
|
49 |
+
npy_longdouble log_expitl(npy_longdouble);
|
50 |
+
npy_double faddeeva_log_ndtr(npy_double);
|
51 |
+
npy_cdouble faddeeva_log_ndtr_complex(npy_cdouble);
|
52 |
+
npy_double loggamma_real(npy_double);
|
53 |
+
npy_cdouble loggamma(npy_cdouble);
|
54 |
+
npy_double logit(npy_double);
|
55 |
+
npy_float logitf(npy_float);
|
56 |
+
npy_longdouble logitl(npy_longdouble);
|
57 |
+
npy_cdouble faddeeva_ndtr(npy_cdouble);
|
58 |
+
npy_float powm1_float(npy_float, npy_float);
|
59 |
+
npy_double powm1_double(npy_double, npy_double);
|
60 |
+
npy_cdouble cdigamma(npy_cdouble);
|
61 |
+
npy_double digamma(npy_double);
|
62 |
+
npy_cdouble crgamma(npy_cdouble);
|
63 |
+
npy_double faddeeva_voigt_profile(npy_double, npy_double, npy_double);
|
64 |
+
npy_cdouble faddeeva_w(npy_cdouble);
|
65 |
+
#include "_wright.h"
|
66 |
+
npy_cdouble wrightomega(npy_cdouble);
|
67 |
+
npy_double wrightomega_real(npy_double);
|
68 |
+
#endif
|
.venv/Lib/site-packages/scipy/special/_ufuncs_defs.h
ADDED
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#ifndef UFUNCS_PROTO_H
|
2 |
+
#define UFUNCS_PROTO_H 1
|
3 |
+
#include "_cosine.h"
|
4 |
+
npy_double cosine_cdf(npy_double);
|
5 |
+
npy_double cosine_invcdf(npy_double);
|
6 |
+
#include "cephes.h"
|
7 |
+
npy_double cospi(npy_double);
|
8 |
+
npy_double igam_fac(npy_double, npy_double);
|
9 |
+
npy_double kolmogc(npy_double);
|
10 |
+
npy_double kolmogci(npy_double);
|
11 |
+
npy_double kolmogp(npy_double);
|
12 |
+
npy_double lanczos_sum_expg_scaled(npy_double);
|
13 |
+
npy_double lgam1p(npy_double);
|
14 |
+
npy_double log1pmx(npy_double);
|
15 |
+
npy_double riemann_zeta(npy_double);
|
16 |
+
#include "scaled_exp1.h"
|
17 |
+
npy_double scaled_exp1(npy_double);
|
18 |
+
npy_double sinpi(npy_double);
|
19 |
+
npy_double smirnovc(npy_int, npy_double);
|
20 |
+
npy_double smirnovci(npy_int, npy_double);
|
21 |
+
npy_double smirnovp(npy_int, npy_double);
|
22 |
+
npy_double struve_asymp_large_z(npy_double, npy_double, npy_int, npy_double *);
|
23 |
+
npy_double struve_bessel_series(npy_double, npy_double, npy_int, npy_double *);
|
24 |
+
npy_double struve_power_series(npy_double, npy_double, npy_int, npy_double *);
|
25 |
+
npy_double zeta(npy_double, npy_double);
|
26 |
+
#include "amos_wrappers.h"
|
27 |
+
npy_int airy_wrap(npy_double, npy_double *, npy_double *, npy_double *, npy_double *);
|
28 |
+
npy_int cairy_wrap(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *);
|
29 |
+
npy_int cairy_wrap_e(npy_cdouble, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *);
|
30 |
+
npy_int cairy_wrap_e_real(npy_double, npy_double *, npy_double *, npy_double *, npy_double *);
|
31 |
+
npy_double bdtr(npy_double, npy_int, npy_double);
|
32 |
+
npy_double bdtrc(npy_double, npy_int, npy_double);
|
33 |
+
npy_double bdtri(npy_double, npy_int, npy_double);
|
34 |
+
#include "specfun_wrappers.h"
|
35 |
+
npy_double bei_wrap(npy_double);
|
36 |
+
npy_double beip_wrap(npy_double);
|
37 |
+
npy_double ber_wrap(npy_double);
|
38 |
+
npy_double berp_wrap(npy_double);
|
39 |
+
npy_double besselpoly(npy_double, npy_double, npy_double);
|
40 |
+
npy_double beta(npy_double, npy_double);
|
41 |
+
npy_double lbeta(npy_double, npy_double);
|
42 |
+
npy_double btdtr(npy_double, npy_double, npy_double);
|
43 |
+
npy_double incbi(npy_double, npy_double, npy_double);
|
44 |
+
npy_double cbrt(npy_double);
|
45 |
+
npy_double chdtr(npy_double, npy_double);
|
46 |
+
npy_double chdtrc(npy_double, npy_double);
|
47 |
+
npy_double chdtri(npy_double, npy_double);
|
48 |
+
npy_double cosdg(npy_double);
|
49 |
+
npy_double cosm1(npy_double);
|
50 |
+
npy_double cotdg(npy_double);
|
51 |
+
npy_double ellpe(npy_double);
|
52 |
+
npy_double ellie(npy_double, npy_double);
|
53 |
+
npy_int ellpj(npy_double, npy_double, npy_double *, npy_double *, npy_double *, npy_double *);
|
54 |
+
npy_double ellik(npy_double, npy_double);
|
55 |
+
npy_double ellpk(npy_double);
|
56 |
+
npy_double erf(npy_double);
|
57 |
+
npy_double erfc(npy_double);
|
58 |
+
npy_double erfcinv(npy_double);
|
59 |
+
npy_cdouble cexp1_wrap(npy_cdouble);
|
60 |
+
npy_double exp1_wrap(npy_double);
|
61 |
+
npy_double exp10(npy_double);
|
62 |
+
npy_double exp2(npy_double);
|
63 |
+
npy_cdouble cexpi_wrap(npy_cdouble);
|
64 |
+
npy_double expi_wrap(npy_double);
|
65 |
+
npy_double expm1(npy_double);
|
66 |
+
npy_double expn(npy_int, npy_double);
|
67 |
+
npy_double fdtr(npy_double, npy_double, npy_double);
|
68 |
+
npy_double fdtrc(npy_double, npy_double, npy_double);
|
69 |
+
npy_double fdtri(npy_double, npy_double, npy_double);
|
70 |
+
npy_int fresnl(npy_double, npy_double *, npy_double *);
|
71 |
+
npy_int cfresnl_wrap(npy_cdouble, npy_cdouble *, npy_cdouble *);
|
72 |
+
npy_double Gamma(npy_double);
|
73 |
+
npy_double igam(npy_double, npy_double);
|
74 |
+
npy_double igamc(npy_double, npy_double);
|
75 |
+
npy_double igamci(npy_double, npy_double);
|
76 |
+
npy_double igami(npy_double, npy_double);
|
77 |
+
npy_double lgam(npy_double);
|
78 |
+
npy_double gammasgn(npy_double);
|
79 |
+
npy_double gdtr(npy_double, npy_double, npy_double);
|
80 |
+
npy_double gdtrc(npy_double, npy_double, npy_double);
|
81 |
+
npy_cdouble cbesh_wrap1(npy_double, npy_cdouble);
|
82 |
+
npy_cdouble cbesh_wrap1_e(npy_double, npy_cdouble);
|
83 |
+
npy_cdouble cbesh_wrap2(npy_double, npy_cdouble);
|
84 |
+
npy_cdouble cbesh_wrap2_e(npy_double, npy_cdouble);
|
85 |
+
npy_cdouble chyp1f1_wrap(npy_double, npy_double, npy_cdouble);
|
86 |
+
npy_double hyp2f1(npy_double, npy_double, npy_double, npy_double);
|
87 |
+
npy_double i0(npy_double);
|
88 |
+
npy_double i0e(npy_double);
|
89 |
+
npy_double i1(npy_double);
|
90 |
+
npy_double i1e(npy_double);
|
91 |
+
npy_int it2i0k0_wrap(npy_double, npy_double *, npy_double *);
|
92 |
+
npy_int it2j0y0_wrap(npy_double, npy_double *, npy_double *);
|
93 |
+
npy_double it2struve0_wrap(npy_double);
|
94 |
+
npy_int itairy_wrap(npy_double, npy_double *, npy_double *, npy_double *, npy_double *);
|
95 |
+
npy_int it1i0k0_wrap(npy_double, npy_double *, npy_double *);
|
96 |
+
npy_int it1j0y0_wrap(npy_double, npy_double *, npy_double *);
|
97 |
+
npy_double itmodstruve0_wrap(npy_double);
|
98 |
+
npy_double itstruve0_wrap(npy_double);
|
99 |
+
npy_cdouble cbesi_wrap(npy_double, npy_cdouble);
|
100 |
+
npy_double iv(npy_double, npy_double);
|
101 |
+
npy_cdouble cbesi_wrap_e(npy_double, npy_cdouble);
|
102 |
+
npy_double cbesi_wrap_e_real(npy_double, npy_double);
|
103 |
+
npy_double j0(npy_double);
|
104 |
+
npy_double j1(npy_double);
|
105 |
+
npy_cdouble cbesj_wrap(npy_double, npy_cdouble);
|
106 |
+
npy_double cbesj_wrap_real(npy_double, npy_double);
|
107 |
+
npy_cdouble cbesj_wrap_e(npy_double, npy_cdouble);
|
108 |
+
npy_double cbesj_wrap_e_real(npy_double, npy_double);
|
109 |
+
npy_double k0(npy_double);
|
110 |
+
npy_double k0e(npy_double);
|
111 |
+
npy_double k1(npy_double);
|
112 |
+
npy_double k1e(npy_double);
|
113 |
+
npy_double kei_wrap(npy_double);
|
114 |
+
npy_double keip_wrap(npy_double);
|
115 |
+
npy_int kelvin_wrap(npy_double, npy_cdouble *, npy_cdouble *, npy_cdouble *, npy_cdouble *);
|
116 |
+
npy_double ker_wrap(npy_double);
|
117 |
+
npy_double kerp_wrap(npy_double);
|
118 |
+
npy_double cbesk_wrap_real_int(npy_int, npy_double);
|
119 |
+
npy_double kolmogi(npy_double);
|
120 |
+
npy_double kolmogorov(npy_double);
|
121 |
+
npy_cdouble cbesk_wrap(npy_double, npy_cdouble);
|
122 |
+
npy_double cbesk_wrap_real(npy_double, npy_double);
|
123 |
+
npy_cdouble cbesk_wrap_e(npy_double, npy_cdouble);
|
124 |
+
npy_double cbesk_wrap_e_real(npy_double, npy_double);
|
125 |
+
npy_double log1p(npy_double);
|
126 |
+
npy_double pmv_wrap(npy_double, npy_double, npy_double);
|
127 |
+
npy_double cem_cva_wrap(npy_double, npy_double);
|
128 |
+
npy_double sem_cva_wrap(npy_double, npy_double);
|
129 |
+
npy_int cem_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *);
|
130 |
+
npy_int mcm1_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *);
|
131 |
+
npy_int mcm2_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *);
|
132 |
+
npy_int msm1_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *);
|
133 |
+
npy_int msm2_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *);
|
134 |
+
npy_int sem_wrap(npy_double, npy_double, npy_double, npy_double *, npy_double *);
|
135 |
+
npy_int modified_fresnel_minus_wrap(npy_double, npy_cdouble *, npy_cdouble *);
|
136 |
+
npy_int modified_fresnel_plus_wrap(npy_double, npy_cdouble *, npy_cdouble *);
|
137 |
+
npy_double struve_l(npy_double, npy_double);
|
138 |
+
npy_double nbdtr(npy_int, npy_int, npy_double);
|
139 |
+
npy_double nbdtrc(npy_int, npy_int, npy_double);
|
140 |
+
npy_double nbdtri(npy_int, npy_int, npy_double);
|
141 |
+
npy_double ndtr(npy_double);
|
142 |
+
npy_double ndtri(npy_double);
|
143 |
+
npy_double oblate_aswfa_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *);
|
144 |
+
npy_int oblate_aswfa_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *);
|
145 |
+
npy_double oblate_segv_wrap(npy_double, npy_double, npy_double);
|
146 |
+
npy_double oblate_radial1_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *);
|
147 |
+
npy_int oblate_radial1_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *);
|
148 |
+
npy_double oblate_radial2_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *);
|
149 |
+
npy_int oblate_radial2_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *);
|
150 |
+
npy_double owens_t(npy_double, npy_double);
|
151 |
+
npy_int pbdv_wrap(npy_double, npy_double, npy_double *, npy_double *);
|
152 |
+
npy_int pbvv_wrap(npy_double, npy_double, npy_double *, npy_double *);
|
153 |
+
npy_int pbwa_wrap(npy_double, npy_double, npy_double *, npy_double *);
|
154 |
+
npy_double pdtr(npy_double, npy_double);
|
155 |
+
npy_double pdtrc(npy_double, npy_double);
|
156 |
+
npy_double pdtri(npy_int, npy_double);
|
157 |
+
npy_double poch(npy_double, npy_double);
|
158 |
+
npy_double prolate_aswfa_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *);
|
159 |
+
npy_int prolate_aswfa_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *);
|
160 |
+
npy_double prolate_segv_wrap(npy_double, npy_double, npy_double);
|
161 |
+
npy_double prolate_radial1_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *);
|
162 |
+
npy_int prolate_radial1_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *);
|
163 |
+
npy_double prolate_radial2_nocv_wrap(npy_double, npy_double, npy_double, npy_double, npy_double *);
|
164 |
+
npy_int prolate_radial2_wrap(npy_double, npy_double, npy_double, npy_double, npy_double, npy_double *, npy_double *);
|
165 |
+
npy_double radian(npy_double, npy_double, npy_double);
|
166 |
+
npy_double rgamma(npy_double);
|
167 |
+
npy_double round(npy_double);
|
168 |
+
npy_int shichi(npy_double, npy_double *, npy_double *);
|
169 |
+
npy_int sici(npy_double, npy_double *, npy_double *);
|
170 |
+
npy_double sindg(npy_double);
|
171 |
+
npy_double smirnov(npy_int, npy_double);
|
172 |
+
npy_double smirnovi(npy_int, npy_double);
|
173 |
+
npy_double spence(npy_double);
|
174 |
+
npy_double struve_h(npy_double, npy_double);
|
175 |
+
npy_double tandg(npy_double);
|
176 |
+
npy_double tukeylambdacdf(npy_double, npy_double);
|
177 |
+
npy_double y0(npy_double);
|
178 |
+
npy_double y1(npy_double);
|
179 |
+
npy_double yn(npy_int, npy_double);
|
180 |
+
npy_cdouble cbesy_wrap(npy_double, npy_cdouble);
|
181 |
+
npy_double cbesy_wrap_real(npy_double, npy_double);
|
182 |
+
npy_cdouble cbesy_wrap_e(npy_double, npy_cdouble);
|
183 |
+
npy_double cbesy_wrap_e_real(npy_double, npy_double);
|
184 |
+
npy_double zetac(npy_double);
|
185 |
+
#endif
|
.venv/Lib/site-packages/scipy/special/special/binom.h
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/* Translated from Cython into C++ by SciPy developers in 2024.
 *
 * Original authors: Pauli Virtanen, Eric Moore
 */

// Binomial coefficient

#pragma once

#include "config.h"

#include "cephes/beta.h"
#include "cephes/gamma.h"

namespace special {

// Generalized binomial coefficient binom(n, k) for real-valued n and k.
//
// Three regimes are handled, in order:
//   1. integer k (with n not tiny-nonzero): exact running-product formula,
//      periodically renormalized to dodge overflow of the intermediate;
//   2. n >> k > 0: evaluated in log space via lbeta to avoid over/underflow;
//   3. |k| >> |n|: a two-term expansion in 1/k combined with a sin() factor
//      (presumably from the Gamma reflection formula — the "// + ..." marks
//      the truncated series; confirm against the original Cython source);
//   otherwise the direct beta-function identity is used.
//
// Returns NaN for negative integer n, where the coefficient is undefined.
SPECFUN_HOST_DEVICE inline double binom(double n, double k) {
    double kx, nx, num, den, dk, sgn;

    if (n < 0) {
        nx = std::floor(n);
        if (n == nx) {
            // Undefined
            return std::numeric_limits<double>::quiet_NaN();
        }
    }

    kx = std::floor(k);
    if (k == kx && (std::abs(n) > 1E-8 || n == 0)) {
        /* Integer case: use multiplication formula for less rounding
         * error for cases where the result is an integer.
         *
         * This cannot be used for small nonzero n due to loss of
         * precision. */
        nx = std::floor(n);
        if (nx == n && kx > nx / 2 && nx > 0) {
            // Reduce kx by symmetry
            kx = nx - kx;
        }

        if (kx >= 0 && kx < 20) {
            num = 1.0;
            den = 1.0;
            for (int i = 1; i < 1 + static_cast<int>(kx); i++) {
                num *= i + n - kx;
                den *= i;
                if (std::abs(num) > 1E50) {
                    // Renormalize the running product before it overflows;
                    // num/den is invariant under this rescaling.
                    num /= den;
                    den = 1.0;
                }
            }
            return num / den;
        }
    }

    // general case
    if (n >= 1E10 * k and k > 0) {
        // avoid under/overflows intermediate results
        // (note: 'and' is the ISO C++ alternative token for &&)
        return std::exp(-cephes::lbeta(1 + n - k, 1 + k) - std::log(n + 1));
    }
    if (k > 1E8 * std::abs(n)) {
        // avoid loss of precision
        num = cephes::Gamma(1 + n) / std::abs(k) + cephes::Gamma(1 + n) * n / (2 * k * k); // + ...
        num /= M_PI * std::pow(std::abs(k), n);
        if (k > 0) {
            kx = std::floor(k);
            if (static_cast<int>(kx) == kx) {
                // k is an exact integer: split out its parity sign and use
                // the fractional remainder (zero here) in the sin() argument.
                dk = k - kx;
                sgn = (static_cast<int>(kx) % 2 == 0) ? 1 : -1;
            } else {
                dk = k;
                sgn = 1;
            }
            return num * std::sin((dk - n) * M_PI) * sgn;
        }
        kx = std::floor(k);
        if (static_cast<int>(kx) == kx) {
            // Negative integer k: the coefficient vanishes.
            return 0;
        }
        return num * std::sin(k * M_PI);
    }
    // Direct identity: binom(n, k) = 1 / ((n + 1) * B(1 + n - k, 1 + k)).
    return 1 / (n + 1) / cephes::beta(1 + n - k, 1 + k);
}

} // namespace special
|
.venv/Lib/site-packages/scipy/special/special/config.h
ADDED
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once

// Define math constants if they are not available
// (the M_* names come from POSIX <math.h> and are not part of standard C++,
// so each is guarded and supplied at double precision when missing)
#ifndef M_E
#define M_E 2.71828182845904523536
#endif

#ifndef M_LOG2E
#define M_LOG2E 1.44269504088896340736
#endif

#ifndef M_LOG10E
#define M_LOG10E 0.434294481903251827651
#endif

#ifndef M_LN2
#define M_LN2 0.693147180559945309417
#endif

#ifndef M_LN10
#define M_LN10 2.30258509299404568402
#endif

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#ifndef M_PI_2
#define M_PI_2 1.57079632679489661923
#endif

#ifndef M_PI_4
#define M_PI_4 0.785398163397448309616
#endif

#ifndef M_1_PI
#define M_1_PI 0.318309886183790671538
#endif

#ifndef M_2_PI
#define M_2_PI 0.636619772367581343076
#endif

#ifndef M_2_SQRTPI
#define M_2_SQRTPI 1.12837916709551257390
#endif

#ifndef M_SQRT2
#define M_SQRT2 1.41421356237309504880
#endif

#ifndef M_SQRT1_2
#define M_SQRT1_2 0.707106781186547524401
#endif
|
55 |
+
|
56 |
+
#ifdef __CUDACC__
|
57 |
+
#define SPECFUN_HOST_DEVICE __host__ __device__
|
58 |
+
|
59 |
+
#include <cuda/std/cmath>
|
60 |
+
#include <cuda/std/limits>
|
61 |
+
|
62 |
+
// Fallback to global namespace for functions unsupported on NVRTC Jit
|
63 |
+
#ifdef _LIBCUDACXX_COMPILER_NVRTC
|
64 |
+
#include <cuda_runtime.h>
|
65 |
+
#endif
|
66 |
+
|
67 |
+
namespace std {
|
68 |
+
|
69 |
+
SPECFUN_HOST_DEVICE inline double abs(double num) { return cuda::std::abs(num); }
|
70 |
+
|
71 |
+
SPECFUN_HOST_DEVICE inline double exp(double num) { return cuda::std::exp(num); }
|
72 |
+
|
73 |
+
SPECFUN_HOST_DEVICE inline double log(double num) { return cuda::std::log(num); }
|
74 |
+
|
75 |
+
SPECFUN_HOST_DEVICE inline double sqrt(double num) { return cuda::std::sqrt(num); }
|
76 |
+
|
77 |
+
SPECFUN_HOST_DEVICE inline bool isnan(double num) { return cuda::std::isnan(num); }
|
78 |
+
|
79 |
+
SPECFUN_HOST_DEVICE inline bool isfinite(double num) { return cuda::std::isfinite(num); }
|
80 |
+
|
81 |
+
SPECFUN_HOST_DEVICE inline double pow(double x, double y) { return cuda::std::pow(x, y); }
|
82 |
+
|
83 |
+
SPECFUN_HOST_DEVICE inline double sin(double x) { return cuda::std::sin(x); }
|
84 |
+
|
85 |
+
SPECFUN_HOST_DEVICE inline double tan(double x) { return cuda::std::tan(x); }
|
86 |
+
|
87 |
+
SPECFUN_HOST_DEVICE inline double sinh(double x) { return cuda::std::sinh(x); }
|
88 |
+
|
89 |
+
SPECFUN_HOST_DEVICE inline double cosh(double x) { return cuda::std::cosh(x); }
|
90 |
+
|
91 |
+
SPECFUN_HOST_DEVICE inline bool signbit(double x) { return cuda::std::signbit(x); }
|
92 |
+
|
93 |
+
// Fallback to global namespace for functions unsupported on NVRTC
|
94 |
+
#ifndef _LIBCUDACXX_COMPILER_NVRTC
|
95 |
+
SPECFUN_HOST_DEVICE inline double ceil(double x) { return cuda::std::ceil(x); }
|
96 |
+
SPECFUN_HOST_DEVICE inline double floor(double x) { return cuda::std::floor(x); }
|
97 |
+
SPECFUN_HOST_DEVICE inline double trunc(double x) { return cuda::std::trunc(x); }
|
98 |
+
SPECFUN_HOST_DEVICE inline double fma(double x, double y, double z) { return cuda::std::fma(x, y, z); }
|
99 |
+
SPECFUN_HOST_DEVICE inline double copysign(double x, double y) { return cuda::std::copysign(x, y); }
|
100 |
+
SPECFUN_HOST_DEVICE inline double modf(double value, double *iptr) { return cuda::std::modf(value, iptr); }
|
101 |
+
|
102 |
+
#else
|
103 |
+
SPECFUN_HOST_DEVICE inline double ceil(double x) { return ::ceil(x); }
|
104 |
+
SPECFUN_HOST_DEVICE inline double floor(double x) { return ::floor(x); }
|
105 |
+
SPECFUN_HOST_DEVICE inline double trunc(double x) { return ::trunc(x); }
|
106 |
+
SPECFUN_HOST_DEVICE inline double fma(double x, double y, double z) { return ::fma(x, y, z); }
|
107 |
+
SPECFUN_HOST_DEVICE inline double copysign(double x, double y) { return ::copysign(x, y); }
|
108 |
+
SPECFUN_HOST_DEVICE inline double modf(double value, double *iptr) { return ::modf(value, iptr); }
|
109 |
+
#endif
|
110 |
+
|
111 |
+
template <typename T>
|
112 |
+
using numeric_limits = cuda::std::numeric_limits<T>;
|
113 |
+
|
114 |
+
// Must use thrust for complex types in order to support CuPy
|
115 |
+
template <typename T>
|
116 |
+
using complex = thrust::complex<T>;
|
117 |
+
|
118 |
+
template <typename T>
|
119 |
+
SPECFUN_HOST_DEVICE T abs(const complex<T> &z) {
|
120 |
+
return thrust::abs(z);
|
121 |
+
}
|
122 |
+
|
123 |
+
template <typename T>
|
124 |
+
SPECFUN_HOST_DEVICE complex<T> exp(const complex<T> &z) {
|
125 |
+
return thrust::exp(z);
|
126 |
+
}
|
127 |
+
|
128 |
+
template <typename T>
|
129 |
+
SPECFUN_HOST_DEVICE complex<T> log(const complex<T> &z) {
|
130 |
+
return thrust::log(z);
|
131 |
+
}
|
132 |
+
|
133 |
+
template <typename T>
|
134 |
+
SPECFUN_HOST_DEVICE T norm(const complex<T> &z) {
|
135 |
+
return thrust::norm(z);
|
136 |
+
}
|
137 |
+
|
138 |
+
template <typename T>
|
139 |
+
SPECFUN_HOST_DEVICE complex<T> sqrt(const complex<T> &z) {
|
140 |
+
return thrust::sqrt(z);
|
141 |
+
}
|
142 |
+
|
143 |
+
template <typename T>
|
144 |
+
SPECFUN_HOST_DEVICE complex<T> conj(const complex<T> &z) {
|
145 |
+
return thrust::conj(z);
|
146 |
+
}
|
147 |
+
|
148 |
+
} // namespace std
|
149 |
+
|
150 |
+
#else
|
151 |
+
#define SPECFUN_HOST_DEVICE
|
152 |
+
|
153 |
+
#include <cmath>
|
154 |
+
#include <complex>
|
155 |
+
#include <limits>
|
156 |
+
#include <math.h>
|
157 |
+
|
158 |
+
#endif
|
.venv/Lib/site-packages/scipy/special/special/digamma.h
ADDED
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/* Translated from Cython into C++ by SciPy developers in 2024.
 * Original header comment appears below.
 */

/* An implementation of the digamma function for complex arguments.
 *
 * Author: Josh Wilson
 *
 * Distributed under the same license as Scipy.
 *
 * Sources:
 * [1] "The Digital Library of Mathematical Functions", dlmf.nist.gov
 *
 * [2] mpmath (version 0.19), http://mpmath.org
 */

#pragma once

#include "cephes/psi.h"
#include "cephes/zeta.h"
#include "config.h"
#include "error.h"
#include "trig.h"

namespace special {
namespace detail {
// All of the following were computed with mpmath
// Location of the positive root
constexpr double digamma_posroot = 1.4616321449683623;
// Value of the positive root
constexpr double digamma_posrootval = -9.2412655217294275e-17;
// Location of the negative root
constexpr double digamma_negroot = -0.504083008264455409;
// Value of the negative root
constexpr double digamma_negrootval = 7.2897639029768949e-17;

// Taylor expansion of digamma about `root` (a zero of digamma whose value
// there is the tiny residual `rootval`). The expansion coefficients are
// Hurwitz zeta values zeta(n+1, root), accumulated until the next term no
// longer changes the result at machine precision (at most 100 terms).
template <typename T>
SPECFUN_HOST_DEVICE T digamma_zeta_series(T z, double root, double rootval) {
    T res = rootval;
    T coeff = -1.0;

    z = z - root;
    T term;
    for (int n = 1; n < 100; n++) {
        coeff *= -z; // coeff is now (-1)^(n+1) * (z - root)^n
        term = coeff * cephes::zeta(n + 1, root);
        res += term;
        if (std::abs(term) < std::numeric_limits<double>::epsilon() * std::abs(res)) {
            break; // converged to machine precision
        }
    }
    return res;
}

SPECFUN_HOST_DEVICE inline std::complex<double> digamma_forward_recurrence(std::complex<double> z,
                                                                           std::complex<double> psiz, int n) {
    /* Compute digamma(z + n) using digamma(z) using the recurrence
     * relation
     *
     * digamma(z + 1) = digamma(z) + 1/z.
     *
     * See https://dlmf.nist.gov/5.5#E2 */
    std::complex<double> res = psiz;

    for (int k = 0; k < n; k++) {
        res += 1.0 / (z + static_cast<double>(k));
    }
    return res;
}

SPECFUN_HOST_DEVICE inline std::complex<double> digamma_backward_recurrence(std::complex<double> z,
                                                                            std::complex<double> psiz, int n) {
    /* Compute digamma(z - n) using digamma(z) and a recurrence relation. */
    // Steps down via digamma(z - 1) = digamma(z) - 1/(z - 1).
    std::complex<double> res = psiz;

    for (int k = 1; k < n + 1; k++) {
        res -= 1.0 / (z - static_cast<double>(k));
    }
    return res;
}

SPECFUN_HOST_DEVICE inline std::complex<double> digamma_asymptotic_series(std::complex<double> z) {
    /* Evaluate digamma using an asymptotic series. See
     *
     * https://dlmf.nist.gov/5.11#E2 */
    // Bernoulli numbers B_2, B_4, ..., B_32 used as series coefficients.
    double bernoulli2k[] = {
        0.166666666666666667, -0.0333333333333333333, 0.0238095238095238095, -0.0333333333333333333,
        0.0757575757575757576, -0.253113553113553114, 1.16666666666666667, -7.09215686274509804,
        54.9711779448621554, -529.124242424242424, 6192.12318840579710, -86580.2531135531136,
        1425517.16666666667, -27298231.0678160920, 601580873.900642368, -15116315767.0921569};
    std::complex<double> rzz = 1.0 / z / z;
    std::complex<double> zfac = 1.0;
    std::complex<double> term;
    std::complex<double> res;

    if (!(std::isfinite(z.real()) && std::isfinite(z.imag()))) {
        /* Check for infinity (or nan) and return early.
         * Result of division by complex infinity is implementation dependent.
         * and has been observed to vary between C++ stdlib and CUDA stdlib.
         */
        return std::log(z);
    }

    // Leading terms log(z) - 1/(2z), then the sum over B_2k / (2k * z^(2k)).
    res = std::log(z) - 0.5 / z;

    for (int k = 1; k < 17; k++) {
        zfac *= rzz;
        term = -bernoulli2k[k - 1] * zfac / (2 * static_cast<double>(k));
        res += term;
        if (std::abs(term) < std::numeric_limits<double>::epsilon() * std::abs(res)) {
            break; // series has converged
        }
    }
    return res;
}

} // namespace detail

SPECFUN_HOST_DEVICE inline double digamma(double z) {
    /* Wrap Cephes' psi to take advantage of the series expansion around
     * the smallest negative zero.
     */
    // Near the negative root, psi loses accuracy to cancellation; the zeta
    // series around the precomputed root is accurate there.
    if (std::abs(z - detail::digamma_negroot) < 0.3) {
        return detail::digamma_zeta_series(z, detail::digamma_negroot, detail::digamma_negrootval);
    }
    return cephes::psi(z);
}

SPECFUN_HOST_DEVICE inline std::complex<double> digamma(std::complex<double> z) {
    /*
     * Compute the digamma function for complex arguments. The strategy
     * is:
     *
     * - Around the two zeros closest to the origin (posroot and negroot)
     *   use a Taylor series with precomputed zero order coefficient.
     * - If close to the origin, use a recurrence relation to step away
     *   from the origin.
     * - If close to the negative real axis, use the reflection formula
     *   to move to the right halfplane.
     * - If |z| is large (> 16), use the asymptotic series.
     * - If |z| is small, use a recurrence relation to make |z| large
     *   enough to use the asymptotic series.
     */
    double absz = std::abs(z);
    std::complex<double> res = 0;
    /* Use the asymptotic series for z away from the negative real axis
     * with abs(z) > smallabsz. */
    int smallabsz = 16;
    /* Use the reflection principle for z with z.real < 0 that are within
     * smallimag of the negative real axis.
     * int smallimag = 6 # unused below except in a comment */

    // ceil(re) == z is true only for real, integral z (comparison with a
    // scalar requires imag == 0), i.e. the poles 0, -1, -2, ...
    if (z.real() <= 0.0 && std::ceil(z.real()) == z) {
        // Poles
        set_error("digamma", SF_ERROR_SINGULAR, NULL);
        return {std::numeric_limits<double>::quiet_NaN(), std::numeric_limits<double>::quiet_NaN()};
    }
    if (std::abs(z - detail::digamma_negroot) < 0.3) {
        // First negative root.
        return detail::digamma_zeta_series(z, detail::digamma_negroot, detail::digamma_negrootval);
    }

    if (z.real() < 0 and std::abs(z.imag()) < smallabsz) {
        /* Reflection formula for digamma. See
         *
         *https://dlmf.nist.gov/5.5#E4
         */
        res = -M_PI * cospi(z) / sinpi(z);
        z = 1.0 - z;
        absz = std::abs(z);
    }

    if (absz < 0.5) {
        /* Use one step of the recurrence relation to step away from
         * the pole. */
        res = -1.0 / z;
        z += 1.0;
        absz = std::abs(z);
    }

    if (std::abs(z - detail::digamma_posroot) < 0.5) {
        res += detail::digamma_zeta_series(z, detail::digamma_posroot, detail::digamma_posrootval);
    } else if (absz > smallabsz) {
        res += detail::digamma_asymptotic_series(z);
    } else if (z.real() >= 0.0) {
        // n is integral-valued; implicitly narrowed to int by the recurrence call.
        double n = std::trunc(smallabsz - absz) + 1;
        std::complex<double> init = detail::digamma_asymptotic_series(z + n);
        res += detail::digamma_backward_recurrence(z + n, init, n);
    } else {
        // z.real() < 0, absz < smallabsz, and z.imag() > smallimag
        double n = std::trunc(smallabsz - absz) - 1;
        std::complex<double> init = detail::digamma_asymptotic_series(z - n);
        res += detail::digamma_forward_recurrence(z - n, init, n);
    }
    return res;
}

} // namespace special
|
.venv/Lib/site-packages/scipy/special/special/error.h
ADDED
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#pragma once

// should be included from config.h, but that won't work until we've cleanly separated out the C and C++ parts of the
// code
#ifdef __CUDACC__
#define SPECFUN_HOST_DEVICE __host__ __device__
#else
#define SPECFUN_HOST_DEVICE
#endif

#ifdef __cplusplus
extern "C" {
#endif

// Error conditions that special-function routines can report via set_error().
// Kept C-compatible (plain enum, extern "C") so both the C and C++ layers share it.
typedef enum {
    SF_ERROR_OK = 0,    /* no error */
    SF_ERROR_SINGULAR,  /* singularity encountered */
    SF_ERROR_UNDERFLOW, /* floating point underflow */
    SF_ERROR_OVERFLOW,  /* floating point overflow */
    SF_ERROR_SLOW,      /* too many iterations required */
    SF_ERROR_LOSS,      /* loss of precision */
    SF_ERROR_NO_RESULT, /* no result obtained */
    SF_ERROR_DOMAIN,    /* out of domain */
    SF_ERROR_ARG,       /* invalid input parameter */
    SF_ERROR_OTHER,     /* unclassified error */
    SF_ERROR__LAST
} sf_error_t;

#ifdef __cplusplus
namespace special {

// Without SP_SPECFUN_ERROR defined, error reporting compiles to a no-op
// (usable in device code); with it, an out-of-line host implementation is
// expected to be provided elsewhere.
#ifndef SP_SPECFUN_ERROR
SPECFUN_HOST_DEVICE inline void set_error(const char *func_name, sf_error_t code, const char *fmt, ...) {
    // nothing
}
#else
void set_error(const char *func_name, sf_error_t code, const char *fmt, ...);
#endif
} // namespace special

} // closes extern "C"
#endif
|
.venv/Lib/site-packages/scipy/special/tests/__init__.py
ADDED
File without changes
|
.venv/Lib/site-packages/scipy/special/tests/data/__init__.py
ADDED
File without changes
|
.venv/Lib/site-packages/scipy/special/tests/test_wright_bessel.py
ADDED
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Reference MPMATH implementation:
|
2 |
+
#
|
3 |
+
# import mpmath
|
4 |
+
# from mpmath import nsum
|
5 |
+
#
|
6 |
+
# def Wright_Series_MPMATH(a, b, z, dps=50, method='r+s+e', steps=[1000]):
|
7 |
+
# """Compute Wright's generalized Bessel function as Series.
|
8 |
+
#
|
9 |
+
# This uses mpmath for arbitrary precision.
|
10 |
+
# """
|
11 |
+
# with mpmath.workdps(dps):
|
12 |
+
# res = nsum(lambda k: z**k/mpmath.fac(k) * mpmath.rgamma(a*k+b),
|
13 |
+
# [0, mpmath.inf],
|
14 |
+
# tol=dps, method=method, steps=steps
|
15 |
+
# )
|
16 |
+
#
|
17 |
+
# return res
|
18 |
+
|
19 |
+
import pytest
|
20 |
+
import numpy as np
|
21 |
+
from numpy.testing import assert_equal, assert_allclose
|
22 |
+
|
23 |
+
import scipy.special as sc
|
24 |
+
from scipy.special import rgamma, wright_bessel
|
25 |
+
|
26 |
+
|
27 |
+
@pytest.mark.parametrize('a', [0, 1e-6, 0.1, 0.5, 1, 10])
|
28 |
+
@pytest.mark.parametrize('b', [0, 1e-6, 0.1, 0.5, 1, 10])
|
29 |
+
def test_wright_bessel_zero(a, b):
|
30 |
+
"""Test at x = 0."""
|
31 |
+
assert_equal(wright_bessel(a, b, 0.), rgamma(b))
|
32 |
+
|
33 |
+
|
34 |
+
@pytest.mark.parametrize('b', [0, 1e-6, 0.1, 0.5, 1, 10])
|
35 |
+
@pytest.mark.parametrize('x', [0, 1e-6, 0.1, 0.5, 1])
|
36 |
+
def test_wright_bessel_iv(b, x):
|
37 |
+
"""Test relation of wright_bessel and modified bessel function iv.
|
38 |
+
|
39 |
+
iv(z) = (1/2*z)**v * Phi(1, v+1; 1/4*z**2).
|
40 |
+
See https://dlmf.nist.gov/10.46.E2
|
41 |
+
"""
|
42 |
+
if x != 0:
|
43 |
+
v = b - 1
|
44 |
+
wb = wright_bessel(1, v + 1, x**2 / 4.)
|
45 |
+
# Note: iv(v, x) has precision of less than 1e-12 for some cases
|
46 |
+
# e.g v=1-1e-6 and x=1e-06)
|
47 |
+
assert_allclose(np.power(x / 2., v) * wb,
|
48 |
+
sc.iv(v, x),
|
49 |
+
rtol=1e-11, atol=1e-11)
|
50 |
+
|
51 |
+
|
52 |
+
@pytest.mark.parametrize('a', [0, 1e-6, 0.1, 0.5, 1, 10])
|
53 |
+
@pytest.mark.parametrize('b', [1, 1 + 1e-3, 2, 5, 10])
|
54 |
+
@pytest.mark.parametrize('x', [0, 1e-6, 0.1, 0.5, 1, 5, 10, 100])
|
55 |
+
def test_wright_functional(a, b, x):
|
56 |
+
"""Test functional relation of wright_bessel.
|
57 |
+
|
58 |
+
Phi(a, b-1, z) = a*z*Phi(a, b+a, z) + (b-1)*Phi(a, b, z)
|
59 |
+
|
60 |
+
Note that d/dx Phi(a, b, x) = Phi(a, b-1, x)
|
61 |
+
See Eq. (22) of
|
62 |
+
B. Stankovic, On the Function of E. M. Wright,
|
63 |
+
Publ. de l' Institut Mathematique, Beograd,
|
64 |
+
Nouvelle S`er. 10 (1970), 113-124.
|
65 |
+
"""
|
66 |
+
assert_allclose(wright_bessel(a, b - 1, x),
|
67 |
+
a * x * wright_bessel(a, b + a, x)
|
68 |
+
+ (b - 1) * wright_bessel(a, b, x),
|
69 |
+
rtol=1e-8, atol=1e-8)
|
70 |
+
|
71 |
+
|
72 |
+
# grid of rows [a, b, x, value, accuracy] that do not reach 1e-11 accuracy
|
73 |
+
# see output of:
|
74 |
+
# cd scipy/scipy/_precompute
|
75 |
+
# python wright_bessel_data.py
|
76 |
+
grid_a_b_x_value_acc = np.array([
    # columns: [a, b, x, reference value of Phi(a, b; x), achievable rtol]
    # accuracy = np.nan marks rows where wright_bessel currently returns nan
    # (asserted in test_wright_data_grid_less_accurate below).
    [0.1, 100.0, 709.7827128933841, 8.026353022981087e+34, 2e-8],
    [0.5, 10.0, 709.7827128933841, 2.680788404494657e+48, 9e-8],
    [0.5, 10.0, 1000.0, 2.005901980702872e+64, 1e-8],
    [0.5, 100.0, 1000.0, 3.4112367580445246e-117, 6e-8],
    [1.0, 20.0, 100000.0, 1.7717158630699857e+225, 3e-11],
    [1.0, 100.0, 100000.0, 1.0269334596230763e+22, np.nan],
    [1.0000000000000222, 20.0, 100000.0, 1.7717158630001672e+225, 3e-11],
    [1.0000000000000222, 100.0, 100000.0, 1.0269334595866202e+22, np.nan],
    [1.5, 0.0, 500.0, 15648961196.432373, 3e-11],
    [1.5, 2.220446049250313e-14, 500.0, 15648961196.431465, 3e-11],
    [1.5, 1e-10, 500.0, 15648961192.344728, 3e-11],
    [1.5, 1e-05, 500.0, 15648552437.334162, 3e-11],
    [1.5, 0.1, 500.0, 12049870581.10317, 2e-11],
    [1.5, 20.0, 100000.0, 7.81930438331405e+43, 3e-9],
    [1.5, 100.0, 100000.0, 9.653370857459075e-130, np.nan],
])
|
93 |
+
|
94 |
+
|
95 |
+
@pytest.mark.xfail
|
96 |
+
@pytest.mark.parametrize(
|
97 |
+
'a, b, x, phi',
|
98 |
+
grid_a_b_x_value_acc[:, :4].tolist())
|
99 |
+
def test_wright_data_grid_failures(a, b, x, phi):
|
100 |
+
"""Test cases of test_data that do not reach relative accuracy of 1e-11"""
|
101 |
+
assert_allclose(wright_bessel(a, b, x), phi, rtol=1e-11)
|
102 |
+
|
103 |
+
|
104 |
+
@pytest.mark.parametrize(
|
105 |
+
'a, b, x, phi, accuracy',
|
106 |
+
grid_a_b_x_value_acc.tolist())
|
107 |
+
def test_wright_data_grid_less_accurate(a, b, x, phi, accuracy):
|
108 |
+
"""Test cases of test_data that do not reach relative accuracy of 1e-11
|
109 |
+
|
110 |
+
Here we test for reduced accuracy or even nan.
|
111 |
+
"""
|
112 |
+
if np.isnan(accuracy):
|
113 |
+
assert np.isnan(wright_bessel(a, b, x))
|
114 |
+
else:
|
115 |
+
assert_allclose(wright_bessel(a, b, x), phi, rtol=accuracy)
|
.venv/Lib/site-packages/scipy/special/tests/test_zeta.py
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import scipy.special as sc
|
2 |
+
import numpy as np
|
3 |
+
from numpy.testing import assert_equal, assert_allclose
|
4 |
+
|
5 |
+
|
6 |
+
def test_zeta():
    """Spot-check the Hurwitz zeta function: zeta(2, 2) == pi**2/6 - 1."""
    expected = np.pi**2 / 6 - 1
    assert_allclose(sc.zeta(2, 2), expected, rtol=1e-12)
|
8 |
+
|
9 |
+
|
10 |
+
def test_zetac():
    # Reference values computed with Wolfram Alpha's `Zeta[x] - 1`.
    cases = [
        (-2.1, -0.9972705002153750),
        (0.8, -5.437538415895550),
        (0.9999, -10000.42279161673),
        (9, 0.002008392826082214),
        (50, 8.881784210930816e-16),
        (75, 2.646977960169853e-23),
    ]
    x = [pt for pt, _ in cases]
    desired = [val for _, val in cases]
    assert_allclose(sc.zetac(x), desired, rtol=1e-12)
|
23 |
+
|
24 |
+
|
25 |
+
def test_zetac_special_cases():
    """Check zetac at limits and at integer special points."""
    assert sc.zetac(np.inf) == 0
    assert np.isnan(sc.zetac(-np.inf))
    assert sc.zetac(0) == -1.5
    # Pole of zeta at s = 1.
    assert sc.zetac(1.0) == np.inf

    # For large negative even arguments zeta(s) -> 0, so zetac(s) -> -1.
    assert_equal(sc.zetac([-2, -50, -100]), -1)
|
32 |
+
|
33 |
+
|
34 |
+
def test_riemann_zeta_special_cases():
    """Riemann zeta (single-argument form) at special points."""
    assert np.isnan(sc.zeta(np.nan))
    assert sc.zeta(np.inf) == 1
    assert sc.zeta(0) == -0.5

    # Riemann zeta is zero at the negative even integers (trivial zeros).
    assert_equal(sc.zeta([-2, -4, -6, -8, -10]), 0)

    # Euler's exact evaluations at positive even integers.
    for s, exact in ((2, np.pi**2 / 6), (4, np.pi**4 / 90)):
        assert_allclose(sc.zeta(s), exact, rtol=1e-12)
|
44 |
+
|
45 |
+
|
46 |
+
def test_riemann_zeta_avoid_overflow():
    """Large negative s where a naive reflection-formula evaluation overflows."""
    s = -260.00000000001
    # Reference value computed with mpmath.
    desired = -5.6966307844402683127e+297
    assert_allclose(sc.zeta(s), desired, atol=0, rtol=5e-14)
|
.venv/Lib/site-packages/scipy/stats/morestats.py
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.stats` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Deprecated public names once exposed by scipy.stats.morestats; access to any
# of them is funnelled through __getattr__ below so a DeprecationWarning fires.
__all__ = [  # noqa: F822
    'mvsdist',
    'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
    'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
    'shapiro', 'anderson', 'ansari', 'bartlett', 'levene',
    'fligner', 'mood', 'wilcoxon', 'median_test',
    'circmean', 'circvar', 'circstd', 'anderson_ksamp',
    'yeojohnson_llf', 'yeojohnson', 'yeojohnson_normmax',
    'yeojohnson_normplot', 'annotations', 'namedtuple', 'isscalar', 'log',
    'around', 'unique', 'arange', 'sort', 'amin', 'amax', 'atleast_1d',
    'array', 'compress', 'exp', 'ravel', 'count_nonzero', 'arctan2',
    'hypot', 'optimize', 'find_repeats',
    'chi2_contingency', 'distributions', 'rv_generic', 'Mean',
    'Variance', 'Std_dev', 'ShapiroResult', 'AndersonResult',
    'Anderson_ksampResult', 'AnsariResult', 'BartlettResult',
    'LeveneResult', 'FlignerResult', 'WilcoxonResult'
]


def __dir__():
    # Restrict dir()/tab completion to the deprecated public names above.
    return __all__


def __getattr__(name):
    # Module-level __getattr__ (PEP 562): forward attribute access to the
    # private implementation module while emitting a deprecation warning.
    return _sub_module_deprecation(sub_package="stats", module="morestats",
                                   private_modules=["_morestats"], all=__all__,
                                   attribute=name)
|
.venv/Lib/site-packages/scipy/stats/mstats.py
ADDED
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
===================================================================
|
3 |
+
Statistical functions for masked arrays (:mod:`scipy.stats.mstats`)
|
4 |
+
===================================================================
|
5 |
+
|
6 |
+
.. currentmodule:: scipy.stats.mstats
|
7 |
+
|
8 |
+
This module contains a large number of statistical functions that can
|
9 |
+
be used with masked arrays.
|
10 |
+
|
11 |
+
Most of these functions are similar to those in `scipy.stats` but might
|
12 |
+
have small differences in the API or in the algorithm used. Since this
|
13 |
+
is a relatively new package, some API changes are still possible.
|
14 |
+
|
15 |
+
Summary statistics
|
16 |
+
==================
|
17 |
+
|
18 |
+
.. autosummary::
|
19 |
+
:toctree: generated/
|
20 |
+
|
21 |
+
describe
|
22 |
+
gmean
|
23 |
+
hmean
|
24 |
+
kurtosis
|
25 |
+
mode
|
26 |
+
mquantiles
|
27 |
+
hdmedian
|
28 |
+
hdquantiles
|
29 |
+
hdquantiles_sd
|
30 |
+
idealfourths
|
31 |
+
plotting_positions
|
32 |
+
meppf
|
33 |
+
moment
|
34 |
+
skew
|
35 |
+
tmean
|
36 |
+
tvar
|
37 |
+
tmin
|
38 |
+
tmax
|
39 |
+
tsem
|
40 |
+
variation
|
41 |
+
find_repeats
|
42 |
+
sem
|
43 |
+
trimmed_mean
|
44 |
+
trimmed_mean_ci
|
45 |
+
trimmed_std
|
46 |
+
trimmed_var
|
47 |
+
|
48 |
+
Frequency statistics
|
49 |
+
====================
|
50 |
+
|
51 |
+
.. autosummary::
|
52 |
+
:toctree: generated/
|
53 |
+
|
54 |
+
scoreatpercentile
|
55 |
+
|
56 |
+
Correlation functions
|
57 |
+
=====================
|
58 |
+
|
59 |
+
.. autosummary::
|
60 |
+
:toctree: generated/
|
61 |
+
|
62 |
+
f_oneway
|
63 |
+
pearsonr
|
64 |
+
spearmanr
|
65 |
+
pointbiserialr
|
66 |
+
kendalltau
|
67 |
+
kendalltau_seasonal
|
68 |
+
linregress
|
69 |
+
siegelslopes
|
70 |
+
theilslopes
|
71 |
+
sen_seasonal_slopes
|
72 |
+
|
73 |
+
Statistical tests
|
74 |
+
=================
|
75 |
+
|
76 |
+
.. autosummary::
|
77 |
+
:toctree: generated/
|
78 |
+
|
79 |
+
ttest_1samp
|
80 |
+
ttest_onesamp
|
81 |
+
ttest_ind
|
82 |
+
ttest_rel
|
83 |
+
chisquare
|
84 |
+
kstest
|
85 |
+
ks_2samp
|
86 |
+
ks_1samp
|
87 |
+
ks_twosamp
|
88 |
+
mannwhitneyu
|
89 |
+
rankdata
|
90 |
+
kruskal
|
91 |
+
kruskalwallis
|
92 |
+
friedmanchisquare
|
93 |
+
brunnermunzel
|
94 |
+
skewtest
|
95 |
+
kurtosistest
|
96 |
+
normaltest
|
97 |
+
|
98 |
+
Transformations
|
99 |
+
===============
|
100 |
+
|
101 |
+
.. autosummary::
|
102 |
+
:toctree: generated/
|
103 |
+
|
104 |
+
obrientransform
|
105 |
+
trim
|
106 |
+
trima
|
107 |
+
trimmed_stde
|
108 |
+
trimr
|
109 |
+
trimtail
|
110 |
+
trimboth
|
111 |
+
winsorize
|
112 |
+
zmap
|
113 |
+
zscore
|
114 |
+
|
115 |
+
Other
|
116 |
+
=====
|
117 |
+
|
118 |
+
.. autosummary::
|
119 |
+
:toctree: generated/
|
120 |
+
|
121 |
+
argstoarray
|
122 |
+
count_tied_groups
|
123 |
+
msign
|
124 |
+
compare_medians_ms
|
125 |
+
median_cihs
|
126 |
+
mjci
|
127 |
+
mquantiles_cimj
|
128 |
+
rsh
|
129 |
+
|
130 |
+
"""
|
131 |
+
from . import _mstats_basic
from . import _mstats_extras
from ._mstats_basic import *  # noqa: F403
from ._mstats_extras import *  # noqa: F403
# Functions that support masked array input in stats but need to be kept in the
# mstats namespace for backwards compatibility:
from scipy.stats import gmean, hmean, zmap, zscore, chisquare

# Public API: everything re-exported from the two private submodules, plus the
# scipy.stats functions imported directly above.
__all__ = _mstats_basic.__all__ + _mstats_extras.__all__
__all__ += ['gmean', 'hmean', 'zmap', 'zscore', 'chisquare']
|
.venv/Lib/site-packages/scipy/stats/mstats_basic.py
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.stats` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation


# Deprecated public names once exposed by scipy.stats.mstats_basic; access to
# any of them goes through __getattr__ below, which warns and forwards.
__all__ = [  # noqa: F822
    'argstoarray',
    'count_tied_groups',
    'describe',
    'f_oneway', 'find_repeats','friedmanchisquare',
    'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis',
    'ks_twosamp', 'ks_2samp', 'kurtosis', 'kurtosistest',
    'ks_1samp', 'kstest',
    'linregress',
    'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign',
    'normaltest',
    'obrientransform',
    'pearsonr','plotting_positions','pointbiserialr',
    'rankdata',
    'scoreatpercentile','sem',
    'sen_seasonal_slopes','skew','skewtest','spearmanr',
    'siegelslopes', 'theilslopes',
    'tmax','tmean','tmin','trim','trimboth',
    'trimtail','trima','trimr','trimmed_mean','trimmed_std',
    'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp',
    'ttest_ind','ttest_rel','tvar',
    'variation',
    'winsorize',
    'brunnermunzel', 'ma', 'masked', 'nomask', 'namedtuple',
    'distributions', 'stats_linregress', 'stats_LinregressResult',
    'stats_theilslopes', 'stats_siegelslopes', 'ModeResult',
    'PointbiserialrResult',
    'Ttest_1sampResult', 'Ttest_indResult', 'Ttest_relResult',
    'MannwhitneyuResult', 'KruskalResult', 'trimdoc', 'trim1',
    'DescribeResult', 'stde_median', 'SkewtestResult', 'KurtosistestResult',
    'NormaltestResult', 'F_onewayResult', 'FriedmanchisquareResult',
    'BrunnerMunzelResult'
]


def __dir__():
    # Restrict dir()/tab completion to the deprecated public names above.
    return __all__


def __getattr__(name):
    # Module-level __getattr__ (PEP 562): warn and forward to the private
    # implementation, pointing users at scipy.stats.mstats as the replacement.
    return _sub_module_deprecation(sub_package="stats", module="mstats_basic",
                                   private_modules=["_mstats_basic"], all=__all__,
                                   attribute=name, correct_module="mstats")
|
.venv/Lib/site-packages/scipy/stats/mstats_extras.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This file is not meant for public use and will be removed in SciPy v2.0.0.
|
2 |
+
# Use the `scipy.stats` namespace for importing the functions
|
3 |
+
# included below.
|
4 |
+
|
5 |
+
from scipy._lib.deprecation import _sub_module_deprecation
|
6 |
+
|
7 |
+
|
8 |
+
# Public names this deprecated shim still exposes; each resolves through
# __getattr__ below with a DeprecationWarning.  `noqa: F822` silences the
# "undefined name in __all__" lint, since the names are not defined here.
__all__ = [  # noqa: F822
    'compare_medians_ms',
    'hdquantiles', 'hdmedian', 'hdquantiles_sd',
    'idealfourths',
    'median_cihs','mjci','mquantiles_cimj',
    'rsh',
    'trimmed_mean_ci', 'ma', 'MaskedArray', 'mstats',
    'norm', 'beta', 't', 'binom'
]
|
17 |
+
|
18 |
+
|
19 |
+
def __dir__():
    # Module-level __dir__ (PEP 562): limit introspection of this deprecated
    # shim module to the names it formally re-exports.
    return __all__
|
21 |
+
|
22 |
+
|
23 |
+
def __getattr__(name):
    # Module-level __getattr__ (PEP 562): any attribute access on this
    # deprecated module is routed through the deprecation helper, which warns
    # and forwards to the supported `scipy.stats.mstats` namespace.
    return _sub_module_deprecation(sub_package="stats", module="mstats_extras",
                                   private_modules=["_mstats_extras"], all=__all__,
                                   attribute=name, correct_module="mstats")
|
.venv/Lib/site-packages/scipy/stats/tests/test_axis_nan_policy.py
ADDED
@@ -0,0 +1,1188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Many scipy.stats functions support `axis` and `nan_policy` parameters.
|
2 |
+
# When the two are combined, it can be tricky to get all the behavior just
|
3 |
+
# right. This file contains a suite of common tests for scipy.stats functions
|
4 |
+
# that support `axis` and `nan_policy` and additional tests for some associated
|
5 |
+
# functions in stats._util.
|
6 |
+
|
7 |
+
from itertools import product, combinations_with_replacement, permutations
|
8 |
+
import re
|
9 |
+
import pickle
|
10 |
+
import pytest
|
11 |
+
|
12 |
+
import numpy as np
|
13 |
+
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
|
14 |
+
from scipy import stats
|
15 |
+
from scipy.stats import norm # type: ignore[attr-defined]
|
16 |
+
from scipy.stats._axis_nan_policy import _masked_arrays_2_sentinel_arrays
|
17 |
+
from scipy._lib._util import AxisError
|
18 |
+
|
19 |
+
|
20 |
+
def unpack_ttest_result(res):
    """Flatten a t-test result object into a 7-tuple of its fields.

    Returns ``(statistic, pvalue, df, standard_error, estimate, ci_low,
    ci_high)``, where the last two come from ``res.confidence_interval()``.
    """
    ci = res.confidence_interval()
    scalars = (res.statistic, res.pvalue, res.df,
               res._standard_error, res._estimate)
    return scalars + (ci[0], ci[1])
|
24 |
+
|
25 |
+
|
26 |
+
def _get_ttest_ci(ttest):
|
27 |
+
# get a function that returns the CI bounds of provided `ttest`
|
28 |
+
def ttest_ci(*args, **kwargs):
|
29 |
+
res = ttest(*args, **kwargs)
|
30 |
+
return res.confidence_interval()
|
31 |
+
return ttest_ci
|
32 |
+
|
33 |
+
|
34 |
+
# Master table of functions exercised by the axis/nan_policy test suite.
# Each entry is:
#   (function, args, kwds, number of samples, number of outputs,
#    ... paired, unpacker function)
# args, kwds typically aren't needed; just showing that they work
axis_nan_policy_cases = [
    (stats.kruskal, tuple(), dict(), 3, 2, False, None),  # 4 samples is slow
    (stats.ranksums, ('less',), dict(), 2, 2, False, None),
    (stats.mannwhitneyu, tuple(), {'method': 'asymptotic'}, 2, 2, False, None),
    (stats.wilcoxon, ('pratt',), {'mode': 'auto'}, 2, 2, True,
     lambda res: (res.statistic, res.pvalue)),
    (stats.wilcoxon, tuple(), dict(), 1, 2, True,
     lambda res: (res.statistic, res.pvalue)),
    (stats.wilcoxon, tuple(), {'mode': 'approx'}, 1, 3, True,
     lambda res: (res.statistic, res.pvalue, res.zstatistic)),
    (stats.gmean, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.hmean, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.pmean, (1.42,), dict(), 1, 1, False, lambda x: (x,)),
    (stats.sem, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.iqr, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.kurtosis, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.skew, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.kstat, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.kstatvar, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.moment, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.moment, tuple(), dict(order=[1, 2]), 1, 2, False, None),
    (stats.jarque_bera, tuple(), dict(), 1, 2, False, None),
    (stats.ttest_1samp, (np.array([0]),), dict(), 1, 7, False,
     unpack_ttest_result),
    (stats.ttest_rel, tuple(), dict(), 2, 7, True, unpack_ttest_result),
    (stats.ttest_ind, tuple(), dict(), 2, 7, False, unpack_ttest_result),
    (_get_ttest_ci(stats.ttest_1samp), (0,), dict(), 1, 2, False, None),
    (_get_ttest_ci(stats.ttest_rel), tuple(), dict(), 2, 2, True, None),
    (_get_ttest_ci(stats.ttest_ind), tuple(), dict(), 2, 2, False, None),
    (stats.mode, tuple(), dict(), 1, 2, True, lambda x: (x.mode, x.count)),
    (stats.differential_entropy, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.variation, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.friedmanchisquare, tuple(), dict(), 3, 2, True, None),
    (stats.brunnermunzel, tuple(), dict(), 2, 2, False, None),
    (stats.mood, tuple(), {}, 2, 2, False, None),
    (stats.shapiro, tuple(), {}, 1, 2, False, None),
    (stats.ks_1samp, (norm().cdf,), dict(), 1, 4, False,
     lambda res: (*res, res.statistic_location, res.statistic_sign)),
    (stats.ks_2samp, tuple(), dict(), 2, 4, False,
     lambda res: (*res, res.statistic_location, res.statistic_sign)),
    (stats.kstest, (norm().cdf,), dict(), 1, 4, False,
     lambda res: (*res, res.statistic_location, res.statistic_sign)),
    (stats.kstest, tuple(), dict(), 2, 4, False,
     lambda res: (*res, res.statistic_location, res.statistic_sign)),
    (stats.levene, tuple(), {}, 2, 2, False, None),
    (stats.fligner, tuple(), {'center': 'trimmed', 'proportiontocut': 0.01},
     2, 2, False, None),
    (stats.ansari, tuple(), {}, 2, 2, False, None),
    (stats.entropy, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.entropy, tuple(), dict(), 2, 1, True, lambda x: (x,)),
    (stats.skewtest, tuple(), dict(), 1, 2, False, None),
    (stats.kurtosistest, tuple(), dict(), 1, 2, False, None),
    (stats.normaltest, tuple(), dict(), 1, 2, False, None),
    (stats.cramervonmises, ("norm",), dict(), 1, 2, False,
     lambda res: (res.statistic, res.pvalue)),
    (stats.cramervonmises_2samp, tuple(), dict(), 2, 2, False,
     lambda res: (res.statistic, res.pvalue)),
    (stats.epps_singleton_2samp, tuple(), dict(), 2, 2, False, None),
    (stats.bartlett, tuple(), {}, 2, 2, False, None),
    (stats.tmean, tuple(), {}, 1, 1, False, lambda x: (x,)),
    (stats.tvar, tuple(), {}, 1, 1, False, lambda x: (x,)),
    (stats.tmin, tuple(), {}, 1, 1, False, lambda x: (x,)),
    (stats.tmax, tuple(), {}, 1, 1, False, lambda x: (x,)),
    (stats.tstd, tuple(), {}, 1, 1, False, lambda x: (x,)),
    (stats.tsem, tuple(), {}, 1, 1, False, lambda x: (x,)),
    (stats.circmean, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.circvar, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.circstd, tuple(), dict(), 1, 1, False, lambda x: (x,)),
    (stats.f_oneway, tuple(), {}, 2, 2, False, None),
    (stats.alexandergovern, tuple(), {}, 2, 2, False,
     lambda res: (res.statistic, res.pvalue)),
    (stats.combine_pvalues, tuple(), {}, 1, 2, False, None),
]
|
110 |
+
|
111 |
+
# If the message is one of those expected, put nans in
# appropriate places of `statistics` and `pvalues`.
# These are matched by prefix (str.startswith) against the raised
# exception/warning message.
too_small_messages = {"The input contains nan",  # for nan_policy="raise"
                      "Degrees of freedom <= 0 for slice",
                      "x and y should have at least 5 elements",
                      "Data must be at least length 3",
                      "The sample must contain at least two",
                      "x and y must contain at least two",
                      "division by zero",
                      "Mean of empty slice",
                      "Data passed to ks_2samp must not be empty",
                      "Not enough test observations",
                      "Not enough other observations",
                      "Not enough observations.",
                      "At least one observation is required",
                      "zero-size array to reduction operation maximum",
                      "`x` and `y` must be of nonzero size.",
                      "The exact distribution of the Wilcoxon test",
                      "Data input must not be empty",
                      "Window length (0) must be positive and less",
                      "Window length (1) must be positive and less",
                      "Window length (2) must be positive and less",
                      "skewtest is not valid with less than",
                      "kurtosistest requires at least 5",
                      "attempt to get argmax of an empty sequence",
                      "No array values within given limits",
                      "Input sample size must be greater than one.",}

# If the message is one of these, results of the function may be inaccurate,
# but NaNs are not to be placed
inaccuracy_messages = {"Precision loss occurred in moment calculation",
                       "Sample size too small for normal approximation."}

# For some functions, nan_policy='propagate' should not just return NaNs
override_propagate_funcs = {stats.mode}

# For some functions, empty arrays produce non-NaN results
empty_special_case_funcs = {stats.entropy}
|
149 |
+
|
150 |
+
def _mixed_data_generator(n_samples, n_repetitions, axis, rng,
                          paired=False):
    """Generate `n_samples` random arrays with mixed NaN patterns.

    Each sample's axis-slices cover six patterns (all-NaN, 1-3 reals,
    mostly-real, all-real), repeated `n_repetitions` times; the samples have
    different but broadcastable shapes so multi-sample tests exercise
    broadcasting and nan_policy at the same time.
    """
    # generate random samples to check the response of hypothesis tests to
    # samples with different (but broadcastable) shapes and various
    # nan patterns (e.g. all nans, some nans, no nans) along axis-slices

    data = []
    for i in range(n_samples):
        n_patterns = 6  # number of distinct nan patterns
        n_obs = 20 if paired else 20 + i  # observations per axis-slice
        x = np.ones((n_repetitions, n_patterns, n_obs)) * np.nan

        for j in range(n_repetitions):
            samples = x[j, :, :]

            # case 0: axis-slice with all nans (0 reals)
            # cases 1-3: axis-slice with 1-3 reals (the rest nans)
            # case 4: axis-slice with mostly (all but two) reals
            # case 5: axis slice with all reals
            for k, n_reals in enumerate([0, 1, 2, 3, n_obs-2, n_obs]):
                # for cases 1-3, need paired nans to be in the same place
                indices = rng.permutation(n_obs)[:n_reals]
                samples[k, indices] = rng.random(size=n_reals)

            # permute the axis-slices just to show that order doesn't matter
            samples[:] = rng.permutation(samples, axis=0)

        # For multi-sample tests, we want to test broadcasting and check
        # that nan policy works correctly for each nan pattern for each input.
        # This takes care of both simultaneously.
        new_shape = [n_repetitions] + [1]*n_samples + [n_obs]
        new_shape[1 + i] = 6
        x = x.reshape(new_shape)

        x = np.moveaxis(x, -1, axis)
        data.append(x)
    return data
|
187 |
+
|
188 |
+
|
189 |
+
def _homogeneous_data_generator(n_samples, n_repetitions, axis, rng,
|
190 |
+
paired=False, all_nans=True):
|
191 |
+
# generate random samples to check the response of hypothesis tests to
|
192 |
+
# samples with different (but broadcastable) shapes and homogeneous
|
193 |
+
# data (all nans or all finite)
|
194 |
+
data = []
|
195 |
+
for i in range(n_samples):
|
196 |
+
n_obs = 20 if paired else 20 + i # observations per axis-slice
|
197 |
+
shape = [n_repetitions] + [1]*n_samples + [n_obs]
|
198 |
+
shape[1 + i] = 2
|
199 |
+
x = np.ones(shape) * np.nan if all_nans else rng.random(shape)
|
200 |
+
x = np.moveaxis(x, -1, axis)
|
201 |
+
data.append(x)
|
202 |
+
return data
|
203 |
+
|
204 |
+
|
205 |
+
def nan_policy_1d(hypotest, data1d, unpacker, *args, n_outputs=2,
                  nan_policy='raise', paired=False, _no_deco=True, **kwds):
    """Reference implementation of `nan_policy` semantics for 1-d samples."""
    nan_masks = [np.isnan(sample) for sample in data1d]
    any_nans = any(mask.any() for mask in nan_masks)

    if nan_policy == 'raise':
        if any_nans:
            raise ValueError("The input contains nan values")
    elif (nan_policy == 'propagate'
          and hypotest not in override_propagate_funcs):
        # For all hypothesis tests tested, returning nans is the right thing.
        # But many hypothesis tests don't propagate correctly (e.g. they treat
        # np.nan the same as np.inf, which doesn't make sense when ranks are
        # involved) so override that behavior here.
        if any_nans:
            return np.full(n_outputs, np.nan)
    elif nan_policy == 'omit':
        # manually omit nans (or pairs in which at least one element is nan)
        if paired:
            combined = nan_masks[0].copy()
            for mask in nan_masks[1:]:
                combined |= mask
            data1d = [sample[~combined] for sample in data1d]
        else:
            data1d = [sample[~mask]
                      for sample, mask in zip(data1d, nan_masks)]

    return unpacker(hypotest(*data1d, *args, _no_deco=_no_deco, **kwds))
|
235 |
+
|
236 |
+
|
237 |
+
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
                          "paired", "unpacker"), axis_nan_policy_cases)
@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
@pytest.mark.parametrize(("axis"), (1,))
@pytest.mark.parametrize(("data_generator"), ("mixed",))
def test_axis_nan_policy_fast(hypotest, args, kwds, n_samples, n_outputs,
                              paired, unpacker, nan_policy, axis,
                              data_generator):
    # Fast subset of `test_axis_nan_policy_full`: only `axis=1` and the
    # "mixed" nan pattern, so it can run in the default (non-slow) suite.
    _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
                          unpacker, nan_policy, axis, data_generator)
|
249 |
+
|
250 |
+
|
251 |
+
@pytest.mark.slow
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
                          "paired", "unpacker"), axis_nan_policy_cases)
@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
@pytest.mark.parametrize(("axis"), range(-3, 3))
@pytest.mark.parametrize(("data_generator"),
                         ("all_nans", "all_finite", "mixed"))
def test_axis_nan_policy_full(hypotest, args, kwds, n_samples, n_outputs,
                              paired, unpacker, nan_policy, axis,
                              data_generator):
    # Exhaustive (slow) version: every axis in range(-3, 3) crossed with all
    # three data patterns; delegates to the shared test body.
    _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
                          unpacker, nan_policy, axis, data_generator)
|
265 |
+
|
266 |
+
|
267 |
+
def _axis_nan_policy_test(hypotest, args, kwds, n_samples, n_outputs, paired,
                          unpacker, nan_policy, axis, data_generator):
    """Shared test body: compare a vectorized `hypotest` call against the
    reference `nan_policy_1d` applied slice-by-slice via `np.ndenumerate`.
    """
    # Tests the 1D and vectorized behavior of hypothesis tests against a
    # reference implementation (nan_policy_1d with np.ndenumerate)

    # Some hypothesis tests return a non-iterable that needs an `unpacker` to
    # extract the statistic and p-value. For those that don't:
    if not unpacker:
        def unpacker(res):
            return res

    rng = np.random.default_rng(0)

    # Generate multi-dimensional test data with all important combinations
    # of patterns of nans along `axis`
    n_repetitions = 3  # number of repetitions of each pattern
    data_gen_kwds = {'n_samples': n_samples, 'n_repetitions': n_repetitions,
                     'axis': axis, 'rng': rng, 'paired': paired}
    if data_generator == 'mixed':
        inherent_size = 6  # number of distinct types of patterns
        data = _mixed_data_generator(**data_gen_kwds)
    elif data_generator == 'all_nans':
        inherent_size = 2  # hard-coded in _homogeneous_data_generator
        data_gen_kwds['all_nans'] = True
        data = _homogeneous_data_generator(**data_gen_kwds)
    elif data_generator == 'all_finite':
        inherent_size = 2  # hard-coded in _homogeneous_data_generator
        data_gen_kwds['all_nans'] = False
        data = _homogeneous_data_generator(**data_gen_kwds)

    output_shape = [n_repetitions] + [inherent_size]*n_samples

    # To generate reference behavior to compare against, loop over the axis-
    # slices in data. Make indexing easier by moving `axis` to the end and
    # broadcasting all samples to the same shape.
    data_b = [np.moveaxis(sample, axis, -1) for sample in data]
    data_b = [np.broadcast_to(sample, output_shape + [sample.shape[-1]])
              for sample in data_b]
    statistics = np.zeros(output_shape)
    pvalues = np.zeros(output_shape)

    for i, _ in np.ndenumerate(statistics):
        data1d = [sample[i] for sample in data_b]
        with np.errstate(divide='ignore', invalid='ignore'):
            try:
                res1d = nan_policy_1d(hypotest, data1d, unpacker, *args,
                                      n_outputs=n_outputs,
                                      nan_policy=nan_policy,
                                      paired=paired, _no_deco=True, **kwds)

                # Eventually we'll check the results of a single, vectorized
                # call of `hypotest` against the arrays `statistics` and
                # `pvalues` populated using the reference `nan_policy_1d`.
                # But while we're at it, check the results of a 1D call to
                # `hypotest` against the reference `nan_policy_1d`.
                res1db = unpacker(hypotest(*data1d, *args,
                                           nan_policy=nan_policy, **kwds))
                assert_equal(res1db[0], res1d[0])
                if len(res1db) == 2:
                    assert_equal(res1db[1], res1d[1])

            # When there is not enough data in 1D samples, many existing
            # hypothesis tests raise errors instead of returning nans.
            # For vectorized calls, we put nans in the corresponding elements
            # of the output.
            except (RuntimeWarning, UserWarning, ValueError,
                    ZeroDivisionError) as e:

                # whatever it is, make sure same error is raised by both
                # `nan_policy_1d` and `hypotest`
                with pytest.raises(type(e), match=re.escape(str(e))):
                    nan_policy_1d(hypotest, data1d, unpacker, *args,
                                  n_outputs=n_outputs, nan_policy=nan_policy,
                                  paired=paired, _no_deco=True, **kwds)
                with pytest.raises(type(e), match=re.escape(str(e))):
                    hypotest(*data1d, *args, nan_policy=nan_policy, **kwds)

                if any([str(e).startswith(message)
                        for message in too_small_messages]):
                    res1d = np.full(n_outputs, np.nan)
                elif any([str(e).startswith(message)
                          for message in inaccuracy_messages]):
                    with suppress_warnings() as sup:
                        sup.filter(RuntimeWarning)
                        sup.filter(UserWarning)
                        res1d = nan_policy_1d(hypotest, data1d, unpacker,
                                              *args, n_outputs=n_outputs,
                                              nan_policy=nan_policy,
                                              paired=paired, _no_deco=True,
                                              **kwds)
                else:
                    raise e
        statistics[i] = res1d[0]
        if len(res1d) == 2:
            pvalues[i] = res1d[1]

    # Perform a vectorized call to the hypothesis test.
    # If `nan_policy == 'raise'`, check that it raises the appropriate error.
    # If not, compare against the output against `statistics` and `pvalues`
    if nan_policy == 'raise' and not data_generator == "all_finite":
        message = 'The input contains nan values'
        with pytest.raises(ValueError, match=message):
            hypotest(*data, axis=axis, nan_policy=nan_policy, *args, **kwds)

    else:
        with suppress_warnings() as sup, \
                np.errstate(divide='ignore', invalid='ignore'):
            sup.filter(RuntimeWarning, "Precision loss occurred in moment")
            sup.filter(UserWarning, "Sample size too small for normal "
                                    "approximation.")
            res = unpacker(hypotest(*data, axis=axis, nan_policy=nan_policy,
                                    *args, **kwds))
        assert_allclose(res[0], statistics, rtol=1e-15)
        assert_equal(res[0].dtype, statistics.dtype)

        if len(res) == 2:
            assert_allclose(res[1], pvalues, rtol=1e-15)
            assert_equal(res[1].dtype, pvalues.dtype)
|
385 |
+
|
386 |
+
|
387 |
+
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
                          "paired", "unpacker"), axis_nan_policy_cases)
@pytest.mark.parametrize(("nan_policy"), ("propagate", "omit", "raise"))
@pytest.mark.parametrize(("data_generator"),
                         ("all_nans", "all_finite", "mixed", "empty"))
def test_axis_nan_policy_axis_is_None(hypotest, args, kwds, n_samples,
                                      n_outputs, paired, unpacker, nan_policy,
                                      data_generator):
    # check for correct behavior when `axis=None`

    if not unpacker:
        def unpacker(res):
            return res

    rng = np.random.default_rng(0)

    if data_generator == "empty":
        data = [rng.random((2, 0)) for i in range(n_samples)]
    else:
        data = [rng.random((2, 20)) for i in range(n_samples)]

    if data_generator == "mixed":
        # sprinkle NaNs over ~10% of each sample
        masks = [rng.random((2, 20)) > 0.9 for i in range(n_samples)]
        for sample, mask in zip(data, masks):
            sample[mask] = np.nan
    elif data_generator == "all_nans":
        data = [sample * np.nan for sample in data]

    data_raveled = [sample.ravel() for sample in data]

    if nan_policy == 'raise' and data_generator not in {"all_finite", "empty"}:
        message = 'The input contains nan values'

        # check for correct behavior whether or not data is 1d to begin with
        with pytest.raises(ValueError, match=message):
            hypotest(*data, axis=None, nan_policy=nan_policy,
                     *args, **kwds)
        with pytest.raises(ValueError, match=message):
            hypotest(*data_raveled, axis=None, nan_policy=nan_policy,
                     *args, **kwds)

    else:
        # behavior of reference implementation with 1d input, hypotest with 1d
        # input, and hypotest with Nd input should match, whether that means
        # that outputs are equal or they raise the same exception

        ea_str, eb_str, ec_str = None, None, None
        with np.errstate(divide='ignore', invalid='ignore'):
            try:
                res1da = nan_policy_1d(hypotest, data_raveled, unpacker, *args,
                                       n_outputs=n_outputs,
                                       nan_policy=nan_policy, paired=paired,
                                       _no_deco=True, **kwds)
            except (RuntimeWarning, ValueError, ZeroDivisionError) as ea:
                ea_str = str(ea)

            try:
                res1db = unpacker(hypotest(*data_raveled, *args,
                                           nan_policy=nan_policy, **kwds))
            except (RuntimeWarning, ValueError, ZeroDivisionError) as eb:
                eb_str = str(eb)

            try:
                res1dc = unpacker(hypotest(*data, *args, axis=None,
                                           nan_policy=nan_policy, **kwds))
            except (RuntimeWarning, ValueError, ZeroDivisionError) as ec:
                ec_str = str(ec)

            if ea_str or eb_str or ec_str:
                # all three must fail in exactly the same, expected way
                assert any([str(ea_str).startswith(message)
                            for message in too_small_messages])
                assert ea_str == eb_str == ec_str
            else:
                assert_equal(res1db, res1da)
                assert_equal(res1dc, res1da)
                for item in list(res1da) + list(res1db) + list(res1dc):
                    # Most functions naturally return NumPy numbers, which
                    # are drop-in replacements for the Python versions but with
                    # desirable attributes. Make sure this is consistent.
                    assert np.issubdtype(item.dtype, np.number)
|
468 |
+
|
469 |
+
# Test keepdims for:
# - single-output and multi-output functions (gmean and mannwhitneyu)
# - Axis negative, positive, None, and tuple
# - 1D with no NaNs
# - 1D with NaN propagation
# - Zero-sized output
@pytest.mark.parametrize("nan_policy", ("omit", "propagate"))
@pytest.mark.parametrize(
    ("hypotest", "args", "kwds", "n_samples", "unpacker"),
    ((stats.gmean, tuple(), dict(), 1, lambda x: (x,)),
     (stats.mannwhitneyu, tuple(), {'method': 'asymptotic'}, 2, None))
)
@pytest.mark.parametrize(
    ("sample_shape", "axis_cases"),
    (((2, 3, 3, 4), (None, 0, -1, (0, 2), (1, -1), (3, 1, 2, 0))),
     ((10, ), (0, -1)),
     ((20, 0), (0, 1)))
)
def test_keepdims(hypotest, args, kwds, n_samples, unpacker,
                  sample_shape, axis_cases, nan_policy):
    # test if keepdims parameter works correctly
    if not unpacker:
        def unpacker(res):
            return res
    rng = np.random.default_rng(0)
    data = [rng.random(sample_shape) for _ in range(n_samples)]
    nan_data = [sample.copy() for sample in data]
    nan_mask = [rng.random(sample_shape) < 0.2 for _ in range(n_samples)]
    for sample, mask in zip(nan_data, nan_mask):
        sample[mask] = np.nan
    for axis in axis_cases:
        # with keepdims=True, every reduced axis is retained with length 1
        expected_shape = list(sample_shape)
        if axis is None:
            expected_shape = np.ones(len(sample_shape))
        else:
            if isinstance(axis, int):
                expected_shape[axis] = 1
            else:
                for ax in axis:
                    expected_shape[ax] = 1
        expected_shape = tuple(expected_shape)
        res = unpacker(hypotest(*data, *args, axis=axis, keepdims=True,
                                **kwds))
        res_base = unpacker(hypotest(*data, *args, axis=axis, keepdims=False,
                                     **kwds))
        nan_res = unpacker(hypotest(*nan_data, *args, axis=axis,
                                    keepdims=True, nan_policy=nan_policy,
                                    **kwds))
        nan_res_base = unpacker(hypotest(*nan_data, *args, axis=axis,
                                         keepdims=False,
                                         nan_policy=nan_policy, **kwds))
        for r, r_base, rn, rn_base in zip(res, res_base, nan_res,
                                          nan_res_base):
            # squeezing the kept dims must recover the keepdims=False result
            assert r.shape == expected_shape
            r = np.squeeze(r, axis=axis)
            assert_equal(r, r_base)
            assert rn.shape == expected_shape
            rn = np.squeeze(rn, axis=axis)
            assert_equal(rn, rn_base)
|
528 |
+
|
529 |
+
|
530 |
+
@pytest.mark.parametrize(("fun", "nsamp"),
                         [(stats.kstat, 1),
                          (stats.kstatvar, 1)])
def test_hypotest_back_compat_no_axis(fun, nsamp):
    # Functions that historically had no `axis` parameter must ravel N-d
    # input by default: decorated, undecorated, and pre-raveled calls agree.
    m, n = 8, 9

    rng = np.random.default_rng(0)
    x = rng.random((nsamp, m, n))
    res = fun(*x)
    res2 = fun(*x, _no_deco=True)
    res3 = fun([xi.ravel() for xi in x])
    assert_equal(res, res2)
    assert_equal(res, res3)
|
543 |
+
|
544 |
+
|
545 |
+
@pytest.mark.parametrize(("axis"), (0, 1, 2))
def test_axis_nan_policy_decorated_positional_axis(axis):
    # Test for correct behavior of function decorated with
    # _axis_nan_policy_decorator whether `axis` is provided as positional or
    # keyword argument

    shape = (8, 9, 10)
    rng = np.random.default_rng(0)
    x = rng.random(shape)
    y = rng.random(shape)
    res1 = stats.mannwhitneyu(x, y, True, 'two-sided', axis)
    res2 = stats.mannwhitneyu(x, y, True, 'two-sided', axis=axis)
    assert_equal(res1, res2)

    # supplying `axis` both ways at once must raise the usual TypeError
    message = "mannwhitneyu() got multiple values for argument 'axis'"
    with pytest.raises(TypeError, match=re.escape(message)):
        stats.mannwhitneyu(x, y, True, 'two-sided', axis, axis=axis)
|
562 |
+
|
563 |
+
|
564 |
+
def test_axis_nan_policy_decorated_positional_args():
    # Test for correct behavior of function decorated with
    # _axis_nan_policy_decorator when function accepts *args

    shape = (3, 8, 9, 10)
    rng = np.random.default_rng(0)
    x = rng.random(shape)
    x[0, 0, 0, 0] = np.nan  # a NaN should not affect argument handling
    stats.kruskal(*x)  # smoke test: *args are accepted positionally

    # `samples` is the internal name of the *args parameter; it must not
    # be usable as a keyword argument
    message = "kruskal() got an unexpected keyword argument 'samples'"
    with pytest.raises(TypeError, match=re.escape(message)):
        stats.kruskal(samples=x)

    with pytest.raises(TypeError, match=re.escape(message)):
        stats.kruskal(*x, samples=x)
|
580 |
+
|
581 |
+
|
582 |
+
def test_axis_nan_policy_decorated_keyword_samples():
    # Test for correct behavior of function decorated with
    # _axis_nan_policy_decorator whether samples are provided as positional or
    # keyword arguments

    shape = (2, 8, 9, 10)
    rng = np.random.default_rng(0)
    x = rng.random(shape)
    x[0, 0, 0, 0] = np.nan
    res1 = stats.mannwhitneyu(*x)            # samples positionally
    res2 = stats.mannwhitneyu(x=x[0], y=x[1])  # samples by keyword
    assert_equal(res1, res2)

    # supplying a sample both ways must raise the standard TypeError
    message = "mannwhitneyu() got multiple values for argument"
    with pytest.raises(TypeError, match=re.escape(message)):
        stats.mannwhitneyu(*x, x=x[0], y=x[1])
|
598 |
+
|
599 |
+
|
600 |
+
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
                          "paired", "unpacker"), axis_nan_policy_cases)
def test_axis_nan_policy_decorated_pickled(hypotest, args, kwds, n_samples,
                                           n_outputs, paired, unpacker):
    # Decorated functions must survive a pickle round-trip and give the
    # same results afterwards (the decorator must not break pickling).
    if "ttest_ci" in hypotest.__name__:
        pytest.skip("Can't pickle functions defined within functions.")

    rng = np.random.default_rng(0)

    # Some hypothesis tests return a non-iterable that needs an `unpacker` to
    # extract the statistic and p-value. For those that don't:
    if not unpacker:
        def unpacker(res):
            return res

    data = rng.uniform(size=(n_samples, 2, 30))
    pickled_hypotest = pickle.dumps(hypotest)
    unpickled_hypotest = pickle.loads(pickled_hypotest)
    res1 = unpacker(hypotest(*data, *args, axis=-1, **kwds))
    res2 = unpacker(unpickled_hypotest(*data, *args, axis=-1, **kwds))
    assert_allclose(res1, res2, rtol=1e-12)
|
621 |
+
|
622 |
+
|
623 |
+
def test_check_empty_inputs():
    # Test that _check_empty_inputs is doing its job, at least for single-
    # sample inputs. (Multi-sample functionality is tested below.)
    # If the input sample is not empty, it should return None.
    # If the input sample is empty, it should return an array of NaNs or an
    # empty array of appropriate shape. np.mean is used as a reference for the
    # output because, like the statistics calculated by these functions,
    # it works along and "consumes" `axis` but preserves the other axes.
    for i in range(5):
        # all shapes with up to 4 dimensions, each of length 0, 1, or 2
        for combo in combinations_with_replacement([0, 1, 2], i):
            for axis in range(len(combo)):
                samples = (np.zeros(combo),)
                output = stats._axis_nan_policy._check_empty_inputs(samples,
                                                                    axis)
                if output is not None:
                    with np.testing.suppress_warnings() as sup:
                        # np.mean warns on empty input; that's expected here
                        sup.filter(RuntimeWarning, "Mean of empty slice.")
                        sup.filter(RuntimeWarning, "invalid value encountered")
                        reference = samples[0].mean(axis=axis)
                    np.testing.assert_equal(output, reference)
|
643 |
+
|
644 |
+
|
645 |
+
def _check_arrays_broadcastable(arrays, axis):
|
646 |
+
# https://numpy.org/doc/stable/user/basics.broadcasting.html
|
647 |
+
# "When operating on two arrays, NumPy compares their shapes element-wise.
|
648 |
+
# It starts with the trailing (i.e. rightmost) dimensions and works its
|
649 |
+
# way left.
|
650 |
+
# Two dimensions are compatible when
|
651 |
+
# 1. they are equal, or
|
652 |
+
# 2. one of them is 1
|
653 |
+
# ...
|
654 |
+
# Arrays do not need to have the same number of dimensions."
|
655 |
+
# (Clarification: if the arrays are compatible according to the criteria
|
656 |
+
# above and an array runs out of dimensions, it is still compatible.)
|
657 |
+
# Below, we follow the rules above except ignoring `axis`
|
658 |
+
|
659 |
+
n_dims = max([arr.ndim for arr in arrays])
|
660 |
+
if axis is not None:
|
661 |
+
# convert to negative axis
|
662 |
+
axis = (-n_dims + axis) if axis >= 0 else axis
|
663 |
+
|
664 |
+
for dim in range(1, n_dims+1): # we'll index from -1 to -n_dims, inclusive
|
665 |
+
if -dim == axis:
|
666 |
+
continue # ignore lengths along `axis`
|
667 |
+
|
668 |
+
dim_lengths = set()
|
669 |
+
for arr in arrays:
|
670 |
+
if dim <= arr.ndim and arr.shape[-dim] != 1:
|
671 |
+
dim_lengths.add(arr.shape[-dim])
|
672 |
+
|
673 |
+
if len(dim_lengths) > 1:
|
674 |
+
return False
|
675 |
+
return True
|
676 |
+
|
677 |
+
|
678 |
+
@pytest.mark.slow
@pytest.mark.parametrize(("hypotest", "args", "kwds", "n_samples", "n_outputs",
                          "paired", "unpacker"), axis_nan_policy_cases)
def test_empty(hypotest, args, kwds, n_samples, n_outputs, paired, unpacker):
    # test for correct output shape when at least one input is empty

    if hypotest in override_propagate_funcs:
        reason = "Doesn't follow the usual pattern. Tested separately."
        pytest.skip(reason=reason)

    if unpacker is None:
        unpacker = lambda res: (res[0], res[1])  # noqa: E731

    def small_data_generator(n_samples, n_dims):

        def small_sample_generator(n_dims):
            # return all possible "small" arrays in up to n_dim dimensions
            for i in n_dims:
                # "small" means with size along dimension either 0 or 1
                for combo in combinations_with_replacement([0, 1, 2], i):
                    yield np.zeros(combo)

        # yield all possible combinations of small samples
        gens = [small_sample_generator(n_dims) for i in range(n_samples)]
        yield from product(*gens)

    n_dims = [2, 3]
    for samples in small_data_generator(n_samples, n_dims):

        # this test is only for arrays of zero size
        if not any(sample.size == 0 for sample in samples):
            continue

        max_axis = max(sample.ndim for sample in samples)

        # need to test for all valid values of `axis` parameter, too
        for axis in range(-max_axis, max_axis):

            try:
                # After broadcasting, all arrays are the same shape, so
                # the shape of the output should be the same as a single-
                # sample statistic. Use np.mean as a reference.
                concat = stats._stats_py._broadcast_concatenate(samples, axis)
                with np.testing.suppress_warnings() as sup:
                    sup.filter(RuntimeWarning, "Mean of empty slice.")
                    sup.filter(RuntimeWarning, "invalid value encountered")
                    # expected result is NaN everywhere, with reference shape
                    expected = np.mean(concat, axis=axis) * np.nan

                if hypotest in empty_special_case_funcs:
                    # some functions return a finite value for empty input
                    empty_val = hypotest(*([[]]*len(samples)), *args, **kwds)
                    mask = np.isnan(expected)
                    expected[mask] = empty_val

                with np.testing.suppress_warnings() as sup:
                    # generated by f_oneway for too_small inputs
                    sup.filter(stats.DegenerateDataWarning)
                    res = hypotest(*samples, *args, axis=axis, **kwds)
                res = unpacker(res)

                for i in range(n_outputs):
                    assert_equal(res[i], expected)

            except ValueError:
                # confirm that the arrays truly are not broadcastable
                assert not _check_arrays_broadcastable(samples,
                                                       None if paired else axis)

                # confirm that _both_ `_broadcast_concatenate` and `hypotest`
                # produce this information.
                message = "Array shapes are incompatible for broadcasting."
                with pytest.raises(ValueError, match=message):
                    stats._stats_py._broadcast_concatenate(samples, axis, paired)
                with pytest.raises(ValueError, match=message):
                    hypotest(*samples, *args, axis=axis, **kwds)
|
752 |
+
|
753 |
+
|
754 |
+
def test_masked_array_2_sentinel_array():
    # _masked_arrays_2_sentinel_arrays replaces masked elements with a
    # sentinel value chosen so that it does not collide with real data.
    # prepare arrays
    np.random.seed(0)
    A = np.random.rand(10, 11, 12)
    B = np.random.rand(12)
    mask = A < 0.5
    A = np.ma.masked_array(A, mask)

    # set arbitrary elements to special values
    # (these values might have been considered for use as sentinel values)
    max_float = np.finfo(np.float64).max
    max_float2 = np.nextafter(max_float, -np.inf)
    max_float3 = np.nextafter(max_float2, -np.inf)
    A[3, 4, 1] = np.nan
    A[4, 5, 2] = np.inf
    A[5, 6, 3] = max_float
    B[8] = np.nan
    B[7] = np.inf
    B[6] = max_float2

    # convert masked A to array with sentinel value, don't modify B
    out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([A, B])
    A_out, B_out = out_arrays

    # check that good sentinel value was chosen (according to intended logic)
    # max_float and max_float2 occur in the data, so the next candidate
    # (max_float3) must have been selected
    assert (sentinel != max_float) and (sentinel != max_float2)
    assert sentinel == max_float3

    # check that output arrays are as intended
    A_reference = A.data
    A_reference[A.mask] = sentinel
    np.testing.assert_array_equal(A_out, A_reference)
    assert B_out is B  # unmasked input is passed through unchanged
|
787 |
+
|
788 |
+
|
789 |
+
def test_masked_dtype():
    # When _masked_arrays_2_sentinel_arrays was first added, it always
    # upcast the arrays to np.float64. After gh16662, check expected promotion
    # and that the expected sentinel is found.

    # these are important because the max of the promoted dtype is the first
    # candidate to be the sentinel value
    max16 = np.iinfo(np.int16).max
    max128c = np.finfo(np.complex128).max

    # a is a regular array, b has masked elements, and c has no masked elements
    a = np.array([1, 2, max16], dtype=np.int16)
    b = np.ma.array([1, 2, 1], dtype=np.int8, mask=[0, 1, 0])
    c = np.ma.array([1, 2, 1], dtype=np.complex128, mask=[0, 0, 0])

    # check integer masked -> sentinel conversion
    out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([a, b])
    a_out, b_out = out_arrays
    assert sentinel == max16-1  # not max16 because max16 was in the data
    assert b_out.dtype == np.int16  # check expected promotion
    assert_allclose(b_out, [b[0], sentinel, b[-1]])  # check sentinel placement
    assert a_out is a  # not a masked array, so left untouched
    assert not isinstance(b_out, np.ma.MaskedArray)  # b became regular array

    # similarly with complex
    out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([b, c])
    b_out, c_out = out_arrays
    assert sentinel == max128c  # max128c was not in the data
    assert b_out.dtype == np.complex128  # b got promoted
    assert_allclose(b_out, [b[0], sentinel, b[-1]])  # check sentinel placement
    assert not isinstance(b_out, np.ma.MaskedArray)  # b became regular array
    assert not isinstance(c_out, np.ma.MaskedArray)  # c became regular array

    # Also, check edge case when a sentinel value cannot be found in the data
    min8, max8 = np.iinfo(np.int8).min, np.iinfo(np.int8).max
    a = np.arange(min8, max8+1, dtype=np.int8)  # use all possible values
    mask1 = np.zeros_like(a, dtype=bool)
    mask0 = np.zeros_like(a, dtype=bool)

    # a masked value can be used as the sentinel
    mask1[1] = True
    a1 = np.ma.array(a, mask=mask1)
    out_arrays, sentinel = _masked_arrays_2_sentinel_arrays([a1])
    assert sentinel == min8+1

    # unless it's the smallest possible; skipped for simplicity (see code)
    mask0[0] = True
    a0 = np.ma.array(a, mask=mask0)
    message = "This function replaces masked elements with sentinel..."
    with pytest.raises(ValueError, match=message):
        _masked_arrays_2_sentinel_arrays([a0])

    # test that dtype is preserved in functions
    a = np.ma.array([1, 2, 3], mask=[0, 1, 0], dtype=np.float32)
    assert stats.gmean(a).dtype == np.float32
|
844 |
+
|
845 |
+
|
846 |
+
def test_masked_stat_1d():
    """Basic check of `_axis_nan_policy_factory` with 1-D masked samples."""
    group_a = [19, 22, 16, 29, 24]
    group_b = [20, 11, 17, 12]
    expected = stats.mannwhitneyu(group_a, group_b)

    # omitting an appended NaN reproduces the baseline result
    with_nan = [20, 11, 17, np.nan, 12]
    res_nan = stats.mannwhitneyu(group_a, with_nan, nan_policy='omit')
    np.testing.assert_array_equal(res_nan, expected)

    # masking out an extra (bogus) element also reproduces it
    with_masked = np.ma.masked_array([20, 11, 17, 1000, 12],
                                     mask=[False, False, False, True, False])
    res_masked = stats.mannwhitneyu(group_a, with_masked)
    np.testing.assert_array_equal(res_masked, expected)

    # NaN omitted *and* a bogus element masked, together
    mixed = np.ma.masked_array([20, 11, 17, np.nan, 1000, 12],
                               mask=[False, False, False, False, True, False])
    res_mixed = stats.mannwhitneyu(group_a, mixed, nan_policy='omit')
    np.testing.assert_array_equal(res_mixed, expected)

    # when the NaN itself is masked, every nan_policy behaves the same:
    # the mask hides the NaN before the policy is ever consulted
    shielded = np.ma.masked_array([20, 11, 17, np.nan, 1000, 12],
                                  mask=[False, False, False, True, True, False])
    res_prop = stats.mannwhitneyu(group_a, shielded, nan_policy='propagate')
    res_raise = stats.mannwhitneyu(group_a, shielded, nan_policy='raise')
    np.testing.assert_array_equal(res_prop, expected)
    np.testing.assert_array_equal(res_raise, expected)
|
879 |
+
|
880 |
+
|
881 |
+
@pytest.mark.parametrize(("axis"), range(-3, 3))
def test_masked_stat_3d(axis):
    # basic test of _axis_nan_policy_factory with 3D masked sample
    np.random.seed(0)
    a = np.random.rand(3, 4, 5)
    b = np.random.rand(4, 5)
    c = np.random.rand(4, 1)

    mask_a = a < 0.1
    mask_c = [False, False, False, True]
    a_masked = np.ma.masked_array(a, mask=mask_a)
    c_masked = np.ma.masked_array(c, mask=mask_c)

    # NaN versions of the same data: masked entries become NaNs
    a_nans = a.copy()
    a_nans[mask_a] = np.nan
    c_nans = c.copy()
    c_nans[mask_c] = np.nan

    # nan_policy='omit' on NaN data must agree with masked-array input
    res = stats.kruskal(a_nans, b, c_nans, nan_policy='omit', axis=axis)
    res2 = stats.kruskal(a_masked, b, c_masked, axis=axis)
    np.testing.assert_array_equal(res, res2)
|
902 |
+
|
903 |
+
|
904 |
+
def test_mixed_mask_nan_1():
    # targeted test of _axis_nan_policy_factory with 2D masked sample:
    # omitting samples with masks and nan_policy='omit' are equivalent
    # also checks paired-sample sentinel value removal
    m, n = 3, 20
    axis = -1

    np.random.seed(0)
    a = np.random.rand(m, n)
    b = np.random.rand(m, n)
    mask_a1 = np.random.rand(m, n) < 0.2
    mask_a2 = np.random.rand(m, n) < 0.1
    mask_b1 = np.random.rand(m, n) < 0.15
    mask_b2 = np.random.rand(m, n) < 0.15
    mask_a1[2, :] = True  # an entire row masked out

    # reference data: all "bad" entries replaced by NaN
    a_nans = a.copy()
    b_nans = b.copy()
    a_nans[mask_a1 | mask_a2] = np.nan
    b_nans[mask_b1 | mask_b2] = np.nan

    # variant 1: first mask as a mask, second mask as NaNs
    a_masked1 = np.ma.masked_array(a, mask=mask_a1)
    b_masked1 = np.ma.masked_array(b, mask=mask_b1)
    a_masked1[mask_a2] = np.nan
    b_masked1[mask_b2] = np.nan

    # variant 2: roles of the two masks swapped
    a_masked2 = np.ma.masked_array(a, mask=mask_a2)
    b_masked2 = np.ma.masked_array(b, mask=mask_b2)
    a_masked2[mask_a1] = np.nan
    b_masked2[mask_b1] = np.nan

    # variant 3: everything masked, no NaNs at all
    a_masked3 = np.ma.masked_array(a, mask=(mask_a1 | mask_a2))
    b_masked3 = np.ma.masked_array(b, mask=(mask_b1 | mask_b2))

    res = stats.wilcoxon(a_nans, b_nans, nan_policy='omit', axis=axis)
    res1 = stats.wilcoxon(a_masked1, b_masked1, nan_policy='omit', axis=axis)
    res2 = stats.wilcoxon(a_masked2, b_masked2, nan_policy='omit', axis=axis)
    res3 = stats.wilcoxon(a_masked3, b_masked3, nan_policy='raise', axis=axis)
    res4 = stats.wilcoxon(a_masked3, b_masked3,
                          nan_policy='propagate', axis=axis)

    np.testing.assert_array_equal(res1, res)
    np.testing.assert_array_equal(res2, res)
    np.testing.assert_array_equal(res3, res)
    np.testing.assert_array_equal(res4, res)
|
949 |
+
|
950 |
+
|
951 |
+
def test_mixed_mask_nan_2():
    # targeted test of _axis_nan_policy_factory with 2D masked sample:
    # check for expected interaction between masks and nans

    # Cases here are
    # [mixed nan/mask, all nans, all masked,
    #  unmasked nan, masked nan, unmasked non-nan]
    a = [[1, np.nan, 2], [np.nan, np.nan, np.nan], [1, 2, 3],
         [1, np.nan, 3], [1, np.nan, 3], [1, 2, 3]]
    mask = [[1, 0, 1], [0, 0, 0], [1, 1, 1],
            [0, 0, 0], [0, 1, 0], [0, 0, 0]]
    a_masked = np.ma.masked_array(a, mask=mask)
    b = [[4, 5, 6]]
    # references for rows with one element removed / nothing removed
    ref1 = stats.ranksums([1, 3], [4, 5, 6])
    ref2 = stats.ranksums([1, 2, 3], [4, 5, 6])

    # nan_policy = 'omit'
    # all elements are removed from first three rows
    # middle element is removed from fourth and fifth rows
    # no elements removed from last row
    res = stats.ranksums(a_masked, b, nan_policy='omit', axis=-1)
    stat_ref = [np.nan, np.nan, np.nan,
                ref1.statistic, ref1.statistic, ref2.statistic]
    p_ref = [np.nan, np.nan, np.nan,
             ref1.pvalue, ref1.pvalue, ref2.pvalue]
    np.testing.assert_array_equal(res.statistic, stat_ref)
    np.testing.assert_array_equal(res.pvalue, p_ref)

    # nan_policy = 'propagate'
    # nans propagate in first, second, and fourth row
    # all elements are removed by mask from third row
    # middle element is removed from fifth row
    # no elements removed from last row
    res = stats.ranksums(a_masked, b, nan_policy='propagate', axis=-1)
    stat_ref = [np.nan, np.nan, np.nan,
                np.nan, ref1.statistic, ref2.statistic]
    p_ref = [np.nan, np.nan, np.nan,
             np.nan, ref1.pvalue, ref2.pvalue]
    np.testing.assert_array_equal(res.statistic, stat_ref)
    np.testing.assert_array_equal(res.pvalue, p_ref)
|
991 |
+
|
992 |
+
|
993 |
+
def test_axis_None_vs_tuple():
    """`axis=None` must behave like a tuple naming every axis."""
    rng = np.random.default_rng(0)
    samples = rng.random((3, 8, 9, 10))
    flattened = stats.kruskal(*samples, axis=None)
    full_tuple = stats.kruskal(*samples, axis=(0, 1, 2))
    np.testing.assert_array_equal(flattened, full_tuple)
|
1001 |
+
|
1002 |
+
|
1003 |
+
def test_axis_None_vs_tuple_with_broadcasting():
    """`axis=None` must equal the all-axes tuple *before* broadcasting.

    Both are equivalent to raveling the original (unbroadcast) arrays;
    raveling the *broadcast* arrays duplicates elements and therefore
    gives a different answer.
    """
    rng = np.random.default_rng(0)
    col = rng.random((5, 1))
    row = rng.random((1, 5))
    col_b, row_b = np.broadcast_arrays(col, row)

    baseline = stats.mannwhitneyu(col.ravel(), row.ravel())
    via_none = stats.mannwhitneyu(col, row, axis=None)
    via_tuple = stats.mannwhitneyu(col, row, axis=(0, 1))
    via_broadcast = stats.mannwhitneyu(col_b.ravel(), row_b.ravel())

    assert via_none == baseline
    assert via_tuple == baseline
    assert via_broadcast != baseline  # duplicated elements change the result
|
1019 |
+
|
1020 |
+
|
1021 |
+
@pytest.mark.parametrize(("axis"),
                         list(permutations(range(-3, 3), 2)) + [(-4, 1)])
def test_other_axis_tuples(axis):
    # Check that _axis_nan_policy_factory treats all `axis` tuples as expected
    rng = np.random.default_rng(0)
    shape_x = (4, 5, 6)
    shape_y = (1, 6)
    x = rng.random(shape_x)
    y = rng.random(shape_y)
    axis_original = axis

    # convert axis elements to positive
    axis = tuple([(i if i >= 0 else 3 + i) for i in axis])
    axis = sorted(axis)

    # e.g. (0, -3) normalizes to two copies of the same axis -> error
    if len(set(axis)) != len(axis):
        message = "`axis` must contain only distinct elements"
        with pytest.raises(AxisError, match=re.escape(message)):
            stats.mannwhitneyu(x, y, axis=axis_original)
        return

    # e.g. the (-4, 1) case -> out-of-bounds error
    if axis[0] < 0 or axis[-1] > 2:
        message = "`axis` is out of bounds for array of dimension 3"
        with pytest.raises(AxisError, match=re.escape(message)):
            stats.mannwhitneyu(x, y, axis=axis_original)
        return

    res = stats.mannwhitneyu(x, y, axis=axis_original)

    # reference behavior
    not_axis = {0, 1, 2} - set(axis)  # which axis is not part of `axis`
    not_axis = next(iter(not_axis))  # take it out of the set

    x2 = x
    # broadcast y up along the non-reduced axis so both inputs match there
    shape_y_broadcasted = [1, 1, 6]
    shape_y_broadcasted[not_axis] = shape_x[not_axis]
    y2 = np.broadcast_to(y, shape_y_broadcasted)

    # move the reduced axes to the end, flatten them, and reduce with axis=1
    m = x2.shape[not_axis]
    x2 = np.moveaxis(x2, axis, (1, 2))
    y2 = np.moveaxis(y2, axis, (1, 2))
    x2 = np.reshape(x2, (m, -1))
    y2 = np.reshape(y2, (m, -1))
    res2 = stats.mannwhitneyu(x2, y2, axis=1)

    np.testing.assert_array_equal(res, res2)
|
1067 |
+
|
1068 |
+
|
1069 |
+
@pytest.mark.parametrize(
    ("weighted_fun_name, unpacker"),
    [
        ("gmean", lambda x: x),
        ("hmean", lambda x: x),
        ("pmean", lambda x: x),
        ("combine_pvalues", lambda x: (x.pvalue, x.statistic)),
    ],
)
def test_mean_mixed_mask_nan_weights(weighted_fun_name, unpacker):
    # targeted test of _axis_nan_policy_factory with 2D masked sample:
    # omitting samples with masks and nan_policy='omit' are equivalent
    # also checks paired-sample sentinel value removal

    if weighted_fun_name == 'pmean':
        # pmean requires the exponent `p`; fix an arbitrary value
        def weighted_fun(a, **kwargs):
            return stats.pmean(a, p=0.42, **kwargs)
    else:
        weighted_fun = getattr(stats, weighted_fun_name)

    def func(*args, **kwargs):
        # normalize the result shape via `unpacker`
        return unpacker(weighted_fun(*args, **kwargs))

    m, n = 3, 20
    axis = -1

    rng = np.random.default_rng(6541968121)
    a = rng.uniform(size=(m, n))
    b = rng.uniform(size=(m, n))
    mask_a1 = rng.uniform(size=(m, n)) < 0.2
    mask_a2 = rng.uniform(size=(m, n)) < 0.1
    mask_b1 = rng.uniform(size=(m, n)) < 0.15
    mask_b2 = rng.uniform(size=(m, n)) < 0.15
    mask_a1[2, :] = True  # an entire row masked out

    # reference data: all "bad" entries replaced by NaN
    a_nans = a.copy()
    b_nans = b.copy()
    a_nans[mask_a1 | mask_a2] = np.nan
    b_nans[mask_b1 | mask_b2] = np.nan

    # variant 1: first mask as a mask, second mask as NaNs
    a_masked1 = np.ma.masked_array(a, mask=mask_a1)
    b_masked1 = np.ma.masked_array(b, mask=mask_b1)
    a_masked1[mask_a2] = np.nan
    b_masked1[mask_b2] = np.nan

    # variant 2: roles of the two masks swapped
    a_masked2 = np.ma.masked_array(a, mask=mask_a2)
    b_masked2 = np.ma.masked_array(b, mask=mask_b2)
    a_masked2[mask_a1] = np.nan
    b_masked2[mask_b1] = np.nan

    # variant 3: everything masked, no NaNs at all
    a_masked3 = np.ma.masked_array(a, mask=(mask_a1 | mask_a2))
    b_masked3 = np.ma.masked_array(b, mask=(mask_b1 | mask_b2))

    # variant 4: data and weights share one combined mask (paired removal)
    mask_all = (mask_a1 | mask_a2 | mask_b1 | mask_b2)
    a_masked4 = np.ma.masked_array(a, mask=mask_all)
    b_masked4 = np.ma.masked_array(b, mask=mask_all)

    with np.testing.suppress_warnings() as sup:
        message = 'invalid value encountered'
        sup.filter(RuntimeWarning, message)
        res = func(a_nans, weights=b_nans, nan_policy="omit", axis=axis)
        res1 = func(a_masked1, weights=b_masked1, nan_policy="omit", axis=axis)
        res2 = func(a_masked2, weights=b_masked2, nan_policy="omit", axis=axis)
        res3 = func(a_masked3, weights=b_masked3, nan_policy="raise", axis=axis)
        res4 = func(a_masked3, weights=b_masked3, nan_policy="propagate", axis=axis)
        # Would test with a_masked3/b_masked3, but there is a bug in np.average
        # that causes a bug in _no_deco mean with masked weights. Would use
        # np.ma.average, but that causes other problems. See numpy/numpy#7330.
        if weighted_fun_name in {"hmean"}:
            weighted_fun_ma = getattr(stats.mstats, weighted_fun_name)
            res5 = weighted_fun_ma(a_masked4, weights=b_masked4,
                                   axis=axis, _no_deco=True)

    np.testing.assert_array_equal(res1, res)
    np.testing.assert_array_equal(res2, res)
    np.testing.assert_array_equal(res3, res)
    np.testing.assert_array_equal(res4, res)
    if weighted_fun_name in {"hmean"}:
        # _no_deco mean returns masked array, last element was masked
        np.testing.assert_allclose(res5.compressed(), res[~np.isnan(res)])
|
1149 |
+
|
1150 |
+
|
1151 |
+
def test_raise_invalid_args_g17713():
    # The decorator must preserve standard argument-error messages (gh-17713).
    # other cases are handled in:
    # test_axis_nan_policy_decorated_positional_axis - multiple values for arg
    # test_axis_nan_policy_decorated_positional_args - unexpected kwd arg
    message = "got an unexpected keyword argument"
    with pytest.raises(TypeError, match=message):
        stats.gmean([1, 2, 3], invalid_arg=True)

    message = " got multiple values for argument"
    with pytest.raises(TypeError, match=message):
        stats.gmean([1, 2, 3], a=True)

    message = "missing 1 required positional argument"
    with pytest.raises(TypeError, match=message):
        stats.gmean()

    message = "takes from 1 to 4 positional arguments but 5 were given"
    with pytest.raises(TypeError, match=message):
        stats.gmean([1, 2, 3], 0, float, [1, 1, 1], 10)
|
1170 |
+
|
1171 |
+
|
1172 |
+
@pytest.mark.parametrize('dtype', [np.int16, np.float32, np.complex128])
def test_array_like_input(dtype):
    # Check that `_axis_nan_policy`-decorated functions work with custom
    # containers that are coercible to numeric arrays

    class ArrLike:
        def __init__(self, x, dtype):
            self._x = x
            self._dtype = dtype

        def __array__(self, dtype=None, copy=None):
            # Use the data stored on the instance.  (Previously this read
            # the enclosing function's `x` via closure, which happened to
            # work but left `self._x` unused and made the class depend on
            # its definition site.)
            return np.asarray(self._x, dtype=self._dtype)

    x = [1]*2 + [3, 4, 5]
    res = stats.mode(ArrLike(x, dtype=dtype))
    assert res.mode == 1
    assert res.count == 2
|
.venv/Lib/site-packages/scipy/stats/tests/test_binned_statistic.py
ADDED
@@ -0,0 +1,568 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from numpy.testing import assert_allclose
|
3 |
+
import pytest
|
4 |
+
from pytest import raises as assert_raises
|
5 |
+
from scipy.stats import (binned_statistic, binned_statistic_2d,
|
6 |
+
binned_statistic_dd)
|
7 |
+
from scipy._lib._util import check_random_state
|
8 |
+
|
9 |
+
from .common_tests import check_named_results
|
10 |
+
|
11 |
+
|
12 |
+
class TestBinnedStatistic:
|
13 |
+
|
14 |
+
    @classmethod
    def setup_class(cls):
        """Create shared random fixtures once for the whole test class."""
        rng = check_random_state(9865)
        cls.x = rng.uniform(size=100)        # 1-D sample positions
        cls.y = rng.uniform(size=100)        # second coordinate (2-D tests)
        cls.v = rng.uniform(size=100)        # values to be binned
        cls.X = rng.uniform(size=(100, 3))   # multi-dimensional sample
        cls.w = rng.uniform(size=100)        # weights
        cls.u = rng.uniform(size=100) + 1e6  # large-offset values (numerical-stability tests)
|
23 |
+
|
24 |
+
def test_1d_count(self):
|
25 |
+
x = self.x
|
26 |
+
v = self.v
|
27 |
+
|
28 |
+
count1, edges1, bc = binned_statistic(x, v, 'count', bins=10)
|
29 |
+
count2, edges2 = np.histogram(x, bins=10)
|
30 |
+
|
31 |
+
assert_allclose(count1, count2)
|
32 |
+
assert_allclose(edges1, edges2)
|
33 |
+
|
34 |
+
def test_gh5927(self):
|
35 |
+
# smoke test for gh5927 - binned_statistic was using `is` for string
|
36 |
+
# comparison
|
37 |
+
x = self.x
|
38 |
+
v = self.v
|
39 |
+
statistics = ['mean', 'median', 'count', 'sum']
|
40 |
+
for statistic in statistics:
|
41 |
+
binned_statistic(x, v, statistic, bins=10)
|
42 |
+
|
43 |
+
def test_big_number_std(self):
|
44 |
+
# tests for numerical stability of std calculation
|
45 |
+
# see issue gh-10126 for more
|
46 |
+
x = self.x
|
47 |
+
u = self.u
|
48 |
+
stat1, edges1, bc = binned_statistic(x, u, 'std', bins=10)
|
49 |
+
stat2, edges2, bc = binned_statistic(x, u, np.std, bins=10)
|
50 |
+
|
51 |
+
assert_allclose(stat1, stat2)
|
52 |
+
|
53 |
+
def test_empty_bins_std(self):
|
54 |
+
# tests that std returns gives nan for empty bins
|
55 |
+
x = self.x
|
56 |
+
u = self.u
|
57 |
+
print(binned_statistic(x, u, 'count', bins=1000))
|
58 |
+
stat1, edges1, bc = binned_statistic(x, u, 'std', bins=1000)
|
59 |
+
stat2, edges2, bc = binned_statistic(x, u, np.std, bins=1000)
|
60 |
+
|
61 |
+
assert_allclose(stat1, stat2)
|
62 |
+
|
63 |
+
def test_non_finite_inputs_and_int_bins(self):
|
64 |
+
# if either `values` or `sample` contain np.inf or np.nan throw
|
65 |
+
# see issue gh-9010 for more
|
66 |
+
x = self.x
|
67 |
+
u = self.u
|
68 |
+
orig = u[0]
|
69 |
+
u[0] = np.inf
|
70 |
+
assert_raises(ValueError, binned_statistic, u, x, 'std', bins=10)
|
71 |
+
# need to test for non-python specific ints, e.g. np.int8, np.int64
|
72 |
+
assert_raises(ValueError, binned_statistic, u, x, 'std',
|
73 |
+
bins=np.int64(10))
|
74 |
+
u[0] = np.nan
|
75 |
+
assert_raises(ValueError, binned_statistic, u, x, 'count', bins=10)
|
76 |
+
# replace original value, u belongs the class
|
77 |
+
u[0] = orig
|
78 |
+
|
79 |
+
def test_1d_result_attributes(self):
|
80 |
+
x = self.x
|
81 |
+
v = self.v
|
82 |
+
|
83 |
+
res = binned_statistic(x, v, 'count', bins=10)
|
84 |
+
attributes = ('statistic', 'bin_edges', 'binnumber')
|
85 |
+
check_named_results(res, attributes)
|
86 |
+
|
87 |
+
def test_1d_sum(self):
|
88 |
+
x = self.x
|
89 |
+
v = self.v
|
90 |
+
|
91 |
+
sum1, edges1, bc = binned_statistic(x, v, 'sum', bins=10)
|
92 |
+
sum2, edges2 = np.histogram(x, bins=10, weights=v)
|
93 |
+
|
94 |
+
assert_allclose(sum1, sum2)
|
95 |
+
assert_allclose(edges1, edges2)
|
96 |
+
|
97 |
+
def test_1d_mean(self):
|
98 |
+
x = self.x
|
99 |
+
v = self.v
|
100 |
+
|
101 |
+
stat1, edges1, bc = binned_statistic(x, v, 'mean', bins=10)
|
102 |
+
stat2, edges2, bc = binned_statistic(x, v, np.mean, bins=10)
|
103 |
+
|
104 |
+
assert_allclose(stat1, stat2)
|
105 |
+
assert_allclose(edges1, edges2)
|
106 |
+
|
107 |
+
def test_1d_std(self):
|
108 |
+
x = self.x
|
109 |
+
v = self.v
|
110 |
+
|
111 |
+
stat1, edges1, bc = binned_statistic(x, v, 'std', bins=10)
|
112 |
+
stat2, edges2, bc = binned_statistic(x, v, np.std, bins=10)
|
113 |
+
|
114 |
+
assert_allclose(stat1, stat2)
|
115 |
+
assert_allclose(edges1, edges2)
|
116 |
+
|
117 |
+
def test_1d_min(self):
|
118 |
+
x = self.x
|
119 |
+
v = self.v
|
120 |
+
|
121 |
+
stat1, edges1, bc = binned_statistic(x, v, 'min', bins=10)
|
122 |
+
stat2, edges2, bc = binned_statistic(x, v, np.min, bins=10)
|
123 |
+
|
124 |
+
assert_allclose(stat1, stat2)
|
125 |
+
assert_allclose(edges1, edges2)
|
126 |
+
|
127 |
+
def test_1d_max(self):
|
128 |
+
x = self.x
|
129 |
+
v = self.v
|
130 |
+
|
131 |
+
stat1, edges1, bc = binned_statistic(x, v, 'max', bins=10)
|
132 |
+
stat2, edges2, bc = binned_statistic(x, v, np.max, bins=10)
|
133 |
+
|
134 |
+
assert_allclose(stat1, stat2)
|
135 |
+
assert_allclose(edges1, edges2)
|
136 |
+
|
137 |
+
def test_1d_median(self):
|
138 |
+
x = self.x
|
139 |
+
v = self.v
|
140 |
+
|
141 |
+
stat1, edges1, bc = binned_statistic(x, v, 'median', bins=10)
|
142 |
+
stat2, edges2, bc = binned_statistic(x, v, np.median, bins=10)
|
143 |
+
|
144 |
+
assert_allclose(stat1, stat2)
|
145 |
+
assert_allclose(edges1, edges2)
|
146 |
+
|
147 |
+
def test_1d_bincode(self):
|
148 |
+
x = self.x[:20]
|
149 |
+
v = self.v[:20]
|
150 |
+
|
151 |
+
count1, edges1, bc = binned_statistic(x, v, 'count', bins=3)
|
152 |
+
bc2 = np.array([3, 2, 1, 3, 2, 3, 3, 3, 3, 1, 1, 3, 3, 1, 2, 3, 1,
|
153 |
+
1, 2, 1])
|
154 |
+
|
155 |
+
bcount = [(bc == i).sum() for i in np.unique(bc)]
|
156 |
+
|
157 |
+
assert_allclose(bc, bc2)
|
158 |
+
assert_allclose(bcount, count1)
|
159 |
+
|
160 |
+
def test_1d_range_keyword(self):
|
161 |
+
# Regression test for gh-3063, range can be (min, max) or [(min, max)]
|
162 |
+
np.random.seed(9865)
|
163 |
+
x = np.arange(30)
|
164 |
+
data = np.random.random(30)
|
165 |
+
|
166 |
+
mean, bins, _ = binned_statistic(x[:15], data[:15])
|
167 |
+
mean_range, bins_range, _ = binned_statistic(x, data, range=[(0, 14)])
|
168 |
+
mean_range2, bins_range2, _ = binned_statistic(x, data, range=(0, 14))
|
169 |
+
|
170 |
+
assert_allclose(mean, mean_range)
|
171 |
+
assert_allclose(bins, bins_range)
|
172 |
+
assert_allclose(mean, mean_range2)
|
173 |
+
assert_allclose(bins, bins_range2)
|
174 |
+
|
175 |
+
def test_1d_multi_values(self):
|
176 |
+
x = self.x
|
177 |
+
v = self.v
|
178 |
+
w = self.w
|
179 |
+
|
180 |
+
stat1v, edges1v, bc1v = binned_statistic(x, v, 'mean', bins=10)
|
181 |
+
stat1w, edges1w, bc1w = binned_statistic(x, w, 'mean', bins=10)
|
182 |
+
stat2, edges2, bc2 = binned_statistic(x, [v, w], 'mean', bins=10)
|
183 |
+
|
184 |
+
assert_allclose(stat2[0], stat1v)
|
185 |
+
assert_allclose(stat2[1], stat1w)
|
186 |
+
assert_allclose(edges1v, edges2)
|
187 |
+
assert_allclose(bc1v, bc2)
|
188 |
+
|
189 |
+
def test_2d_count(self):
|
190 |
+
x = self.x
|
191 |
+
y = self.y
|
192 |
+
v = self.v
|
193 |
+
|
194 |
+
count1, binx1, biny1, bc = binned_statistic_2d(
|
195 |
+
x, y, v, 'count', bins=5)
|
196 |
+
count2, binx2, biny2 = np.histogram2d(x, y, bins=5)
|
197 |
+
|
198 |
+
assert_allclose(count1, count2)
|
199 |
+
assert_allclose(binx1, binx2)
|
200 |
+
assert_allclose(biny1, biny2)
|
201 |
+
|
202 |
+
def test_2d_result_attributes(self):
|
203 |
+
x = self.x
|
204 |
+
y = self.y
|
205 |
+
v = self.v
|
206 |
+
|
207 |
+
res = binned_statistic_2d(x, y, v, 'count', bins=5)
|
208 |
+
attributes = ('statistic', 'x_edge', 'y_edge', 'binnumber')
|
209 |
+
check_named_results(res, attributes)
|
210 |
+
|
211 |
+
def test_2d_sum(self):
|
212 |
+
x = self.x
|
213 |
+
y = self.y
|
214 |
+
v = self.v
|
215 |
+
|
216 |
+
sum1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'sum', bins=5)
|
217 |
+
sum2, binx2, biny2 = np.histogram2d(x, y, bins=5, weights=v)
|
218 |
+
|
219 |
+
assert_allclose(sum1, sum2)
|
220 |
+
assert_allclose(binx1, binx2)
|
221 |
+
assert_allclose(biny1, biny2)
|
222 |
+
|
223 |
+
def test_2d_mean(self):
|
224 |
+
x = self.x
|
225 |
+
y = self.y
|
226 |
+
v = self.v
|
227 |
+
|
228 |
+
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'mean', bins=5)
|
229 |
+
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
|
230 |
+
|
231 |
+
assert_allclose(stat1, stat2)
|
232 |
+
assert_allclose(binx1, binx2)
|
233 |
+
assert_allclose(biny1, biny2)
|
234 |
+
|
235 |
+
def test_2d_mean_unicode(self):
|
236 |
+
x = self.x
|
237 |
+
y = self.y
|
238 |
+
v = self.v
|
239 |
+
stat1, binx1, biny1, bc = binned_statistic_2d(
|
240 |
+
x, y, v, 'mean', bins=5)
|
241 |
+
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.mean, bins=5)
|
242 |
+
assert_allclose(stat1, stat2)
|
243 |
+
assert_allclose(binx1, binx2)
|
244 |
+
assert_allclose(biny1, biny2)
|
245 |
+
|
246 |
+
def test_2d_std(self):
|
247 |
+
x = self.x
|
248 |
+
y = self.y
|
249 |
+
v = self.v
|
250 |
+
|
251 |
+
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'std', bins=5)
|
252 |
+
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.std, bins=5)
|
253 |
+
|
254 |
+
assert_allclose(stat1, stat2)
|
255 |
+
assert_allclose(binx1, binx2)
|
256 |
+
assert_allclose(biny1, biny2)
|
257 |
+
|
258 |
+
def test_2d_min(self):
|
259 |
+
x = self.x
|
260 |
+
y = self.y
|
261 |
+
v = self.v
|
262 |
+
|
263 |
+
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'min', bins=5)
|
264 |
+
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.min, bins=5)
|
265 |
+
|
266 |
+
assert_allclose(stat1, stat2)
|
267 |
+
assert_allclose(binx1, binx2)
|
268 |
+
assert_allclose(biny1, biny2)
|
269 |
+
|
270 |
+
def test_2d_max(self):
|
271 |
+
x = self.x
|
272 |
+
y = self.y
|
273 |
+
v = self.v
|
274 |
+
|
275 |
+
stat1, binx1, biny1, bc = binned_statistic_2d(x, y, v, 'max', bins=5)
|
276 |
+
stat2, binx2, biny2, bc = binned_statistic_2d(x, y, v, np.max, bins=5)
|
277 |
+
|
278 |
+
assert_allclose(stat1, stat2)
|
279 |
+
assert_allclose(binx1, binx2)
|
280 |
+
assert_allclose(biny1, biny2)
|
281 |
+
|
282 |
+
def test_2d_median(self):
|
283 |
+
x = self.x
|
284 |
+
y = self.y
|
285 |
+
v = self.v
|
286 |
+
|
287 |
+
stat1, binx1, biny1, bc = binned_statistic_2d(
|
288 |
+
x, y, v, 'median', bins=5)
|
289 |
+
stat2, binx2, biny2, bc = binned_statistic_2d(
|
290 |
+
x, y, v, np.median, bins=5)
|
291 |
+
|
292 |
+
assert_allclose(stat1, stat2)
|
293 |
+
assert_allclose(binx1, binx2)
|
294 |
+
assert_allclose(biny1, biny2)
|
295 |
+
|
296 |
+
def test_2d_bincode(self):
|
297 |
+
x = self.x[:20]
|
298 |
+
y = self.y[:20]
|
299 |
+
v = self.v[:20]
|
300 |
+
|
301 |
+
count1, binx1, biny1, bc = binned_statistic_2d(
|
302 |
+
x, y, v, 'count', bins=3)
|
303 |
+
bc2 = np.array([17, 11, 6, 16, 11, 17, 18, 17, 17, 7, 6, 18, 16,
|
304 |
+
6, 11, 16, 6, 6, 11, 8])
|
305 |
+
|
306 |
+
bcount = [(bc == i).sum() for i in np.unique(bc)]
|
307 |
+
|
308 |
+
assert_allclose(bc, bc2)
|
309 |
+
count1adj = count1[count1.nonzero()]
|
310 |
+
assert_allclose(bcount, count1adj)
|
311 |
+
|
312 |
+
def test_2d_multi_values(self):
|
313 |
+
x = self.x
|
314 |
+
y = self.y
|
315 |
+
v = self.v
|
316 |
+
w = self.w
|
317 |
+
|
318 |
+
stat1v, binx1v, biny1v, bc1v = binned_statistic_2d(
|
319 |
+
x, y, v, 'mean', bins=8)
|
320 |
+
stat1w, binx1w, biny1w, bc1w = binned_statistic_2d(
|
321 |
+
x, y, w, 'mean', bins=8)
|
322 |
+
stat2, binx2, biny2, bc2 = binned_statistic_2d(
|
323 |
+
x, y, [v, w], 'mean', bins=8)
|
324 |
+
|
325 |
+
assert_allclose(stat2[0], stat1v)
|
326 |
+
assert_allclose(stat2[1], stat1w)
|
327 |
+
assert_allclose(binx1v, binx2)
|
328 |
+
assert_allclose(biny1w, biny2)
|
329 |
+
assert_allclose(bc1v, bc2)
|
330 |
+
|
331 |
+
def test_2d_binnumbers_unraveled(self):
|
332 |
+
x = self.x
|
333 |
+
y = self.y
|
334 |
+
v = self.v
|
335 |
+
|
336 |
+
stat, edgesx, bcx = binned_statistic(x, v, 'mean', bins=20)
|
337 |
+
stat, edgesy, bcy = binned_statistic(y, v, 'mean', bins=10)
|
338 |
+
|
339 |
+
stat2, edgesx2, edgesy2, bc2 = binned_statistic_2d(
|
340 |
+
x, y, v, 'mean', bins=(20, 10), expand_binnumbers=True)
|
341 |
+
|
342 |
+
bcx3 = np.searchsorted(edgesx, x, side='right')
|
343 |
+
bcy3 = np.searchsorted(edgesy, y, side='right')
|
344 |
+
|
345 |
+
# `numpy.searchsorted` is non-inclusive on right-edge, compensate
|
346 |
+
bcx3[x == x.max()] -= 1
|
347 |
+
bcy3[y == y.max()] -= 1
|
348 |
+
|
349 |
+
assert_allclose(bcx, bc2[0])
|
350 |
+
assert_allclose(bcy, bc2[1])
|
351 |
+
assert_allclose(bcx3, bc2[0])
|
352 |
+
assert_allclose(bcy3, bc2[1])
|
353 |
+
|
354 |
+
def test_dd_count(self):
|
355 |
+
X = self.X
|
356 |
+
v = self.v
|
357 |
+
|
358 |
+
count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
|
359 |
+
count2, edges2 = np.histogramdd(X, bins=3)
|
360 |
+
|
361 |
+
assert_allclose(count1, count2)
|
362 |
+
assert_allclose(edges1, edges2)
|
363 |
+
|
364 |
+
def test_dd_result_attributes(self):
|
365 |
+
X = self.X
|
366 |
+
v = self.v
|
367 |
+
|
368 |
+
res = binned_statistic_dd(X, v, 'count', bins=3)
|
369 |
+
attributes = ('statistic', 'bin_edges', 'binnumber')
|
370 |
+
check_named_results(res, attributes)
|
371 |
+
|
372 |
+
def test_dd_sum(self):
|
373 |
+
X = self.X
|
374 |
+
v = self.v
|
375 |
+
|
376 |
+
sum1, edges1, bc = binned_statistic_dd(X, v, 'sum', bins=3)
|
377 |
+
sum2, edges2 = np.histogramdd(X, bins=3, weights=v)
|
378 |
+
sum3, edges3, bc = binned_statistic_dd(X, v, np.sum, bins=3)
|
379 |
+
|
380 |
+
assert_allclose(sum1, sum2)
|
381 |
+
assert_allclose(edges1, edges2)
|
382 |
+
assert_allclose(sum1, sum3)
|
383 |
+
assert_allclose(edges1, edges3)
|
384 |
+
|
385 |
+
def test_dd_mean(self):
|
386 |
+
X = self.X
|
387 |
+
v = self.v
|
388 |
+
|
389 |
+
stat1, edges1, bc = binned_statistic_dd(X, v, 'mean', bins=3)
|
390 |
+
stat2, edges2, bc = binned_statistic_dd(X, v, np.mean, bins=3)
|
391 |
+
|
392 |
+
assert_allclose(stat1, stat2)
|
393 |
+
assert_allclose(edges1, edges2)
|
394 |
+
|
395 |
+
def test_dd_std(self):
|
396 |
+
X = self.X
|
397 |
+
v = self.v
|
398 |
+
|
399 |
+
stat1, edges1, bc = binned_statistic_dd(X, v, 'std', bins=3)
|
400 |
+
stat2, edges2, bc = binned_statistic_dd(X, v, np.std, bins=3)
|
401 |
+
|
402 |
+
assert_allclose(stat1, stat2)
|
403 |
+
assert_allclose(edges1, edges2)
|
404 |
+
|
405 |
+
def test_dd_min(self):
|
406 |
+
X = self.X
|
407 |
+
v = self.v
|
408 |
+
|
409 |
+
stat1, edges1, bc = binned_statistic_dd(X, v, 'min', bins=3)
|
410 |
+
stat2, edges2, bc = binned_statistic_dd(X, v, np.min, bins=3)
|
411 |
+
|
412 |
+
assert_allclose(stat1, stat2)
|
413 |
+
assert_allclose(edges1, edges2)
|
414 |
+
|
415 |
+
def test_dd_max(self):
|
416 |
+
X = self.X
|
417 |
+
v = self.v
|
418 |
+
|
419 |
+
stat1, edges1, bc = binned_statistic_dd(X, v, 'max', bins=3)
|
420 |
+
stat2, edges2, bc = binned_statistic_dd(X, v, np.max, bins=3)
|
421 |
+
|
422 |
+
assert_allclose(stat1, stat2)
|
423 |
+
assert_allclose(edges1, edges2)
|
424 |
+
|
425 |
+
def test_dd_median(self):
|
426 |
+
X = self.X
|
427 |
+
v = self.v
|
428 |
+
|
429 |
+
stat1, edges1, bc = binned_statistic_dd(X, v, 'median', bins=3)
|
430 |
+
stat2, edges2, bc = binned_statistic_dd(X, v, np.median, bins=3)
|
431 |
+
|
432 |
+
assert_allclose(stat1, stat2)
|
433 |
+
assert_allclose(edges1, edges2)
|
434 |
+
|
435 |
+
def test_dd_bincode(self):
|
436 |
+
X = self.X[:20]
|
437 |
+
v = self.v[:20]
|
438 |
+
|
439 |
+
count1, edges1, bc = binned_statistic_dd(X, v, 'count', bins=3)
|
440 |
+
bc2 = np.array([63, 33, 86, 83, 88, 67, 57, 33, 42, 41, 82, 83, 92,
|
441 |
+
32, 36, 91, 43, 87, 81, 81])
|
442 |
+
|
443 |
+
bcount = [(bc == i).sum() for i in np.unique(bc)]
|
444 |
+
|
445 |
+
assert_allclose(bc, bc2)
|
446 |
+
count1adj = count1[count1.nonzero()]
|
447 |
+
assert_allclose(bcount, count1adj)
|
448 |
+
|
449 |
+
def test_dd_multi_values(self):
|
450 |
+
X = self.X
|
451 |
+
v = self.v
|
452 |
+
w = self.w
|
453 |
+
|
454 |
+
for stat in ["count", "sum", "mean", "std", "min", "max", "median",
|
455 |
+
np.std]:
|
456 |
+
stat1v, edges1v, bc1v = binned_statistic_dd(X, v, stat, bins=8)
|
457 |
+
stat1w, edges1w, bc1w = binned_statistic_dd(X, w, stat, bins=8)
|
458 |
+
stat2, edges2, bc2 = binned_statistic_dd(X, [v, w], stat, bins=8)
|
459 |
+
assert_allclose(stat2[0], stat1v)
|
460 |
+
assert_allclose(stat2[1], stat1w)
|
461 |
+
assert_allclose(edges1v, edges2)
|
462 |
+
assert_allclose(edges1w, edges2)
|
463 |
+
assert_allclose(bc1v, bc2)
|
464 |
+
|
465 |
+
def test_dd_binnumbers_unraveled(self):
|
466 |
+
X = self.X
|
467 |
+
v = self.v
|
468 |
+
|
469 |
+
stat, edgesx, bcx = binned_statistic(X[:, 0], v, 'mean', bins=15)
|
470 |
+
stat, edgesy, bcy = binned_statistic(X[:, 1], v, 'mean', bins=20)
|
471 |
+
stat, edgesz, bcz = binned_statistic(X[:, 2], v, 'mean', bins=10)
|
472 |
+
|
473 |
+
stat2, edges2, bc2 = binned_statistic_dd(
|
474 |
+
X, v, 'mean', bins=(15, 20, 10), expand_binnumbers=True)
|
475 |
+
|
476 |
+
assert_allclose(bcx, bc2[0])
|
477 |
+
assert_allclose(bcy, bc2[1])
|
478 |
+
assert_allclose(bcz, bc2[2])
|
479 |
+
|
480 |
+
def test_dd_binned_statistic_result(self):
|
481 |
+
# NOTE: tests the reuse of bin_edges from previous call
|
482 |
+
x = np.random.random((10000, 3))
|
483 |
+
v = np.random.random(10000)
|
484 |
+
bins = np.linspace(0, 1, 10)
|
485 |
+
bins = (bins, bins, bins)
|
486 |
+
|
487 |
+
result = binned_statistic_dd(x, v, 'mean', bins=bins)
|
488 |
+
stat = result.statistic
|
489 |
+
|
490 |
+
result = binned_statistic_dd(x, v, 'mean',
|
491 |
+
binned_statistic_result=result)
|
492 |
+
stat2 = result.statistic
|
493 |
+
|
494 |
+
assert_allclose(stat, stat2)
|
495 |
+
|
496 |
+
def test_dd_zero_dedges(self):
|
497 |
+
x = np.random.random((10000, 3))
|
498 |
+
v = np.random.random(10000)
|
499 |
+
bins = np.linspace(0, 1, 10)
|
500 |
+
bins = np.append(bins, 1)
|
501 |
+
bins = (bins, bins, bins)
|
502 |
+
with assert_raises(ValueError, match='difference is numerically 0'):
|
503 |
+
binned_statistic_dd(x, v, 'mean', bins=bins)
|
504 |
+
|
505 |
+
def test_dd_range_errors(self):
|
506 |
+
# Test that descriptive exceptions are raised as appropriate for bad
|
507 |
+
# values of the `range` argument. (See gh-12996)
|
508 |
+
with assert_raises(ValueError,
|
509 |
+
match='In range, start must be <= stop'):
|
510 |
+
binned_statistic_dd([self.y], self.v,
|
511 |
+
range=[[1, 0]])
|
512 |
+
with assert_raises(
|
513 |
+
ValueError,
|
514 |
+
match='In dimension 1 of range, start must be <= stop'):
|
515 |
+
binned_statistic_dd([self.x, self.y], self.v,
|
516 |
+
range=[[1, 0], [0, 1]])
|
517 |
+
with assert_raises(
|
518 |
+
ValueError,
|
519 |
+
match='In dimension 2 of range, start must be <= stop'):
|
520 |
+
binned_statistic_dd([self.x, self.y], self.v,
|
521 |
+
range=[[0, 1], [1, 0]])
|
522 |
+
with assert_raises(
|
523 |
+
ValueError,
|
524 |
+
match='range given for 1 dimensions; 2 required'):
|
525 |
+
binned_statistic_dd([self.x, self.y], self.v,
|
526 |
+
range=[[0, 1]])
|
527 |
+
|
528 |
+
def test_binned_statistic_float32(self):
|
529 |
+
X = np.array([0, 0.42358226], dtype=np.float32)
|
530 |
+
stat, _, _ = binned_statistic(X, None, 'count', bins=5)
|
531 |
+
assert_allclose(stat, np.array([1, 0, 0, 0, 1], dtype=np.float64))
|
532 |
+
|
533 |
+
def test_gh14332(self):
|
534 |
+
# Test the wrong output when the `sample` is close to bin edge
|
535 |
+
x = []
|
536 |
+
size = 20
|
537 |
+
for i in range(size):
|
538 |
+
x += [1-0.1**i]
|
539 |
+
|
540 |
+
bins = np.linspace(0,1,11)
|
541 |
+
sum1, edges1, bc = binned_statistic_dd(x, np.ones(len(x)),
|
542 |
+
bins=[bins], statistic='sum')
|
543 |
+
sum2, edges2 = np.histogram(x, bins=bins)
|
544 |
+
|
545 |
+
assert_allclose(sum1, sum2)
|
546 |
+
assert_allclose(edges1[0], edges2)
|
547 |
+
|
548 |
+
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
|
549 |
+
@pytest.mark.parametrize("statistic", [np.mean, np.median, np.sum, np.std,
|
550 |
+
np.min, np.max, 'count',
|
551 |
+
lambda x: (x**2).sum(),
|
552 |
+
lambda x: (x**2).sum() * 1j])
|
553 |
+
def test_dd_all(self, dtype, statistic):
|
554 |
+
def ref_statistic(x):
|
555 |
+
return len(x) if statistic == 'count' else statistic(x)
|
556 |
+
|
557 |
+
rng = np.random.default_rng(3704743126639371)
|
558 |
+
n = 10
|
559 |
+
x = rng.random(size=n)
|
560 |
+
i = x >= 0.5
|
561 |
+
v = rng.random(size=n)
|
562 |
+
if dtype is np.complex128:
|
563 |
+
v = v + rng.random(size=n)*1j
|
564 |
+
|
565 |
+
stat, _, _ = binned_statistic_dd(x, v, statistic, bins=2)
|
566 |
+
ref = np.array([ref_statistic(v[~i]), ref_statistic(v[i])])
|
567 |
+
assert_allclose(stat, ref)
|
568 |
+
assert stat.dtype == np.result_type(ref.dtype, np.float64)
|
.venv/Lib/site-packages/scipy/stats/tests/test_boost_ufuncs.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
import numpy as np
|
3 |
+
from numpy.testing import assert_allclose
|
4 |
+
from scipy.stats import _boost
|
5 |
+
|
6 |
+
|
7 |
+
type_char_to_type_tol = {'f': (np.float32, 32*np.finfo(np.float32).eps),
|
8 |
+
'd': (np.float64, 32*np.finfo(np.float64).eps)}
|
9 |
+
|
10 |
+
|
11 |
+
# Each item in this list is
|
12 |
+
# (func, args, expected_value)
|
13 |
+
# All the values can be represented exactly, even with np.float32.
|
14 |
+
#
|
15 |
+
# This is not an exhaustive test data set of all the functions!
|
16 |
+
# It is a spot check of several functions, primarily for
|
17 |
+
# checking that the different data types are handled correctly.
|
18 |
+
test_data = [
|
19 |
+
(_boost._beta_cdf, (0.5, 2, 3), 0.6875),
|
20 |
+
(_boost._beta_ppf, (0.6875, 2, 3), 0.5),
|
21 |
+
(_boost._beta_pdf, (0.5, 2, 3), 1.5),
|
22 |
+
(_boost._beta_pdf, (0, 1, 5), 5.0),
|
23 |
+
(_boost._beta_pdf, (1, 5, 1), 5.0),
|
24 |
+
(_boost._beta_sf, (0.5, 2, 1), 0.75),
|
25 |
+
(_boost._beta_isf, (0.75, 2, 1), 0.5),
|
26 |
+
(_boost._binom_cdf, (1, 3, 0.5), 0.5),
|
27 |
+
(_boost._binom_pdf, (1, 4, 0.5), 0.25),
|
28 |
+
(_boost._hypergeom_cdf, (2, 3, 5, 6), 0.5),
|
29 |
+
(_boost._nbinom_cdf, (1, 4, 0.25), 0.015625),
|
30 |
+
(_boost._ncf_mean, (10, 12, 2.5), 1.5),
|
31 |
+
]
|
32 |
+
|
33 |
+
|
34 |
+
@pytest.mark.parametrize('func, args, expected', test_data)
|
35 |
+
def test_stats_boost_ufunc(func, args, expected):
|
36 |
+
type_sigs = func.types
|
37 |
+
type_chars = [sig.split('->')[-1] for sig in type_sigs]
|
38 |
+
for type_char in type_chars:
|
39 |
+
typ, rtol = type_char_to_type_tol[type_char]
|
40 |
+
args = [typ(arg) for arg in args]
|
41 |
+
# Harmless overflow warnings are a "feature" of some wrappers on some
|
42 |
+
# platforms. This test is about dtype and accuracy, so let's avoid false
|
43 |
+
# test failures cause by these warnings. See gh-17432.
|
44 |
+
with np.errstate(over='ignore'):
|
45 |
+
value = func(*args)
|
46 |
+
assert isinstance(value, typ)
|
47 |
+
assert_allclose(value, expected, rtol=rtol)
|
.venv/Lib/site-packages/scipy/stats/tests/test_censored_data.py
ADDED
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Tests for the CensoredData class.
|
2 |
+
|
3 |
+
import pytest
|
4 |
+
import numpy as np
|
5 |
+
from numpy.testing import assert_equal, assert_array_equal
|
6 |
+
from scipy.stats import CensoredData
|
7 |
+
|
8 |
+
|
9 |
+
class TestCensoredData:
|
10 |
+
|
11 |
+
def test_basic(self):
|
12 |
+
uncensored = [1]
|
13 |
+
left = [0]
|
14 |
+
right = [2, 5]
|
15 |
+
interval = [[2, 3]]
|
16 |
+
data = CensoredData(uncensored, left=left, right=right,
|
17 |
+
interval=interval)
|
18 |
+
assert_equal(data._uncensored, uncensored)
|
19 |
+
assert_equal(data._left, left)
|
20 |
+
assert_equal(data._right, right)
|
21 |
+
assert_equal(data._interval, interval)
|
22 |
+
|
23 |
+
udata = data._uncensor()
|
24 |
+
assert_equal(udata, np.concatenate((uncensored, left, right,
|
25 |
+
np.mean(interval, axis=1))))
|
26 |
+
|
27 |
+
def test_right_censored(self):
|
28 |
+
x = np.array([0, 3, 2.5])
|
29 |
+
is_censored = np.array([0, 1, 0], dtype=bool)
|
30 |
+
data = CensoredData.right_censored(x, is_censored)
|
31 |
+
assert_equal(data._uncensored, x[~is_censored])
|
32 |
+
assert_equal(data._right, x[is_censored])
|
33 |
+
assert_equal(data._left, [])
|
34 |
+
assert_equal(data._interval, np.empty((0, 2)))
|
35 |
+
|
36 |
+
def test_left_censored(self):
|
37 |
+
x = np.array([0, 3, 2.5])
|
38 |
+
is_censored = np.array([0, 1, 0], dtype=bool)
|
39 |
+
data = CensoredData.left_censored(x, is_censored)
|
40 |
+
assert_equal(data._uncensored, x[~is_censored])
|
41 |
+
assert_equal(data._left, x[is_censored])
|
42 |
+
assert_equal(data._right, [])
|
43 |
+
assert_equal(data._interval, np.empty((0, 2)))
|
44 |
+
|
45 |
+
def test_interval_censored_basic(self):
|
46 |
+
a = [0.5, 2.0, 3.0, 5.5]
|
47 |
+
b = [1.0, 2.5, 3.5, 7.0]
|
48 |
+
data = CensoredData.interval_censored(low=a, high=b)
|
49 |
+
assert_array_equal(data._interval, np.array(list(zip(a, b))))
|
50 |
+
assert data._uncensored.shape == (0,)
|
51 |
+
assert data._left.shape == (0,)
|
52 |
+
assert data._right.shape == (0,)
|
53 |
+
|
54 |
+
def test_interval_censored_mixed(self):
|
55 |
+
# This is actually a mix of uncensored, left-censored, right-censored
|
56 |
+
# and interval-censored data. Check that when the `interval_censored`
|
57 |
+
# class method is used, the data is correctly separated into the
|
58 |
+
# appropriate arrays.
|
59 |
+
a = [0.5, -np.inf, -13.0, 2.0, 1.0, 10.0, -1.0]
|
60 |
+
b = [0.5, 2500.0, np.inf, 3.0, 1.0, 11.0, np.inf]
|
61 |
+
data = CensoredData.interval_censored(low=a, high=b)
|
62 |
+
assert_array_equal(data._interval, [[2.0, 3.0], [10.0, 11.0]])
|
63 |
+
assert_array_equal(data._uncensored, [0.5, 1.0])
|
64 |
+
assert_array_equal(data._left, [2500.0])
|
65 |
+
assert_array_equal(data._right, [-13.0, -1.0])
|
66 |
+
|
67 |
+
def test_interval_to_other_types(self):
|
68 |
+
# The interval parameter can represent uncensored and
|
69 |
+
# left- or right-censored data. Test the conversion of such
|
70 |
+
# an example to the canonical form in which the different
|
71 |
+
# types have been split into the separate arrays.
|
72 |
+
interval = np.array([[0, 1], # interval-censored
|
73 |
+
[2, 2], # not censored
|
74 |
+
[3, 3], # not censored
|
75 |
+
[9, np.inf], # right-censored
|
76 |
+
[8, np.inf], # right-censored
|
77 |
+
[-np.inf, 0], # left-censored
|
78 |
+
[1, 2]]) # interval-censored
|
79 |
+
data = CensoredData(interval=interval)
|
80 |
+
assert_equal(data._uncensored, [2, 3])
|
81 |
+
assert_equal(data._left, [0])
|
82 |
+
assert_equal(data._right, [9, 8])
|
83 |
+
assert_equal(data._interval, [[0, 1], [1, 2]])
|
84 |
+
|
85 |
+
def test_empty_arrays(self):
|
86 |
+
data = CensoredData(uncensored=[], left=[], right=[], interval=[])
|
87 |
+
assert data._uncensored.shape == (0,)
|
88 |
+
assert data._left.shape == (0,)
|
89 |
+
assert data._right.shape == (0,)
|
90 |
+
assert data._interval.shape == (0, 2)
|
91 |
+
assert len(data) == 0
|
92 |
+
|
93 |
+
def test_invalid_constructor_args(self):
|
94 |
+
with pytest.raises(ValueError, match='must be a one-dimensional'):
|
95 |
+
CensoredData(uncensored=[[1, 2, 3]])
|
96 |
+
with pytest.raises(ValueError, match='must be a one-dimensional'):
|
97 |
+
CensoredData(left=[[1, 2, 3]])
|
98 |
+
with pytest.raises(ValueError, match='must be a one-dimensional'):
|
99 |
+
CensoredData(right=[[1, 2, 3]])
|
100 |
+
with pytest.raises(ValueError, match='must be a two-dimensional'):
|
101 |
+
CensoredData(interval=[[1, 2, 3]])
|
102 |
+
|
103 |
+
with pytest.raises(ValueError, match='must not contain nan'):
|
104 |
+
CensoredData(uncensored=[1, np.nan, 2])
|
105 |
+
with pytest.raises(ValueError, match='must not contain nan'):
|
106 |
+
CensoredData(left=[1, np.nan, 2])
|
107 |
+
with pytest.raises(ValueError, match='must not contain nan'):
|
108 |
+
CensoredData(right=[1, np.nan, 2])
|
109 |
+
with pytest.raises(ValueError, match='must not contain nan'):
|
110 |
+
CensoredData(interval=[[1, np.nan], [2, 3]])
|
111 |
+
|
112 |
+
with pytest.raises(ValueError,
|
113 |
+
match='both values must not be infinite'):
|
114 |
+
CensoredData(interval=[[1, 3], [2, 9], [np.inf, np.inf]])
|
115 |
+
|
116 |
+
with pytest.raises(ValueError,
|
117 |
+
match='left value must not exceed the right'):
|
118 |
+
CensoredData(interval=[[1, 0], [2, 2]])
|
119 |
+
|
120 |
+
@pytest.mark.parametrize('func', [CensoredData.left_censored,
|
121 |
+
CensoredData.right_censored])
|
122 |
+
def test_invalid_left_right_censored_args(self, func):
|
123 |
+
with pytest.raises(ValueError,
|
124 |
+
match='`x` must be one-dimensional'):
|
125 |
+
func([[1, 2, 3]], [0, 1, 1])
|
126 |
+
with pytest.raises(ValueError,
|
127 |
+
match='`censored` must be one-dimensional'):
|
128 |
+
func([1, 2, 3], [[0, 1, 1]])
|
129 |
+
with pytest.raises(ValueError, match='`x` must not contain'):
|
130 |
+
func([1, 2, np.nan], [0, 1, 1])
|
131 |
+
with pytest.raises(ValueError, match='must have the same length'):
|
132 |
+
func([1, 2, 3], [0, 0, 1, 1])
|
133 |
+
|
134 |
+
def test_invalid_censored_args(self):
|
135 |
+
with pytest.raises(ValueError,
|
136 |
+
match='`low` must be a one-dimensional'):
|
137 |
+
CensoredData.interval_censored(low=[[3]], high=[4, 5])
|
138 |
+
with pytest.raises(ValueError,
|
139 |
+
match='`high` must be a one-dimensional'):
|
140 |
+
CensoredData.interval_censored(low=[3], high=[[4, 5]])
|
141 |
+
with pytest.raises(ValueError, match='`low` must not contain'):
|
142 |
+
CensoredData.interval_censored([1, 2, np.nan], [0, 1, 1])
|
143 |
+
with pytest.raises(ValueError, match='must have the same length'):
|
144 |
+
CensoredData.interval_censored([1, 2, 3], [0, 0, 1, 1])
|
145 |
+
|
146 |
+
def test_count_censored(self):
|
147 |
+
x = [1, 2, 3]
|
148 |
+
# data1 has no censored data.
|
149 |
+
data1 = CensoredData(x)
|
150 |
+
assert data1.num_censored() == 0
|
151 |
+
data2 = CensoredData(uncensored=[2.5], left=[10], interval=[[0, 1]])
|
152 |
+
assert data2.num_censored() == 2
|
.venv/Lib/site-packages/scipy/stats/tests/test_contingency.py
ADDED
@@ -0,0 +1,241 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from numpy.testing import (assert_equal, assert_array_equal,
|
3 |
+
assert_array_almost_equal, assert_approx_equal,
|
4 |
+
assert_allclose)
|
5 |
+
import pytest
|
6 |
+
from pytest import raises as assert_raises
|
7 |
+
from scipy.special import xlogy
|
8 |
+
from scipy.stats.contingency import (margins, expected_freq,
|
9 |
+
chi2_contingency, association)
|
10 |
+
|
11 |
+
|
12 |
+
def test_margins():
    """Check ``margins`` on 1-, 2- and 3-dimensional contingency tables."""
    # 1-D: the single margin is the table itself.
    table = np.array([1])
    result = margins(table)
    assert_equal(len(result), 1)
    assert_array_equal(result[0], np.array([1]))

    # 2-D, single cell: both margins equal the table.
    table = np.array([[1]])
    row_margin, col_margin = margins(table)
    assert_array_equal(row_margin, np.array([[1]]))
    assert_array_equal(col_margin, np.array([[1]]))

    # 2-D: margins are sums over the other axis, keeping dimensions.
    table = np.arange(12).reshape(2, 6)
    row_margin, col_margin = margins(table)
    assert_array_equal(row_margin, np.array([[15], [51]]))
    assert_array_equal(col_margin, np.array([[6, 8, 10, 12, 14, 16]]))

    # 3-D: one margin per axis.
    table = np.arange(24).reshape(2, 3, 4)
    m0, m1, m2 = margins(table)
    assert_array_equal(m0, np.array([[[66]], [[210]]]))
    assert_array_equal(m1, np.array([[[60], [92], [124]]]))
    assert_array_equal(m2, np.array([[[60, 66, 72, 78]]]))
|
41 |
+
|
42 |
+
|
def test_expected_freq():
    """Check expected_freq() on trivial, uniform and hand-computed tables."""
    # Trivial 1-D input.
    assert_array_equal(expected_freq([1]), np.array([1.0]))

    # A 3-D table whose expected frequencies under independence are all 1.
    table = np.array([[[2, 0], [0, 2]], [[0, 2], [2, 0]], [[1, 1], [1, 1]]])
    assert_array_equal(expected_freq(table), np.ones_like(table))

    # A 2-D table checked against hand-computed expected frequencies.
    table = np.array([[10, 10, 20], [20, 20, 20]])
    reference = np.array([[12., 12., 16.], [18., 18., 24.]])
    assert_array_almost_equal(expected_freq(table), reference)
54 |
+
|
55 |
+
|
def test_chi2_contingency_trivial():
    """Very simple sanity checks for chi2_contingency."""
    # Proportional rows (2-D) and a 1-D table both give a statistic of
    # exactly 0 and p-value 1; only the degrees of freedom differ.
    for table, expected_dof in [(np.array([[1, 2], [1, 2]]), 1),
                                (np.array([1, 2, 3]), 0)]:
        stat, p, dof, expected = chi2_contingency(table, correction=False)
        assert_equal(stat, 0.0)
        assert_equal(p, 1.0)
        assert_equal(dof, expected_dof)
        assert_array_equal(table, expected)
74 |
+
|
75 |
+
|
def test_chi2_contingency_R():
    """Compare chi2_contingency against values computed independently in R."""
    # 3-way table. R reference (via `summary(xtabs(data~r+c+t))`):
    #   Number of cases in table: 478, Number of factors: 3
    #   Chisq = 102.17, df = 17, p-value = 3.514e-14
    table = np.array(
        [[[12, 34, 23],
          [35, 31, 11],
          [12, 32, 9],
          [12, 12, 14]],
         [[4, 47, 11],
          [34, 10, 18],
          [18, 13, 19],
          [9, 33, 25]]])
    stat, p, dof, expected = chi2_contingency(table)
    assert_approx_equal(stat, 102.17, significant=5)
    assert_approx_equal(p, 3.514e-14, significant=4)
    assert_equal(dof, 17)

    # 4-way table. R reference (via `summary(xtabs(data~r+c+d+t))`):
    #   Number of cases in table: 262, Number of factors: 4
    #   Chisq = 8.758, df = 11, p-value = 0.6442
    table = np.array(
        [[[[12, 17],
           [11, 16]],
          [[11, 12],
           [15, 16]]],
         [[[23, 15],
           [30, 22]],
          [[14, 17],
           [15, 16]]]])
    stat, p, dof, expected = chi2_contingency(table)
    assert_approx_equal(stat, 8.758, significant=4)
    assert_approx_equal(p, 0.6442, significant=4)
    assert_equal(dof, 11)
169 |
+
|
170 |
+
|
def test_chi2_contingency_g():
    """Check the log-likelihood (G-test) statistic of chi2_contingency."""
    # With lambda_='log-likelihood' the statistic is
    # G = 2 * sum(obs * log(obs / expected)).
    table = np.array([[15, 60], [15, 90]])
    g, p, dof, expected = chi2_contingency(table, lambda_='log-likelihood',
                                           correction=False)
    assert_allclose(g, 2*xlogy(table, table/expected).sum())

    # Yates' correction moves each cell half a count toward independence.
    g, p, dof, expected = chi2_contingency(table, lambda_='log-likelihood',
                                           correction=True)
    adjusted = table + np.array([[-0.5, 0.5], [0.5, -0.5]])
    assert_allclose(g, 2*xlogy(adjusted, adjusted/expected).sum())

    # A table wider than 2x2 never receives the continuity correction.
    table = np.array([[10, 12, 10], [12, 10, 10]])
    g, p, dof, expected = chi2_contingency(table, lambda_='log-likelihood')
    assert_allclose(g, 2*xlogy(table, table/expected).sum())
185 |
+
|
186 |
+
|
def test_chi2_contingency_bad_args():
    """chi2_contingency must raise ValueError on invalid tables."""
    bad_tables = [
        np.array([[-1, 10], [1, 2]]),  # negative observed frequency
        np.array([[0, 1], [0, 1]]),    # zero column -> zero expected freqs
        np.empty((0, 8)),              # degenerate: size-0 table
    ]
    for table in bad_tables:
        assert_raises(ValueError, chi2_contingency, table)
202 |
+
|
203 |
+
|
def test_chi2_contingency_yates_gh13875():
    """Regression test for gh-13875.

    The magnitude of Yates' continuity correction must not exceed the
    difference between the observed and expected values of the statistic;
    for this extreme table the corrected p-value must be 1.
    """
    table = np.array([[1573, 3], [4, 0]])
    pvalue = chi2_contingency(table)[1]
    assert_allclose(pvalue, 1, rtol=1e-12)
210 |
+
|
211 |
+
|
@pytest.mark.parametrize("correction", [False, True])
def test_result(correction):
    """The result object unpacks as (statistic, pvalue, dof, expected_freq)."""
    table = np.array([[1, 2], [1, 2]])
    res = chi2_contingency(table, correction=correction)
    # Tuple-unpacking order must match the named attributes.
    assert_equal((res.statistic, res.pvalue, res.dof, res.expected_freq), res)
217 |
+
|
218 |
+
|
def test_bad_association_args():
    """association() must raise ValueError for each kind of bad input."""
    bad_calls = [
        ([[1, 2], [3, 4]], "X"),            # unknown test statistic name
        ([[[1, 2]], [[3, 4]]], "cramer"),   # array is not 2-D
        ([[-1, 10], [1, 2]], 'cramer'),     # propagated chi2_contingency error
        # non-numeric item in the table
        (np.array([[1, 2], ["dd", 4]], dtype=object), 'cramer'),
    ]
    for table, method in bad_calls:
        assert_raises(ValueError, association, table, method)
229 |
+
|
230 |
+
|
@pytest.mark.parametrize('stat, expected',
                         [('cramer', 0.09222412010290792),
                          ('tschuprow', 0.0775509319944633),
                          ('pearson', 0.12932925727138758)])
def test_assoc(stat, expected):
    """Each association measure on a fixed 3x5 table matches its reference."""
    table = np.array([[12, 13, 14, 15, 16],
                      [17, 16, 18, 19, 11],
                      [9, 15, 14, 12, 11]])
    assert_allclose(association(observed=table, method=stat), expected)
.venv/Lib/site-packages/scipy/stats/tests/test_continuous_basic.py
ADDED
@@ -0,0 +1,1016 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
import numpy as np
|
3 |
+
import numpy.testing as npt
|
4 |
+
import pytest
|
5 |
+
from pytest import raises as assert_raises
|
6 |
+
from scipy.integrate import IntegrationWarning
|
7 |
+
import itertools
|
8 |
+
|
9 |
+
from scipy import stats
|
10 |
+
from .common_tests import (check_normalization, check_moment,
|
11 |
+
check_mean_expect,
|
12 |
+
check_var_expect, check_skew_expect,
|
13 |
+
check_kurt_expect, check_entropy,
|
14 |
+
check_private_entropy, check_entropy_vect_scale,
|
15 |
+
check_edge_support, check_named_args,
|
16 |
+
check_random_state_property,
|
17 |
+
check_meth_dtype, check_ppf_dtype,
|
18 |
+
check_cmplx_deriv,
|
19 |
+
check_pickling, check_rvs_broadcast,
|
20 |
+
check_freezing, check_munp_expect,)
|
21 |
+
from scipy.stats._distr_params import distcont
|
22 |
+
from scipy.stats._distn_infrastructure import rv_continuous_frozen
|
23 |
+
|
24 |
+
"""
|
25 |
+
Test all continuous distributions.
|
26 |
+
|
27 |
+
Parameters were chosen for those distributions that pass the
|
28 |
+
Kolmogorov-Smirnov test. This provides safe parameters for each
|
29 |
+
distributions so that we can perform further testing of class methods.
|
30 |
+
|
31 |
+
These tests currently check only/mostly for serious errors and exceptions,
|
32 |
+
not for numerically exact results.
|
33 |
+
"""
|
34 |
+
|
35 |
+
# Note that you need to add new distributions you want tested
|
36 |
+
# to _distr_params
|
37 |
+
|
38 |
+
DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5
|
39 |
+
_IS_32BIT = (sys.maxsize < 2**32)
|
40 |
+
|
41 |
+
# For skipping test_cont_basic
|
42 |
+
distslow = ['recipinvgauss', 'vonmises', 'kappa4', 'vonmises_line',
|
43 |
+
'gausshyper', 'norminvgauss', 'geninvgauss', 'genhyperbolic',
|
44 |
+
'truncnorm', 'truncweibull_min']
|
45 |
+
|
46 |
+
# distxslow are sorted by speed (very slow to slow)
|
47 |
+
distxslow = ['studentized_range', 'kstwo', 'ksone', 'wrapcauchy', 'genexpon']
|
48 |
+
|
49 |
+
# For skipping test_moments, which is already marked slow
|
50 |
+
distxslow_test_moments = ['studentized_range', 'vonmises', 'vonmises_line',
|
51 |
+
'ksone', 'kstwo', 'recipinvgauss', 'genexpon']
|
52 |
+
|
53 |
+
# skip check_fit_args (test is slow)
|
54 |
+
skip_fit_test_mle = ['exponpow', 'exponweib', 'gausshyper', 'genexpon',
|
55 |
+
'halfgennorm', 'gompertz', 'johnsonsb', 'johnsonsu',
|
56 |
+
'kappa4', 'ksone', 'kstwo', 'kstwobign', 'mielke', 'ncf',
|
57 |
+
'nct', 'powerlognorm', 'powernorm', 'recipinvgauss',
|
58 |
+
'trapezoid', 'vonmises', 'vonmises_line', 'levy_stable',
|
59 |
+
'rv_histogram_instance', 'studentized_range']
|
60 |
+
|
61 |
+
# these were really slow in `test_fit`.py.
|
62 |
+
# note that this list is used to skip both fit_test and fit_fix tests
|
63 |
+
slow_fit_test_mm = ['argus', 'exponpow', 'exponweib', 'gausshyper', 'genexpon',
|
64 |
+
'genhalflogistic', 'halfgennorm', 'gompertz', 'johnsonsb',
|
65 |
+
'kappa4', 'kstwobign', 'recipinvgauss',
|
66 |
+
'trapezoid', 'truncexpon', 'vonmises', 'vonmises_line',
|
67 |
+
'studentized_range']
|
68 |
+
# pearson3 fails due to something weird
|
69 |
+
# the first list fails due to non-finite distribution moments encountered
|
70 |
+
# most of the rest fail due to integration warnings
|
71 |
+
# pearson3 is overridden as not implemented due to gh-11746
|
72 |
+
fail_fit_test_mm = (['alpha', 'betaprime', 'bradford', 'burr', 'burr12',
|
73 |
+
'cauchy', 'crystalball', 'f', 'fisk', 'foldcauchy',
|
74 |
+
'genextreme', 'genpareto', 'halfcauchy', 'invgamma',
|
75 |
+
'jf_skew_t', 'kappa3', 'levy', 'levy_l', 'loglaplace',
|
76 |
+
'lomax', 'mielke', 'nakagami', 'ncf', 'skewcauchy', 't',
|
77 |
+
'tukeylambda', 'invweibull', 'rel_breitwigner']
|
78 |
+
+ ['genhyperbolic', 'johnsonsu', 'ksone', 'kstwo',
|
79 |
+
'nct', 'pareto', 'powernorm', 'powerlognorm']
|
80 |
+
+ ['pearson3'])
|
81 |
+
|
82 |
+
skip_fit_test = {"MLE": skip_fit_test_mle,
|
83 |
+
"MM": slow_fit_test_mm + fail_fit_test_mm}
|
84 |
+
|
85 |
+
# skip check_fit_args_fix (test is slow)
|
86 |
+
skip_fit_fix_test_mle = ['burr', 'exponpow', 'exponweib', 'gausshyper',
|
87 |
+
'genexpon', 'halfgennorm', 'gompertz', 'johnsonsb',
|
88 |
+
'johnsonsu', 'kappa4', 'ksone', 'kstwo', 'kstwobign',
|
89 |
+
'levy_stable', 'mielke', 'ncf', 'ncx2',
|
90 |
+
'powerlognorm', 'powernorm', 'rdist', 'recipinvgauss',
|
91 |
+
'trapezoid', 'truncpareto', 'vonmises', 'vonmises_line',
|
92 |
+
'studentized_range']
|
93 |
+
# the first list fails due to non-finite distribution moments encountered
|
94 |
+
# most of the rest fail due to integration warnings
|
95 |
+
# pearson3 is overridden as not implemented due to gh-11746
|
96 |
+
fail_fit_fix_test_mm = (['alpha', 'betaprime', 'burr', 'burr12', 'cauchy',
|
97 |
+
'crystalball', 'f', 'fisk', 'foldcauchy',
|
98 |
+
'genextreme', 'genpareto', 'halfcauchy', 'invgamma',
|
99 |
+
'jf_skew_t', 'kappa3', 'levy', 'levy_l', 'loglaplace',
|
100 |
+
'lomax', 'mielke', 'nakagami', 'ncf', 'nct',
|
101 |
+
'skewcauchy', 't', 'truncpareto', 'invweibull']
|
102 |
+
+ ['genhyperbolic', 'johnsonsu', 'ksone', 'kstwo',
|
103 |
+
'pareto', 'powernorm', 'powerlognorm']
|
104 |
+
+ ['pearson3'])
|
105 |
+
skip_fit_fix_test = {"MLE": skip_fit_fix_test_mle,
|
106 |
+
"MM": slow_fit_test_mm + fail_fit_fix_test_mm}
|
107 |
+
|
108 |
+
# These distributions fail the complex derivative test below.
|
109 |
+
# Here 'fail' mean produce wrong results and/or raise exceptions, depending
|
110 |
+
# on the implementation details of corresponding special functions.
|
111 |
+
# cf https://github.com/scipy/scipy/pull/4979 for a discussion.
|
112 |
+
fails_cmplx = {'argus', 'beta', 'betaprime', 'chi', 'chi2', 'cosine',
|
113 |
+
'dgamma', 'dweibull', 'erlang', 'f', 'foldcauchy', 'gamma',
|
114 |
+
'gausshyper', 'gengamma', 'genhyperbolic',
|
115 |
+
'geninvgauss', 'gennorm', 'genpareto',
|
116 |
+
'halfcauchy', 'halfgennorm', 'invgamma', 'jf_skew_t',
|
117 |
+
'ksone', 'kstwo', 'kstwobign', 'levy_l', 'loggamma',
|
118 |
+
'logistic', 'loguniform', 'maxwell', 'nakagami',
|
119 |
+
'ncf', 'nct', 'ncx2', 'norminvgauss', 'pearson3',
|
120 |
+
'powerlaw', 'rdist', 'reciprocal', 'rice',
|
121 |
+
'skewnorm', 't', 'truncweibull_min',
|
122 |
+
'tukeylambda', 'vonmises', 'vonmises_line',
|
123 |
+
'rv_histogram_instance', 'truncnorm', 'studentized_range',
|
124 |
+
'johnsonsb', 'halflogistic', 'rel_breitwigner'}
|
125 |
+
|
126 |
+
|
127 |
+
# rv_histogram instances, with uniform and non-uniform bins;
|
128 |
+
# stored as (dist, arg) tuples for cases_test_cont_basic
|
129 |
+
# and cases_test_moments.
|
130 |
+
histogram_test_instances = []
|
131 |
+
case1 = {'a': [1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6,
|
132 |
+
6, 6, 6, 7, 7, 7, 8, 8, 9], 'bins': 8} # equal width bins
|
133 |
+
case2 = {'a': [1, 1], 'bins': [0, 1, 10]} # unequal width bins
|
134 |
+
for case, density in itertools.product([case1, case2], [True, False]):
|
135 |
+
_hist = np.histogram(**case, density=density)
|
136 |
+
_rv_hist = stats.rv_histogram(_hist, density=density)
|
137 |
+
histogram_test_instances.append((_rv_hist, tuple()))
|
138 |
+
|
139 |
+
|
def cases_test_cont_basic():
    """Yield (distname, arg) cases for test_cont_basic.

    levy_stable is excluded entirely; known-slow distributions are
    wrapped in pytest slow/xslow marks so they are skipped by default.
    """
    for distname, arg in distcont[:] + histogram_test_instances:
        if distname == 'levy_stable':
            continue
        if distname in distslow:
            yield pytest.param(distname, arg, marks=pytest.mark.slow)
        elif distname in distxslow:
            yield pytest.param(distname, arg, marks=pytest.mark.xslow)
        else:
            yield distname, arg
150 |
+
|
151 |
+
|
@pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
@pytest.mark.parametrize('sn, n_fit_samples', [(500, 200)])
def test_cont_basic(distname, arg, sn, n_fit_samples):
    """Run the battery of generic checks against one continuous distribution.

    `distname` is either the name of a scipy.stats distribution or an
    rv_histogram instance (which is not a string, hence the TypeError
    fallback below). This test skips slow distributions.
    """
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        # `distname` is already a frozen rv_histogram instance.
        distfn = distname
        distname = 'rv_histogram_instance'

    rng = np.random.RandomState(765456)
    rvs = distfn.rvs(size=sn, *arg, random_state=rng)
    m, v = distfn.stats(*arg)

    if distname not in {'laplace_asymmetric'}:
        check_sample_meanvar_(m, v, rvs)
    check_cdf_ppf(distfn, arg, distname)
    check_sf_isf(distfn, arg, distname)
    check_cdf_sf(distfn, arg, distname)
    check_ppf_isf(distfn, arg, distname)
    check_pdf(distfn, arg, distname)
    check_pdf_logpdf(distfn, arg, distname)
    check_pdf_logpdf_at_endpoints(distfn, arg, distname)
    check_cdf_logcdf(distfn, arg, distname)
    check_sf_logsf(distfn, arg, distname)
    check_ppf_broadcast(distfn, arg, distname)

    alpha = 0.01
    if distname == 'rv_histogram_instance':
        check_distribution_rvs(distfn.cdf, arg, alpha, rvs)
    elif distname != 'geninvgauss':
        # skip kstest for geninvgauss since cdf is too slow; see test for
        # rv generation in TestGenInvGauss in test_distributions.py
        check_distribution_rvs(distname, arg, alpha, rvs)

    locscale_defaults = (0, 1)
    meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
             distfn.logsf]
    # make sure arguments are within support
    spec_x = {'weibull_max': -0.5, 'levy_l': -0.5,
              'pareto': 1.5, 'truncpareto': 3.2, 'tukeylambda': 0.3,
              'rv_histogram_instance': 5.0}
    x = spec_x.get(distname, 0.5)
    if distname == 'invweibull':
        arg = (1,)
    elif distname == 'ksone':
        arg = (3,)

    check_named_args(distfn, x, arg, locscale_defaults, meths)
    check_random_state_property(distfn, arg)

    if distname in ['rel_breitwigner'] and _IS_32BIT:
        # gh18414
        pytest.skip("fails on Linux 32-bit")
    else:
        check_pickling(distfn, arg)
    check_freezing(distfn, arg)

    # Entropy
    if distname not in ['kstwobign', 'kstwo', 'ncf']:
        check_entropy(distfn, arg, distname)

    if distfn.numargs == 0:
        check_vecentropy(distfn, arg)

    if (distfn.__class__._entropy != stats.rv_continuous._entropy
            and distname != 'vonmises'):
        check_private_entropy(distfn, arg, stats.rv_continuous)

    with npt.suppress_warnings() as sup:
        sup.filter(IntegrationWarning, "The occurrence of roundoff error")
        sup.filter(IntegrationWarning, "Extremely bad integrand")
        sup.filter(RuntimeWarning, "invalid value")
        check_entropy_vect_scale(distfn, arg)

    check_retrieving_support(distfn, arg)
    check_edge_support(distfn, arg)

    check_meth_dtype(distfn, arg, meths)
    check_ppf_dtype(distfn, arg)

    if distname not in fails_cmplx:
        check_cmplx_deriv(distfn, arg)

    if distname != 'truncnorm':
        check_ppf_private(distfn, arg, distname)

    for method in ["MLE", "MM"]:
        if distname not in skip_fit_test[method]:
            check_fit_args(distfn, arg, rvs[:n_fit_samples], method)

        if distname not in skip_fit_fix_test[method]:
            check_fit_args_fix(distfn, arg, rvs[:n_fit_samples], method)
246 |
+
|
247 |
+
|
@pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
def test_rvs_scalar(distname, arg):
    """rvs must return a scalar when given scalar arguments (gh-12428)."""
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        # `distname` is already a frozen rv_histogram instance.
        distfn = distname
        distname = 'rv_histogram_instance'

    # size omitted, size=() and size=None must all produce a scalar.
    for size_kwargs in ({}, {'size': ()}, {'size': None}):
        assert np.isscalar(distfn.rvs(*arg, **size_kwargs))
260 |
+
|
261 |
+
|
def test_levy_stable_random_state_property():
    """Apply check_random_state_property to levy_stable alone.

    levy_stable only implements rvs(), so it is skipped in the main loop
    in test_cont_basic(); run just this one check for it here.
    """
    check_random_state_property(stats.levy_stable, (0.5, 0.1))
267 |
+
|
268 |
+
|
def cases_test_moments():
    """Yield parameter tuples for test_moments.

    Each case is (distname, arg, normalization_ok, higher_ok, moment_ok,
    is_xfailing). Distributions with known issues are emitted twice: once
    with the broken parts skipped, once fully enabled but marked xfail.
    """
    fail_normalization = set()
    fail_higher = {'ncf'}
    fail_moment = {'johnsonsu'}  # generic `munp` is inaccurate for johnsonsu

    for distname, arg in distcont[:] + histogram_test_instances:
        if distname == 'levy_stable':
            continue

        if distname in distxslow_test_moments:
            yield pytest.param(distname, arg, True, True, True, True,
                               marks=pytest.mark.xslow(reason="too slow"))
            continue

        cond1 = distname not in fail_normalization
        cond2 = distname not in fail_higher
        cond3 = distname not in fail_moment

        marks = list()
        # Currently unused, `marks` can be used to add a timeout to a test of
        # a specific distribution. For example, this shows how a timeout could
        # be added for the 'skewnorm' distribution:
        #
        # marks = list()
        # if distname == 'skewnorm':
        #     marks.append(pytest.mark.timeout(300))

        yield pytest.param(distname, arg, cond1, cond2, cond3,
                           False, marks=marks)

        if not cond1 or not cond2 or not cond3:
            # Run the distributions that have issues twice, once skipping the
            # not_ok parts, once with the not_ok parts but marked as knownfail
            yield pytest.param(distname, arg, True, True, True, True,
                               marks=[pytest.mark.xfail] + marks)
304 |
+
|
305 |
+
|
@pytest.mark.slow
@pytest.mark.parametrize('distname,arg,normalization_ok,higher_ok,moment_ok,'
                         'is_xfailing',
                         cases_test_moments())
def test_moments(distname, arg, normalization_ok, higher_ok, moment_ok,
                 is_xfailing):
    """Check moments (mean/var/skew/kurt/munp) of one distribution.

    The boolean flags selectively disable checks that are known to fail
    for this distribution; is_xfailing cases additionally silence all
    integration warnings.
    """
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        # `distname` is already a frozen rv_histogram instance.
        distfn = distname
        distname = 'rv_histogram_instance'

    with npt.suppress_warnings() as sup:
        sup.filter(IntegrationWarning,
                   "The integral is probably divergent, or slowly convergent.")
        sup.filter(IntegrationWarning,
                   "The maximum number of subdivisions.")
        sup.filter(IntegrationWarning,
                   "The algorithm does not converge.")

        if is_xfailing:
            sup.filter(IntegrationWarning)

        m, v, s, k = distfn.stats(*arg, moments='mvsk')

        with np.errstate(all="ignore"):
            if normalization_ok:
                check_normalization(distfn, arg, distname)

            if higher_ok:
                check_mean_expect(distfn, arg, m, distname)
                check_skew_expect(distfn, arg, m, v, s, distname)
                check_var_expect(distfn, arg, m, v, distname)
                check_kurt_expect(distfn, arg, m, v, k, distname)
                check_munp_expect(distfn, arg, distname)

        check_loc_scale(distfn, arg, m, v, distname)

        if moment_ok:
            check_moment(distfn, arg, m, v, distname)
346 |
+
|
347 |
+
|
@pytest.mark.parametrize('dist,shape_args', distcont)
def test_rvs_broadcast(dist, shape_args):
    """Check that rvs() broadcasts loc, scale and shape parameters."""
    if dist in ['gausshyper', 'studentized_range']:
        pytest.skip("too slow")

    if dist in ['rel_breitwigner'] and _IS_32BIT:
        # gh18414
        pytest.skip("fails on Linux 32-bit")

    # If shape_only is True, it means the _rvs method of the
    # distribution uses more than one random number to generate a random
    # variate. That means the result of using rvs with broadcasting or
    # with a nontrivial size will not necessarily be the same as using the
    # numpy.vectorize'd version of rvs(), so we can only compare the shapes
    # of the results, not the values.
    # Whether or not a distribution is in the following list is an
    # implementation detail of the distribution, not a requirement. If
    # the implementation the rvs() method of a distribution changes, this
    # test might also have to be changed.
    shape_only = dist in ['argus', 'betaprime', 'dgamma', 'dweibull',
                          'exponnorm', 'genhyperbolic', 'geninvgauss',
                          'levy_stable', 'nct', 'norminvgauss', 'rice',
                          'skewnorm', 'semicircular', 'gennorm', 'loggamma']

    distfunc = getattr(stats, dist)
    loc = np.zeros(2)
    scale = np.ones((3, 1))
    nargs = distfunc.numargs
    allargs = []
    bshape = [3, 2]
    # Generate shape parameter arguments: each one gets a distinct number
    # of leading singleton axes so all of them broadcast together.
    for k in range(nargs):
        shp = (k + 4,) + (1,)*(k + 2)
        allargs.append(shape_args[k]*np.ones(shp))
        bshape.insert(0, k + 4)
    allargs.extend([loc, scale])
    # bshape holds the expected shape when loc, scale, and the shape
    # parameters are all broadcast together.

    check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, 'd')
388 |
+
|
389 |
+
|
# Expected values of the SF, CDF, PDF were computed using
# mpmath with mpmath.mp.dps = 50 and output at 20:
#
#   def ks(x, n):
#       x = mpmath.mpf(x)
#       logp = -mpmath.power(6.0*n*x+1.0, 2)/18.0/n
#       sf, cdf = mpmath.exp(logp), -mpmath.expm1(logp)
#       pdf = (6.0*n*x+1.0) * 2 * sf/3
#       print(mpmath.nstr(sf, 20), mpmath.nstr(cdf, 20), mpmath.nstr(pdf, 20))
#
# Tests use 1/n < x < 1-1/n and n > 1e6 to use the asymptotic computation.
# Larger x has a smaller sf.
@pytest.mark.parametrize('x,n,sf,cdf,pdf,rtol',
                         [(2.0e-5, 1000000000,
                           0.44932297307934442379, 0.55067702692065557621,
                           35946.137394996276407, 5e-15),
                          (2.0e-9, 1000000000,
                           0.99999999061111115519, 9.3888888448132728224e-9,
                           8.6666665852962971765, 5e-14),
                          (5.0e-4, 1000000000,
                           7.1222019433090374624e-218, 1.0,
                           1.4244408634752704094e-211, 5e-14)])
def test_gh17775_regression(x, n, sf, cdf, pdf, rtol):
    """Regression test for gh-17775 (ksone asymptotics with huge n).

    KS one asymptotic sf ~ e^(-(6nx+1)^2 / 18n). Given a large 32-bit
    integer n, 6n overflowed in the C implementation before the fix;
    e.g. ksone.sf(2.0e-5, 1000000000) returned 0.9374359693473666 in
    scipy 1.9.3 and earlier.
    """
    ks = stats.ksone
    computed = np.array([ks.sf(x, n), ks.cdf(x, n), ks.pdf(x, n)])
    npt.assert_allclose(computed, np.array([sf, cdf, pdf]), rtol=rtol)
    # The sf+cdf must sum to 1.0.
    npt.assert_equal(computed[0] + computed[1], 1.0)
    # Check inverting the (potentially very small) sf (uses a lower tolerance)
    npt.assert_allclose([ks.isf(sf, n)], [x], rtol=1e-8)
428 |
+
|
429 |
+
|
def test_rvs_gh2069_regression():
    """Regression test for gh-2069 (broadcast loc/scale in rvs).

    In scipy 0.17 and earlier, broadcasting loc/scale arrays produced the
    SAME variate repeated, e.g.:
        >>> norm.rvs(loc=np.zeros(5), scale=np.ones(5))
        array([-2.496..., -2.496..., -2.496..., -2.496..., -2.496...])
    """
    rng = np.random.RandomState(123)
    msg = "All the values are equal, but they shouldn't be!"

    # Each broadcast combination must yield distinct values.
    for kwargs in ({'loc': np.zeros(5), 'scale': 1},
                   {'loc': 0, 'scale': np.ones(5)},
                   {'loc': np.zeros(5), 'scale': np.ones(5)},
                   {'loc': np.array([[0], [0]]), 'scale': np.ones(5)}):
        sample = stats.norm.rvs(random_state=rng, **kwargs)
        npt.assert_(np.all(np.diff(sample.ravel()) != 0), msg)

    # Incompatible size/shape combinations must raise.
    assert_raises(ValueError, stats.norm.rvs, [[0, 0], [0, 0]],
                  [[1, 1], [1, 1]], 1)
    assert_raises(ValueError, stats.gamma.rvs, [2, 3, 4, 5], 0, 1, (2, 2))
    assert_raises(ValueError, stats.gamma.rvs, [1, 1, 1, 1], [0, 0, 0, 0],
                  [[1], [2]], (4,))
457 |
+
|
458 |
+
|
459 |
+
def test_nomodify_gh9900_regression():
    # Regression test for gh-9990.  Before that fix, stats.truncnorm.cdf()
    # left state behind on the shared stats.truncnorm instance, which later
    # _cdf() calls silently picked up — a hazard for multi-threaded code.
    # cdf() must not modify the global instance.
    tn = stats.truncnorm
    right_half = 0.6826894921370859   # truncnorm cdf(1) on [0, inf)
    left_half = 0.31731050786291415   # truncnorm cdf(-1) on (-inf, 0]

    # Right-half truncated normal: cdf and _cdf agree.
    npt.assert_almost_equal(tn.cdf(1, 0, np.inf), right_half)
    npt.assert_almost_equal(tn._cdf([1], [0], [np.inf]), right_half)

    # Left-half truncated normal: cdf and _cdf agree.
    npt.assert_almost_equal(tn.cdf(-1, -np.inf, 0), left_half)
    npt.assert_almost_equal(tn._cdf([-1], [-np.inf], [0]), left_half)

    # The right-half results were not perturbed by the left-half calls.
    npt.assert_almost_equal(tn._cdf([1], [0], [np.inf]),
                            right_half)  # not 1.6826894921370859
    npt.assert_almost_equal(tn.cdf(1, 0, np.inf), right_half)

    # And the left-half results were not perturbed either.
    npt.assert_almost_equal(tn._cdf([-1], [-np.inf], [0]),
                            left_half)  # not -0.6826894921370859
    npt.assert_almost_equal(tn.cdf(1, -np.inf, 0),
                            1)  # not 1.6826894921370859
    npt.assert_almost_equal(tn.cdf(-1, -np.inf, 0),
                            left_half)  # not -0.6826894921370859
|
493 |
+
|
494 |
+
|
495 |
+
def test_broadcast_gh9990_regression():
    # Regression test for gh-9990.  x = 7 lies within the support of only
    # 4 of the 6 distributions below.  Before gh-9990, an array cached by
    # stats.reciprocal._argcheck() kept 6 elements while the array handed
    # to stats.reciprocal._cdf had 4, producing a broadcast error.
    a = np.array([1, 2, 3, 4, 5, 6])
    b = np.array([8, 16, 1, 32, 1, 48])
    # Check a scalar x inside some supports, a scalar x at a boundary, and
    # the two array boundaries themselves.
    for x in (7, 1, a, b):
        xs = np.broadcast_to(x, a.shape)
        expected = [stats.reciprocal.cdf(_x, _a, _b)
                    for _x, _a, _b in zip(xs, a, b)]
        npt.assert_array_almost_equal(stats.reciprocal.cdf(x, a, b), expected)
|
515 |
+
|
516 |
+
|
517 |
+
def test_broadcast_gh7933_regression():
    # logpdf must broadcast an array shape parameter `a` against array x
    # and loc without raising.
    locs = np.array([6.0, 5.0, 4.0])
    stats.truncnorm.logpdf(
        np.array([3.0, 2.0, 1.0]),
        a=(1.5 - locs) / 3.0,
        b=np.inf,
        loc=locs,
        scale=3.0,
    )
|
526 |
+
|
527 |
+
|
528 |
+
def test_gh2002_regression():
    # Broadcast must work when only some x-values are compatible with the
    # shape argument aligned with them.
    x = np.r_[-2:2:101j]
    a = np.r_[-np.ones(50), np.ones(51)]
    expected = [stats.truncnorm.pdf(xi, ai, np.inf) for xi, ai in zip(x, a)]
    result = stats.truncnorm.pdf(x, a, np.inf)
    npt.assert_array_almost_equal(result, expected)
|
536 |
+
|
537 |
+
|
538 |
+
def test_gh1320_regression():
    # The first example from gh-1320 now works (no exception is enough).
    c = 2.62
    stats.genextreme.ppf(0.5, np.array([[c], [c + 0.5]]))
    # The remaining gh-1320 examples appear to have stopped working some
    # time ago and are kept only for reference:
    # ans = stats.genextreme.moment(2, np.array([c, c + 0.5]))
    # expected = np.array([25.50105963, 115.11191437])
    # stats.genextreme.moment(5, np.array([[c], [c + 0.5]]))
    # stats.genextreme.moment(5, np.array([c, c + 0.5]))
|
548 |
+
|
549 |
+
|
550 |
+
def test_method_of_moments():
    # Example from https://en.wikipedia.org/wiki/Method_of_moments_(statistics)
    np.random.seed(1234)
    x = [0, 0, 0, 0, 1]
    half_width = 2 * np.sqrt(3) / 5
    lower = 1 / 5 - half_width
    upper = 1 / 5 + half_width
    # uniform.fit is overridden, so call rv_continuous.fit through super()
    # to force the generic method-of-moments path.
    loc, scale = super(type(stats.uniform), stats.uniform).fit(x, method="MM")
    npt.assert_almost_equal(loc, lower, decimal=4)
    npt.assert_almost_equal(loc + scale, upper, decimal=4)
|
560 |
+
|
561 |
+
|
562 |
+
def check_sample_meanvar_(popmean, popvar, sample):
    # Compare the sample against whichever population moments are finite;
    # infinite moments cannot be checked and are skipped.
    for moment, checker in ((popmean, check_sample_mean),
                            (popvar, check_sample_var)):
        if np.isfinite(moment):
            checker(sample, moment)
|
567 |
+
|
568 |
+
|
569 |
+
def check_sample_mean(sample, popmean):
    # A one-sample t-test should not reject popmean as the sample's mean.
    pvalue = stats.ttest_1samp(sample, popmean).pvalue
    assert pvalue > 0.01
|
573 |
+
|
574 |
+
|
575 |
+
def check_sample_var(sample, popvar):
    # The population variance should lie inside a bootstrap confidence
    # interval for the sample variance.  (This replaced a chi-squared
    # variance test that produced too many false positives.)
    result = stats.bootstrap(
        (sample,),
        lambda x, axis: x.var(ddof=1, axis=axis),
        confidence_level=0.995,
    )
    ci = result.confidence_interval
    assert ci.low <= popvar <= ci.high
|
587 |
+
|
588 |
+
|
589 |
+
def check_cdf_ppf(distfn, arg, msg):
    # cdf(ppf(q)) must round-trip to q at representative quantiles.
    quantiles = [0.001, 0.5, 0.999]
    roundtrip = distfn.cdf(distfn.ppf(quantiles, *arg), *arg)
    npt.assert_almost_equal(roundtrip, quantiles, decimal=DECIMAL,
                            err_msg=msg + ' - cdf-ppf roundtrip')
|
594 |
+
|
595 |
+
|
596 |
+
def check_sf_isf(distfn, arg, msg):
    # sf(isf(q)) must round-trip to q at representative tail probabilities.
    probs = [0.1, 0.5, 0.9]
    roundtrip = distfn.sf(distfn.isf(probs, *arg), *arg)
    npt.assert_almost_equal(roundtrip, probs, decimal=DECIMAL,
                            err_msg=msg + ' - sf-isf roundtrip')
|
600 |
+
|
601 |
+
|
602 |
+
def check_cdf_sf(distfn, arg, msg):
    # cdf(x) and 1 - sf(x) must agree.
    x = [0.1, 0.9]
    npt.assert_almost_equal(distfn.cdf(x, *arg),
                            1.0 - distfn.sf(x, *arg),
                            decimal=DECIMAL,
                            err_msg=msg + ' - cdf-sf relationship')
|
607 |
+
|
608 |
+
|
609 |
+
def check_ppf_isf(distfn, arg, msg):
    # isf(p) and ppf(1 - p) must agree.
    p = np.array([0.1, 0.9])
    npt.assert_almost_equal(distfn.isf(p, *arg),
                            distfn.ppf(1 - p, *arg),
                            decimal=DECIMAL,
                            err_msg=msg + ' - ppf-isf relationship')
|
614 |
+
|
615 |
+
|
616 |
+
def check_pdf(distfn, arg, msg):
    # The pdf near the median must match a central difference of the cdf.
    eps = 1e-6
    point = distfn.ppf(0.5, *arg)
    pdfv = distfn.pdf(point, *arg)
    if (pdfv < 1e-4) or (pdfv > 1e4):
        # Step away from a near-zero pdf or a singularity before comparing.
        point = point + 0.1
        pdfv = distfn.pdf(point, *arg)
    slope = (distfn.cdf(point + eps, *arg)
             - distfn.cdf(point - eps, *arg)) / eps / 2.0
    # A better difference scheme / more points would be possible, but in
    # practice this works pretty well.
    npt.assert_almost_equal(pdfv, slope, decimal=DECIMAL,
                            err_msg=msg + ' - cdf-pdf relationship')
|
632 |
+
|
633 |
+
|
634 |
+
def check_pdf_logpdf(distfn, args, msg):
    # log(pdf) must agree with logpdf at several interior quantiles.
    quantiles = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
    x = distfn.ppf(quantiles, *args)
    x = x[np.isfinite(x)]
    pdf = distfn.pdf(x, *args)
    logpdf = distfn.logpdf(x, *args)
    # Drop points where log(pdf) would be undefined or non-finite.
    pdf = pdf[(pdf != 0) & np.isfinite(pdf)]
    logpdf = logpdf[np.isfinite(logpdf)]
    npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7,
                            err_msg=msg + " - logpdf-log(pdf) relationship")
|
645 |
+
|
646 |
+
|
647 |
+
def check_pdf_logpdf_at_endpoints(distfn, args, msg):
    # log(pdf) must agree with logpdf at the (finite) support endpoints.
    endpoints = np.array([0, 1])
    x = distfn.ppf(endpoints, *args)
    x = x[np.isfinite(x)]
    pdf = distfn.pdf(x, *args)
    logpdf = distfn.logpdf(x, *args)
    # Drop points where log(pdf) would be undefined or non-finite.
    pdf = pdf[(pdf != 0) & np.isfinite(pdf)]
    logpdf = logpdf[np.isfinite(logpdf)]
    npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7,
                            err_msg=msg + " - logpdf-log(pdf) relationship")
|
658 |
+
|
659 |
+
|
660 |
+
def check_sf_logsf(distfn, args, msg):
    # log(sf) must agree with logsf at several quantiles (incl. endpoints).
    quantiles = np.array([0.0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])
    x = distfn.ppf(quantiles, *args)
    x = x[np.isfinite(x)]
    sf = distfn.sf(x, *args)
    logsf = distfn.logsf(x, *args)
    # Drop zeros (log undefined) and non-finite logsf values.
    sf = sf[sf != 0]
    logsf = logsf[np.isfinite(logsf)]
    npt.assert_almost_equal(np.log(sf), logsf, decimal=7,
                            err_msg=msg + " - logsf-log(sf) relationship")
|
671 |
+
|
672 |
+
|
673 |
+
def check_cdf_logcdf(distfn, args, msg):
    # log(cdf) must agree with logcdf at several quantiles (incl. endpoints).
    quantiles = np.array([0, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 1.0])
    x = distfn.ppf(quantiles, *args)
    x = x[np.isfinite(x)]
    cdf = distfn.cdf(x, *args)
    logcdf = distfn.logcdf(x, *args)
    # Drop zeros (log undefined) and non-finite logcdf values.
    cdf = cdf[cdf != 0]
    logcdf = logcdf[np.isfinite(logcdf)]
    npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7,
                            err_msg=msg + " - logcdf-log(cdf) relationship")
|
684 |
+
|
685 |
+
|
686 |
+
def check_ppf_broadcast(distfn, arg, msg):
    # ppf with each shape argument replicated `num_repeats` times must give
    # `num_repeats` copies of the scalar-argument result.
    num_repeats = 5
    # The original code read ``args = [] * num_repeats``, which is just an
    # empty list (multiplying [] is a no-op); state the intent directly.
    if arg:
        args = [np.array([_] * num_repeats) for _ in arg]
    else:
        args = []

    median = distfn.ppf(0.5, *arg)
    medians = distfn.ppf(0.5, *args)
    npt.assert_almost_equal(medians, [median] * num_repeats, decimal=7,
                            err_msg=msg + " - ppf multiple")
|
697 |
+
|
698 |
+
|
699 |
+
def check_distribution_rvs(dist, args, alpha, rvs):
    # dist: a cdf function or the name of a distribution in scipy.stats.
    # args: shape arguments for scipy.stats.dist(*args).
    # alpha: significance level, ~0.01.
    # rvs: array_like sample of random variates.
    # Adapted from scipy.stats.tests; this version reuses an existing sample.
    D, pval = stats.kstest(rvs, dist, args=args, N=1000)
    if pval < alpha:
        # The supplied sample failed the K-S test.  That can legitimately
        # happen (with probability ~alpha), so retest once, letting kstest
        # draw a fresh sample of 1000 variates from the distribution.
        D, pval = stats.kstest(dist, dist, args=args, N=1000)
    npt.assert_(pval > alpha,
                f"D = {D}; pval = {pval}; alpha = {alpha}\nargs = {args}")
|
716 |
+
|
717 |
+
|
718 |
+
def check_vecentropy(distfn, args):
    # The vectorized entropy wrapper must agree with the private _entropy.
    vectorized = distfn.vecentropy(*args)
    private = distfn._entropy(*args)
    npt.assert_equal(vectorized, private)
|
720 |
+
|
721 |
+
|
722 |
+
def check_loc_scale(distfn, arg, m, v, msg):
    # Use array-valued loc and scale to catch bugs like gh-13580, where
    # loc/scale arrays broadcast improperly against shape arguments.
    loc = np.array([10.0, 20.0])
    scale = np.array([10.0, 20.0])
    mt, vt = distfn.stats(*arg, loc=loc, scale=scale)
    # The mean shifts/scales affinely; the variance scales quadratically.
    npt.assert_allclose(m * scale + loc, mt)
    npt.assert_allclose(v * scale * scale, vt)
|
729 |
+
|
730 |
+
|
731 |
+
def check_ppf_private(distfn, arg, msg):
    # _ppf must not produce NaNs.  (Fails by design for truncnorm, where
    # self.nb is not defined.)
    q = np.array([0.1, 0.5, 0.9])
    ppfs = distfn._ppf(q, *arg)
    npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan')
|
735 |
+
|
736 |
+
|
737 |
+
def check_retrieving_support(distfn, args):
    # support(loc, scale) must be the affine transform of support().
    loc, scale = 1, 2
    plain = np.array(distfn.support(*args))
    shifted = np.array(distfn.support(*args, loc=loc, scale=scale))
    npt.assert_almost_equal(plain * scale + loc, shifted)
|
743 |
+
|
744 |
+
|
745 |
+
def check_fit_args(distfn, arg, rvs, method):
    # fit() must return loc, scale plus one value per shape parameter,
    # whether the default optimizer or 'powell' is used.  Accuracy is
    # covered in test_fit.py; only the result lengths are checked here.
    expected_len = 2 + len(arg)
    with np.errstate(all='ignore'), npt.suppress_warnings() as sup:
        sup.filter(category=RuntimeWarning,
                   message="The shape parameter of the erlang")
        sup.filter(category=RuntimeWarning,
                   message="floating point number truncated")
        vals = distfn.fit(rvs, method=method)
        vals2 = distfn.fit(rvs, optimizer='powell', method=method)
        npt.assert_(len(vals) == expected_len)
        npt.assert_(len(vals2) == expected_len)
|
756 |
+
|
757 |
+
|
758 |
+
def check_fit_args_fix(distfn, arg, rvs, method):
    # Fixed parameters (floc, fscale, f0/f1/f2) must be honored exactly,
    # and every fit result must have length len(arg) + 2.
    expected_len = 2 + len(arg)
    with np.errstate(all='ignore'), npt.suppress_warnings() as sup:
        sup.filter(category=RuntimeWarning,
                   message="The shape parameter of the erlang")

        vals = distfn.fit(rvs, floc=0, method=method)
        vals2 = distfn.fit(rvs, fscale=1, method=method)
        npt.assert_(len(vals) == expected_len)
        npt.assert_(vals[-2] == 0)       # loc was fixed at 0
        npt.assert_(vals2[-1] == 1)      # scale was fixed at 1
        npt.assert_(len(vals2) == expected_len)
        # Fix each shape parameter in turn, where present.
        for idx, keyword in enumerate(('f0', 'f1', 'f2')):
            if len(arg) > idx:
                fitted = distfn.fit(rvs, method=method,
                                    **{keyword: arg[idx]})
                npt.assert_(len(fitted) == expected_len)
                npt.assert_(fitted[idx] == arg[idx])
|
781 |
+
|
782 |
+
|
783 |
+
@pytest.mark.parametrize('method', ['pdf', 'logpdf', 'cdf', 'logcdf',
                                    'sf', 'logsf', 'ppf', 'isf'])
@pytest.mark.parametrize('distname, args', distcont)
def test_methods_with_lists(method, distname, args):
    # The continuous distributions must accept plain Python lists for x,
    # shape, loc and scale arguments.
    dist = getattr(stats, distname)
    f = getattr(dist, method)
    # invweibull's log-methods underflow at small x; use larger abscissae.
    if distname == 'invweibull' and method.startswith('log'):
        x = [1.5, 2]
    else:
        x = [0.1, 0.2]

    shape2 = [[a] * 2 for a in args]
    loc = [0, 0.1]
    scale = [1, 1.01]
    result = f(x, *shape2, loc=loc, scale=scale)
    expected = [f(*v) for v in zip(x, *shape2, loc, scale)]
    npt.assert_allclose(result, expected, rtol=1e-14, atol=5e-14)
|
803 |
+
|
804 |
+
|
805 |
+
def test_burr_fisk_moment_gh13234_regression():
    # moment() with scalar shape arguments must return a Python float.
    for dist, shapes in ((stats.burr, (5, 4)), (stats.fisk, (8,))):
        val = dist.moment(1, *shapes)
        assert isinstance(val, float)
|
811 |
+
|
812 |
+
|
813 |
+
def test_moments_with_array_gh12192_regression():
    # moment() with array/scalar mixes of loc and scale, including invalid
    # (nonpositive) scales that must produce nan without raising.

    # array loc, scalar scale
    npt.assert_equal(
        stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=1),
        np.array([1., 2., 3.]))

    # array loc, invalid scalar scale -> all nan
    npt.assert_equal(
        stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=-1),
        np.array([np.nan, np.nan, np.nan]))

    # array loc, array scale with invalid entries
    npt.assert_equal(
        stats.norm.moment(order=1, loc=np.array([1, 2, 3]),
                          scale=[-3, 1, 0]),
        np.array([np.nan, 2., np.nan]))

    # (loc == 0) & (scale < 0): scalar in, scalar (float) nan out
    res = stats.norm.moment(order=2, loc=0, scale=-4)
    npt.assert_equal(res, np.nan)
    assert isinstance(res, float)

    # array loc containing 0, scale with invalid entries
    npt.assert_equal(
        stats.norm.moment(order=2, loc=[1, 0, 2], scale=[3, -4, -5]),
        np.array([10., np.nan, np.nan]))

    # all(loc == 0), array scale with one invalid entry
    npt.assert_equal(
        stats.norm.moment(order=2, loc=[0, 0, 0], scale=[5., -2, 100.]),
        np.array([25., np.nan, 10000.]))

    # all((loc == 0) & (scale < 0))
    npt.assert_equal(
        stats.norm.moment(order=2, loc=[0, 0, 0], scale=[-5., -2, -100.]),
        np.array([np.nan, np.nan, np.nan]))

    # scalar args, loc, and scale (scale == 0 -> nan), scalar out
    res = stats.chi.moment(order=2, df=1, loc=0, scale=0)
    npt.assert_equal(res, np.nan)
    assert isinstance(res, float)

    # array args, scalar loc and scale
    npt.assert_equal(
        stats.chi.moment(order=2, df=[1, 2, 3], loc=0, scale=0),
        np.array([np.nan, np.nan, np.nan]))

    # array args, array loc, and array scale
    npt.assert_allclose(
        stats.chi.moment(order=2, df=[1, 2, 3], loc=[1., 0., 2.],
                         scale=[1., -3., 0.]),
        np.array([3.59576912, np.nan, np.nan]), rtol=1e-8)

    # order > 4 with nonzero loc and scale everywhere
    npt.assert_allclose(stats.norm.moment(5, [1., 2.], [1., 2.]),
                        np.array([26., 832.]), rtol=1e-13)

    # Broadcasting of a, b, loc and scale against each other.
    a = np.array([-1.1, 0, 1, 2.2, np.pi]).reshape((-1, 1, 1, 1))
    b = np.array([-1.1, 0, 1, 2.2, np.pi]).reshape((-1, 1, 1))
    loc = np.array([-1.1, 0, np.sqrt(2)]).reshape((-1, 1))
    scale = np.array([-2.1, 0, 1, 2.2, np.pi])

    broadcast_result = stats.beta.moment(order=2, a=a, b=b, loc=loc,
                                         scale=scale)

    a, b, loc, scale = np.broadcast_arrays(a, b, loc, scale)

    for idx, _ in np.ndenumerate(a):
        with np.errstate(invalid='ignore', divide='ignore'):
            # Each broadcast element must match the scalar-argument call.
            expected = stats.beta.moment(order=2, a=a[idx], b=b[idx],
                                         loc=loc[idx], scale=scale[idx])
            np.testing.assert_equal(broadcast_result[idx], expected)
|
895 |
+
|
896 |
+
|
897 |
+
def test_broadcasting_in_moments_gh12192_regression():
    # Result shapes of moment() must follow numpy broadcasting rules.
    res = stats.norm.moment(order=1, loc=np.array([1, 2, 3]), scale=[[1]])
    expected = np.array([[1., 2., 3.]])
    npt.assert_equal(res, expected)
    assert res.shape == expected.shape

    res = stats.norm.moment(order=1, loc=np.array([[1], [2], [3]]),
                            scale=[1, 2, 3])
    expected = np.array([[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]])
    npt.assert_equal(res, expected)
    assert res.shape == expected.shape

    res = stats.chi.moment(order=1, df=[1., 2., 3.], loc=0., scale=1.)
    expected = np.array([0.79788456, 1.25331414, 1.59576912])
    npt.assert_allclose(res, expected, rtol=1e-8)
    assert res.shape == expected.shape

    res = stats.chi.moment(order=1, df=[[1.], [2.], [3.]],
                           loc=[0., 1., 2.], scale=[-1., 0., 3.])
    expected = np.array([[np.nan, np.nan, 4.39365368],
                         [np.nan, np.nan, 5.75994241],
                         [np.nan, np.nan, 6.78730736]])
    npt.assert_allclose(res, expected, rtol=1e-8)
    assert res.shape == expected.shape
|
921 |
+
|
922 |
+
|
923 |
+
def test_kappa3_array_gh13582():
    # https://github.com/scipy/scipy/pull/15140#issuecomment-994958241
    shapes = [0.5, 1.5, 2.5, 3.5, 4.5]
    moments = 'mvsk'
    # Element-by-element stats must equal the vectorized call.
    elementwise = np.array([[stats.kappa3.stats(shape, moments=moment)
                             for shape in shapes] for moment in moments])
    vectorized = np.array(stats.kappa3.stats(shapes, moments=moments))
    npt.assert_allclose(elementwise, vectorized)
|
931 |
+
|
932 |
+
|
933 |
+
@pytest.mark.xslow
def test_kappa4_array_gh13582():
    moments = 'mvsk'

    def compare(h, k):
        # Element-by-element stats must equal the vectorized call.
        elementwise = np.array([[stats.kappa4.stats(h[i], k[i],
                                                    moments=moment)
                                 for i in range(len(h))]
                                for moment in moments])
        vectorized = np.array(stats.kappa4.stats(h, k, moments=moments))
        npt.assert_allclose(elementwise, vectorized)

    compare(np.array([-0.5, 2.5, 3.5, 4.5, -3]),
            np.array([-0.5, 1, -1.5, 0, 3.5]))

    # https://github.com/scipy/scipy/pull/15250#discussion_r775112913
    compare(np.array([-1, -1/4, -1/4, 1, -1, 0]),
            np.array([1, 1, 1/2, -1/3, -1, 0]))

    # https://github.com/scipy/scipy/pull/15250#discussion_r775115021
    h = np.array([-1, -0.5, 1])
    k = np.array([-1, -0.5, 0, 1])[:, None]
    res2 = np.array(stats.kappa4.stats(h, k, moments=moments))
    assert res2.shape == (4, 4, 3)
|
956 |
+
|
957 |
+
|
958 |
+
def test_frozen_attributes():
    # gh-14827 reported that all frozen distributions had both pmf and pdf
    # attributes; continuous should have pdf and discrete should have pmf.
    message = "'rv_continuous_frozen' object has no attribute"
    with pytest.raises(AttributeError, match=message):
        stats.norm().pmf
    with pytest.raises(AttributeError, match=message):
        stats.norm().logpmf
    # An attribute added to the distribution itself is passed through to
    # the frozen instance.  Clean the monkeypatch up in a finally block so
    # a failing assertion cannot leak `stats.norm.pmf` into other tests.
    stats.norm.pmf = "herring"
    try:
        frozen_norm = stats.norm()
        assert isinstance(frozen_norm, rv_continuous_frozen)
    finally:
        delattr(stats.norm, 'pmf')
|
970 |
+
|
971 |
+
|
972 |
+
def test_skewnorm_pdf_gh16038():
    # skewnorm with a == 0 must reduce exactly to the standard normal pdf,
    # both for scalar input and inside broadcast arrays.
    rng = np.random.default_rng(0)
    npt.assert_equal(stats.skewnorm.pdf(-np.inf, 0), stats.norm.pdf(-np.inf))

    x = rng.random(size=(3, 3))
    a = rng.random(size=(3, 3))
    mask = rng.random(size=(3, 3)) < 0.5
    a[mask] = 0
    x_norm = x[mask]
    res = stats.skewnorm.pdf(x, a)
    npt.assert_equal(res[mask], stats.norm.pdf(x_norm))
    npt.assert_equal(res[~mask], stats.skewnorm.pdf(x[~mask], a[~mask]))
|
983 |
+
|
984 |
+
|
985 |
+
# For scalar input, these rv_continuous methods should return scalar
# output (`scalar_out`) or a pair of scalars (`scalars_out`).
scalar_out = [[name, args] for name, args in (
    ('rvs', []), ('pdf', [0]), ('logpdf', [0]), ('cdf', [0]),
    ('logcdf', [0]), ('sf', [0]), ('logsf', [0]), ('ppf', [0]),
    ('isf', [0]), ('moment', [1]), ('entropy', []), ('expect', []),
    ('median', []), ('mean', []), ('std', []), ('var', []),
)]
scalars_out = [['interval', [0.95]], ['support', []], ['stats', ['mv']]]
|
991 |
+
|
992 |
+
|
993 |
+
@pytest.mark.parametrize('case', scalar_out + scalars_out)
def test_scalar_for_scalar(case):
    # Some rv_continuous methods used to return 0-d arrays instead of
    # NumPy scalars for scalar input; guard against that regression.
    method_name, args = case
    res = getattr(stats.norm(), method_name)(*args)
    if case in scalar_out:
        assert isinstance(res, np.number)
    else:
        # Two-component results: each element must itself be a scalar.
        assert isinstance(res[0], np.number)
        assert isinstance(res[1], np.number)
|
1005 |
+
|
1006 |
+
|
1007 |
+
def test_scalar_for_scalar2():
    # Methods that are not attributes of frozen distributions must also
    # return NumPy scalars, not 0-d arrays.
    for res in (stats.norm.fit([1, 2, 3]),
                stats.norm.fit_loc_scale([1, 2, 3])):
        assert isinstance(res[0], np.number)
        assert isinstance(res[1], np.number)
    nnlf_val = stats.norm.nnlf((0, 1), [1, 2, 3])
    assert isinstance(nnlf_val, np.number)
|
.venv/Lib/site-packages/scipy/stats/tests/test_continuous_fit_censored.py
ADDED
@@ -0,0 +1,683 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Tests for fitting specific distributions to censored data.
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
from numpy.testing import assert_allclose
|
5 |
+
|
6 |
+
from scipy.optimize import fmin
|
7 |
+
from scipy.stats import (CensoredData, beta, cauchy, chi2, expon, gamma,
|
8 |
+
gumbel_l, gumbel_r, invgauss, invweibull, laplace,
|
9 |
+
logistic, lognorm, nct, ncx2, norm, weibull_max,
|
10 |
+
weibull_min)
|
11 |
+
|
12 |
+
|
13 |
+
# In some tests, we'll use this optimizer for improved accuracy.
def optimizer(func, x0, args=(), disp=0):
    """Nelder-Mead minimizer with tight tolerances for accurate fits."""
    tight = dict(xtol=1e-12, ftol=1e-12)
    return fmin(func, x0, args=args, disp=disp, **tight)
|
16 |
+
|
17 |
+
|
18 |
+
def test_beta():
    """
    Fit beta shape parameters to interval-censored data.

    Reference values computed in R with fitdistrplus::
        data <- data.frame(left=c(0.10, 0.50, 0.75, 0.80),
                           right=c(0.20, 0.55, 0.90, 0.95))
        fitdistcens(data, 'beta', control=list(reltol=1e-14))
    giving estimates shape1 = 1.419941, shape2 = 1.027066
    (standard errors 0.9914177 and 0.6866565).
    """
    intervals = [[0.10, 0.20],
                 [0.50, 0.55],
                 [0.75, 0.90],
                 [0.80, 0.95]]
    data = CensoredData(interval=intervals)

    # Fit only the shape parameters; loc and scale are held fixed.
    a, b, loc, scale = beta.fit(data, floc=0, fscale=1, optimizer=optimizer)

    assert_allclose(a, 1.419941, rtol=5e-6)
    assert_allclose(b, 1.027066, rtol=5e-6)
    assert loc == 0
    assert scale == 1
|
51 |
+
|
52 |
+
|
53 |
+
def test_cauchy_right_censored():
    """
    Fit the Cauchy distribution to right-censored data.

    Reference values computed in R with fitdistrplus, using two
    uncensored values [1, 10] and one right-censored value [30]::
        data <- data.frame(left=c(1, 10, 30), right=c(1, 10, NA))
        fitdistcens(data, 'cauchy', control=list(reltol=1e-14))
    giving estimates location = 7.100001, scale = 7.455866.
    """
    data = CensoredData(uncensored=[1, 10], right=[30])
    loc, scale = cauchy.fit(data, optimizer=optimizer)
    # The expected loc previously read 7.10001, which dropped a digit from
    # the R reference estimate 7.100001; use the documented value.
    assert_allclose(loc, 7.100001, rtol=5e-6)
    assert_allclose(scale, 7.455866, rtol=5e-6)
|
75 |
+
|
76 |
+
|
77 |
+
def test_cauchy_mixed():
    """
    Fit the Cauchy distribution to data with mixed censoring.

    Reference values computed in R with fitdistrplus, using two
    uncensored values [1, 10], one left-censored [1], one right-censored
    [30] and one interval-censored [[4, 8]]::
        data <- data.frame(left=c(NA, 1, 4, 10, 30),
                           right=c(1, 1, 8, 10, NA))
        fitdistcens(data, 'cauchy', control=list(reltol=1e-14))
    giving estimates location = 4.605150, scale = 5.900852.
    """
    data = CensoredData(uncensored=[1, 10], left=[1], right=[30],
                        interval=[[4, 8]])
    loc, scale = cauchy.fit(data, optimizer=optimizer)
    assert_allclose([loc, scale], [4.605150, 5.900852], rtol=5e-6)
|
103 |
+
|
104 |
+
|
105 |
+
def test_chi2_mixed():
    """
    Fit just the shape parameter (df) of chi2 to data with mixed censoring.

    Reference values computed in R with fitdistrplus, using two
    uncensored values [1, 10], one left-censored [1], one right-censored
    [30] and one interval-censored [[4, 8]]::
        data <- data.frame(left=c(NA, 1, 4, 10, 30),
                           right=c(1, 1, 8, 10, NA))
        fitdistcens(data, 'chisq', control=list(reltol=1e-14))
    giving the estimate df = 5.060329.
    """
    data = CensoredData(uncensored=[1, 10], left=[1], right=[30],
                        interval=[[4, 8]])
    df, loc, scale = chi2.fit(data, floc=0, fscale=1, optimizer=optimizer)
    assert_allclose(df, 5.060329, rtol=5e-6)
    assert (loc, scale) == (0, 1)
|
131 |
+
|
132 |
+
|
133 |
+
def test_expon_right_censored():
    """
    Fit expon (loc fixed at 0) to data with right-censored values.

    With loc=0, fitting n uncensored points x[0]...x[n-1] plus m
    right-censored points x[n]...x[n+m-1] has the exact solution

        scale = sum(x)/n

    i.e. the sum of *all* values (uncensored and right-censored) divided
    by the number of uncensored values; see
    https://en.wikipedia.org/wiki/Censoring_(statistics)#Likelihood.
    The fitted scale is checked against this analytical result.  (For
    reference, R's fitdistrplus gives scale=19.85 with sd 6.277119 for
    the same data; those numbers are not used in the assertions.)
    """
    # 10 uncensored observations followed by 6 right-censored ones.
    values = [1, 2.5, 3, 6, 7.5, 10, 12, 12, 14.5, 15, 16, 16, 20, 20, 21, 22]
    flags = [False]*10 + [True]*6
    sample = CensoredData.right_censored(values, flags)

    loc, scale = expon.fit(sample, floc=0, optimizer=optimizer)

    assert loc == 0
    # Analytical MLE: total of all observed values divided by the number
    # of uncensored values.
    num_uncensored = len(sample) - sample.num_censored()
    total = sample._uncensored.sum() + sample._right.sum()
    assert_allclose(scale, total / num_uncensored, rtol=1e-8)
|
193 |
+
|
194 |
+
|
195 |
+
def test_gamma_right_censored():
    """
    Fit gamma shape and scale to data with one right-censored value.

    Reference values from R (fitdistrplus), fitting 'gamma' with
    reltol=1e-13 and start shape=1, scale=10 to seven exact values plus
    one value right-censored at 25.0: shape=1.447623, scale=8.360197.
    """
    # Every observation except the last is uncensored.
    observations = [2.5, 2.9, 3.8, 9.1, 9.3, 12.0, 23.0, 25.0]
    sample = CensoredData.right_censored(observations, [0]*7 + [1])

    a, loc, scale = gamma.fit(sample, floc=0, optimizer=optimizer)

    assert_allclose(a, 1.447623, rtol=5e-6)
    assert loc == 0
    assert_allclose(scale, 8.360197, rtol=5e-6)
|
227 |
+
|
228 |
+
|
229 |
+
def test_gumbel():
    """
    Fit gumbel_r and gumbel_l to censored data.

    Reference values for the gumbel_r data were computed in R with the
    evd and fitdistrplus packages (fitdistcens of 'gumbel' with
    reltol=1e-14 and start loc=4, scale=5): loc=4.487853,
    scale=4.843640.
    """
    # One interval-censored value, two right-censored, three exact.
    exact = np.array([2, 3, 9])
    right_cens = np.array([10, 10])
    intervals = np.array([[0, 1]])
    sample = CensoredData(exact, right=right_cens, interval=intervals)
    loc, scale = gumbel_r.fit(sample, optimizer=optimizer)
    assert_allclose(loc, 4.487853, rtol=5e-6)
    assert_allclose(scale, 4.843640, rtol=5e-6)

    # Mirror the data about 0: right-censored values become left-censored
    # and interval endpoints swap.  Fitting gumbel_l to the mirrored data
    # must reproduce the gumbel_r result with loc negated.
    mirrored = CensoredData(-exact, left=-right_cens,
                            interval=-intervals[:, ::-1])
    loc2, scale2 = gumbel_l.fit(mirrored, optimizer=optimizer)
    assert_allclose(loc2, -4.487853, rtol=5e-6)
    assert_allclose(scale2, 4.843640, rtol=5e-6)
|
267 |
+
|
268 |
+
|
269 |
+
def test_invgauss():
    """
    Fit invgauss to data with one left- and one right-censored value.

    Reference values from R (statmod + fitdistrplus).  With the
    dispersion fixed at 1 (equivalent to fscale=1 here) R estimates
    mean=0.853469.  With the dispersion free, R estimates
    mean=0.8699819 and dispersion=1.2261362.  The statmod
    parametrization differs from SciPy's (see
    https://arxiv.org/abs/1603.06687 for details); the translation is

        scale = 1/dispersion       -> 0.8155701
        mu    = mean * dispersion  -> 1.066716

    which are the SciPy scale and shape values asserted below.
    """
    # One point is left-censored at 0.15 and one right-censored at 3.
    observed = [0.4813096, 0.5571880, 0.5132463, 0.3801414,
                0.5904386, 0.4822340, 0.3478597, 0.7191797,
                1.5810902, 0.4442299]
    sample = CensoredData(uncensored=observed, left=[0.15], right=[3])

    # First, fit only the shape parameter (loc and scale fixed).
    mu, loc, scale = invgauss.fit(sample, floc=0, fscale=1,
                                  optimizer=optimizer)
    assert_allclose(mu, 0.853469, rtol=5e-5)
    assert loc == 0
    assert scale == 1

    # Now fit both the shape and the scale.
    mu, loc, scale = invgauss.fit(sample, floc=0, optimizer=optimizer)
    assert_allclose(mu, 1.066716, rtol=5e-5)
    assert loc == 0
    assert_allclose(scale, 0.8155701, rtol=5e-5)
|
348 |
+
|
349 |
+
|
350 |
+
def test_invweibull():
    """
    Fit invweibull to censored data.

    Reference values from R with the evd and fitdistrplus packages; the
    evd 'frechet' distribution matches SciPy's invweibull.  With loc
    fixed at 0, reltol=1e-14 and start loc=4, scale=5, R estimates
    shape=0.6379845 and scale=2.7902200.
    """
    # One interval-censored value, two right-censored, three exact.
    sample = CensoredData(uncensored=[2, 3, 9], right=[10, 10],
                          interval=[[0, 1]])
    c, loc, scale = invweibull.fit(sample, floc=0, optimizer=optimizer)
    assert_allclose(c, 0.6379845, rtol=5e-6)
    assert loc == 0
    assert_allclose(scale, 2.7902200, rtol=5e-6)
|
384 |
+
|
385 |
+
|
386 |
+
def test_laplace():
    """
    Fit the Laplace distribution to left- and right-censored data.

    Reference values from R (fitdistrplus with hand-written dlaplace and
    plaplace functions, start location=10, scale=10, reltol=1e-13):
    location=14.79870, scale=30.93601.
    """
    # A value of -50 marks a left-censored observation, and 50 a
    # right-censored one; everything else is exact.
    raw = np.array([-50.0, -41.564, 50.0, 15.7384, 50.0, 10.0452, -2.0684,
                    -19.5399, 50.0, 9.0005, 27.1227, 4.3113, -3.7372,
                    25.3111, 14.7987, 34.0887, 50.0, 42.8496, 18.5862,
                    32.8921, 9.0448, -27.4591, -50.0, 19.5083, -9.7199])
    is_left = raw == -50.0
    is_right = raw == 50.0
    sample = CensoredData(uncensored=raw[~is_left & ~is_right],
                          left=raw[is_left], right=raw[is_right])
    loc, scale = laplace.fit(sample, loc=10, scale=10, optimizer=optimizer)
    assert_allclose(loc, 14.79870, rtol=5e-6)
    assert_allclose(scale, 30.93601, rtol=5e-6)
|
436 |
+
|
437 |
+
|
438 |
+
def test_logistic():
    """
    Fit the logistic distribution to left-censored data.

    Reference values from R (fitdistrplus, fitdistcens of 'logis' with
    reltol=1e-14): location=14.633459, scale=9.232736.
    """
    # A recorded 0 marks a left-censored observation (true value < 0).
    obs = np.array([13.5401, 37.4235, 11.906, 13.998, 0.0, 0.4023, 0.0,
                    10.9044, 21.0629, 9.6985, 0.0, 12.9016, 39.164, 34.6396,
                    0.0, 20.3665, 16.5889, 18.0952, 45.3818, 35.3306, 8.4949,
                    3.4041, 0.0, 7.2828, 37.1265, 6.5969, 17.6868, 17.4977,
                    16.3391, 36.0541])
    sample = CensoredData.left_censored(obs, censored=(obs == 0))
    loc, scale = logistic.fit(sample, optimizer=optimizer)
    assert_allclose(loc, 14.633459, rtol=5e-7)
    assert_allclose(scale, 9.232736, rtol=5e-6)
|
477 |
+
|
478 |
+
|
479 |
+
def test_lognorm():
|
480 |
+
"""
|
481 |
+
Ref: https://math.montana.edu/jobo/st528/documents/relc.pdf
|
482 |
+
|
483 |
+
The data is the locomotive control time to failure example that starts
|
484 |
+
on page 8. That's the 8th page in the PDF; the page number shown in
|
485 |
+
the text is 270).
|
486 |
+
The document includes SAS output for the data.
|
487 |
+
"""
|
488 |
+
# These are the uncensored measurements. There are also 59 right-censored
|
489 |
+
# measurements where the lower bound is 135.
|
490 |
+
miles_to_fail = [22.5, 37.5, 46.0, 48.5, 51.5, 53.0, 54.5, 57.5, 66.5,
|
491 |
+
68.0, 69.5, 76.5, 77.0, 78.5, 80.0, 81.5, 82.0, 83.0,
|
492 |
+
84.0, 91.5, 93.5, 102.5, 107.0, 108.5, 112.5, 113.5,
|
493 |
+
116.0, 117.0, 118.5, 119.0, 120.0, 122.5, 123.0, 127.5,
|
494 |
+
131.0, 132.5, 134.0]
|
495 |
+
|
496 |
+
data = CensoredData.right_censored(miles_to_fail + [135]*59,
|
497 |
+
[0]*len(miles_to_fail) + [1]*59)
|
498 |
+
sigma, loc, scale = lognorm.fit(data, floc=0)
|
499 |
+
|
500 |
+
assert loc == 0
|
501 |
+
# Convert the lognorm parameters to the mu and sigma of the underlying
|
502 |
+
# normal distribution.
|
503 |
+
mu = np.log(scale)
|
504 |
+
# The expected results are from the 17th page of the PDF document
|
505 |
+
# (labeled page 279), in the SAS output on the right side of the page.
|
506 |
+
assert_allclose(mu, 5.1169, rtol=5e-4)
|
507 |
+
assert_allclose(sigma, 0.7055, rtol=5e-3)
|
508 |
+
|
509 |
+
|
510 |
+
def test_nct():
    """
    Fit the shape parameters (df, nc) of nct to right-censored data.

    Reference values from R (fitdistrplus, fitdistcens of 't' with
    reltol=1e-14 and start df=1, ncp=2): df=0.5432336, ncp=2.8893565.
    """
    # The two values of 25 are right-censored.
    sample = CensoredData.right_censored([1, 2, 3, 5, 8, 10, 25, 25],
                                         [0, 0, 0, 0, 0, 0, 1, 1])
    # loc and scale are fixed; only df and nc are estimated.
    with np.errstate(over='ignore'):  # remove context when gh-14901 is closed
        df, nc, loc, scale = nct.fit(sample, floc=0, fscale=1,
                                     optimizer=optimizer)
    assert_allclose(df, 0.5432336, rtol=5e-6)
    assert_allclose(nc, 2.8893565, rtol=5e-6)
    assert loc == 0
    assert scale == 1
|
539 |
+
|
540 |
+
|
541 |
+
def test_ncx2():
    """
    Fit the shape parameters (df, ncp) of ncx2 to mixed censored data.

    Reference values from R (fitdistrplus, fitdistcens of 'chisq' with
    reltol=1e-14 and start df=1, ncp=2) on five exact values, one
    interval-censored value and two right-censored values:
    df=1.052871, ncp=2.362934.
    """
    sample = CensoredData(uncensored=[2.7, 0.2, 6.5, 0.4, 0.1], right=[8, 8],
                          interval=[[0.6, 1.0]])
    with np.errstate(over='ignore'):  # remove context when gh-14901 is closed
        df, ncp, loc, scale = ncx2.fit(sample, floc=0, fscale=1,
                                       optimizer=optimizer)
    assert_allclose(df, 1.052871, rtol=5e-6)
    assert_allclose(ncp, 2.362934, rtol=5e-6)
    assert loc == 0
    assert scale == 1
|
572 |
+
|
573 |
+
|
574 |
+
def test_norm():
    """
    Fit the normal distribution to purely interval-censored data.

    Reference values from R (fitdistrplus, fitdistcens of 'norm' with
    reltol=1e-14): mean=0.5919990, sd=0.2868042.
    """
    bounds = [[0.10, 0.20],
              [0.50, 0.55],
              [0.75, 0.90],
              [0.80, 0.95]]
    sample = CensoredData(interval=bounds)

    loc, scale = norm.fit(sample, optimizer=optimizer)

    assert_allclose(loc, 0.5919990, rtol=5e-6)
    assert_allclose(scale, 0.2868042, rtol=5e-6)
|
604 |
+
|
605 |
+
|
606 |
+
def test_weibull_censored1():
|
607 |
+
# Ref: http://www.ams.sunysb.edu/~zhu/ams588/Lecture_3_likelihood.pdf
|
608 |
+
|
609 |
+
# Survival times; '*' indicates right-censored.
|
610 |
+
s = "3,5,6*,8,10*,11*,15,20*,22,23,27*,29,32,35,40,26,28,33*,21,24*"
|
611 |
+
|
612 |
+
times, cens = zip(*[(float(t[0]), len(t) == 2)
|
613 |
+
for t in [w.split('*') for w in s.split(',')]])
|
614 |
+
data = CensoredData.right_censored(times, cens)
|
615 |
+
|
616 |
+
c, loc, scale = weibull_min.fit(data, floc=0)
|
617 |
+
|
618 |
+
# Expected values are from the reference.
|
619 |
+
assert_allclose(c, 2.149, rtol=1e-3)
|
620 |
+
assert loc == 0
|
621 |
+
assert_allclose(scale, 28.99, rtol=1e-3)
|
622 |
+
|
623 |
+
# Flip the sign of the data, and make the censored values
|
624 |
+
# left-censored. We should get the same parameters when we fit
|
625 |
+
# weibull_max to the flipped data.
|
626 |
+
data2 = CensoredData.left_censored(-np.array(times), cens)
|
627 |
+
|
628 |
+
c2, loc2, scale2 = weibull_max.fit(data2, floc=0)
|
629 |
+
|
630 |
+
assert_allclose(c2, 2.149, rtol=1e-3)
|
631 |
+
assert loc2 == 0
|
632 |
+
assert_allclose(scale2, 28.99, rtol=1e-3)
|
633 |
+
|
634 |
+
|
635 |
+
def test_weibull_min_sas1():
    # Fit weibull_min (loc fixed at 0) to right-censored data.
    # Data and SAS results from
    # https://support.sas.com/documentation/cdl/en/qcug/63922/HTML/default/
    # viewer.htm#qcug_reliability_sect004.htm

    # Each pair is (lifetime, flag); flag 1 marks a right-censored value.
    text = """
        450 0 460 1 1150 0 1150 0 1560 1
        1600 0 1660 1 1850 1 1850 1 1850 1
        1850 1 1850 1 2030 1 2030 1 2030 1
        2070 0 2070 0 2080 0 2200 1 3000 1
        3000 1 3000 1 3000 1 3100 0 3200 1
        3450 0 3750 1 3750 1 4150 1 4150 1
        4150 1 4150 1 4300 1 4300 1 4300 1
        4300 1 4600 0 4850 1 4850 1 4850 1
        4850 1 5000 1 5000 1 5000 1 6100 1
        6100 0 6100 1 6100 1 6300 1 6450 1
        6450 1 6700 1 7450 1 7800 1 7800 1
        8100 1 8100 1 8200 1 8500 1 8500 1
        8500 1 8750 1 8750 0 8750 1 9400 1
        9900 1 10100 1 10100 1 10100 1 11500 1
    """

    # Parse the (lifetime, flag) pairs into two parallel arrays.
    life, cens = np.array([int(w) for w in text.split()]).reshape(-1, 2).T
    # Convert hours to thousands of hours, matching the SAS analysis.
    life = life/1000.0

    data = CensoredData.right_censored(life, cens)

    c, loc, scale = weibull_min.fit(data, floc=0, optimizer=optimizer)
    # Expected values are from the SAS output in the reference.
    assert_allclose(c, 1.0584, rtol=1e-4)
    assert_allclose(scale, 26.2968, rtol=1e-5)
    assert loc == 0
|
666 |
+
|
667 |
+
|
668 |
+
def test_weibull_min_sas2():
    """
    Fit all three weibull_min parameters to right-censored data.

    Data and expected values are from the SAS example at
    http://support.sas.com/documentation/cdl/en/ormpug/67517/HTML/default/
    viewer.htm#ormpug_nlpsolver_examples06.htm
    """
    # The final two observations (216 and 244) are right-censored.
    days = np.array([143, 164, 188, 188, 190, 192, 206, 209, 213, 216, 220,
                     227, 230, 234, 246, 265, 304, 216, 244])
    n_cens = 2
    sample = CensoredData.right_censored(
        days, [0]*(len(days) - n_cens) + [1]*n_cens)

    # Initial guesses: shape 1, loc 100, scale 100.
    c, loc, scale = weibull_min.fit(sample, 1, loc=100, scale=100,
                                    optimizer=optimizer)

    assert_allclose(c, 2.7112, rtol=5e-4)
    assert_allclose(loc, 122.03, rtol=5e-4)
    assert_allclose(scale, 108.37, rtol=5e-4)
|
.venv/Lib/site-packages/scipy/stats/tests/test_crosstab.py
ADDED
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
import numpy as np
|
3 |
+
from numpy.testing import assert_array_equal, assert_equal
|
4 |
+
from scipy.stats.contingency import crosstab
|
5 |
+
|
6 |
+
|
7 |
+
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_basic(sparse):
    """Check a basic two-sequence crosstab, dense and sparse."""
    a = [0, 0, 9, 9, 0, 0, 9]
    b = [2, 1, 3, 1, 2, 3, 3]
    expected_avals = [0, 9]
    expected_bvals = [1, 2, 3]
    expected_count = np.array([[1, 2, 1],
                               [1, 0, 2]])
    (avals, bvals), count = crosstab(a, b, sparse=sparse)
    assert_array_equal(avals, expected_avals)
    assert_array_equal(bvals, expected_bvals)
    if sparse:
        # Use toarray() rather than the deprecated `.A` attribute.
        assert_array_equal(count.toarray(), expected_count)
    else:
        assert_array_equal(count, expected_count)
|
22 |
+
|
23 |
+
|
24 |
+
def test_crosstab_basic_1d():
    """A single input sequence yields a 1-d table of value counts."""
    x = [1, 2, 3, 1, 2, 3, 3]
    (xvals,), count = crosstab(x)
    assert_array_equal(xvals, [1, 2, 3])
    assert_array_equal(count, np.array([2, 2, 3]))
|
32 |
+
|
33 |
+
|
34 |
+
def test_crosstab_basic_3d():
    """Three input sequences yield a 3-d contingency table."""
    a = 'a'
    b = 'b'
    x = [0, 0, 9, 9, 0, 0, 9, 9]
    y = [a, a, a, a, b, b, b, a]
    z = [1, 2, 3, 1, 2, 3, 3, 1]
    (xvals, yvals, zvals), count = crosstab(x, y, z)
    assert_array_equal(xvals, [0, 9])
    assert_array_equal(yvals, [a, b])
    assert_array_equal(zvals, [1, 2, 3])
    expected = np.array([[[1, 1, 0],
                          [0, 1, 1]],
                         [[2, 0, 1],
                          [0, 0, 1]]])
    assert_array_equal(count, expected)
|
53 |
+
|
54 |
+
|
55 |
+
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_levels(sparse):
    """An explicit `levels` entry may include values absent from the data."""
    a = [0, 0, 9, 9, 0, 0, 9]
    b = [1, 2, 3, 1, 2, 3, 3]
    expected_avals = [0, 9]
    expected_bvals = [0, 1, 2, 3]
    # The column for the unused level 0 of `b` is all zeros.
    expected_count = np.array([[0, 1, 2, 1],
                               [0, 1, 0, 2]])
    (avals, bvals), count = crosstab(a, b, levels=[None, [0, 1, 2, 3]],
                                     sparse=sparse)
    assert_array_equal(avals, expected_avals)
    assert_array_equal(bvals, expected_bvals)
    if sparse:
        # Use toarray() rather than the deprecated `.A` attribute.
        assert_array_equal(count.toarray(), expected_count)
    else:
        assert_array_equal(count, expected_count)
|
71 |
+
|
72 |
+
|
73 |
+
@pytest.mark.parametrize('sparse', [False, True])
def test_crosstab_extra_levels(sparse):
    """Data values outside the explicit `levels` are ignored."""
    # The pair of values (-1, 3) will be ignored, because we explicitly
    # request the counted `a` values to be [0, 9].
    a = [0, 0, 9, 9, 0, 0, 9, -1]
    b = [1, 2, 3, 1, 2, 3, 3, 3]
    expected_avals = [0, 9]
    expected_bvals = [0, 1, 2, 3]
    expected_count = np.array([[0, 1, 2, 1],
                               [0, 1, 0, 2]])
    (avals, bvals), count = crosstab(a, b, levels=[[0, 9], [0, 1, 2, 3]],
                                     sparse=sparse)
    assert_array_equal(avals, expected_avals)
    assert_array_equal(bvals, expected_bvals)
    if sparse:
        # Use toarray() rather than the deprecated `.A` attribute.
        assert_array_equal(count.toarray(), expected_count)
    else:
        assert_array_equal(count, expected_count)
|
91 |
+
|
92 |
+
|
93 |
+
def test_validation_at_least_one():
    # Calling crosstab with no input sequences must raise a TypeError.
    with pytest.raises(TypeError, match='At least one'):
        crosstab()
|
96 |
+
|
97 |
+
|
98 |
+
def test_validation_same_lengths():
    # Input sequences of different lengths must raise a ValueError.
    with pytest.raises(ValueError, match='must have the same length'):
        crosstab([1, 2], [1, 2, 3, 4])
|
101 |
+
|
102 |
+
|
103 |
+
def test_validation_sparse_only_two_args():
    # sparse=True is only supported for exactly two input sequences.
    with pytest.raises(ValueError, match='only two input sequences'):
        crosstab([0, 1, 1], [8, 8, 9], [1, 3, 3], sparse=True)
|
106 |
+
|
107 |
+
|
108 |
+
def test_validation_len_levels_matches_args():
    # `levels` must contain one entry per input sequence.
    with pytest.raises(ValueError, match='number of input sequences'):
        crosstab([0, 1, 1], [8, 8, 9], levels=([0, 1, 2, 3],))
|
111 |
+
|
112 |
+
|
113 |
+
def test_result():
    # The return value is a namedtuple whose fields (`elements`, `count`)
    # match the order used when unpacking the result as a plain tuple.
    res = crosstab([0, 1], [1, 2])
    assert_equal((res.elements, res.count), res)
|
.venv/Lib/site-packages/scipy/stats/tests/test_discrete_basic.py
ADDED
@@ -0,0 +1,548 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy.testing as npt
from numpy.testing import assert_allclose

import numpy as np
import pytest

from scipy import stats
from .common_tests import (check_normalization, check_moment,
                           check_mean_expect,
                           check_var_expect, check_skew_expect,
                           check_kurt_expect, check_entropy,
                           check_private_entropy, check_edge_support,
                           check_named_args, check_random_state_property,
                           check_pickling, check_rvs_broadcast,
                           check_freezing,)
from scipy.stats._distr_params import distdiscrete, invdistdiscrete
from scipy.stats._distn_infrastructure import rv_discrete_frozen

# Append a generic sample distribution (an rv_discrete built from explicit
# values, with no shape parameters) to the list of test cases.
vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4])
distdiscrete += [[stats.rv_discrete(values=vals), ()]]

# For these distributions, test_discrete_basic only runs with test mode full
distslow = {'zipfian', 'nhypergeom'}
|
24 |
+
|
25 |
+
|
26 |
+
def cases_test_discrete_basic():
    """Generate (distname, args, first_case) cases for test_discrete_basic.

    Slow distributions are emitted as pytest.param values carrying the
    `slow` mark; for the rest, the third element flags the first
    occurrence of each distribution name.
    """
    emitted = set()
    for name, shape_args in distdiscrete:
        if name in distslow:
            yield pytest.param(name, shape_args, name, marks=pytest.mark.slow)
            continue
        yield name, shape_args, name not in emitted
        emitted.add(name)
|
34 |
+
|
35 |
+
|
36 |
+
@pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic())
def test_discrete_basic(distname, arg, first_case):
    # `distname` may be an actual rv_discrete instance (the sample
    # distribution appended to distdiscrete) rather than a name string,
    # in which case getattr raises TypeError.
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        distfn = distname
        distname = 'sample distribution'
    # Fixed seed so the chi-square test below is reproducible.
    np.random.seed(9765456)
    rvs = distfn.rvs(size=2000, *arg)
    supp = np.unique(rvs)
    m, v = distfn.stats(*arg)
    check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf')

    check_pmf_cdf(distfn, arg, distname)
    check_oth(distfn, arg, supp, distname + ' oth')
    check_edge_support(distfn, arg)

    # Goodness-of-fit of the generated sample against the pmf.
    alpha = 0.01
    check_discrete_chisquare(distfn, arg, rvs, alpha,
                             distname + ' chisquare')

    # The remaining checks are independent of the shape parameters, so
    # they only run for the first case of each distribution.
    if first_case:
        locscale_defaults = (0,)
        meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf,
                 distfn.logsf]
        # make sure arguments are within support
        # for some distributions, this needs to be overridden
        spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0,
                  'nchypergeom_wallenius': 6}
        k = spec_k.get(distname, 1)
        check_named_args(distfn, k, arg, locscale_defaults, meths)
        if distname != 'sample distribution':
            check_scale_docstring(distfn)
        check_random_state_property(distfn, arg)
        check_pickling(distfn, arg)
        check_freezing(distfn, arg)

        # Entropy
        check_entropy(distfn, arg, distname)
        if distfn.__class__._entropy != stats.rv_discrete._entropy:
            check_private_entropy(distfn, arg, stats.rv_discrete)
|
77 |
+
|
78 |
+
|
79 |
+
@pytest.mark.parametrize('distname,arg', distdiscrete)
def test_moments(distname, arg):
    """Cross-check ``stats``, ``moment``/``expect`` and normalization."""
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        # Already a distribution object rather than a name.
        distfn = distname
        distname = 'sample distribution'
    mean, var, skew, kurt = distfn.stats(*arg, moments='mvsk')
    check_normalization(distfn, arg, distname)

    # `stats` must agree with the `moment`/`expect` based computations.
    check_moment(distfn, arg, mean, var, distname)
    check_mean_expect(distfn, arg, mean, distname)
    check_var_expect(distfn, arg, mean, var, distname)
    check_skew_expect(distfn, arg, mean, var, skew, distname)
    with np.testing.suppress_warnings() as sup:
        if distname in ('zipf', 'betanbinom'):
            # These emit RuntimeWarnings during the kurtosis computation.
            sup.filter(RuntimeWarning)
        check_kurt_expect(distfn, arg, mean, var, kurt, distname)

    # Frozen-distribution raw moments: first is the mean, second is
    # var + mean**2.
    check_moment_frozen(distfn, arg, mean, 1)
    check_moment_frozen(distfn, arg, var + mean*mean, 2)
|
102 |
+
|
103 |
+
|
104 |
+
@pytest.mark.parametrize('dist,shape_args', distdiscrete)
def test_rvs_broadcast(dist, shape_args):
    """Check that rvs() broadcasts loc against the shape parameters.

    If shape_only is True, the distribution's _rvs method consumes more
    than one random number per variate, so a broadcast/sized rvs() call
    cannot be expected to reproduce the values of a numpy.vectorize'd
    rvs() -- only the shapes of the results are compared, not the values.
    Membership in the list below is an implementation detail of the
    distribution, not a requirement; if an rvs() implementation changes,
    this test might also have to be changed.
    """
    shape_only = dist in ['betabinom', 'betanbinom', 'skellam', 'yulesimon',
                          'dlaplace', 'nchypergeom_fisher',
                          'nchypergeom_wallenius']

    try:
        distfunc = getattr(stats, dist)
    except TypeError:
        distfunc = dist
        dist = f'rv_discrete(values=({dist.xk!r}, {dist.pk!r}))'
    loc = np.zeros(2)
    nargs = distfunc.numargs
    # Give each shape parameter a distinct broadcastable shape:
    # (3, 1), (4, 1, 1), (5, 1, 1, 1), ...
    allargs = [np.full((i + 3,) + (1,) * (i + 1), shape_args[i])
               for i in range(nargs)]
    allargs.append(loc)
    # bshape is the shape obtained when loc and all of the shape
    # parameters are broadcast together.
    bshape = [i + 3 for i in range(nargs - 1, -1, -1)]
    bshape.append(loc.size)
    check_rvs_broadcast(
        distfunc, dist, allargs, bshape, shape_only, [np.dtype(int)]
    )
|
142 |
+
|
143 |
+
|
144 |
+
@pytest.mark.parametrize('dist,args', distdiscrete)
def test_ppf_with_loc(dist, args):
    """ppf at 0/1 must land on the (shifted) support edges for any loc."""
    try:
        distfn = getattr(stats, dist)
    except TypeError:
        distfn = dist
    # Check a negative, a zero, and a positive relocation.
    np.random.seed(1942349)
    shifts = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)]
    _a, _b = distfn.support(*args)
    for shift in shifts:
        expected = [_a - 1 + shift, _b + shift]
        observed = [distfn.ppf(0.0, *args, loc=shift),
                    distfn.ppf(1.0, *args, loc=shift)]
        npt.assert_array_equal(expected, observed)
|
159 |
+
|
160 |
+
|
161 |
+
@pytest.mark.parametrize('dist, args', distdiscrete)
def test_isf_with_loc(dist, args):
    """isf at 0/1 must land on the (shifted) support edges, incl. broadcast."""
    try:
        distfn = getattr(stats, dist)
    except TypeError:
        distfn = dist
    np.random.seed(1942349)

    def assert_edges(shift):
        # isf(0) is the upper support edge; isf(1) is one below the lower.
        lo, hi = distfn.support(*args)
        expected = hi + shift, lo - 1 + shift
        observed = (distfn.isf(0., *args, loc=shift),
                    distfn.isf(1., *args, loc=shift))
        npt.assert_array_equal(expected, observed)

    # Scalar relocations: negative, none, positive.
    for shift in (np.random.randint(-10, -1), 0, np.random.randint(1, 10)):
        assert_edges(shift)
    # Broadcasting behaviour: array-valued relocations.
    for shift in (np.random.randint(-10, -1, size=(5, 3)),
                  np.zeros((5, 3)),
                  np.random.randint(1, 10, size=(5, 3))):
        assert_edges(shift)
|
184 |
+
|
185 |
+
|
186 |
+
def check_cdf_ppf(distfn, arg, supp, msg):
    """Check that ppf inverts cdf on the given support points.

    supp is assumed to be an array of integers in the support of distfn
    (but not necessarily all the integers in the support).
    This test assumes that the PMF of any value in the support of the
    distribution is greater than 1e-8.
    """
    # cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer}
    cdf_supp = distfn.cdf(supp, *arg)
    # In very rare cases, the finite precision calculation of ppf(cdf(supp))
    # can produce an array in which an element is off by one.  Nudging the
    # CDF values down by 15 ULPs helps to avoid this.
    cdf_supp0 = cdf_supp - 15*np.spacing(cdf_supp)
    npt.assert_array_equal(distfn.ppf(cdf_supp0, *arg),
                           supp, msg + '-roundtrip')
    # Repeat the same calculation, but with the CDF values decreased by 1e-8
    # (still above the previous step, because every pmf value exceeds 1e-8).
    npt.assert_array_equal(distfn.ppf(distfn.cdf(supp, *arg) - 1e-8, *arg),
                           supp, msg + '-roundtrip')

    if not hasattr(distfn, 'xk'):
        # Lattice (non rv_sample) distributions: stepping just past the cdf
        # value must land on the next lattice point (spacing `inc`).
        _a, _b = distfn.support(*arg)
        supp1 = supp[supp < _b]
        npt.assert_array_equal(distfn.ppf(distfn.cdf(supp1, *arg) + 1e-8, *arg),
                               supp1 + distfn.inc, msg + ' ppf-cdf-next')
|
209 |
+
|
210 |
+
|
211 |
+
def check_pmf_cdf(distfn, arg, distname):
    """Check that the cdf matches the cumulative sum of the pmf."""
    if hasattr(distfn, 'xk'):
        # rv_sample: evaluate exactly on the support points.
        index = distfn.xk
    else:
        start = int(distfn.ppf(0.01, *arg) - 1)
        index = list(range(start, start + 10))
    cdfs = distfn.cdf(index, *arg)
    pmfs_cum = distfn.pmf(index, *arg).cumsum()

    # skellam is built on ncx2, which limits the achievable accuracy.
    tol = 1e-5 if distname == 'skellam' else 1e-10
    npt.assert_allclose(cdfs - cdfs[0], pmfs_cum - pmfs_cum[0],
                        atol=tol, rtol=tol)

    # The pmf must vanish between lattice points.
    k = np.asarray(index)
    k_shifted = k[:-1] + np.diff(k)/2
    npt.assert_equal(distfn.pmf(k_shifted, *arg), 0)

    # Repeat for a frozen distribution with a non-zero loc.
    loc = 0.5
    frozen = distfn(loc=loc, *arg)
    npt.assert_allclose(frozen.pmf(k[1:] + loc), np.diff(frozen.cdf(k + loc)))
    npt.assert_equal(frozen.pmf(k_shifted + loc), 0)
|
236 |
+
|
237 |
+
|
238 |
+
def check_moment_frozen(distfn, arg, m, k):
    """Check that the k-th raw moment of the frozen distribution equals m."""
    frozen = distfn(*arg)
    npt.assert_allclose(frozen.moment(k), m, atol=1e-10, rtol=1e-10)
|
241 |
+
|
242 |
+
|
243 |
+
def check_oth(distfn, arg, supp, msg):
    """Check sf/isf consistency with cdf/ppf and the median location."""
    # sf must be the complement of cdf on the support points.
    npt.assert_allclose(distfn.sf(supp, *arg), 1. - distfn.cdf(supp, *arg),
                        atol=1e-10, rtol=1e-10)

    # isf(q) must agree with ppf(1 - q) across the bulk of the distribution.
    probs = np.linspace(0.01, 0.99, 20)
    npt.assert_allclose(distfn.isf(probs, *arg), distfn.ppf(1. - probs, *arg),
                        atol=1e-10, rtol=1e-10)

    # The median (via isf) must carry at least half the mass on each side.
    med = distfn.isf(0.5, *arg)
    npt.assert_(distfn.sf(med - 1, *arg) > 0.5)
    npt.assert_(distfn.cdf(med + 1, *arg) > 0.5)
|
255 |
+
|
256 |
+
|
257 |
+
def check_discrete_chisquare(distfn, arg, rvs, alpha, msg):
    """Perform a chi-square test for a random sample of a discrete distribution.

    Parameters
    ----------
    distfn : rv_discrete
        the distribution the sample was drawn from
    arg : sequence
        parameters of the distribution
    rvs : ndarray
        the random sample to test
    alpha : float
        significance level, threshold for p-value
    msg : string
        identifier used in the assertion message

    Raises
    ------
    AssertionError
        if the p-value of the chi-square test is not greater than `alpha`.
    """
    # Minimum probability mass required per histogram cell.
    wsupp = 0.05

    # construct intervals with minimum mass `wsupp`.
    # intervals are left-half-open as in a cdf difference
    _a, _b = distfn.support(*arg)
    # Clip the scan range so unbounded supports stay finite.
    lo = int(max(_a, -1000))
    high = int(min(_b, 1000)) + 1
    distsupport = range(lo, high)
    last = 0
    distsupp = [lo]
    distmass = []
    for ii in distsupport:
        current = distfn.cdf(ii, *arg)
        # Small slack (1e-14) so masses that are exactly wsupp up to
        # rounding still close a cell.
        if current - last >= wsupp - 1e-14:
            distsupp.append(ii)
            distmass.append(current - last)
            last = current
            if current > (1 - wsupp):
                break
    if distsupp[-1] < _b:
        # Fold the remaining tail mass into one final cell.
        distsupp.append(_b)
        distmass.append(1 - last)
    distsupp = np.array(distsupp)
    distmass = np.array(distmass)

    # convert intervals to right-half-open as required by histogram
    histsupp = distsupp + 1e-8
    histsupp[0] = _a

    # find sample frequencies and perform chisquare test
    freq, hsupp = np.histogram(rvs, histsupp)
    chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass)

    npt.assert_(
        pval > alpha,
        f'chisquare - test for {msg} at arg = {str(arg)} with pval = {str(pval)}'
    )
|
312 |
+
|
313 |
+
|
314 |
+
def check_scale_docstring(distfn):
    """Verify a discrete distribution's docstring does not mention `scale`."""
    doc = distfn.__doc__
    if doc is None:
        # Docstrings are stripped when the interpreter runs with -OO.
        return
    npt.assert_('scale' not in doc)
|
318 |
+
|
319 |
+
|
320 |
+
@pytest.mark.parametrize('method', ['pmf', 'logpmf', 'cdf', 'logcdf',
                                    'sf', 'logsf', 'ppf', 'isf'])
@pytest.mark.parametrize('distname, args', distdiscrete)
def test_methods_with_lists(method, distname, args):
    """Test that the discrete distributions can accept Python lists
    as arguments: each parametrized `method` is called with list-valued
    inputs and compared elementwise against scalar calls.
    """
    try:
        dist = getattr(stats, distname)
    except TypeError:
        # A sample distribution object (rv_sample); nothing to look up.
        return
    if method in ['ppf', 'isf']:
        # Inverse methods take probabilities rather than support points.
        z = [0.1, 0.2]
    else:
        z = [0, 1]
    p2 = [[p]*2 for p in args]
    loc = [0, 1]
    # BUG FIX: the parametrized `method` was previously ignored -- the body
    # always called `dist.pmf`, so the 8-way parametrization exercised
    # nothing new and the probability-valued `z` was never meaningful.
    # Dispatch on `method` so each case tests the method it names.
    dist_method = getattr(dist, method)
    result = dist_method(z, *p2, loc=loc)
    npt.assert_allclose(result,
                        [dist_method(*v) for v in zip(z, *p2, loc)],
                        rtol=1e-15, atol=1e-15)
|
340 |
+
|
341 |
+
|
342 |
+
@pytest.mark.parametrize('distname, args', invdistdiscrete)
def test_cdf_gh13280_regression(distname, args):
    """Invalid shape parameters must produce NaN from cdf (gh-13280)."""
    dist = getattr(stats, distname)
    grid = np.arange(-2, 15)
    observed = dist.cdf(grid, *args)
    npt.assert_equal(observed, np.nan)
|
350 |
+
|
351 |
+
|
352 |
+
def cases_test_discrete_integer_shapes():
    """Yield (distname, shape name, shapes) for integral-only shape params."""
    # Distribution parameters that are only allowed to be integral when
    # fitting, but are allowed to be real as input to PDF, etc.
    exceptions = {'nbinom': {'n'}, 'betanbinom': {'n'}}

    visited = set()
    for distname, shapes in distdiscrete:
        # Each distribution contributes cases only once, regardless of how
        # many parameter sets it has in distdiscrete.
        if distname in visited:
            continue
        visited.add(distname)

        try:
            dist = getattr(stats, distname)
        except TypeError:
            # Sample distribution objects have no shape info to inspect.
            continue

        exempt = exceptions.get(distname, set())
        for shape in dist._shape_info():
            if shape.integrality and shape.name not in exempt:
                yield distname, shape.name, shapes
|
376 |
+
|
377 |
+
|
378 |
+
@pytest.mark.parametrize('distname, shapename, shapes',
                         cases_test_discrete_integer_shapes())
def test_integer_shapes(distname, shapename, shapes):
    """Check that a non-integral value of an integral-only shape parameter
    makes the pmf evaluate to NaN, while integral values remain valid."""
    dist = getattr(stats, distname)
    shape_info = dist._shape_info()
    shape_names = [shape.name for shape in shape_info]
    i = shape_names.index(shapename)  # this element of params must be integral

    shapes_copy = list(shapes)

    valid_shape = shapes[i]
    invalid_shape = valid_shape - 0.5  # arbitrary non-integral value
    new_valid_shape = valid_shape - 1
    # Stack the three candidate values along the first axis so a single pmf
    # call broadcasts over them (row 0: valid, row 1: invalid, row 2: valid).
    shapes_copy[i] = [[valid_shape], [invalid_shape], [new_valid_shape]]

    a, b = dist.support(*shapes)
    x = np.round(np.linspace(a, b, 5))

    pmf = dist.pmf(x, *shapes_copy)
    # Rows with integral shape values must be free of NaN; the row with the
    # non-integral value must be NaN everywhere.
    assert not np.any(np.isnan(pmf[0, :]))
    assert np.all(np.isnan(pmf[1, :]))
    assert not np.any(np.isnan(pmf[2, :]))
|
400 |
+
|
401 |
+
|
402 |
+
def test_frozen_attributes():
    """Frozen discrete distributions expose pmf but not pdf (gh-14827)."""
    # gh-14827 reported that all frozen distributions had both pmf and pdf
    # attributes; continuous should have pdf and discrete should have pmf.
    message = "'rv_discrete_frozen' object has no attribute"
    with pytest.raises(AttributeError, match=message):
        stats.binom(10, 0.5).pdf
    with pytest.raises(AttributeError, match=message):
        stats.binom(10, 0.5).logpdf
    # Even if someone attaches a `pdf` attribute to the distribution object,
    # freezing must still produce an rv_discrete_frozen instance.
    stats.binom.pdf = "herring"
    frozen_binom = stats.binom(10, 0.5)
    assert isinstance(frozen_binom, rv_discrete_frozen)
    # Undo the monkeypatch so other tests see a pristine stats.binom.
    delattr(stats.binom, 'pdf')
|
414 |
+
|
415 |
+
|
416 |
+
@pytest.mark.parametrize('distname, shapes', distdiscrete)
def test_interval(distname, shapes):
    """interval(1) extends one step left of the support (gh-11026).

    gh-11026 reported that `interval` returns incorrect values when
    `confidence=1`.  The values were not incorrect, but it was not intuitive
    that the left end of the interval should extend beyond the support of
    the distribution.  Confirm that this is the behavior for all
    distributions.
    """
    dist = getattr(stats, distname) if isinstance(distname, str) else distname
    lo, hi = dist.support(*shapes)
    expected = (lo - 1, hi)
    npt.assert_equal(dist.ppf([0, 1], *shapes), expected)
    npt.assert_equal(dist.isf([1, 0], *shapes), expected)
    npt.assert_equal(dist.interval(1, *shapes), expected)
|
430 |
+
|
431 |
+
|
432 |
+
@pytest.mark.xfail_on_32bit("Sensible to machine precision")
def test_rv_sample():
    """Thoroughly test rv_sample and check that gh-3758 is resolved."""
    # Generate a random discrete distribution with 10 support points.
    rng = np.random.default_rng(98430143469)
    xk = np.sort(rng.random(10) * 10)
    pk = rng.random(10)
    pk /= np.sum(pk)
    dist = stats.rv_discrete(values=(xk, pk))

    # Generate points to the left and right of xk (midpoints between
    # consecutive support points).
    xk_left = (np.array([0] + xk[:-1].tolist()) + xk)/2
    xk_right = (np.array(xk[1:].tolist() + [xk[-1]+1]) + xk)/2

    # Generate points to the left and right of cdf values, analogously.
    cdf2 = np.cumsum(pk)
    cdf2_left = (np.array([0] + cdf2[:-1].tolist()) + cdf2)/2
    cdf2_right = (np.array(cdf2[1:].tolist() + [1]) + cdf2)/2

    # support - leftmost and rightmost xk
    a, b = dist.support()
    assert_allclose(a, xk[0])
    assert_allclose(b, xk[-1])

    # pmf - supported only on the xk
    assert_allclose(dist.pmf(xk), pk)
    assert_allclose(dist.pmf(xk_right), 0)
    assert_allclose(dist.pmf(xk_left), 0)

    # logpmf is log of the pmf; log(0) = -np.inf
    with np.errstate(divide='ignore'):
        assert_allclose(dist.logpmf(xk), np.log(pk))
        assert_allclose(dist.logpmf(xk_right), -np.inf)
        assert_allclose(dist.logpmf(xk_left), -np.inf)

    # cdf - the cumulative sum of the pmf; constant between support points
    assert_allclose(dist.cdf(xk), cdf2)
    assert_allclose(dist.cdf(xk_right), cdf2)
    assert_allclose(dist.cdf(xk_left), [0]+cdf2[:-1].tolist())

    # logcdf is log of the cdf
    with np.errstate(divide='ignore'):
        assert_allclose(dist.logcdf(xk), np.log(dist.cdf(xk)),
                        atol=1e-15)
        assert_allclose(dist.logcdf(xk_right), np.log(dist.cdf(xk_right)),
                        atol=1e-15)
        assert_allclose(dist.logcdf(xk_left), np.log(dist.cdf(xk_left)),
                        atol=1e-15)

    # sf is 1-cdf
    assert_allclose(dist.sf(xk), 1-dist.cdf(xk))
    assert_allclose(dist.sf(xk_right), 1-dist.cdf(xk_right))
    assert_allclose(dist.sf(xk_left), 1-dist.cdf(xk_left))

    # logsf is log of the sf
    with np.errstate(divide='ignore'):
        assert_allclose(dist.logsf(xk), np.log(dist.sf(xk)),
                        atol=1e-15)
        assert_allclose(dist.logsf(xk_right), np.log(dist.sf(xk_right)),
                        atol=1e-15)
        assert_allclose(dist.logsf(xk_left), np.log(dist.sf(xk_left)),
                        atol=1e-15)

    # ppf - inverse of the stepwise cdf; ppf(0) is one below the support
    assert_allclose(dist.ppf(cdf2), xk)
    assert_allclose(dist.ppf(cdf2_left), xk)
    assert_allclose(dist.ppf(cdf2_right)[:-1], xk[1:])
    assert_allclose(dist.ppf(0), a - 1)
    assert_allclose(dist.ppf(1), b)

    # isf - inverse of the sf; mirrors ppf
    sf2 = dist.sf(xk)
    assert_allclose(dist.isf(sf2), xk)
    assert_allclose(dist.isf(1-cdf2_left), dist.ppf(cdf2_left))
    assert_allclose(dist.isf(1-cdf2_right), dist.ppf(cdf2_right))
    assert_allclose(dist.isf(0), b)
    assert_allclose(dist.isf(1), a - 1)

    # interval is (ppf(alpha/2), isf(alpha/2))
    ps = np.linspace(0.01, 0.99, 10)
    int2 = dist.ppf(ps/2), dist.isf(ps/2)
    assert_allclose(dist.interval(1-ps), int2)
    assert_allclose(dist.interval(0), dist.median())
    assert_allclose(dist.interval(1), (a-1, b))

    # median is simply ppf(0.5)
    med2 = dist.ppf(0.5)
    assert_allclose(dist.median(), med2)

    # all four stats (mean, var, skew, and kurtosis) from the definitions
    mean2 = np.sum(xk*pk)
    var2 = np.sum((xk - mean2)**2 * pk)
    skew2 = np.sum((xk - mean2)**3 * pk) / var2**(3/2)
    kurt2 = np.sum((xk - mean2)**4 * pk) / var2**2 - 3
    assert_allclose(dist.mean(), mean2)
    assert_allclose(dist.std(), np.sqrt(var2))
    assert_allclose(dist.var(), var2)
    assert_allclose(dist.stats(moments='mvsk'), (mean2, var2, skew2, kurt2))

    # noncentral moment against definition
    mom3 = np.sum((xk**3) * pk)
    assert_allclose(dist.moment(3), mom3)

    # expect - check against moments
    assert_allclose(dist.expect(lambda x: 1), 1)
    assert_allclose(dist.expect(), mean2)
    assert_allclose(dist.expect(lambda x: x**3), mom3)

    # entropy is the negative of the expected value of log(p)
    with np.errstate(divide='ignore'):
        assert_allclose(-dist.expect(lambda x: dist.logpmf(x)), dist.entropy())

    # RVS is just ppf of uniform random variates; reseed so both draws see
    # the same uniform stream.
    rng = np.random.default_rng(98430143469)
    rvs = dist.rvs(size=100, random_state=rng)
    rng = np.random.default_rng(98430143469)
    rvs0 = dist.ppf(rng.random(size=100))
    assert_allclose(rvs, rvs0)
|