| metadata (dict) | text (string, length 0 to 40.6M) | id (string, length 14 to 255) |
| --- | --- | --- |
{
"filename": "conftest.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/doc/conftest.py",
"type": "Python"
}
|
"""
Pytest configuration and fixtures for the Numpy test suite.
"""
import pytest
import numpy
import matplotlib
import doctest
matplotlib.use('agg', force=True)
# Ignore matplotlib output such as `<matplotlib.image.AxesImage at
# 0x7f956908c280>`. doctest monkeypatching inspired by
# https://github.com/wooyek/pytest-doctest-ellipsis-markers (MIT license)
OutputChecker = doctest.OutputChecker
empty_line_markers = ['<matplotlib.', '<mpl_toolkits.mplot3d.']
class SkipMatplotlibOutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
for marker in empty_line_markers:
if marker in got:
got = ''
break
return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = SkipMatplotlibOutputChecker
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
numpy.random.seed(1)
doctest_namespace['np'] = numpy
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@[email protected]@.PATH_END.py
|
{
"filename": "_reversescale.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterpolargl/marker/line/_reversescale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name="reversescale",
parent_name="scatterpolargl.marker.line",
**kwargs,
):
super(ReversescaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@scatterpolargl@marker@line@[email protected]_END.py
|
{
"filename": "waveforms_start.ipynb",
"repo_name": "gwastro/PyCBC-Tutorials",
"repo_path": "PyCBC-Tutorials_extracted/PyCBC-Tutorials-master/examples/waveforms_start.ipynb",
"type": "Jupyter Notebook"
}
|
### Generate the gravitational-wave waveform for a binary merger ###
```python
# Install the software we need
import sys
!{sys.executable} -m pip install pycbc ligo-common --no-cache-dir
```
Requirement already satisfied: pycbc in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (1.13.5)
Requirement already satisfied: lalsuite in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (6.53)
Requirement already satisfied: numpy<1.15.3,>=1.13.0 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (1.15.2)
Requirement already satisfied: Mako>=1.0.1 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (1.0.8)
Requirement already satisfied: cython in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (0.28.5)
Requirement already satisfied: decorator>=3.4.2 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (4.3.0)
Requirement already satisfied: scipy>=0.16.0 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (1.1.0)
Requirement already satisfied: matplotlib>=1.5.1 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (2.2.3)
Requirement already satisfied: pillow in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (5.3.0)
Requirement already satisfied: h5py>=2.5 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (2.8.0)
Requirement already satisfied: jinja2 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (2.10)
Requirement already satisfied: astropy<3.0.0,>=2.0.3 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (2.0.8)
Requirement already satisfied: mpld3>=0.3 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (0.3)
Requirement already satisfied: lscsoft-glue>=1.59.3 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (2.0.0)
Requirement already satisfied: kombine>=0.8.2 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (0.8.3)
Requirement already satisfied: emcee==2.2.1 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (2.2.1)
Requirement already satisfied: requests>=1.2.1 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (2.20.1)
Requirement already satisfied: beautifulsoup4>=4.6.0 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (4.6.3)
Requirement already satisfied: six>=1.10.0 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (1.11.0)
Requirement already satisfied: ligo-segments in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (1.2.0)
Requirement already satisfied: weave>=0.16.0 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pycbc) (0.17.0)
Requirement already satisfied: python-dateutil in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from lalsuite) (2.7.5)
Requirement already satisfied: MarkupSafe>=0.9.2 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from Mako>=1.0.1->pycbc) (1.1.0)
Requirement already satisfied: cycler>=0.10 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from matplotlib>=1.5.1->pycbc) (0.10.0)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from matplotlib>=1.5.1->pycbc) (2.3.0)
Requirement already satisfied: pytz in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from matplotlib>=1.5.1->pycbc) (2018.7)
Requirement already satisfied: kiwisolver>=1.0.1 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from matplotlib>=1.5.1->pycbc) (1.0.1)
Requirement already satisfied: backports.functools_lru_cache in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from matplotlib>=1.5.1->pycbc) (1.5)
Requirement already satisfied: subprocess32 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from matplotlib>=1.5.1->pycbc) (3.5.3)
Requirement already satisfied: pytest>=2.8 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from astropy<3.0.0,>=2.0.3->pycbc) (3.8.0)
Requirement already satisfied: pyOpenSSL in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from lscsoft-glue>=1.59.3->pycbc) (18.0.0)
Requirement already satisfied: idna<2.8,>=2.5 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from requests>=1.2.1->pycbc) (2.7)
Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from requests>=1.2.1->pycbc) (3.0.4)
Requirement already satisfied: urllib3<1.25,>=1.21.1 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from requests>=1.2.1->pycbc) (1.23)
Requirement already satisfied: certifi>=2017.4.17 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from requests>=1.2.1->pycbc) (2018.10.15)
Requirement already satisfied: setuptools in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from kiwisolver>=1.0.1->matplotlib>=1.5.1->pycbc) (40.8.0)
Requirement already satisfied: py>=1.5.0 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pytest>=2.8->astropy<3.0.0,>=2.0.3->pycbc) (1.6.0)
Requirement already satisfied: attrs>=17.4.0 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pytest>=2.8->astropy<3.0.0,>=2.0.3->pycbc) (18.2.0)
Requirement already satisfied: more-itertools>=4.0.0 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pytest>=2.8->astropy<3.0.0,>=2.0.3->pycbc) (4.3.0)
Requirement already satisfied: atomicwrites>=1.0 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pytest>=2.8->astropy<3.0.0,>=2.0.3->pycbc) (1.2.1)
Requirement already satisfied: pluggy>=0.7 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pytest>=2.8->astropy<3.0.0,>=2.0.3->pycbc) (0.7.1)
Requirement already satisfied: funcsigs in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pytest>=2.8->astropy<3.0.0,>=2.0.3->pycbc) (1.0.2)
Requirement already satisfied: pathlib2>=2.2.0 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pytest>=2.8->astropy<3.0.0,>=2.0.3->pycbc) (2.3.2)
Requirement already satisfied: cryptography>=2.2.1 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pyOpenSSL->lscsoft-glue>=1.59.3->pycbc) (2.3.1)
Requirement already satisfied: scandir in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from pathlib2>=2.2.0->pytest>=2.8->astropy<3.0.0,>=2.0.3->pycbc) (1.9.0)
Requirement already satisfied: asn1crypto>=0.21.0 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from cryptography>=2.2.1->pyOpenSSL->lscsoft-glue>=1.59.3->pycbc) (0.24.0)
Requirement already satisfied: cffi!=1.11.3,>=1.7 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from cryptography>=2.2.1->pyOpenSSL->lscsoft-glue>=1.59.3->pycbc) (1.11.5)
Requirement already satisfied: enum34 in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from cryptography>=2.2.1->pyOpenSSL->lscsoft-glue>=1.59.3->pycbc) (1.1.6)
Requirement already satisfied: ipaddress in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from cryptography>=2.2.1->pyOpenSSL->lscsoft-glue>=1.59.3->pycbc) (1.0.22)
Requirement already satisfied: pycparser in /home/nbuser/anaconda2_501/lib/python2.7/site-packages (from cffi!=1.11.3,>=1.7->cryptography>=2.2.1->pyOpenSSL->lscsoft-glue>=1.59.3->pycbc) (2.19)
```python
import pylab
from pycbc.waveform import get_td_waveform
# The masses of the component objects, in solar masses
m1 = 20
m2 = 20
# Start the waveform from 20 Hz
f_lower = 20
# The model of the gravitational-wave signal we are using
approximant = "SEOBNRv4"
# The time between samples that we want
delta_t = 1.0 / 4096
# Distance in megaparsecs (1 parsec is about 3.26 light-years)
# This is the luminosity distance; it ignores redshift due to the expansion of the universe
distance = 100
# Generate the waveform.
# Like electromagnetic radiation, gravitational waves have two
# polarizations. We call them the "plus" polarization and the
# "cross" polarization.
hp, hc = get_td_waveform(approximant=approximant,
mass1=m1, mass2=m2,
delta_t=delta_t, f_lower=f_lower,
distance=distance)
pylab.figure(1)
pylab.plot(hp.sample_times, hp, label='Plus polarization')
pylab.legend()
pylab.xlabel('Time (s)')
pylab.ylabel('Strain')
pylab.show()
# The plot below shows the strain for a source located directly
# above a detector. We will show in a later tutorial how this maps
# to non-optimal sky locations and real detector antenna patterns.
```
<Figure size 640x480 with 1 Axes>
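The `approximant` argument above selects the waveform model. As a side note (not part of the original notebook), PyCBC can list the time-domain models it provides via `pycbc.waveform.td_approximants`; the cell below is a minimal sketch assuming that function is available in your PyCBC install.
```python
from pycbc.waveform import td_approximants

# Print the names of the time-domain waveform approximants PyCBC provides.
print(sorted(td_approximants()))
```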
```python
# Let's zoom in on the time that the binary merges
pylab.figure(2)
hp_merge = hp[len(hp)-500:]
pylab.plot(hp_merge.sample_times, hp_merge, label='Plus polarization')
pylab.legend()
pylab.xlabel('Time (s)')
pylab.ylabel('Strain')
pylab.show()
# You can see the signal as it evolves from the 'inspiral'
# stage to the 'merger' and finally when it 'rings down' (like a bell!)
```

```python
# Let's zoom in on the time that the binary merges.
# Below we also overlay the orthogonal "cross" polarization of the
# gravitational wave.
pylab.figure(3)
hc_merge = hc[len(hc)-500:]
pylab.plot(hp_merge.sample_times, hp_merge, label="Plus polarization")
pylab.plot(hc_merge.sample_times, hc_merge, label='Cross polarization')
pylab.legend()
pylab.xlabel('Time (s)')
pylab.ylabel('Strain')
pylab.show()
```

```python
```
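As an optional extra (not part of the original tutorial), the sketch below assumes the `hp` time series generated earlier and uses the peak of the plus-polarization strain as a rough estimate of the merger time; `sample_times` and `duration` are attributes of PyCBC's `TimeSeries`.
```python
import numpy as np

# The strain amplitude peaks near the merger, so the sample with the
# largest |h_plus| gives a rough estimate of the merger time.
peak_index = np.argmax(np.abs(np.asarray(hp)))
print("Approximate merger time: %.4f s" % hp.sample_times[peak_index])
print("Total waveform duration: %.2f s" % hp.duration)
```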
|
gwastroREPO_NAMEPyCBC-TutorialsPATH_START.@PyCBC-Tutorials_extracted@PyCBC-Tutorials-master@examples@[email protected]_END.py
|
{
"filename": "multiclass.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scikit-learn/py2/sklearn/utils/multiclass.py",
"type": "Python"
}
|
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True`` if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def check_classification_targets(y):
"""Ensure that target y is of a non-regression type.
Only the following target types (as defined in type_of_target) are allowed:
'binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences'
Parameters
----------
y : array-like
"""
y_type = type_of_target(y)
if y_type not in ['binary', 'multiclass', 'multiclass-multioutput',
'multilabel-indicator', 'multilabel-sequences']:
raise ValueError("Unknown label type: %r" % y_type)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
def _ovr_decision_function(predictions, confidences, n_classes):
"""Compute a continuous, tie-breaking ovr decision function.
It is important to include a continuous value, not only votes,
to make computing AUC or calibration meaningful.
Parameters
----------
predictions : array-like, shape (n_samples, n_classifiers)
Predicted classes for each binary classifier.
confidences : array-like, shape (n_samples, n_classifiers)
Decision functions or predicted probabilities for positive class
for each binary classifier.
n_classes : int
Number of classes. n_classifiers must be
``n_classes * (n_classes - 1) / 2``
"""
n_samples = predictions.shape[0]
votes = np.zeros((n_samples, n_classes))
sum_of_confidences = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
sum_of_confidences[:, i] -= confidences[:, k]
sum_of_confidences[:, j] += confidences[:, k]
votes[predictions[:, k] == 0, i] += 1
votes[predictions[:, k] == 1, j] += 1
k += 1
max_confidences = sum_of_confidences.max()
min_confidences = sum_of_confidences.min()
if max_confidences == min_confidences:
return votes
# Scale the sum_of_confidences to (-0.5, 0.5) and add it to the votes.
# The motivation is to use confidence levels as a way to break ties in
# the votes without switching any decision made based on a difference
# of 1 vote.
eps = np.finfo(sum_of_confidences.dtype).eps
max_abs_confidence = max(abs(max_confidences), abs(min_confidences))
scale = (0.5 - eps) / max_abs_confidence
return votes + sum_of_confidences * scale
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scikit-learn@py2@sklearn@[email protected]@.PATH_END.py
|
{
"filename": "test_decode.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/simplejson/py3/simplejson/tests/test_decode.py",
"type": "Python"
}
|
from __future__ import absolute_import
import decimal
from unittest import TestCase
import sys
import simplejson as json
from simplejson.compat import StringIO, b, binary_type
from simplejson import OrderedDict
class MisbehavingBytesSubtype(binary_type):
def decode(self, encoding=None):
return "bad decode"
def __str__(self):
return "bad __str__"
def __bytes__(self):
return b("bad __bytes__")
class TestDecode(TestCase):
if not hasattr(TestCase, 'assertIs'):
def assertIs(self, a, b):
self.assertTrue(a is b, '%r is %r' % (a, b))
def test_decimal(self):
rval = json.loads('1.1', parse_float=decimal.Decimal)
self.assertTrue(isinstance(rval, decimal.Decimal))
self.assertEqual(rval, decimal.Decimal('1.1'))
def test_float(self):
rval = json.loads('1', parse_int=float)
self.assertTrue(isinstance(rval, float))
self.assertEqual(rval, 1.0)
def test_decoder_optimizations(self):
# Several optimizations were made that skip over calls to
# the whitespace regex, so this test is designed to try and
# exercise the uncommon cases. The array cases are already covered.
rval = json.loads('{ "key" : "value" , "k":"v" }')
self.assertEqual(rval, {"key":"value", "k":"v"})
def test_empty_objects(self):
s = '{}'
self.assertEqual(json.loads(s), eval(s))
s = '[]'
self.assertEqual(json.loads(s), eval(s))
s = '""'
self.assertEqual(json.loads(s), eval(s))
def test_object_pairs_hook(self):
s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
p = [("xkd", 1), ("kcw", 2), ("art", 3), ("hxm", 4),
("qrt", 5), ("pad", 6), ("hoy", 7)]
self.assertEqual(json.loads(s), eval(s))
self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p)
self.assertEqual(json.load(StringIO(s),
object_pairs_hook=lambda x: x), p)
od = json.loads(s, object_pairs_hook=OrderedDict)
self.assertEqual(od, OrderedDict(p))
self.assertEqual(type(od), OrderedDict)
# the object_pairs_hook takes priority over the object_hook
self.assertEqual(json.loads(s,
object_pairs_hook=OrderedDict,
object_hook=lambda x: None),
OrderedDict(p))
def check_keys_reuse(self, source, loads):
rval = loads(source)
(a, b), (c, d) = sorted(rval[0]), sorted(rval[1])
self.assertIs(a, c)
self.assertIs(b, d)
def test_keys_reuse_str(self):
s = u'[{"a_key": 1, "b_\xe9": 2}, {"a_key": 3, "b_\xe9": 4}]'.encode('utf8')
self.check_keys_reuse(s, json.loads)
def test_keys_reuse_unicode(self):
s = u'[{"a_key": 1, "b_\xe9": 2}, {"a_key": 3, "b_\xe9": 4}]'
self.check_keys_reuse(s, json.loads)
def test_empty_strings(self):
self.assertEqual(json.loads('""'), "")
self.assertEqual(json.loads(u'""'), u"")
self.assertEqual(json.loads('[""]'), [""])
self.assertEqual(json.loads(u'[""]'), [u""])
def test_raw_decode(self):
cls = json.decoder.JSONDecoder
self.assertEqual(
({'a': {}}, 9),
cls().raw_decode("{\"a\": {}}"))
# http://code.google.com/p/simplejson/issues/detail?id=85
self.assertEqual(
({'a': {}}, 9),
cls(object_pairs_hook=dict).raw_decode("{\"a\": {}}"))
# https://github.com/simplejson/simplejson/pull/38
self.assertEqual(
({'a': {}}, 11),
cls().raw_decode(" \n{\"a\": {}}"))
def test_bytes_decode(self):
cls = json.decoder.JSONDecoder
data = b('"\xe2\x82\xac"')
self.assertEqual(cls().decode(data), u'\u20ac')
self.assertEqual(cls(encoding='latin1').decode(data), u'\xe2\x82\xac')
self.assertEqual(cls(encoding=None).decode(data), u'\u20ac')
data = MisbehavingBytesSubtype(b('"\xe2\x82\xac"'))
self.assertEqual(cls().decode(data), u'\u20ac')
self.assertEqual(cls(encoding='latin1').decode(data), u'\xe2\x82\xac')
self.assertEqual(cls(encoding=None).decode(data), u'\u20ac')
def test_bounds_checking(self):
# https://github.com/simplejson/simplejson/issues/98
j = json.decoder.JSONDecoder()
for i in [4, 5, 6, -1, -2, -3, -4, -5, -6]:
self.assertRaises(ValueError, j.scan_once, '1234', i)
self.assertRaises(ValueError, j.raw_decode, '1234', i)
x, y = sorted(['128931233', '472389423'], key=id)
diff = id(x) - id(y)
self.assertRaises(ValueError, j.scan_once, y, diff)
self.assertRaises(ValueError, j.raw_decode, y, i)
def test_bounded_int(self):
# SJ-PT-23-03, limit quadratic number parsing per Python 3.11
max_str_digits = getattr(sys, 'get_int_max_str_digits', lambda: 4300)()
s = '1' + '0' * (max_str_digits - 1)
self.assertEqual(json.loads(s), int(s))
self.assertRaises(ValueError, json.loads, s + '0')
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@simplejson@py3@simplejson@tests@[email protected]_END.py
|
{
"filename": "sampler.py",
"repo_name": "JulianBMunoz/21cmvFAST",
"repo_path": "21cmvFAST_extracted/21cmvFAST-master/public_21CMvFAST_MC/Programs/CosmoHammer_21CMMC/emcee/sampler.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The base sampler class implementing various helpful functions.
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["Sampler"]
import numpy as np
try:
import acor
acor = acor
except ImportError:
acor = None
class Sampler(object):
"""
An abstract sampler object that implements various helper functions
:param dim:
The number of dimensions in the parameter space.
:param lnpostfn:
A function that takes a vector in the parameter space as input and
returns the natural logarithm of the posterior probability for that
position.
:param args: (optional)
A list of extra arguments for ``lnpostfn``. ``lnpostfn`` will be
called with the sequence ``lnpostfn(p, *args)``.
"""
def __init__(self, dim, lnprobfn, args=[]):
self.dim = dim
self.lnprobfn = lnprobfn
self.args = args
# This is a random number generator that we can easily set the state
# of without affecting the numpy-wide generator
self._random = np.random.mtrand.RandomState()
self.reset()
@property
def random_state(self):
"""
The state of the internal random number generator. In practice, it's
the result of calling ``get_state()`` on a
``numpy.random.mtrand.RandomState`` object. You can try to set this
property but be warned that if you do this and it fails, it will do
so silently.
"""
return self._random.get_state()
@random_state.setter # NOQA
def random_state(self, state):
"""
Try to set the state of the random number generator but fail silently
if it doesn't work. Don't say I didn't warn you...
"""
try:
self._random.set_state(state)
except:
pass
@property
def acceptance_fraction(self):
"""
The fraction of proposed steps that were accepted.
"""
return self.naccepted / self.iterations
@property
def chain(self):
"""
A pointer to the Markov chain.
"""
return self._chain
@property
def flatchain(self):
"""
Alias of ``chain`` provided for compatibility.
"""
return self._chain
@property
def lnprobability(self):
"""
A list of the log-probability values associated with each step in
the chain.
"""
return self._lnprob
@property
def acor(self):
"""
The autocorrelation time of each parameter in the chain (length:
``dim``) as estimated by the ``acor`` module.
"""
if acor is None:
raise ImportError("You need to install acor: "
"https://github.com/dfm/acor")
return acor.acor(self._chain.T)[0]
def get_lnprob(self, p):
"""Return the log-probability at the given position."""
return self.lnprobfn(p, *self.args)
def reset(self):
"""
Clear ``chain``, ``lnprobability`` and the bookkeeping parameters.
"""
self.iterations = 0
self.naccepted = 0
def clear_chain(self):
"""An alias for :func:`reset` kept for backwards compatibility."""
return self.reset()
def sample(self, *args, **kwargs):
raise NotImplementedError("The sampling routine must be implemented "
"by subclasses")
def run_mcmc(self, pos0, N, rstate0=None, lnprob0=None, **kwargs):
"""
Iterate :func:`sample` for ``N`` iterations and return the result.
:param pos0:
The initial position vector.
:param N:
The number of steps to run.
:param lnprob0: (optional)
The log posterior probability at position ``pos0``. If ``lnprob0``
is not provided, the initial value is calculated.
:param rstate0: (optional)
The state of the random number generator. See the
:func:`random_state` property for details.
:param kwargs: (optional)
Other parameters that are directly passed to :func:`sample`.
"""
for results in self.sample(pos0, lnprob0, rstate0, iterations=N,
**kwargs):
pass
return results
|
JulianBMunozREPO_NAME21cmvFASTPATH_START.@21cmvFAST_extracted@21cmvFAST-master@public_21CMvFAST_MC@Programs@CosmoHammer_21CMMC@[email protected]@.PATH_END.py
|
{
"filename": "config_tests.py",
"repo_name": "swagnercarena/paltas",
"repo_path": "paltas_extracted/paltas-main/test/config_tests.py",
"type": "Python"
}
|
import unittest
from paltas.Utils import cosmology_utils, hubble_utils
from paltas.Configs import config_handler
import paltas
import numpy as np
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.PointSource.point_source import PointSource
from scipy.signal import fftconvolve
from paltas.Sources.cosmos import COSMOSIncludeCatalog
from paltas.MainDeflector.simple_deflectors import PEMDShear
from paltas.Sources.sersic import SingleSersicSource
from paltas.PointSource.single_point_source import SinglePointSource
# Define the cosmos path
cosmos_folder = 'test_data/cosmos/'
class ConfigUtilsTests(unittest.TestCase):
def setUp(self):
# Fix the random seed to be able to have reliable tests
np.random.seed(10)
self.c = config_handler.ConfigHandler('test_data/config_dict.py')
self.c_all = config_handler.ConfigHandler('test_data/config_dict_all.py')
def test_init(self):
# Test that the class was initialized correctly
# Start by checking some of the global parameters
self.assertEqual(self.c.kwargs_numerics['supersampling_factor'],1)
self.assertEqual(self.c.numpix,64)
self.assertFalse(self.c.do_drizzle)
self.assertEqual(self.c.mag_cut,1.0)
self.assertEqual(self.c.add_noise,True)
# Now check some of the classes and make sure they match
# our expectations from the config file.
self.assertEqual(self.c.point_source_class,None)
self.assertEqual(self.c.lens_light_class,None)
self.assertTrue(isinstance(self.c.los_class,
paltas.Substructure.los_dg19.LOSDG19))
self.assertTrue(isinstance(self.c.main_deflector_class,
paltas.MainDeflector.simple_deflectors.PEMDShear))
self.assertTrue(isinstance(self.c.subhalo_class,
paltas.Substructure.subhalos_dg19.SubhalosDG19))
self.assertTrue(isinstance(self.c.source_class,
paltas.Sources.cosmos.COSMOSCatalog))
# Check the values that should be different for config_dict_all
self.assertEqual(self.c_all.mag_cut,None)
self.assertEqual(self.c_all.add_noise,False)
self.assertTrue(isinstance(self.c_all.lens_light_class,
paltas.Sources.sersic.SingleSersicSource))
self.assertTrue(isinstance(self.c_all.point_source_class,
paltas.PointSource.single_point_source.SinglePointSource))
def test_get_lenstronomy_models_kwargs(self):
# Test that the kwargs returned are consistent with what was provided
# in the configuration file.
kwargs_model, kwargs_params = self.c.get_lenstronomy_models_kwargs(
new_sample=False)
# First check the lens lists.
self.assertGreater(kwargs_model['lens_model_list'].count('TNFW'),0)
self.assertGreater(kwargs_model['lens_model_list'].count('NFW'),0)
self.assertEqual(kwargs_model['lens_model_list'].count('INTERPOL'),7)
self.assertEqual(kwargs_model['lens_model_list'].count('EPL_NUMBA'),1)
self.assertEqual(kwargs_model['lens_model_list'].count('SHEAR'),1)
self.assertEqual(len(kwargs_model['lens_model_list']),
len(kwargs_params['kwargs_lens']))
self.assertEqual(len(kwargs_model['lens_model_list']),
len(kwargs_model['lens_redshift_list']))
self.assertTrue(kwargs_model['multi_plane'])
# Now check the source lists
self.assertListEqual(kwargs_model['source_light_model_list'],['INTERPOL'])
self.assertTrue(isinstance(
kwargs_params['kwargs_source'][0]['image'],np.ndarray))
self.assertListEqual(kwargs_model['source_redshift_list'],[1.5])
self.assertEqual(kwargs_model['z_source_convention'],1.5)
self.assertEqual(kwargs_model['z_source'],1.5)
# Now check that there is no lens light or point source for this
# config.
self.assertEqual(len(kwargs_model['lens_light_model_list']),0)
self.assertEqual(len(kwargs_params['kwargs_lens_light']),0)
self.assertEqual(len(kwargs_model['point_source_model_list']),0)
self.assertEqual(len(kwargs_params['kwargs_ps']),0)
# Check that if new sample is specified, that the underlying sample
# changes.
kwargs_model_new, kwargs_params_new = (
self.c.get_lenstronomy_models_kwargs(new_sample=True))
self.assertFalse(kwargs_params['kwargs_lens'][-2]['theta_E'] ==
kwargs_params_new['kwargs_lens'][-2]['theta_E'])
# Finally check that the config with a point source fills in those
# lists.
kwargs_model, kwargs_params = self.c_all.get_lenstronomy_models_kwargs(
new_sample=False)
self.assertListEqual(kwargs_model['lens_light_model_list'],
['SERSIC_ELLIPSE'])
self.assertEqual(len(kwargs_params['kwargs_lens_light']),1)
self.assertListEqual(kwargs_model['point_source_model_list'],
['SOURCE_POSITION'])
self.assertEqual(len(kwargs_params['kwargs_ps']),1)
# Check that the multiplane triggers when you have los halos
kwargs_model, kwargs_params = self.c_all.get_lenstronomy_models_kwargs()
self.assertTrue(kwargs_model['multi_plane'])
def test_get_metadata(self):
# Test that the metadata matches the sample
kwargs_model, kwargs_params = self.c.get_lenstronomy_models_kwargs(
new_sample=False)
metadata = self.c.get_metadata()
# Check that the draw caused the phi and catalog_i to be written into
# the metadata.
self.assertTrue('source_parameters_phi' in metadata)
self.assertTrue('source_parameters_catalog_i' in metadata)
# Check the value of a few parameters
self.assertEqual(metadata['main_deflector_parameters_theta_E'],
kwargs_params['kwargs_lens'][-2]['theta_E'])
self.assertEqual(metadata['main_deflector_parameters_gamma1'],
kwargs_params['kwargs_lens'][-1]['gamma1'])
self.assertEqual(metadata['cosmology_parameters_cosmology_name'],
'planck18')
self.assertEqual(metadata['subhalo_parameters_c_0'],18)
self.assertEqual(metadata['los_parameters_c_0'],18)
def test_get_sample_cosmology(self):
# Just test that this gives the correct cosmology
cosmo = self.c.get_sample_cosmology()
cosmo_comp = cosmology_utils.get_cosmology('planck18')
self.assertEqual(cosmo.H0,cosmo_comp.H0)
# Check that the astropy version works
cosmo = self.c.get_sample_cosmology(as_astropy=True)
self.assertEqual(cosmo.H0,cosmo_comp.toAstropy().H0)
def test__calculate_ps_metadata(self):
# Check that the metadata is added as expected.
# Get all of the lenstronomy parameters and models that we need
kwargs_model, kwargs_params = self.c_all.get_lenstronomy_models_kwargs(
new_sample=False)
sample = self.c_all.get_current_sample()
z_source = kwargs_model['source_redshift_list'][0]
cosmo = cosmology_utils.get_cosmology(sample['cosmology_parameters'])
lens_equation_params = sample['lens_equation_solver_parameters']
lens_model = LensModel(kwargs_model['lens_model_list'],
z_source=z_source,
lens_redshift_list=kwargs_model['lens_redshift_list'],
cosmo=cosmo.toAstropy(),multi_plane=kwargs_model['multi_plane'])
point_source_model = PointSource(
kwargs_model['point_source_model_list'],lens_model=lens_model,
save_cache=True,kwargs_lens_eqn_solver=lens_equation_params)
# Initialize empty metadata and populate it.
metadata = {}
self.c_all._calculate_ps_metadata(metadata,kwargs_params,
point_source_model,lens_model)
# Check that all the new metadata is there.
pfix = 'point_source_parameters_'
self.assertTrue(pfix+'num_images' in metadata.keys())
self.assertTrue(pfix+'x_image_0' in metadata.keys())
self.assertTrue(pfix+'y_image_1' in metadata.keys())
self.assertTrue(pfix+'x_image_3' in metadata.keys())
self.assertTrue(pfix+'y_image_3' in metadata.keys())
# Check that image magnifications are written to metadata
self.assertTrue(pfix+'magnification_0' in metadata.keys())
self.assertTrue(pfix+'magnification_3' in metadata.keys())
# Check that if num_images < 3, we get Nan for image 2 & image 3
if(metadata[pfix+'num_images'] < 3):
self.assertTrue(np.isnan(metadata[pfix+'x_image_3']))
self.assertTrue(np.isnan(metadata[pfix+'y_image_2']))
# Check that the time delay metadata is written
self.assertTrue(pfix+'time_delay_0' in metadata.keys())
self.assertTrue(pfix+'time_delay_3' in metadata.keys())
self.assertTrue(pfix+'ddt' in metadata.keys())
# Check that if kappa_ext is not defined, we get a ValueError
del sample['point_source_parameters']['kappa_ext']
with self.assertRaises(ValueError):
self.c_all._calculate_ps_metadata(metadata,kwargs_params,
point_source_model,lens_model)
def test__draw_image_standard(self):
# Test that drawing the standard image behaves as expected.
# Grab the image we want to compare to
orig_image,orig_meta = self.c.source_class.image_and_metadata(0)
orig_image = orig_image[17:-17,:]
orig_image = orig_image[:,1:]/2 + orig_image[:,:-1]/2
orig_image = orig_image[:,16:-16]
# Start with a simple configuration, a source with no lens.
self.c.lens_light_class = None
self.c.main_deflector_class = None
self.c.los_class = None
self.c.subhalo_class = None
self.c.sample['source_parameters'] = {'z_source':0.19499999,
'cosmos_folder':cosmos_folder,'max_z':None,
'minimum_size_in_pixels':None,'faintest_apparent_mag':None,
'smoothing_sigma':0.0,'random_rotation':False,
'min_flux_radius':None,'output_ab_zeropoint':25.95,
'center_x':0.0,'center_y':0.0,
'source_inclusion_list':np.array([0])}
self.c.source_class = COSMOSIncludeCatalog(
cosmology_parameters='planck18',
source_parameters=self.c.sample['source_parameters'])
self.c.add_noise = False
self.c.numpix = 200
self.c.mag_cut = None
self.c.sample['main_deflector_parameters']['z_lens'] = 0.0
self.c.sample['detector_parameters'] = {
'pixel_scale':orig_meta['pixel_width'],'ccd_gain':2.5,
'read_noise':4.0,'magnitude_zero_point':25.0,'exposure_time':5400.0,
'sky_brightness':22,'num_exposures':1,'background_noise':None}
self.c.sample['psf_parameters'] = {'psf_type':'GAUSSIAN',
'fwhm': 0.1*orig_meta['pixel_width']}
# Draw our image. This should just be the source itself
image, metadata = self.c._draw_image_standard(self.c.add_noise)
# Check that the image is just the source
np.testing.assert_almost_equal(image,orig_image)
# Repeat the same test, but now with a really big psf and demanding
# that no psf be added via the boolean input.
self.c.sample['psf_parameters']['fwhm'] = 10
apply_psf = False
image, metadata = self.c._draw_image_standard(self.c.add_noise,apply_psf)
np.testing.assert_almost_equal(image,orig_image)
self.c.sample['psf_parameters']['fwhm'] = 0.1*orig_meta['pixel_width']
# Now introduce rotations to the source and make sure that goes through
self.c.sample['source_parameters']['random_rotation'] = True
image, metadata = self.c._draw_image_standard(self.c.add_noise)
np.testing.assert_array_less(np.ones(image.shape)*1e-10,
np.abs(image-orig_image))
# Check nothing weird happened to the metadata.
self.assertListEqual(list(metadata.keys()),
list(self.c.get_metadata().keys()))
# Add noise
self.c.sample['source_parameters']['random_rotation'] = False
self.c.add_noise = True
image, metadata = self.c._draw_image_standard(self.c.add_noise)
np.testing.assert_array_less(np.ones(image.shape)*1e-10,
np.abs(image-orig_image))
# Check that the mag_cut works
self.c.add_noise = False
self.c.mag_cut = 1.2
with self.assertRaises(config_handler.MagnificationError):
image, metadata = self.c._draw_image_standard(self.c.add_noise)
# Now add a deflector and see if we get a ring
self.c.add_noise = False
self.c.sample['source_parameters']['z_source'] = 1.0
self.c.sample['main_deflector_parameters'] = {'M200':1e13,'z_lens': 0.5,
'gamma': 2.0,'theta_E': 0.0,'e1':0.1,'e2':0.1,'center_x':0.02,
'center_y':-0.03,'gamma1':0.01,'gamma2':-0.02,'ra_0':0.0,
'dec_0':0.0}
self.c.main_deflector_class = PEMDShear(
cosmology_parameters='planck18',
main_deflector_parameters=self.c.sample['main_deflector_parameters'])
# Update the main deflector after the fact to ensure that the values
# are actually being updated in the draw_image call.
self.c.sample['main_deflector_parameters']['theta_E'] = 1.0
image, metadata = self.c._draw_image_standard(self.c.add_noise)
# Check for magnification and check most light is not in center of image
self.c.source_class.k_correct_image(orig_image,orig_meta['z'],
self.c.sample['source_parameters']['z_source'])
self.assertGreater(np.sum(image),np.sum(orig_image))
self.assertGreater(np.mean(image[0:90,0:90]),
np.mean(image[90:110,90:110]))
# Now we'll turn off our main deflector but create a fake LOS
# and subhalo class that gives the same profile.
class FakeLOSClass():
def update_parameters(self,*args,**kwargs):
return
def draw_los(self,*args,**kwargs):
model_list = ['EPL_NUMBA','SHEAR']
kwargs_list = [{'gamma': 2.0,'theta_E': 1.0,'e1':0.1,'e2':0.1,
'center_x':0.02,'center_y':-0.03},
{'gamma1':0.01,'gamma2':-0.02,'ra_0':0.0, 'dec_0':0.0}]
z_list = [0.5]*2
return model_list,kwargs_list,z_list
def calculate_average_alpha(self,*args,**kwargs):
return ([],[],[])
self.c.los_class = FakeLOSClass()
self.c.main_deflector_class = None
self.c.sample['los_parameters'] = {}
los_image, metadata = self.c._draw_image_standard(self.c.add_noise)
np.testing.assert_almost_equal(image,los_image)
# Repeat the same exercise but for the subhalos
class FakeSuhaloClass():
def update_parameters(self,*args,**kwargs):
return
def draw_subhalos(self,*args,**kwargs):
model_list = ['EPL_NUMBA','SHEAR']
kwargs_list = [{'gamma': 2.0,'theta_E': 1.0,'e1':0.1,'e2':0.1,
'center_x':0.02,'center_y':-0.03},
{'gamma1':0.01,'gamma2':-0.02,'ra_0':0.0, 'dec_0':0.0}]
z_list = [0.5]*2
return model_list,kwargs_list,z_list
self.c.subhalo_class = FakeSuhaloClass()
self.c.los_class = None
self.c.sample['subhalo_parameters'] = {}
sub_image, metadata = self.c._draw_image_standard(self.c.add_noise)
np.testing.assert_almost_equal(image,sub_image)
# Generate image with deflector & lens light
self.c.sample['lens_light_parameters'] = {'z_source':0.5,'magnitude':20,
'output_ab_zeropoint':25.95,'R_sersic':1.,'n_sersic':1.2,'e1':0.,
'e2':0.,'center_x':0.0,'center_y':0.0}
self.c.lens_light_class = SingleSersicSource(
cosmology_parameters='planck18',
source_parameters=self.c.sample['lens_light_parameters'])
lens_light_image, metadata = self.c._draw_image_standard(self.c.add_noise)
# Assert sum of center with lens light > sum of center orig_image
self.assertTrue(np.sum(lens_light_image[90:110,90:110]) >
np.sum(image[90:110,90:110]))
# Add point source and validate output
self.c.sample['point_source_parameters'] = {'x_point_source':0.001,
'y_point_source':0.001,'magnitude':22,'output_ab_zeropoint':25.95,
'compute_time_delays':False}
self.c.point_source_class = SinglePointSource(
self.c.sample['point_source_parameters'])
image_ps, metadata = self.c._draw_image_standard(self.c.add_noise)
# Check that more light is added to the image
self.assertTrue(np.sum(image_ps) > np.sum(image))
def test__draw_image_drizzle(self):
# Test that drawing drizzled images works as expected.
c_drizz = config_handler.ConfigHandler('test_data/config_dict_drizz.py')
# Start with the simplest configuration, a source with nothing lensing
# the source
c_drizz.sample['source_parameters'] = {'z_source':0.19499999,
'cosmos_folder':cosmos_folder,'max_z':None,
'minimum_size_in_pixels':None,'faintest_apparent_mag':None,
'smoothing_sigma':0.0,'random_rotation':False,
'min_flux_radius':None,'output_ab_zeropoint':25.95,
'center_x':0.0,'center_y':0.0,
'source_inclusion_list':np.array([0])}
c_drizz.los_class = None
c_drizz.subhalo_class = None
c_drizz.main_deflector_class = None
c_drizz.source_class = COSMOSIncludeCatalog(
cosmology_parameters='planck18',
source_parameters=c_drizz.sample['source_parameters'])
c_drizz.numpix = 200
c_drizz.kwargs_numerics = {'supersampling_factor':1}
c_drizz.mag_cut = None
c_drizz.add_noise = False
# Grab the image we want to compare to
orig_image,orig_meta = c_drizz.source_class.image_and_metadata(0)
orig_image = orig_image[17:-17,:]
orig_image = orig_image[:,1:]/2 + orig_image[:,:-1]/2
orig_image = orig_image[:,16:-16]
# Create a fake sample from our sampler
sim_pixel_width = orig_meta['pixel_width']
c_drizz.sample['main_deflector_parameters'] = {'z_lens':0.0}
c_drizz.sample['cosmology_parameters'] = {'cosmology_name': 'planck18'}
c_drizz.sample['psf_parameters'] = {'psf_type':'GAUSSIAN',
'fwhm': 0.1*orig_meta['pixel_width']}
c_drizz.sample['detector_parameters'] = {'pixel_scale':sim_pixel_width,
'ccd_gain':1.58,'read_noise':3.0,'magnitude_zero_point':25.127,
'exposure_time':1380.0,'sky_brightness':15.83,'num_exposures':1,
'background_noise':None}
c_drizz.sample['drizzle_parameters'] = {
'supersample_pixel_scale':sim_pixel_width,
'output_pixel_scale':sim_pixel_width,'wcs_distortion':None,
'offset_pattern':[(0,0),(0.0,0),(0.0,0.0),(-0.0,-0.0)]}
# Draw our image. This should just be the source itself
image, metadata = c_drizz._draw_image_drizzle()
# Check that the image is just the source
np.testing.assert_almost_equal(image,orig_image)
# Check that the metadata is correct
self.assertEqual(metadata['detector_parameters_pixel_scale'],
sim_pixel_width)
# Make the offset pattern more realistic and change the pixel widths
c_drizz.sample['drizzle_parameters']['offset_pattern'] = [(0,0),(0.5,0),
(0.0,0.5),(0.5,0.5)]
c_drizz.sample['detector_parameters']['pixel_scale'] = 0.04
c_drizz.sample['drizzle_parameters']['supersample_pixel_scale'] = 0.02
c_drizz.sample['drizzle_parameters']['output_pixel_scale'] = 0.03
c_drizz.numpix = 128
# Check that the mag_cut works
c_drizz.add_noise = False
c_drizz.mag_cut = 1.2
with self.assertRaises(config_handler.MagnificationError):
image, metadata = c_drizz._draw_image_drizzle()
# Now add a deflector and see if we get a ring
c_drizz.sample['source_parameters']['z_source'] = 1.0
c_drizz.sample['main_deflector_parameters'] = {'M200':1e13,
'z_lens': 0.5,'gamma': 2.0,'theta_E': 1.0,'e1':0.1,'e2':0.1,
'center_x':0.02,'center_y':-0.03,'gamma1':0.01,'gamma2':-0.02,
'ra_0':0.0, 'dec_0':0.0}
c_drizz.main_deflector_class = PEMDShear(cosmology_parameters='planck18',
main_deflector_parameters=c_drizz.sample['main_deflector_parameters'])
image, metadata = c_drizz._draw_image_drizzle()
# Check for magnification and check most light is not in
# center of image
self.assertTupleEqual((170,170),image.shape)
c_drizz.source_class.k_correct_image(orig_image,orig_meta['z'],
c_drizz.sample['source_parameters']['z_source'])
self.assertGreater(np.sum(image),np.sum(orig_image))
self.assertGreater(np.mean(image[0:80,0:80]),
np.mean(image[80:90,80:90]))
# Check that setting the noise flag returns a noisy image
c_drizz.add_noise = True
los_image_noise, metadata = c_drizz._draw_image_drizzle()
np.testing.assert_array_less(np.ones(image.shape)*1e-10,
np.abs(image-los_image_noise))
def test__draw_image_drizzle_psf(self):
# Test the pixel psf behaves identically to using fftconvolve
# Set up a fairly basic situation with a source at redshift 1.0 and a
# massive main deflector at redshift 0.5.
# Test that drawing drizzled images works as expected.
c_drizz = config_handler.ConfigHandler('test_data/config_dict_drizz.py')
# Start with the simplest configuration, a source with nothing lensing
# the source
c_drizz.sample['source_parameters'] = {'z_source':1.0,
'cosmos_folder':cosmos_folder,'max_z':None,
'minimum_size_in_pixels':None,'faintest_apparent_mag':None,
'smoothing_sigma':0.0,'random_rotation':False,
'min_flux_radius':None,'output_ab_zeropoint':25.95,'center_x':0.0,
'center_y':0.0,'source_inclusion_list':np.array([0])}
c_drizz.los_class = None
c_drizz.subhalo_class = None
c_drizz.sample['main_deflector_parameters'] = {'M200':1e13,
'z_lens': 0.5,'gamma': 2.0,'theta_E': 1.0,'e1':0.1,'e2':0.1,
'center_x':0.02,'center_y':-0.03,'gamma1':0.01,'gamma2':-0.02,
'ra_0':0.0, 'dec_0':0.0}
c_drizz.main_deflector_class = PEMDShear(
cosmology_parameters='planck18',
main_deflector_parameters=c_drizz.sample['main_deflector_parameters'])
c_drizz.source_class = COSMOSIncludeCatalog(
cosmology_parameters='planck18',
source_parameters=c_drizz.sample['source_parameters'])
c_drizz.numpix = 128
c_drizz.kwargs_numerics = {'supersampling_factor':1}
c_drizz.mag_cut = None
c_drizz.add_noise = False
# Create a fake sample from our sampler
sim_pixel_width = 0.04
c_drizz.sample['cosmology_parameters'] = {'cosmology_name': 'planck18'}
c_drizz.sample['psf_parameters'] = {'psf_type':'NONE'}
c_drizz.sample['detector_parameters'] = {'pixel_scale':sim_pixel_width,
'ccd_gain':1.58,'read_noise':3.0,'magnitude_zero_point':25.127,
'exposure_time':1380.0,'sky_brightness':15.83,'num_exposures':1,
'background_noise':None}
c_drizz.sample['drizzle_parameters'] = {
'supersample_pixel_scale':sim_pixel_width,
'output_pixel_scale':sim_pixel_width,'wcs_distortion':None,
'offset_pattern':[(0,0),(0.0,0),(0.0,0.0),(-0.0,-0.0)],
'psf_supersample_factor':1}
# Draw our image. This should just be the lensed source without
# noise and without a psf. This will be our supersampled image.
image, metadata = c_drizz._draw_image_drizzle()
image_degrade = hubble_utils.degrade_image(image,2)
# Now generate a pixel level psf that isn't supersampled.
psf_pixel = np.zeros((63,63))
x,y = np.meshgrid(np.arange(63),np.arange(63),indexing='ij')
psf_pixel[x,y] = np.exp(-((x-31)**2+(y-31)**2))
psf_pixel /= np.sum(psf_pixel)
c_drizz.sample['psf_parameters'] = {'psf_type':'PIXEL',
'kernel_point_source': psf_pixel,
'point_source_supersampling_factor':1}
# Now generate the image again in the degraded resolution
c_drizz.kwargs_numerics = {'supersampling_factor':2}
c_drizz.numpix = 64
c_drizz.sample['detector_parameters']['pixel_scale'] = sim_pixel_width*2
c_drizz.sample['drizzle_parameters']['output_pixel_scale'] = (
sim_pixel_width*2)
image_degrade_psf, metadata = c_drizz._draw_image_drizzle()
# Compare to the scipy image
scipy_image = fftconvolve(image_degrade,psf_pixel,mode='same')
np.testing.assert_almost_equal(scipy_image,image_degrade_psf)
# Now repeat this process but doing the psf convolution at the
# supersampling scale.
c_drizz.sample['psf_parameters']['point_source_supersampling_factor'] = 2
c_drizz.sample['drizzle_parameters']['psf_supersample_factor'] = 2
image_degrade_psf, metadata = c_drizz._draw_image_drizzle()
scipy_image = hubble_utils.degrade_image(
fftconvolve(image,psf_pixel,mode='same'),2)
np.testing.assert_almost_equal(scipy_image,image_degrade_psf,
decimal=6)
# Make sure the sample detector_parameters weren't changed in place.
self.assertEqual(c_drizz.sample['detector_parameters']['pixel_scale'],
sim_pixel_width*2)
self.assertEqual(metadata['detector_parameters_pixel_scale'],
sim_pixel_width*2)
# Now just make sure we can raise some errors. First an error
# if no point_source_supersampling_factor was specified.
with self.assertRaises(ValueError):
c_drizz.sample['psf_parameters'] = {'psf_type':'PIXEL',
'kernel_point_source': psf_pixel}
image_degrade_psf, meta_values = c_drizz._draw_image_drizzle()
# Next an error if it doesn't equal the psf_supersample_factor
with self.assertRaises(ValueError):
c_drizz.sample['psf_parameters'] = {'psf_type':'PIXEL',
'kernel_point_source': psf_pixel,
'point_source_supersampling_factor':1}
image_degrade_psf, meta_values = c_drizz._draw_image_drizzle()
# Next an error if the psf_supersample_factor is larger than the scaling
# provided by the drizzle parameters.
with self.assertRaises(ValueError):
c_drizz.sample['psf_parameters'][
'point_source_supersampling_factor'] = 4
c_drizz.sample['psf_parameters'] = {'psf_type':'PIXEL',
'kernel_point_source': psf_pixel,
'point_source_supersampling_factor':4}
image_degrade_psf, meta_values = c_drizz._draw_image_drizzle()
def test_draw_image(self):
# Just test that nothing crashes.
c_drizz = config_handler.ConfigHandler('test_data/config_dict_drizz.py')
_,_ = c_drizz.draw_image(new_sample=True)
image,_ = self.c.draw_image(new_sample=True)
# Also make sure the mask radius was applied
self.assertEqual(np.sum(image[len(image)//2-2:len(image)//2+2,
len(image)//2-2:len(image)//2+2]),0.0)
# Make sure that if the sample has crazy parameters, those carry
# through.
self.c.sample['main_deflector_parameters']['theta_E'] = 0.1
self.c.mag_cut = None
image_small,metadata = self.c.draw_image(new_sample=False)
self.assertEqual(metadata['main_deflector_parameters_theta_E'],0.1)
self.assertLess(np.sum(image_small),np.sum(image))
# Make sure that with an outrageous magnification cut you get None.
self.c.mag_cut = 1e100
image,metadata = self.c.draw_image(new_sample=True)
self.assertTrue(image is None)
self.assertTrue(metadata is None)
def test_draw_image_reproducible(self):
# Test we can reproduce generated images by setting appropriate
# random seeds
c = config_handler.ConfigHandler('test_data/config_dict_drizz.py')
img_1, meta_1 = c.draw_image()
img_2, meta_2 = c.draw_image()
seed_1, seed_2 = meta_1['seed'], meta_2['seed']
assert not np.all(img_1 == img_2), "Images should be different"
# Just set the base_seed attribute manually; simpler than building
# a temporary config file
c_1 = config_handler.ConfigHandler('test_data/config_dict_drizz.py')
c_1.base_seed = seed_1
img_1a, _ = c_1.draw_image()
np.testing.assert_allclose(img_1, img_1a)
# Test this works even for an image in the middle of a training set
c_2 = config_handler.ConfigHandler('test_data/config_dict_drizz.py')
c_2.base_seed = seed_2
img_2a, _ = c_2.draw_image()
np.testing.assert_allclose(img_2, img_2a)
|
swagnercarenaREPO_NAMEpaltasPATH_START.@paltas_extracted@paltas-main@test@[email protected]_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/candlestick/decreasing/line/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="candlestick.decreasing.line", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@candlestick@decreasing@line@[email protected]_END.py
|
{
"filename": "05_Supervised_ML_1_intro.ipynb",
"repo_name": "astro-datalab/notebooks-latest",
"repo_path": "notebooks-latest_extracted/notebooks-latest-master/06_EPO/LaSerenaSchoolForDataScience/2019/05_Supervised_ML_I/05_Supervised_ML_1_intro.ipynb",
"type": "Jupyter Notebook"
}
|
```python
__author__ = 'Guillermo Damke <[email protected]>, Francisco Förster <[email protected]>, Alice Jacques <[email protected]>'
__version__ = '20210119' # yyyymmdd;
__datasets__ = ['Iris flower dataset']
__keywords__ = ['Introduction to Machine Learning', 'Supervised Machine Learning', 'La Serena School for Data Science']
```
# Introduction to Supervised Machine Learning - Basic Concepts
*In original form by Francisco Forster, Centro de Modelamiento Matemático (CMM), Universidad de Chile / Instituto Milenio de Astrofísica (MAS). Adapted for NOIRLab Astro Data Lab by Guillermo Damke and Alice Jacques.*
#### This notebook is part of the curriculum of the 2019 La Serena School for Data Science.
## Table of Contents
This notebook presents an introduction to topics in Machine Learning, in the following sections:
* [General concepts in Machine Learning](#1---General-concepts-in-Machine-Learning)
* [Supervised (and Unsupervised) Machine Learning methods](#2---Supervised-and-Unsupervised-Machine-Learning)
* [Metrics to evaluate model performance](#3---Metrics-to-evaluate-model-performance)
* [Diagnostics](#4---Diagnostics)
* [Visual representations of results](#5---Visual-representations-of-results)
# Summary
This notebook introduces several concepts and definitions that are common in Machine Learning. Practical examples of these concepts are presented in a separate notebook.
# 1 - General concepts in Machine Learning
## 1.1 - Overfitting, underfitting, and the bias-variance tradeoff
### Overfitting and Underfitting
Two important concepts in machine learning are **overfitting** and **underfitting**.
If a model represents our data too accurately (**overfitting**), it may not generalize effectively to unobserved data.
If a model represents our data too generally (**underfitting**), it may underrepresent the features of the data.
A popular solution to reduce overfitting is to add structure to the model through **regularization**, which penalizes complexity during training and thus favors simpler models, in the spirit of **[Occam's razor](https://en.wikipedia.org/wiki/Occam%27s_razor)**.
### Bias
* Quantifies the accuracy of the model averaged across the training sets (its systematic error).
### Variance
* Quantifies how sensitive the model is to small changes in the training set.
### Bias-variance tradeoff
The plot below shows the **bias-variance tradeoff**, which is a common problem in Supervised Machine Learning algorithms and is closely tied to model selection. A model with high complexity describes the training data well (low training error), but may not generalize effectively to new data (high validation error, i.e., high error when predicting on unseen data). A simpler model is not prone to overfitting the noise in the data, but it may underrepresent the features of the data (**underfitting**).

## 1.2 - Complexity, accuracy, robustness
In general, we want precise and robust models.
**Simpler models tend to be less accurate, but more robust.**
**More complex models tend to be more accurate, but less robust.**
This tension is usually expressed as the **bias-variance tradeoff** which is central to machine learning.
## 1.3 - Model selection
No one model performs uniformly better than another. One model may perform well in one data set and poorly in another.
## 1.4 - Classification vs. regression
The figure below represents two usual tasks performed with Machine Learning.
* **Classification**: refers to predicting what class or category an object belongs to, given some input data about that object. In this case, the output is a category, class, or label (i.e., a discrete variable).
* **Regression**: refers to predicting an output real value, given some input data. In this case, the output is a continuous variable.

# 2 - Supervised and Unsupervised Machine Learning
In this section, we will introduce two different learning algorithms, which are considered either as Supervised or Unsupervised Machine Learning.
## 2.1 - Predictive or *Supervised Learning*:
Learn a mapping from inputs ${\bf x}$ to outputs $y$, given a **labeled** set of input-output pairs $D=\lbrace{({\bf x_i}, y_i)\rbrace}_{i=1}^N$.
$D$ is called the **training set**.
Each training input ${\bf x_i}$ is a vector of dimension $M$, with numbers called **features**, **attributes** or **covariates**. They are usually stored in a $N \times M$ **design matrix** ${\bf X}$.
An important consideration, as mentioned above:
* When $y$ is **categorical** the problem is known as **[classification](#1.4---Classification-vs.-regression)**.
* When $y$ is **real-valued** the problem is known as **[regression](#1.4---Classification-vs.-regression)**.
### Example of a labeled training set: the "Iris flower dataset".
The **Iris flower dataset** is commonly utilized in Machine Learning tests and examples for problems in categorical classification. Because of this, the dataset is included in several Python libraries, including the Seaborn library which we will use below.
The **Iris flower dataset** includes four real-valued variables (length and width of petals and sepals) for 50 samples of each of three species of Iris (versicolor, virginica, and setosa):


#### What does this dataset look like?
Let's read the dataset and explore it with the Seaborn library:
```python
import seaborn as sns
%matplotlib inline
sns.set(style="ticks")
dfIris = sns.load_dataset("iris")
print("Design matrix shape (entries, attributes):", dfIris.shape)
print("Design matrix columns:", dfIris.columns)
```
Design matrix shape (entries, attributes): (150, 5)
Design matrix columns: Index(['sepal_length', 'sepal_width', 'petal_length', 'petal_width',
'species'],
dtype='object')
It can be seen that the dataset contains 150 entries with 5 attributes (columns).
We can view the first five entries with the `head` function:
```python
dfIris.head()
# Notice that the real-valued variables are in centimeters.
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>sepal_length</th>
<th>sepal_width</th>
<th>petal_length</th>
<th>petal_width</th>
<th>species</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>5.1</td>
<td>3.5</td>
<td>1.4</td>
<td>0.2</td>
<td>setosa</td>
</tr>
<tr>
<th>1</th>
<td>4.9</td>
<td>3.0</td>
<td>1.4</td>
<td>0.2</td>
<td>setosa</td>
</tr>
<tr>
<th>2</th>
<td>4.7</td>
<td>3.2</td>
<td>1.3</td>
<td>0.2</td>
<td>setosa</td>
</tr>
<tr>
<th>3</th>
<td>4.6</td>
<td>3.1</td>
<td>1.5</td>
<td>0.2</td>
<td>setosa</td>
</tr>
<tr>
<th>4</th>
<td>5.0</td>
<td>3.6</td>
<td>1.4</td>
<td>0.2</td>
<td>setosa</td>
</tr>
</tbody>
</table>
</div>
The function `info` prints "a concise summary" of a DataFrame:
```python
dfIris.info()
```
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 150 entries, 0 to 149
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 sepal_length 150 non-null float64
1 sepal_width 150 non-null float64
2 petal_length 150 non-null float64
3 petal_width 150 non-null float64
4 species 150 non-null object
dtypes: float64(4), object(1)
memory usage: 6.0+ KB
While the function `describe` is used to "generate descriptive statistics" of a DataFrame:
```python
dfIris.describe()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>sepal_length</th>
<th>sepal_width</th>
<th>petal_length</th>
<th>petal_width</th>
</tr>
</thead>
<tbody>
<tr>
<th>count</th>
<td>150.000000</td>
<td>150.000000</td>
<td>150.000000</td>
<td>150.000000</td>
</tr>
<tr>
<th>mean</th>
<td>5.843333</td>
<td>3.057333</td>
<td>3.758000</td>
<td>1.199333</td>
</tr>
<tr>
<th>std</th>
<td>0.828066</td>
<td>0.435866</td>
<td>1.765298</td>
<td>0.762238</td>
</tr>
<tr>
<th>min</th>
<td>4.300000</td>
<td>2.000000</td>
<td>1.000000</td>
<td>0.100000</td>
</tr>
<tr>
<th>25%</th>
<td>5.100000</td>
<td>2.800000</td>
<td>1.600000</td>
<td>0.300000</td>
</tr>
<tr>
<th>50%</th>
<td>5.800000</td>
<td>3.000000</td>
<td>4.350000</td>
<td>1.300000</td>
</tr>
<tr>
<th>75%</th>
<td>6.400000</td>
<td>3.300000</td>
<td>5.100000</td>
<td>1.800000</td>
</tr>
<tr>
<th>max</th>
<td>7.900000</td>
<td>4.400000</td>
<td>6.900000</td>
<td>2.500000</td>
</tr>
</tbody>
</table>
</div>
For a quick visual exploration of the dataset, we can use the `pairplot` function of the Seaborn library.
We will pass the `hue="species"` argument, so that the three species (labels) in the dataset are represented by different colors.
```python
sns.pairplot(dfIris, hue="species");
```

We will train a model to predict the Iris classes in Section 4 of this notebook.
In addition, some applications of Supervised Machine Learning algorithms are presented in the ["04_Intro_Machine_Learning_practical"](https://github.com/astro-datalab/notebooks-latest/blob/master/06_EPO/LaSerenaSchoolForDataScience/2019/04_Intro_Machine_Learning_practical/Intro_Machine_Learning_practical.ipynb) entry of this series.
## 2.2 - Descriptive or *Unsupervised Learning*
Only inputs are given: $D=\lbrace{{\bf x_i}\rbrace}_{i=1}^N$
The goal here is to find interesting patterns, which is sometimes called **knowledge discovery**.
The problem is not always well defined. It may not be clear what kind of pattern to look for, and there may not be an obvious metric to use (unlike supervised learning).
Some applications of Unsupervised Machine Learning algorithms are presented in the ["04_Intro_Machine_Learning_practical"](https://github.com/astro-datalab/notebooks-latest/blob/master/06_EPO/LaSerenaSchoolForDataScience/2019/04_Intro_Machine_Learning_practical/Intro_Machine_Learning_practical.ipynb) entry of this series.
## 2.3 - Reinforcement Learning
A mixture of supervised and unsupervised learning: only occasional reward or punishment signals are given (e.g., a baby learning to walk).
# 3 - Metrics to evaluate model performance
## 3.1 - Classification loss
Learning and optimization algorithms need to quantify whether the value predicted by a model agrees with the true value. The learning process involves a minimization in which a **loss function** penalizes wrong outcomes.
### Loss function and classification risk:
The most common loss function used for supervised classification is the **zero-one** loss function:
$L(y, \hat y) = \delta(y \ne \hat y)$
where $\hat y$ is the best guess value of $y$. The function is 1 if the guess differs from the true value, and 0 if the guess equals the true value.
The **classification risk** of a model is the expectation value of the loss function:
$E[L(y, \hat y)] = p(y \ne \hat y)$
For the zero-one loss function the risk is equal to the **misclassification rate** or **error rate**.
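As a minimal sketch, the zero-one loss and the corresponding misclassification rate can be computed directly from arrays of true and predicted labels (toy values):
```python
import numpy as np
y_true = np.array([0, 1, 1, 0, 1, 0]) # true classes (toy example)
y_pred = np.array([0, 1, 0, 0, 1, 1]) # predicted classes
zero_one_loss = (y_true != y_pred).astype(int) # L(y, y_hat) for each sample
error_rate = zero_one_loss.mean() # empirical classification risk
print("per-sample loss:", zero_one_loss)
print("misclassification rate:", error_rate)
```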
## 3.2 - Types of errors
Accuracy and classification risk are not necessarily good diagnostics of the quality of a model.
It is better to distinguish between two types of errors (assuming 1 is the label we are evaluating):
1. Assigning the label 1 to an object whose true class is 0 (a **false positive**)
2. Assigning the label 0 to an object whose true class is 1 (a **false negative**)

(Image from http://opendatastat.org/mnemonics/)
Additionally, correct cases can be separated as:
- Assigning the label 1 to an object whose true class is 1 is a **true positive**.
- Assigning the label 0 to an object whose true class is 0 is a **true negative**.
# 4 - Diagnostics
Applying the concepts introduced above, it is possible to define several diagnostics or metrics in Machine Learning to evaluate the goodness of a given algorithm applied to a dataset.
## 4.1 - Accuracy, contamination, recall, and precision
These four metrics are defined as:
$$\rm accuracy = \frac{\#\ correct\ labels}{total}$$
Note that this is one minus the classification risk (defined in [Section 3.1](#3.1---Classification-loss)).
$$\rm contamination\ =\ \frac{false~ positives}{true~ positives~ +~ false~ positives}$$
$$\rm recall\ =\ \frac{true~ positives}{true~ positives~ +~ false~ negatives}$$
$$\rm precision\ = 1 - contamination = \ \frac{true~ positives}{true~ positives~ +~ false~ positives}$$
Note: Sometimes, **recall** is also called **completeness**.
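With the four confusion counts in hand, these metrics are one-liners (a minimal sketch with hypothetical counts):
```python
# Hypothetical confusion counts for a binary problem.
TP, FP, FN, TN = 40, 10, 5, 45
accuracy = (TP + TN) / (TP + FP + FN + TN)
contamination = FP / (TP + FP)
recall = TP / (TP + FN) # also called completeness
precision = TP / (TP + FP) # = 1 - contamination
print(f"accuracy={accuracy:.2f}, contamination={contamination:.2f}, "
f"recall={recall:.2f}, precision={precision:.2f}")
```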
## 4.2 - Macro vs. micro averages
The definitions given above can be applied directly to a two-class problem. However, when evaluating the different diagnostics in a **multiclass problem** (i.e., non-binary classification), one has to choose between macro and micro averaging.
**Macro averaging**
Compute the diagnostic separately for every class, then take the average of the per-class values.
**Micro averaging**
Sum the counts (true positives, false positives, false negatives) over all classes first, then compute the diagnostic from those totals, without distinguishing between classes.
For example, consider the following 3-class problem:
| Label | TP | FP | FN | Precision | Recall |
| - | - | - | - | - | - |
| c1 | 3 | 2 | 7 | 0.6 | 0.3 |
| c2 | 1 | 7 | 9 | 0.12 | 0.1 |
| c3 | 2 | 5 | 6 | 0.29 | 0.25 |
| Total | 6 | 14 | 22 | | |
| Macro averaged | | | | 0.34 | 0.22 |
| Micro averaged | | | | 0.3 | 0.21 |
In this case, the value for macro precision is:
\begin{align}
\rm Macro_{precision} &= \rm average \big(precision(c1), precision(c2), precision(c3)\big) \\
& = \frac{1}{3} \times \biggl( \frac{3}{3 + 2} + \frac{1}{1 + 7} + \frac{2}{2 + 5} \biggr) = 0.34
\end{align}
And the value for micro precision is:
\begin{align}
\rm Micro_{precision} &= \rm precision(total) \\
& = \frac{6}{6 + 14} = 0.3
\end{align}
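These values can be checked with a few lines of NumPy:
```python
import numpy as np
# TP, FP, FN for classes c1, c2, c3 (from the table above)
TP = np.array([3, 1, 2])
FP = np.array([2, 7, 5])
FN = np.array([7, 9, 6])
macro_precision = np.mean(TP / (TP + FP))
macro_recall = np.mean(TP / (TP + FN))
micro_precision = TP.sum() / (TP.sum() + FP.sum())
micro_recall = TP.sum() / (TP.sum() + FN.sum())
print(f"macro precision = {macro_precision:.2f}, macro recall = {macro_recall:.2f}")
print(f"micro precision = {micro_precision:.2f}, micro recall = {micro_recall:.2f}")
```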
## 4.3 - True positive rate (TPR) and false positive rate (FPR)
These scores are defined as:
$$\rm TPR\ =\ recall\ =\ \frac{true~ positives}{true~ positives~ +~ false~ negatives}$$
$$\rm FPR\ = \ \frac{false~ positives}{false~ positives~ +~ true~ negatives}$$

(image by user Walber in Wikipedia. CC BY-SA 4.0)
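These two rates follow directly from the four counts in the figure above (a minimal sketch with hypothetical counts):
```python
# Hypothetical confusion counts for a binary classifier.
TP, FP, FN, TN = 40, 10, 5, 45
TPR = TP / (TP + FN) # recall / sensitivity
FPR = FP / (FP + TN) # false alarm rate
print(f"TPR = {TPR:.2f}, FPR = {FPR:.2f}")
```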
## 4.4 - Problems with accuracy
As introduced above, accuracy is defined as:
$$\rm accuracy\ =\ \frac{\#~ Total~ of~ correct~ predictions}{\#~ Total~ number~ of~ predictions}$$
To show why accuracy is not a very useful statistic let's consider the following example.
**Example:** A model to predict whether a person is from a given country (with a population of 37 million people):
*Simple (and wrong) model*: assuming that the world population is 7.5 billion people, predict that a person is from that country with a probability 37/7500.
$$ \rm{correct\ predictions} = (7,500,000,000 - 37,000,000) \times \bigg(1 - \frac{37}{7500}\bigg) + 37,000,000 \times \frac{37}{7500} = 7,426,365,067$$
Then, accuracy becomes:
$$\rm accuracy = \frac{7,426,365,067}{7,500,000,000} = 0.99$$
Our classifier is 99% accurate, but it is clearly too simplistic!
### Precision and recall are better statistics
Let's try precision and recall instead. First, calculate the TP, FP and FN:
True positives (TP): $37,000,000 \times \frac{37}{7500} = 182,533$
False positives (FP): $(7,500,000,000 - 37,000,000) \times \frac{37}{7500} = 36,817,467$
False negatives (FN): $37,000,000 \times \big(1 - \frac{37}{7500}\big) = 36,817,467$
Then, we evaluate **recall** and **precision**:
$$\rm recall = \frac{TP}{TP + FN} = \frac{182,533}{182,533 + 36,817,467} = 0.005$$
$$\rm precision = \frac{TP}{TP + FP} = \frac{182,533}{182,533 + 36,817,467} = 0.005$$
Our classifier has only 0.5% recall and precision!
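The same arithmetic can be reproduced in a few lines of Python:
```python
# Reproduce the toy country-classifier example.
world, country = 7_500_000_000, 37_000_000
p = 37 / 7500 # probability of predicting "from the country"
TP = country * p
FP = (world - country) * p
FN = country * (1 - p)
TN = (world - country) * (1 - p)
accuracy = (TP + TN) / world
recall = TP / (TP + FN)
precision = TP / (TP + FP)
print(f"accuracy={accuracy:.3f}, recall={recall:.3f}, precision={precision:.3f}")
```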
## 4.5 - F1 score
A simple statistic which takes into account both recall and precision is the **$\rm \bf F_1$ score**, defined as their harmonic mean:
$$\rm F_1 = 2 \times \ \frac{1}{\frac{1}{precision}\ +\ \frac{1}{recall}} = 2 \times \ \frac{precision\ \times\ recall}{precision\ +\ recall}$$
## 4.6 - F$_\beta$ score
To give more or less weight to recall vs precision, the $F_\beta$ score is used:
$$\rm F_\beta = (1 + \beta^2) \times \frac{precision\ \times\ recall}{\beta^2\ precision\ +\ recall}$$
$F_\beta$ was derived so that it measures the effectiveness of retrieval with respect to a user who attaches **$\beta$ times as much importance to recall as precision**.
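Both scores are straightforward to compute once precision and recall are known (a minimal sketch with hypothetical values; $\beta > 1$ weights recall more heavily, $\beta < 1$ weights precision more heavily):
```python
def f_beta(precision, recall, beta=1.0):
"""F_beta score; beta=1 gives the usual F1 score."""
return (1 + beta**2) * precision * recall / (beta**2 * precision + recall)
precision, recall = 0.8, 0.4 # hypothetical values
print("F1 :", f_beta(precision, recall)) # harmonic mean of the two
print("F2 :", f_beta(precision, recall, beta=2)) # recall-weighted
print("F0.5:", f_beta(precision, recall, beta=0.5)) # precision-weighted
```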
# 5 - Visual representations of results
## 5.1 - Confusion matrix
Also known as **error matrix**, it is a way to summarize results in classification problems.
The elements of the matrix correspond to the number (or fraction) of instances of an actual class which were classified as another class.
A perfect classifier has the *identity* as its normalized confusion matrix.
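In practice, a confusion matrix can be computed with scikit-learn (assuming it is installed) from the true and predicted labels; the minimal sketch below trains a simple k-nearest-neighbors classifier on the Iris data and is not the exact code used to produce the figures that follow:
```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = KNeighborsClassifier().fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Rows are the true classes, columns the predicted classes.
print(confusion_matrix(y_test, y_pred))
```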
For example, a classifier for the Iris flower dataset could yield the following results:

<br></br>
<br></br>

## 5.2 - ROC curve
The **Receiver Operating Characteristic (ROC)** curve is a visualization of the tradeoff between the true positive rate and the false positive rate of a classifier as the discrimination threshold is varied.
It plots the **True Positive Rate (TPR)** vs the **False Positive Rate (FPR)** at various thresholds.

The demo below shows how the ROC curve of a classifier is traced out as the discrimination threshold between positive and negative predictions is varied.
```python
from IPython.display import Image
Image(url="Images/roc_curve.gif")
```
<img src="Images/roc_curve.gif"/>
This demonstration is described [here](https://arogozhnikov.github.io/2015/10/05/roc-curve.html).
## 5.3 - Area under the curve (AUC) and Gini coefficient (G1)
The AUC is equal to the probability that the classifier will rank a randomly chosen positive instance higher than a randomly chosen negative one.
* A larger AUC indicates a better classification model
* A perfect classifier has AUC = 1
* A random classifier has AUC = 0.5 (note that the **no-discrimination line** is the identity)
* AUC is related to the G1, which is twice the area between the ROC and the no-discrimination line:
$\Large \rm G_1 = 2 \times AUC - 1$

The ROC AUC statistic is commonly used for model comparison.
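With scikit-learn (assuming it is installed), the ROC curve, the AUC, and the Gini coefficient follow directly from predicted scores; a minimal sketch on a toy binary problem:
```python
import numpy as np
from sklearn.metrics import roc_curve, roc_auc_score
# Toy binary labels and classifier scores (hypothetical values).
y_true = np.array([0, 0, 1, 1, 0, 1, 1, 0])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.9, 0.6, 0.55])
fpr, tpr, thresholds = roc_curve(y_true, y_score)
auc = roc_auc_score(y_true, y_score)
gini = 2 * auc - 1
print("FPR:", np.round(fpr, 2))
print("TPR:", np.round(tpr, 2))
print(f"AUC = {auc:.2f}, Gini = {gini:.2f}")
```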
## 5.4 - DET curve
An alternative to the ROC curve is the **Detection Error Tradeoff (DET)** curve.
The DET curve plots the **FNR (missed detections) vs. the FPR (false alarms)** on a non-linearly transformed axis in order to emphasize regions of low FPR and low FNR.

# In Conclusion
This has been a brief introduction to concepts in Machine Learning, with a focus on classification (Supervised Learning). Special emphasis has been placed on introducing a variety of concepts and metrics that are especially useful for evaluating Machine Learning algorithms in classification problems. Finally, we introduced some common visual representations of results, which are useful for summarizing model performance.
|
astro-datalabREPO_NAMEnotebooks-latestPATH_START.@notebooks-latest_extracted@notebooks-latest-master@06_EPO@LaSerenaSchoolForDataScience@2019@05_Supervised_ML_I@[email protected]_END.py
|
{
"filename": "SolutionGUI.py",
"repo_name": "wmpg/Supracenter",
"repo_path": "Supracenter_extracted/Supracenter-master/supra/GUI/Dialogs/SolutionGUI.py",
"type": "Python"
}
|
################################################
# Credits:
# Peter Brown - Supervisor
# Luke McFadden - General coding
# Denis Vida - Ballistic code, WMPL
# Wayne Edwards - Supracenter code
# Elizabeth Silber - Updated Supracenter code, Geminus
# Gunter Stober - Advice on atmospheric profiles
# Stack Overflow - Frequent care and support
# Western Meteor Python Group
#################################################
import os
import time
import datetime
import copy
import webbrowser
import zipfile
import pickle
from mpl_toolkits.basemap import Basemap
from netCDF4 import Dataset
from PyQt5.QtWidgets import *
from functools import partial
import sys
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
from matplotlib.colors import Normalize
import matplotlib.pyplot as plt
import numpy as np
import obspy
import scipy.signal
from scipy.fft import fft
import pyqtgraph.exporters
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import pyximport
pyximport.install(setup_args={'include_dirs':[np.get_include()]})
from supra.Fireballs.SeismicTrajectory import timeOfArrival, trajSearch, estimateSeismicTrajectoryAzimuth, plotStationsAndTrajectory, waveReleasePointWindsContour, waveReleasePointWinds
from supra.Supracenter.slowscan2 import cyscan as slowscan
from supra.Supracenter.psoSearch import psoSearch
from supra.Supracenter.fetchCopernicus import copernicusAPI
from supra.Supracenter.cyscan5 import cyscan
# from supra.Supracenter.cyscanVectors import cyscan as cyscanV
from supra.Supracenter.propegateBackwards import propegateBackwards
from supra.GUI.Dialogs.AnnoteWindow import AnnoteWindow
from supra.GUI.Dialogs.Preferences import PreferenceWindow
from supra.GUI.Dialogs.Yields import Yield
from supra.GUI.Dialogs.FragStaff import FragmentationStaff
from supra.GUI.Dialogs.TrajSpace import TrajSpace
from supra.GUI.Dialogs.AllWaveformView import AllWaveformViewer
from supra.GUI.Dialogs.TrajInterp import TrajInterpWindow
from supra.GUI.Dialogs.StationList import StationList
from supra.GUI.Dialogs.ParticleMot import ParticleMotion
from supra.GUI.Dialogs.Polmap import Polmap
from supra.GUI.Dialogs.BandpassGUI import BandpassWindow
from supra.GUI.Dialogs.ReportDialog import ReportWindow
from supra.GUI.Dialogs.RayTraceView import rtvWindowDialog
from supra.GUI.Dialogs.GLMReader import glmWindowDialog
from supra.GUI.Dialogs.RotatePol import RotatePolWindow
from supra.GUI.Dialogs.lumEffDialog import lumEffDialog
from supra.GUI.Dialogs.TauSpread2 import TauSpreadGUI
from supra.Yields.YieldFuncs import transmissionFactor
from supra.GUI.Tools.GUITools import *
from supra.GUI.Tools.Theme import theme
from supra.GUI.Tools.WidgetBuilder import *
from supra.GUI.Tools.htmlLoader import htmlBuilder
from supra.GUI.Tools.Errors import errorCodes
from supra.GUI.Tabs.SupracenterSearch import supSearch
from supra.GUI.Tabs.TrajectorySearch import trajectorySearch
from supra.Stations.Filters import *
from supra.Stations.ProcessStation import procTrace, procStream, findChn
from supra.Stations.CalcAllTimes4 import calcAllTimes
from supra.Stations.CalcAllSigs import calcAllSigs
from supra.Stations.StationObj import Polarization, AnnotationList
from wmpl.Utils.TrajConversions import datetime2JD, jd2Date
from wmpl.Utils.Earth import greatCircleDistance
from supra.Utils.AngleConv import loc2Geo, chauvenet, angle2NDE
from supra.Utils.Formatting import *
from supra.Utils.Classes import Position, Constants, Pick, RectangleItem, Color, Plane, Annote, Constants
from supra.Utils.TryObj import *
from supra.Utils.pso import pso
from supra.Files.SaveObjs import Prefs, BAMFile
from supra.Files.SaveLoad import save, load, loadSourcesIntoBam
from supra.Atmosphere.Parse import parseWeather
from supra.Atmosphere.radiosonde import downloadRadio
from supra.Geminus.geminusGUI import Geminus
from supra.Supracenter.l137 import estPressure
from supra.Atmosphere.NRLMSISE import getAtmDensity
from supra.Atmosphere.pyHWM93 import getHWM
from wmpl.Utils.TrajConversions import date2JD
from wmpl.Utils.OSTools import mkdirP
HEIGHT_SOLVER_DIV = 250
THEO = False
PEN = [(0 *255, 0.4470*255, 0.7410*255),
(0.8500*255, 0.3250*255, 0.0980*255),
(0.9290*255, 0.6940*255, 0.1250*255),
(0.4940*255, 0.1840*255, 0.5560*255),
(0.4660*255, 0.6740*255, 0.1880*255),
(0.3010*255, 0.7450*255, 0.9330*255),
(0.6350*255, 0.0780*255, 0.1840*255)]
FRAG_LOC = Position(42.0224, -77.8427, 48210)
ENERGY = None #J
consts = Constants()
# Main Window
class SolutionGUI(QMainWindow):
def __init__(self):
super().__init__()
##############################
# Load system-wide preferences
##############################
qtRectangle = self.frameGeometry()
centerPoint = QDesktopWidget().availableGeometry().center()
qtRectangle.moveCenter(centerPoint)
self.move(qtRectangle.topLeft())
self.prefs = Prefs()
try:
with open(os.path.join('supra', 'Misc', 'BAMprefs.bam'), 'rb') as f:
self.prefs = pickle.load(f)
except FileNotFoundError as e:
# Prefs file missing - use default settings
print(printMessage("status"), "Preferences file not found (Was deleted, or fresh install) - Generating a default preference file.")
with open(os.path.join('supra', 'Misc', 'BAMprefs.bam'), 'wb') as f:
pickle.dump(self.prefs, f)
self.bam = BAMFile()
self.color = Color()
self.bam.energy_measurements = []
# Initialize all of the pyqt things in the GUI
initMainGUI(self)
initMainGUICosmetic(self)
# Add widgets to the floating box
self.addIniDockWidgets()
def geminus(self):
if not hasattr(self.bam.setup, "trajectory"):
errorMessage('No trajectory found!', 2, detail="Please include a trajectory in the source tab before using Geminus!")
return None
self.geminus_gui = Geminus(self.bam, self.prefs)
self.geminus_gui.setGeometry(QRect(100, 100, 1000, 800))
self.geminus_gui.show()
def tauSpread(self):
self.tSpr = TauSpreadGUI(self.bam, self.prefs)
self.tSpr.setGeometry(QRect(100, 100, 1000, 800))
self.tSpr.show()
def fPar(self):
file_name = fileSearch(['CSV (*.csv)'], None)
#read csv
t = []
fpar = []
with open(file_name, "r+") as f:
for line in f:
a = line.split(',')
time = None
try:
time = datetime.datetime.strptime(a[0], "%Y-%m-%dT%H:%M:%S.%fZ")
fpar.append(float(a[1]))
except:
pass
if time is not None:
shift = float(self.f_shift_edits.text())
t.append((time - self.bam.setup.fireball_datetime).total_seconds() + shift)
### scale fpar
min_fpar = np.min(fpar)
max_fpar = np.max(fpar)
fpar = fpar - min_fpar
axY = self.make_picks_waveform_canvas.getAxis('left')
waveform_min, waveform_max = axY.range
fpar = fpar/max_fpar*waveform_max
# print(t, fpar)
self.fpar_waveform = pg.PlotDataItem(x=t, y=fpar, pen='r')
self.make_picks_waveform_canvas.addItem(self.fpar_waveform)
def viewToolbar(self):
# Toggles the toolbar
self.ini_dock.toggleViewAction().trigger()
def viewFullscreen(self):
# Toggles fullscreen
if self.windowState() & QtCore.Qt.WindowFullScreen:
self.showNormal()
else:
self.showFullScreen()
def quitApp(self):
# Begins quit sequence
reply = QMessageBox.question(self, 'Quit Program', 'Are you sure you want to quit?', QMessageBox.Yes, QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
qApp.quit()
else:
return None
def openGit(self):
webbrowser.open_new_tab("https://github.com/dvida/Supracenter")
def openDocs(self):
# docs are a locally stored html file
webbrowser.open_new_tab(self.doc_file)
def genReport(self):
self.gr = ReportWindow(self.bam, self.prefs)
self.gr.setGeometry(QRect(500, 400, 500, 400))
self.gr.show()
def stndownloadDialog(self):
self.sd = StationList()
self.sd.setGeometry(QRect(500, 400, 500, 400))
self.sd.show()
def preferencesDialog(self):
self.p = PreferenceWindow()
self.p.setGeometry(QRect(500, 400, 500, 400))
self.p.show()
def trajInterpDialog(self):
self.t = TrajInterpWindow(self.bam, self)
self.t.setGeometry(QRect(500, 400, 500, 400))
self.t.show()
def rtvWindow(self):
self.rtv = rtvWindowDialog(self.bam, self.prefs)
self.rtv.setGeometry(QRect(100, 100, 1200, 700))
self.rtv.show()
def trajSpace(self):
self.ts = TrajSpace(self.bam)
self.ts.setGeometry(QRect(100, 100, 1200, 700))
self.ts.show()
def glmviewer(self):
self.glm = glmWindowDialog(self.bam)
self.glm.setGeometry(QRect(100, 100, 1200, 700))
self.glm.show()
def lumEffGUI(self):
self.lum_eff.setState(True)
qApp.processEvents()
self.leg = lumEffDialog(self.bam)
self.leg.setGeometry(QRect(100, 100, 1200, 700))
self.leg.show()
self.lum_eff.setState(False)
qApp.processEvents()
def showHeight(self):
self.show_height.switchState()
qApp.processEvents()
def csvLoad(self, table):
""" Loads csv file into a table
"""
dlg = QFileDialog()
dlg.setFileMode(QFileDialog.AnyFile)
dlg.setNameFilters(['CSV File (*.csv)'])
dlg.exec_()
filename = dlg.selectedFiles()
try:
with open(filename[0]) as f:
data_table = []
next(f)
for line in f:
a = line.split(',')
if len(a) != 9:
errorMessage('Wrong number of columns for a picks file!', 1, info='Make sure a picks file is imported!')
return None
data_table.append(a)
except IsADirectoryError as e:
errorMessage('Please select a valid file to load', 1, detail='{:}'.format(e))
return None
defTable(self.csv_table, 0, 9, headers=['Pick Group', 'Network', 'Code', 'Latitude', 'Longitude', 'Elevation', 'Pick JD', 'Pick Time', 'Station Number'])
toTable(table, data_table)
def csvSave(self, table):
""" Saves a table to a csv
"""
dlg = QFileDialog.getSaveFileName(self, 'Save File')
file_name = checkExt(dlg[0], '.csv')
data_set = fromTable(table)
# Open the output CSV
with open(os.path.join(file_name), 'w') as f:
# Write the header
f.write('Pick group, Network, Code, Lat, Lon, Elev, Pick JD, Pick time, station_number \n')
# Go through all picks
for line in data_set:
line[-1] = int(line[-1])
# Write the CSV entry
f.write("{:}, {:}, {:}, {:}, {:}, {:}, {:}, {:}, {:}\n".format(*line))
errorMessage('Output to CSV!', 0, title='Exported!', detail='Filename: {:}'.format(file_name))
def supTheoSetup(self, manual):
supSearch(self.bam, self.prefs, manual=manual, results_print=False, obj=self, theo=True)
def supSearchSetup(self, manual):
supSearch(self.bam, self.prefs, manual=manual, results_print=False, obj=self)
def trajSearchSetup(self):
self.bam.seis_out_file = self.seis_out_edits.text()
x, fopt, geo, stat_names, stat_picks = trajectorySearch(self.bam, self.prefs)
# x, fopt = trajectorySearch(self.bam, self.prefs)
##########
# Display
##########
### Solution Table
defTable(self.seis_table, 9, 2, headers=['Parameter', 'Value'])
data_table = [['Error', fopt],
['X', x[0]],
['Y', x[1]],
['Time', x[2]],
['Velocity', x[3]],
['Azimuth', x[4]],
['Zenith', x[5]],
['Latitude', geo.lat],
['Longitude', geo.lon]]
toTable(self.seis_table, data_table)
### Residual Table
defTable(self.seis_resids, 0, 2, headers=['Station', 'Residual'])
res_table = []
for ss in range(len(stat_names)):
res_table.append([stat_names[ss], stat_picks[ss]])
toTable(self.seis_resids, res_table)
def rayTrace(self):
A = Position(float(self.ray_lat_edits.text()), float(self.ray_lon_edits.text()), float(self.ray_height_edits.text()))
B = Position(self.ray_pick_point[0], self.ray_pick_point[1], self.ray_pick_point[2])
A.pos_loc(B)
B.pos_loc(B)
try:
sounding = parseWeather(self.setup)
except:
errorMessage('Error reading weather profile in rayTrace', 2)
return None
if self.prefs.debug:
print("Starting and End points of Ray Trace")
print(A)
print(B)
if self.setup.perturb_times == 0:
self.setup.perturb_times = 1
trace_data = [None]*self.setup.perturb_times
trace_var = [None]*self.setup.perturb_times
t_arrival = [None]*self.setup.perturb_times
t_arrival_cy = [None]*self.setup.perturb_times
err = [None]*self.setup.perturb_times
#plt.style.use('dark_background')
fig = plt.figure(figsize=plt.figaspect(0.5))
fig.set_size_inches(5, 5)
ax = fig.add_subplot(1, 1, 1, projection='3d')
if self.setup.perturb_method == 'ensemble':
ensemble_file = self.setup.perturbation_spread_file
else:
ensemble_file = ''
x_var = []
y_var = []
z_var = []
p_var = []
t_var = []
error_list = []
for ptb_n in range(self.setup.perturb_times):
trace_data = []
trace_var = []
if ptb_n > 0 and self.ray_enable_perts.isChecked():
if self.prefs.debug:
print(printMessage("status"), "Perturbation {:}".format(ptb_n))
# generate a perturbed sounding profile
sounding_p = perturb(self.setup, sounding, self.setup.perturb_method, \
spread_file=self.setup.perturbation_spread_file, lat=self.setup.lat_centre, lon=self.setup.lon_centre, ensemble_file=ensemble_file, ensemble_no=ptb_n)
else:
# if not using perturbations on this current step, then return the original sounding profile
sounding_p = sounding
z_profile, _ = getWeather(np.array([A.x, A.y, A.z]), np.array([B.x, B.y, B.z]), \
self.setup.weather_type, A, copy.copy(sounding_p))
z_profile = zInterp(B.z, A.z, z_profile, div=100)
a, b, c, E, trace_data = slowscan(A.xyz, B.xyz, z_profile, wind=True, n_theta=self.setup.n_theta, n_phi=self.setup.n_theta, h_tol=self.setup.h_tol, v_tol=self.setup.v_tol)
if trace_data == trace_data:
if self.ray_enable_vars.isChecked():
last_k = 0
N = 15
m, n = np.shape(trace_var[0][0])
for i in range(m//N):
for j in range(n//N):
for line in trace_var:
k = line[3]
if k != last_k:
#c = (0, 0, (t_var[0] - np.pi/2)/np.pi/2%1)
ax.plot3D(x_var, y_var, z_var, c='r')
x_var = []
y_var = []
z_var = []
p_var = []
t_var = []
x_var.append(line[0][i*N, j*N])
y_var.append(line[1][i*N, j*N])
z_var.append(line[2][i*N, j*N])
p_var.append(line[4][i*N, j*N])
t_var.append(line[5][i*N, j*N])
last_k = k
ax.plot3D(x_var, y_var, z_var, c='r')
if ptb_n == 0:
xline = []
yline = []
zline = []
try:
for line in trace_data:
#line[0], line[1], line[2] = loc2Geo(A.lat, A.lon, A.elev, [line[0], line[1], line[2]])
xline.append(line[0])
yline.append(line[1])
zline.append(line[2])
ax.plot3D(np.array(xline)/1000, np.array(yline)/1000, np.array(zline)/1000, 'black')
#ax.scatter(xline, yline, zline, 'blue', marker='o')
#ax.scatter(0, 0, 0, 'orange', marker='^')
except IndexError:
pass
except TypeError:
pass
# ax.set_xlim3d(B.x, A.x)
# ax.set_ylim3d(B.y, A.y)
# ax.set_zlim3d(B.z, A.z)
x_pts = [None]*len(xline)
y_pts = [None]*len(xline)
for i in range(len(xline)):
x_pts[i], y_pts[i], _ = loc2Geo(B.lat, B.lon, B.elev, [xline[i], yline[i], zline[i]])
self.ray_canvas.plot(y_pts, x_pts, pen=(255, 255, 255), update=True)
if self.ray_enable_perts.isChecked():
xline = []
yline = []
zline = []
if ptb_n > 0:
for line in trace_data:
#line[0], line[1], line[2] = loc2Geo(A.lat, A.lon, A.elev, [line[0], line[1], line[2]])
xline.append(line[0])
yline.append(line[1])
zline.append(line[2])
try:
ax.plot3D(np.array(xline)/1000, np.array(yline)/1000, np.array(zline)/1000)#'#15ff00')
except:
pass
x_pts = [None]*len(xline)
y_pts = [None]*len(xline)
for i in range(len(xline)):
x_pts[i], y_pts[i], _ = loc2Geo(B.lat, B.lon, B.elev, [xline[i], yline[i], zline[i]])
self.ray_canvas.plot(y_pts, x_pts, pen=(21, 255, 0), update=True)
#ax.scatter(xline, yline, zline, 'black')
if self.ray_enable_windfield.isChecked():
c = (z_profile[:, 1])
mags = (z_profile[:, 2])
dirs = (z_profile[:, 3])
# Init the constants
consts = Constants()
#convert speed of sound to temp
t = np.square(c)*consts.M_0/consts.GAMMA/consts.R
#convert to EDN
#dirs = np.radians(angle2NDE(np.degrees(dirs)))
norm = Normalize()
norm.autoscale(t)
#convert mags and dirs to u and v
if self.setup.weather_type == 'custom':
u = mags*np.sin(np.radians(dirs))
v = mags*np.cos(np.radians(dirs))
else:
u = mags*np.sin(dirs)
v = mags*np.cos(dirs)
c = t[:-1]
c = (c.ravel() - c.min()) / c.ptp()
c = np.concatenate((c, np.repeat(c, 2)))
c = plt.cm.seismic(c)
xline = []
yline = []
zline = []
max_mag = np.nanmax(mags)/1000
for ii, line in enumerate(trace_data):
#line[0], line[1], line[2] = loc2Geo(A.lat, A.lon, A.elev, [line[0], line[1], line[2]])
xline = line[0]
yline = line[1]
zline = line[2]
try:
ax.quiver(np.array(xline)/1000, np.array(yline)/1000, np.array(zline)/1000, u[ii]/max_mag, v[ii]/max_mag, 0, color=c)
except:
pass
avg_error = np.mean(error_list)
print("Mean error in computation from loss in speed: {:5.2f}".format(avg_error))
# ax.set_xlim3d(B.x, A.x)
# ax.set_ylim3d(B.y, A.y)
# ax.set_zlim3d(B.z, A.z)
ax.set_xlabel('x (km +East)')
ax.set_ylabel('y (km +North)')
ax.set_zlabel('z (km +Up)')
ax.set_title('Ray Trace from {:} km to \n {:}'.format(A.z/1000, B))
self.ray_graphs.removeWidget(self.ray_line_canvas)
self.ray_line_canvas = FigureCanvas(Figure(figsize=(3, 3)))
self.ray_line_canvas = FigureCanvas(fig)
self.ray_line_canvas.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
# self.three_ray = NavigationToolbar(self.ray_line_canvas, self)
self.ray_graphs.addWidget(self.ray_line_canvas)
# self.ray_graphs.addWidget(self.three_ray)
self.ray_line_canvas.draw()
ax.mouse_init()
SolutionGUI.update(self)
def trajSolver(self):
A = Position(self.setup.lat_i, self.setup.lon_i, self.setup.elev_i)
B = Position(self.setup.lat_f, self.setup.lon_f, self.setup.elev_f)
if A.isNone() or B.isNone():
errorMessage('Positions of the trajectory are None!', 2, detail='Please define both ends of the trajectory!')
return None
A.pos_loc(B)
B.pos_loc(B)
v = np.array([B.x - A.x, B.y - A.y, B.z - A.z])
n = (tryFloat(self.ray_height_edits.text()) - A.z)/v[2]
P = n*v + np.array([A.x, A.y, A.z])
pt = Position(0, 0, 0)
pt.x = P[0]
pt.y = P[1]
pt.z = P[2]
pt.pos_geo(B)
if self.prefs.debug:
print("Solved Point:")
print(pt)
self.ray_lat_edits.setText(str(pt.lat))
self.ray_lon_edits.setText(str(pt.lon))
self.ray_pick_traj.setPoints(x=[pt.lon], y=[pt.lat], pen=(255, 0, 110))
self.ray_canvas.addItem(self.ray_pick_traj, update=True)
if self.ray_pick_point != [0, 0, 0]:
self.rayTrace()
def W_estGUI(self):
""" Opens up yield estimater GUI
"""
self.W_est.setState(True)
qApp.processEvents()
try:
self.w = Yield(self.bam, self.prefs, self.current_station)
except AttributeError as e:
errorMessage('Not enough data for yield generator', 2, detail='{:}'.format(e))
self.w.setGeometry(QRect(100, 100, 800, 200))
self.w.show()
self.W_est.setState(False)
qApp.processEvents()
def showContour(self, mode):
filename = saveFile('npy')
print('Working on contour - This could take a while...')
self.clearContour()
ref_pos = Position(self.bam.setup.lat_centre, self.bam.setup.lon_centre, 0)
### option to use a perturbation for the contour instead of nominal (change the 0 to the perturbation number)
# sounding = self.perturbGenerate(0, sounding, self.perturbSetup())
if mode == 'ballistic':
if errorCodes(self.bam.setup, 'trajectory'):
return None
try:
# points = self.bam.setup.trajectory.findPoints(gridspace=100, min_p=9000, max_p=50000)
points = self.bam.setup.trajectory.trajInterp2(div=50, min_p=17000, max_p=75000)
except AttributeError as e:
errorMessage('Trajectory is not defined!', 2, detail='{:}'.format(e))
return None
elif mode == 'fragmentation':
if errorCodes(self.bam.setup, 'fragmentation_point'):
return None
try:
A = self.bam.setup.fragmentation_point[0].position
A.pos_loc(ref_pos)
points = A.xyz
except (TypeError, IndexError) as e:
errorMessage('Fragmentation Point not defined correctly!', 1, info='Please define the fragmentation point in the setup toolbar', detail='{:}'.format(e))
return None
results = waveReleasePointWindsContour(self.bam, self.bam.setup.trajectory, ref_pos, points, mode=mode)
results = np.array(results)
dx, dy = 0.01, 0.01
print(results)
X = results[:, 0]
Y = results[:, 1]
T = results[:, 3]
data = []
for i in range(len(X)):
A = Position(0, 0, 0)
A.x = X[i]
A.y = Y[i]
A.z = 0
A.pos_geo(ref_pos)
data.append((A.lon, A.lat, dy, dx, T[i]))
# return data in a form readable by Rectangle Object
lat = []
lon = []
Z = []
for line in data:
lat.append(line[1])
lon.append(line[0])
Z.append(line[-1])
# this method will triangulate the info needed (recommended by pyplot)
#https://matplotlib.org/3.1.1/gallery/images_contours_and_fields/irregulardatagrid.html
import matplotlib.tri as tri
A = np.array([lon, lat, Z])
# Save the data for basemap plotting
np.save(filename, A)
# plt.scatter(lon, lat)
plt.tricontour(lon, lat, Z, levels=14, linewidths=0.5, colors='w')
cntr = plt.tricontourf(lon, lat, Z, levels=14, cmap="RdBu_r")
plt.colorbar(cntr)
# add trajectory
traj = self.bam.setup.trajectory
plt.plot([traj.pos_i.lon, traj.pos_f.lon], [traj.pos_i.lat, traj.pos_f.lat], c='b')
plt.scatter([traj.pos_f.lon], [traj.pos_f.lat], c='b', marker='+')
# add stations
for stn in self.bam.stn_list:
plt.scatter(stn.metadata.position.lon, stn.metadata.position.lat, marker='^', c='g')
plt.text(stn.metadata.position.lon, stn.metadata.position.lat, ''.format(stn.metadata.code))
plt.show()
self.show_f_contour.setState(False)
self.show_contour.setState(False)
### below will make a contour as before
# self.contour_data_squares = RectangleItem(data)
# self.make_picks_map_graph_canvas.addItem(self.contour_data_squares)
# print('Contour Finished!')
def clearContour(self):
pass
# self.make_picks_map_graph_canvas.removeItem(self.contour_data_squares)
def saveContour(self):
filename = QFileDialog.getSaveFileName(self, 'Save File')
np.save(filename[0], self.contour_data)
def loadContour(self):
dlg = QFileDialog()
dlg.setFileMode(QFileDialog.AnyFile)
dlg.setNameFilters(['Contour Numpy File (*.npy)'])
dlg.exec_()
filename = dlg.selectedFiles()
try:
self.contour_data = np.load(filename[0])
except IsADirectoryError as e:
errorMessage('Directory chosen when .npy file expected!', 2, detail='{:}'.format(e))
return None
print(self.contour_data)
try:
self.make_picks_map_graph_canvas.addItem(RectangleItem(self.contour_data))
except IndexError as e:
errorMessage('The selected contour file is of the wrong size!', 2, info='Are you sure that this is a contour file and not some other .npy file?', detail='{:}'.format(e))
def checkForWorkDir(self):
try:
if not os.path.exists(self.prefs.workdir):
os.makedirs(self.prefs.workdir)
return True
except (FileNotFoundError, TypeError) as e:
errorMessage("No such file or directory: '{:}'".format(self.prefs.workdir), 2, info='Define a working directory in the toolbar on the side.', detail='{:}'.format(e))
return False
def loadRayGraph(self):
if not self.checkForWorkDir():
return None
#Build seismic data path
dir_path = os.path.join(self.prefs.workdir, self.setup.fireball_name)
# Load the station and waveform files list
data_file_path = os.path.join(dir_path, DATA_FILE)
if os.path.isfile(data_file_path):
stn_list = readStationAndWaveformsListFile(data_file_path, rm_stat=self.setup.rm_stat, debug=self.prefs.debug)
else:
errorMessage('Station and waveform data file not found! Download the waveform files first!', 2)
return None
stn_list = stn_list + self.setup.stations
for stn in stn_list:
if stn.metadata.code not in self.setup.rm_stat:
text = pg.TextItem(text='{:}-{:}'.format(stn.metadata.network, stn.metadata.code),\
border='w', color=(255, 255, 255), fill=(255, 255, 255, 100))
text.setPos(stn.metadata.position.lon, stn.metadata.position.lat)
self.ray_canvas.addItem(text)
end_point = pg.ScatterPlotItem()
end_point.addPoints(x=[self.setup.lon_f], y=[self.setup.lat_f], pen=(66, 232, 244), symbol='+')
self.ray_canvas.addItem(end_point, update=True)
x=[self.setup.lon_i, self.setup.lon_f]
y=[self.setup.lat_i, self.setup.lat_f]
self.ray_canvas.plot(x, y, pen=(66, 232, 244))
SolutionGUI.update(self)
def rayMouseClicked(self, evt):
if self.tog_picks.isChecked():
mousePoint = self.ray_canvas.vb.mapToView(evt.pos())
self.ray_pick.setPoints(x=[mousePoint.x()], y=[mousePoint.y()], pen=(255, 0, 110))
self.ray_canvas.addItem(self.ray_pick, update=True)
self.ray_pick_point = [mousePoint.y(), mousePoint.x(), 0]
self.ray_pick_label.setText("Lat: {:10.4f} Lon: {:10.4f} Elev {:10.2f}".format(*self.ray_pick_point))
def effectiveSoundSpeed(self, sounding):
'''
Returns the sound speed at every pressure level, also considering the winds and the k-vectors
'''
#############################
# Get start and end positions
#############################
lat = [tryFloat(self.fatm_start_lat.text()), tryFloat(self.fatm_end_lat.text())]
lon = [tryFloat(self.fatm_start_lon.text()), tryFloat(self.fatm_end_lon.text())]
elev = [tryFloat(self.fatm_start_elev.text()), tryFloat(self.fatm_end_elev.text())]
supra_pos = Position(lat[0], lon[0], elev[0])
detec_pos = Position(lat[1], lon[1], elev[1])
ref_pos = Position(self.bam.setup.lat_centre, self.bam.setup.lon_centre, 0)
supra_pos.pos_loc(ref_pos)
detec_pos.pos_loc(ref_pos)
################################
# Get eigen-path from ray-tracer
################################
r, pts = cyscan(supra_pos.xyz, detec_pos.xyz, sounding, trace=True, plot=False, particle_output=False, debug=False, wind=self.prefs.wind_en, h_tol=self.prefs.pso_min_ang, v_tol=self.prefs.pso_min_dist)
# The path taken in an isotropic atmosphere - straight line
u = supra_pos.xyz - detec_pos.xyz
nom_range = np.sqrt(u[0]**2 + u[1]**2 + u[2]**2)
ray_range = 0
pts = np.array(pts)
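# Total along-path length of the traced ray: sum the lengths of consecutive segments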
for ii in range(len(pts) - 1):
k = np.array([pts[ii + 1, 0] - pts[ii, 0],\
pts[ii + 1, 1] - pts[ii, 1],\
pts[ii + 1, 2] - pts[ii, 2]])
ray_range += np.sqrt(k[0]**2 + k[1]**2 + k[2]**2)
for ii in range(len(pts[0]) - 1):
k = np.array([pts[ii + 1, 0] - pts[ii, 0],\
pts[ii + 1, 1] - pts[ii, 1],\
pts[ii + 1, 2] - pts[ii, 2]])
k /= np.sqrt(k[0]**2 + k[1]**2 + k[2]**2)
c = sounding[:, 1]
mags = sounding[:, 2]
dirs = sounding[:, 3]
u = mags*np.sin(dirs)
v = mags*np.cos(dirs)
w = np.array([u, v, 0])
c_eff = c + np.dot(k, w)
return c_eff, nom_range, ray_range
def SCI(self):
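# SCI: project the mean 40-60 km wind vector onto the source-to-receiver travel direction (along-path wind index)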
sounding, perturbations = self.fatmLoadAtm(plot=False)
h = sounding[:, 0]
dirs = angle2NDE(np.degrees(sounding[:, 3]))
mags = sounding[:, 2]
u = mags*np.cos(np.radians(dirs))
v = mags*np.sin(np.radians(dirs))
h_index = np.where((h >= 40000) & (h <= 60000))
u_sci = u[h_index]
v_sci = v[h_index]
SCI_u = np.mean(u_sci)
SCI_v = np.mean(v_sci)
pos_i = Position(tryFloat(self.fatm_start_lat.text()), tryFloat(self.fatm_start_lon.text()), tryFloat(self.fatm_start_elev.text()))
pos_f = Position(tryFloat(self.fatm_end_lat.text()), tryFloat(self.fatm_end_lon.text()), tryFloat(self.fatm_end_elev.text()))
angle = pos_i.angleBetween(pos_f)
wind_vect = np.array([SCI_u, SCI_v])
trav_vect = np.array([np.sin(np.radians(angle)), np.cos(np.radians(angle))])
# projection, but trav_vect is normalized
SCI = np.dot(wind_vect, trav_vect)
print("Wind Index:")
print("U-Component: {:.2f} m/s".format(SCI_u))
print("V-Component: {:.2f} m/s".format(SCI_v))
print("Angle of Travel: {:.2f} deg (N due E)".format(angle))
print("Total SCI: {:.2f} m/s".format(SCI))
def fatmPlot(self, sounding, perturbations):
NTS = []
dh = sounding[:, 0]
lat = tryFloat(self.fatm_end_lat.text())
lon = tryFloat(self.fatm_end_lon.text())
ref_time = self.bam.setup.fireball_datetime
self.fatm_plot.ax.clear()
self.fatm_plot.ax.set_ylabel("Height [km]")
if self.fatm_variable_combo.currentText() == 'Sound Speed':
X = sounding[:, 1]
jd = date2JD(ref_time.year, ref_time.month, ref_time.day, ref_time.hour, ref_time.minute, ref_time.second)
for hh in dh:
t = getAtmDensity(lat, lon, hh, jd)
c = np.sqrt(consts.GAMMA*consts.R/consts.M_0*t)
NTS.append(c)
self.fatm_plot.ax.set_xlabel("Sound Speed [m/s]")
elif self.fatm_variable_combo.currentText() == 'Wind Magnitude':
X = sounding[:, 2]
for hh in dh:
u, v = getHWM(ref_time, lat, lon, hh/1000)
c = np.sqrt(u**2 + v**2)
NTS.append(c)
self.fatm_plot.ax.set_xlabel("Wind Magnitude [m/s]")
elif self.fatm_variable_combo.currentText() == 'Wind Direction':
X = np.degrees(sounding[:, 3])
for hh in dh:
u, v = getHWM(ref_time, lat, lon, hh/1000)
c = np.degrees(np.arctan2(u, v))
NTS.append(c)
self.fatm_plot.ax.set_xlabel("Wind Direction [deg from N]")
elif self.fatm_variable_combo.currentText() == 'Effective Sound Speed':
X, nom_range, nom_ray_range = self.effectiveSoundSpeed(sounding)
self.fatm_plot.ax.set_xlabel("Effective Sound Speed [m/s]")
elif self.fatm_variable_combo.currentText() == 'U-Component of Wind':
dirs = angle2NDE(np.degrees(sounding[:, 3]))
mags = sounding[:, 2]
X = mags*np.cos(np.radians(dirs))
for hh in dh:
u, v = getHWM(ref_time, lat, lon, hh/1000)
NTS.append(u)
self.fatm_plot.ax.set_xlabel("U-Component of Wind [m/s]")
elif self.fatm_variable_combo.currentText() == 'V-Component of Wind':
dirs = angle2NDE(np.degrees(sounding[:, 3]))
mags = sounding[:, 2]
X = mags*np.sin(np.radians(dirs))
for hh in dh:
u, v = getHWM(ref_time, lat, lon, hh/1000)
NTS.append(v)
self.fatm_plot.ax.set_xlabel("V-Component of Wind [m/s]")
else:
return None
Y = sounding[:, 0]
NTS = np.array(NTS)
self.fatm_plot.ax.plot(X, Y/1000, c="w", label="Nominal Profile (ERA5 & Model)")
if len(NTS) == len(Y):
self.fatm_plot.ax.plot(NTS, Y/1000, c='m', label="NRLMSISE & HWM")
SolutionGUI.update(self)
perts_range = []
if perturbations is None:
perturbations = []
if len(perturbations) != 0:
for ii, ptb in enumerate(perturbations):
if self.fatm_variable_combo.currentText() == 'Sound Speed':
X = ptb[:, 1]
elif self.fatm_variable_combo.currentText() == 'Wind Magnitude':
X = ptb[:, 2]
elif self.fatm_variable_combo.currentText() == 'Wind Direction':
X = np.degrees(ptb[:, 3])
elif self.fatm_variable_combo.currentText() == 'Effective Sound Speed':
X, nom_range, pert_ray_range = self.effectiveSoundSpeed(ptb)
perts_range.append(pert_ray_range)
elif self.fatm_variable_combo.currentText() == 'U-Component of Wind':
dirs = angle2NDE(np.degrees(ptb[:, 3]))
mags = ptb[:, 2]
X = mags*np.cos(np.radians(dirs))
elif self.fatm_variable_combo.currentText() == 'V-Component of Wind':
dirs = angle2NDE(np.degrees(ptb[:, 3]))
mags = ptb[:, 2]
X = mags*np.sin(np.radians(dirs))
else:
return None
Y = ptb[:, 0]
PERT_ALPHA = 0.4
if ii == 0:
self.fatm_plot.ax.plot(X, Y/1000, c='g', alpha=PERT_ALPHA, label="MC Realizations")
else:
self.fatm_plot.ax.plot(X, Y/1000, c='g', alpha=PERT_ALPHA)
SolutionGUI.update(self)
perts_range = np.array(perts_range)
try:
print('Isotropic Range: {:.2f} km'.format(nom_range/1000))
print('Nominal Atmospheric Range: {:.2f} km'.format(nom_ray_range/1000))
print('Perturbation Range: {:.2f} - {:.2f} km'.format(np.nanmin(perts_range/1000), np.nanmax(perts_range/1000)))
print('Average Speed: {:.2f} m/s'.format(np.nanmean(X)))
except:
pass
font=QtGui.QFont()
font.setPixelSize(20)
# self.fatm_canvas.getAxis("bottom").tickFont = font
# self.fatm_canvas.getAxis("left").tickFont = font
# self.fatm_canvas.getAxis('bottom').setPen(self.color.WHITE)
# self.fatm_canvas.getAxis('left').setPen(self.color.WHITE)
# self.fatm_canvas.getLabel("bottom").tickFont = font
# self.fatm_canvas.getLabel("left").tickFont = font
# self.fatm_canvas.getLabel('bottom').setPen(self.color.WHITE)
# self.fatm_canvas.getLabel('left').setPen(self.color.WHITE)
print(sounding)
lat = [tryFloat(self.fatm_start_lat.text()), tryFloat(self.fatm_end_lat.text())]
lon = [tryFloat(self.fatm_start_lon.text()), tryFloat(self.fatm_end_lon.text())]
elev = [tryFloat(self.fatm_start_elev.text()), tryFloat(self.fatm_end_elev.text())]
self.fatm_plot.ax.scatter(X[-1], Y[-1]/1000, marker="*", c="r", label="Source: {:.2f}°N {:.2f}°E {:.1f} km".format(lat[0], lon[0], elev[0]/1000))
self.fatm_plot.ax.scatter(X[0], Y[0]/1000, marker="^", c="b", label="Receiver: {:.2f}°N {:.2f}°E {:.1f} km".format(lat[1], lon[1], elev[1]/1000))
self.fatm_plot.ax.grid(alpha=0.2)
self.fatm_plot.ax.legend()
self.fatm_plot.show()
SolutionGUI.update(self)
def fatmSaveAtm(self):
filename = self.fatm_name_edits.text()
if len(filename) == 0:
errorMessage("File name cannot be blank!", 1, info='"{:}" is not an acceptable file name'.format(filename))
return None
if self.fatm_source_type.currentText() == "Copernicus Climate Change Service (ECMWF)":
weather_type = 'ecmwf'
elif self.fatm_source_type.currentText() == "Copernicus Climate Change Service (ECMWF) - Spread File":
weather_type = 'spread'
elif self.fatm_source_type.currentText() == "Radiosonde":
weather_type = 'radio'
self.bam.atmos.loadSounding(self.fatm_name_edits.text(), weather_type, \
lat=self.bam.setup.lat_centre, lon=self.bam.setup.lon_centre, \
rng=self.bam.setup.deg_radius, time=self.fatm_datetime_edits.dateTime())
save(self, True)
def fatmLoadAtm(self, plot=True):
lat = [tryFloat(self.fatm_start_lat.text()), tryFloat(self.fatm_end_lat.text())]
lon = [tryFloat(self.fatm_start_lon.text()), tryFloat(self.fatm_end_lon.text())]
elev = [tryFloat(self.fatm_start_elev.text()), tryFloat(self.fatm_end_elev.text())]
if elev[0] is None or elev[1] is None:
errorMessage("Unaccepted start and end heights, please fill in below", 1, \
detail="{:} and {:} are not formatted correctly".format(tryFloat(self.fatm_start_elev.text()), tryFloat(self.fatm_end_elev.text())))
return None
try:
atmos = self.bam.atmos.getSounding(lat=lat, lon=lon, heights=elev, ref_time=self.bam.setup.fireball_datetime)
except Exception as e:
errorMessage("Unable to load sounding data from BAM", 1, detail="{:}".format(e))
return None
if plot:
self.fatmPlot(*atmos)
else:
return atmos
def fatmFetch(self, download):
# ECMWF
perts = False
if self.fatm_source_type.currentIndex() == 0:
year = str(self.fatm_datetime_edits.dateTime().date().year())
month = str(self.fatm_datetime_edits.dateTime().date().month())
day = str(self.fatm_datetime_edits.dateTime().date().day())
time_of = str("{:02d}".format(self.fatm_datetime_edits.dateTime().time().hour())) + \
':' + str("{:02d}".format(self.fatm_datetime_edits.dateTime().time().minute()))
loc = self.fatm_name_edits.text()
variables = ['temperature', 'u_component_of_wind', 'v_component_of_wind', 'geopotential']
if download:
loc = saveFile("nc")
# If empty file string with extension 'nc' (they hit cancel in the file selection
# dialog)
if len(loc) == 2:
print(printMessage("status"), "Not downloading anything")
return None
qm = QtGui.QMessageBox
ret = qm.question(self, 'File Download - Reanalysis', "Download Nominal Profile?", qm.Yes | qm.No)
try:
if ret == qm.Yes:
print(printMessage("status"), "Downloading Reanalysis")
perts = False
else:
qm = QtGui.QMessageBox
ret = qm.question(self, 'File Download - Perturbations', "Download Perturbations?", qm.Yes | qm.No)
try:
if ret == qm.Yes:
print(printMessage("status"), "Downloading Perturbation Ensemble")
perts = True
else:
print(printMessage("status"), "Not downloading anything")
return None
except AttributeError:
print(printMessage("error"), "Attribute Error")
# If the "x" is clicked
return None
except AttributeError:
print(printMessage("error"), "Attribute Error")
# If the "x" is clicked
return None
errorMessage("Downloading weather profile to {:}...".format(loc), 0)
clat = self.bam.setup.lat_centre
clon = self.bam.setup.lon_centre
deg_rad = 5*self.bam.setup.deg_radius
area = [clat + deg_rad, clon - deg_rad, clat - deg_rad, clon + deg_rad]
try:
copernicusAPI(variables, year, month, day, time_of, loc, ensemble=perts, area=area)
except Exception as e:
errorMessage("Error downloading weather data from CDS", 1, detail='{:}'.format(e))
except:
print(printMessage("status"), "Other Exception")
errorMessage("Weather profile downloaded to {:}".format(loc), 0)
return None
else:
lat = [tryFloat(self.fatm_start_lat.text()), tryFloat(self.fatm_end_lat.text())]
lon = [tryFloat(self.fatm_start_lon.text()), tryFloat(self.fatm_end_lon.text())]
elev = [tryFloat(self.fatm_start_elev.text()), tryFloat(self.fatm_end_elev.text())]
if elev[0] is None or elev[1] is None:
errorMessage("Unaccepted start and end heights, please fill in below", 1, \
detail="{:} and {:} are not formatted correctly".format(tryFloat(self.fatm_start_elev.text()), tryFloat(self.fatm_end_elev.text())))
return None
try:
atmos = self.bam.atmos.getSounding(lat=lat, lon=lon, heights=elev, ref_time=self.bam.setup.fireball_datetime)
except Exception as e:
errorMessage("Unable to load sounding data from BAM", 1, detail="{:}".format(e))
return None
self.fatmPlot(*atmos)
# Radio
elif self.fatm_source_type.currentIndex() == 1:
errorMessage("Radio download not yet supported", 1)
return None
loc = self.fatm_name_edits.text()
if download:
if self.prefs.debug:
print("Looking for stations near: Lat: {:.4f} Lon: {:.4f}".format(self.setup.lat_centre, self.setup.lon_centre))
downloadRadio(self.setup.lat_centre, self.setup.lon_centre, loc, debug=self.prefs.debug)
with zipfile.ZipFile(loc, 'r') as zip_ref:
names = zip_ref.namelist()
zip_ref.extractall(os.path.join(self.prefs.workdir, self.setup.fireball_name))
if self.prefs.debug:
print('File extracted {:}'.format(names[0]))
else:
self.fatm_variable_combo.addItem('Temperature')
self.fatm_variable_combo.addItem('U-Component of Wind')
self.fatm_variable_combo.addItem('V-Component of Wind')
self.fatm_variable_combo.addItem('Wind Magnitude')
self.fatm_variable_combo.addItem('Wind Direction')
self.fatmPlot()
def fatmPrint(self, infraga=False):
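# Write the currently loaded sounding profile to a text file. With infraga=True the
# columns are reordered and converted to the units noted further below
# (z [km], T, u, v, rho [g/cm^3], P [mbar]); otherwise a simple comma-separated
# table is written with the header built from `variables`.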
filename = QFileDialog.getSaveFileName(self, 'Save File', '', 'Text File (*.txt)')
self.setup = self.bam.setup
self.setup.lat_centre = tryFloat(self.fatm_end_lat.text())
self.setup.lon_centre = tryFloat(self.fatm_end_lon.text())
self.setup.sounding_file = self.fatm_name_edits.text()
atm_time = self.fatm_datetime_edits.dateTime().time().hour()
variables = []
if self.prefs.atm_type == 'ecmwf':
variables = ['u', 'v', 't']
if errorCodes(self.setup, 'sounding_file'):
return None
try:
float(self.setup.lat_centre)
float(self.setup.lon_centre)
except TypeError as e:
errorMessage('Lat centre and/or lon centre are not floats or are not defined!', 2, detail='{:}'.format(e))
return None
lat = [tryFloat(self.fatm_start_lat.text()), tryFloat(self.fatm_end_lat.text())]
lon = [tryFloat(self.fatm_start_lon.text()), tryFloat(self.fatm_end_lon.text())]
elev = [tryFloat(self.fatm_start_elev.text()), tryFloat(self.fatm_end_elev.text())]
if elev[0] is None or elev[1] is None:
errorMessage("Unaccepted start and end heights, please fill in below", 1, \
detail="{:} and {:} are not formatted correctly".format(tryFloat(self.fatm_start_elev.text()), tryFloat(self.fatm_end_elev.text())))
return None
try:
sounding, _ = self.bam.atmos.getSounding(lat=lat, lon=lon, heights=elev, ref_time=self.bam.setup.fireball_datetime)
except Exception as e:
errorMessage('Unable to find specified profile!', 2, detail='{:}'.format(e))
return None
z = sounding[:, 0]
# Convert the speed of sound in column 1 back to temperature [C]
t = (sounding[:, 1]**2)/consts.GAMMA/consts.R*consts.M_0 - 273.15
u = sounding[:, 2]
# Converted from radians to degrees
v = np.degrees(sounding[:, 3])
# Converted to mbar (assuming the sounding stores pressure in Pa)
p = sounding[:, 4]/100
### Taken from ecmwf_extractor.py
den0 = 0.001225
coeffs_A = np.array([-3.9082017e-2, -1.1526465e-3, 3.2891937e-5, -2.0494958e-7,
-4.7087295e-2, 1.2506387e-3, -1.5194498e-5, 6.581877e-8])
coeffs_B = np.array([-4.9244637e-3, -1.2984142e-6, -1.5701595e-6, 1.5535974e-8,
-2.7221769e-2, 4.247473e-4, -3.9583181e-6, 1.7295795e-8])
def density(z):
"""
Computes the atmospheric density according to
the US standard atmosphere model using a
polynomial fit
Parameters
----------
z : float
Altitude above sea level [km]
Returns:
density : float
Density of the atmosphere at altitude z [g/cm^3]
"""
poly_A, poly_B = 0.0, 1.0
for n in range(4):
poly_A += coeffs_A[n] * z**(n + 1)
poly_B += coeffs_B[n] * z**(n + 1)
return den0 * 10.0**(poly_A / poly_B)
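# Sanity check on the fit above: at z = 0 every z-dependent term vanishes, leaving
# poly_A = 0 and poly_B = 1, so density(0) = den0 = 0.001225 g/cm^3, the standard
# sea-level value.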
rho = density(z/1000)
atmos_vars = [z, u, v, t, p, rho]
filename = checkExt(filename[0], '.txt')
header = 'Height'
for element in variables:
if element == variables[-1]:
header = header + ', ' + element + '\n'
else:
header = header + ', ' + element
if infraga:
# infraga takes z[km], T[K], u, v, rho[g/cm^3], P[mbar]
with open(str(filename), 'w') as f:
# f.write(header)
for line in range(len(sounding)):
info = ''
# Column order written for infraGA: z [km], t, u, v, rho, p (indices into atmos_vars)
for col in [0, 3, 1, 2, 5, 4]:
if col == 4:
# Pressure is the last entry on each row
info = info + str(atmos_vars[col][line]) + '\n'
elif col == 0:
# Height is converted from m to km
info = info + str(atmos_vars[col][line]/1000) + '\t'
else:
info = info + str(atmos_vars[col][line]) + '\t'
f.write(info)
else:
with open(str(filename), 'w') as f:
f.write(header)
for line in range(len(sounding)):
info = ''
# Column order: z, t, u, v, p (indices into atmos_vars)
for col in [0, 3, 1, 2, 4]:
if col == 4:
info = info + str(atmos_vars[col][line]) + '\n'
else:
info = info + str(atmos_vars[col][line]) + ','
f.write(info)
errorMessage('Printed out sounding data', 0, title="Print Done")
def refLoadTrajectory(self):
self.latedit.setText(str(self.setup.trajectory.pos_f.lat))
self.lonedit.setText(str(self.setup.trajectory.pos_f.lon))
self.azedit.setText(str(self.setup.trajectory.azimuth.deg))
self.zeedit.setText(str(self.setup.trajectory.zenith.deg))
self.veedit.setText(str(self.setup.trajectory.v))
self.vfedit.setText(str(self.setup.trajectory.v_f))
self.tiedit.setText(str(self.setup.trajectory.t))
def refLoad(self):
""" Loads csv file into a table
"""
dlg = QFileDialog()
dlg.setFileMode(QFileDialog.AnyFile)
dlg.setNameFilters(['CSV File (*.csv)'])
dlg.exec_()
filename = dlg.selectedFiles()
try:
with open(filename[0]) as f:
data_table = []
next(f)
for line in f:
a = line.split(',')
if len(a) != 5:
errorMessage('Wrong number of columns for file!', 1, info='Make sure the right file is imported!')
return None
data_table.append(a)
except IsADirectoryError as e:
errorMessage('Please select a valid file to load', 1, detail='{:}'.format(e))
return None
defTable(self.ref_table, 0, 5, headers=['Height', 'Latitude', 'Longitude', 'Time', 'δ Height'])
toTable(self.ref_table, data_table)
def refSave(self):
""" Saves a table to a csv
"""
dlg = QFileDialog.getSaveFileName(self, 'Save File')
file_name = checkExt(dlg[0], '.csv')
data_set = fromTable(self.ref_table)
# Open the output CSV
with open(os.path.join(file_name), 'w') as f:
# Write the header (comma-separated, matching the five columns written below)
f.write('Height, Latitude, Longitude, Time, δ Height\n')
# Go through all picks
for line in data_set:
line[-1] = int(line[-1])
# Write the CSV entry
f.write("{:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f} \n".format(*line))
errorMessage('Output to CSV!', 0, title='Exported!', detail='Filename: {:}'.format(file_name))
def saveTrace(self):
# Show the save-trace button as pressed while the trace is being written
self.savtr.setState(True)
qApp.processEvents()
try:
station_no = self.current_station
# No current station
except AttributeError:
print(printMessage("warning"), "No current station selected, no trace can be saved!")
# Put button back
self.savtr.setState(False)
qApp.processEvents()
return None
# Extract current station
stn = self.bam.stn_list[station_no]
current_channel = self.make_picks_channel_choice.currentIndex()
chn_selected = self.make_picks_channel_choice.currentText()
st, resp, gap_times = procStream(stn, ref_time=self.bam.setup.fireball_datetime, merge=False)
st = findChn(st, chn_selected)
file_name = saveFile("mseed", note="")
st.write(file_name, format="MSEED")
# Put button back
self.savtr.setState(False)
qApp.processEvents()
def refHighStats(self):
for ii, stn in enumerate(self.stn_list):
if stn.metadata.code in self.setup.high_f and stn.metadata.code in self.setup.high_b:
self.ref_stn_view[ii].setBackground(self.color.both)
elif stn.metadata.code in self.setup.high_b:
self.ref_stn_view[ii].setBackground(self.color.ballistic)
elif stn.metadata.code in self.setup.high_f:
self.ref_stn_view[ii].setBackground((51, 153, 51))
def refClearStations(self):
# Set a blank widget to remove all stations
widget = QWidget()
self.ref_waveforms.setWidget(widget)
def refBallStations(self):
self.refClearStations()
self.refLoadStations(ballonly=True)
def refFragStations(self):
self.refClearStations()
self.refLoadStations(fragonly=True)
def refBothStations(self):
self.refClearStations()
self.refLoadStations(ballonly=True, fragonly=True)
def refLoadStations(self, ballonly=False, fragonly=False):
if not self.checkForWorkDir():
return None
self.setup = self.bam.setup
# Build seismic data path
dir_path = os.path.join(self.prefs.workdir, self.setup.fireball_name)
# Load the station and waveform files list
data_file_path = os.path.join(dir_path, DATA_FILE)
self.stn_list = self.bam.stn_list
# Need to check for duplicates here
stn_list = self.stn_list
widget = QWidget()
widget.setStyleSheet('background-color: black;')
layout = QVBoxLayout(widget)
layout.setAlignment(Qt.AlignTop)
# blankwidget = QWidget()
# blankwidget.setStyleSheet('background-color: rgb(0, 100, 200);')
self.ref_stn_view = []
self.ref_stn_canvas = []
self.waveform_data = [None]*len(stn_list)
for index in range(len(stn_list)):
stn = stn_list[index]
if ballonly:
if stn.metadata.code not in self.setup.high_b:
self.ref_stn_view.append(None)
self.ref_stn_canvas.append(None)
continue
if fragonly:
if stn.metadata.code not in self.setup.high_f:
self.ref_stn_view.append(None)
self.ref_stn_canvas.append(None)
continue
stn = stn_list[index]
station_layout = QVBoxLayout()
layout.addLayout(station_layout)
label_layout = QVBoxLayout()
waveform_layout = QGridLayout()
station_layout.addLayout(label_layout)
station_layout.addLayout(waveform_layout)
# station_layout.addWidget(blankwidget, 10, 0, 1, 20)
label_layout.addWidget(QLabel('--- Station: {:2}-{:5} ---'.format(stn_list[index].metadata.network, stn_list[index].metadata.code)))
self.ref_stn_view.append(pg.GraphicsLayoutWidget())
self.ref_stn_canvas.append(self.ref_stn_view[index].addPlot())
self.ref_stn_canvas[index].getAxis('bottom').setPen((255, 255, 255))
self.ref_stn_canvas[index].getAxis('left').setPen((255, 255, 255))
waveform_layout.addWidget(self.ref_stn_view[index], index, 0)
_, _ = self.discountDrawWaveform(self.setup, index, self.ref_stn_canvas[index])
self.ref_waveforms.setWidget(widget)
self.ref_waveforms.setWidgetResizable(True)
def makeRefTraj(self):
self.ref_traj_lat = float(self.latedit.text())
self.ref_traj_lon = float(self.lonedit.text())
self.ref_traj_az = float(self.azedit.text())
self.ref_traj_ze = float(self.zeedit.text())
self.ref_traj_t = float(self.tiedit.text())
self.ref_traj_v = float(self.veedit.text())
self.ref_traj_vf = float(self.vfedit.text())
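# Build the reference trajectory from the edit boxes: time offset t, initial speed v,
# zenith/azimuth angles, the final (ground-level) position, and the final speed v_f,
# mirroring the fields filled in by refLoadTrajectory() above.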
self.ref_traj = Trajectory(self.ref_traj_t, self.ref_traj_v, \
zenith=Angle(self.ref_traj_ze), \
azimuth=Angle(self.ref_traj_az), \
pos_f=Position(self.ref_traj_lat, self.ref_traj_lon, 0), v_f=self.ref_traj_vf)
def refSyncHeights(self):
self.makeRefTraj()
frags = fromTable(self.ref_table)
new_frags = []
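# For each table row, keep the height (column 0) and recompute the latitude,
# longitude and time of that point from the reference trajectory, zeroing the
# height-uncertainty column.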
for f in frags:
P = self.ref_traj.findGeo(f[0])
T = self.ref_traj.findTime(f[0])
new_frags.append([P.elev, P.lat, P.lon, T, 0])
toTable(self.ref_table, new_frags)
def refPlotTraj(self):
self.makeRefTraj()
ref_pos = Position(self.setup.lat_centre, self.setup.lon_centre, 0)
# # sounding = parseWeather(self.setup)
# self.ref_traj.pos_f.pos_loc(ref_pos
points = self.ref_traj.trajInterp2(div=100, min_p=17000, max_p=50000)
points = np.flipud(points)
for ii, stn in enumerate(self.stn_list):
print("Station Number: {:}".format(ii+1))
stn.metadata.position.pos_loc(ref_pos)
b_time, b_pert = waveReleasePointWinds(stn.metadata.position.xyz, self.bam, self.prefs, ref_pos, points, self.ref_traj.vector)
# b_time = timeOfArrival(stn.position.xyz, self.ref_traj, self.setup, points, sounding=sounding, \
# travel=False, fast=False, ref_loc=ref_pos, theo=False, div=37)
try:
self.ref_stn_canvas[ii].addItem(pg.InfiniteLine(pos=(b_time, 0), angle=90, pen=QColor(0, 0, 255)))
# self.setColortoRow(self.ref_table, jj, QColor("blue"))
except:
# no f_time
pass
SolutionGUI.update(self)
def refPlotHeights(self):
alpha = 100
colors = [QColor(0, 255, 26), QColor(3, 252, 176), QColor(252, 3, 3), QColor(176, 252, 3), QColor(255, 133, 3),
QColor(149, 0, 255), QColor(76, 128, 4), QColor(82, 27, 27), QColor(101, 128, 125), QColor(5, 176, 249)]
colors_alpha = [QColor(0, 255, 26, alpha), QColor(3, 252, 176, alpha), QColor(252, 3, 3, alpha), QColor(176, 252, 3, alpha), QColor(255, 133, 3, alpha),
QColor(149, 0, 255, alpha), QColor(76, 128, 4, alpha), QColor(82, 27, 27, alpha), QColor(101, 128, 125, alpha), QColor(5, 176, 249, alpha)]
self.makeRefTraj()
frags = fromTable(self.ref_table)
ref_pos = Position(self.setup.lat_centre, self.setup.lon_centre, 0)
# sounding = parseWeather(self.setup)
for jj, f in enumerate(frags):
S = Position(f[1], f[2], f[0])
S.pos_loc(ref_pos)
T = f[3]
U = f[4]
if True:
# if U == 0:
for ii, stn in enumerate(self.stn_list):
print("Station No: {:}".format(ii+1))
stn.metadata.position.pos_loc(ref_pos)
stat_pos = stn.metadata.position
# Cut down atmospheric profile to the correct heights, and interp
lat = [S.lat, stat_pos.lat]
lon = [S.lon, stat_pos.lon]
elev = [S.elev, stat_pos.elev]
sounding, perturbations = self.bam.atmos.getSounding(lat=lat, lon=lon, heights=elev, spline=1000, ref_time=self.bam.setup.fireball_datetime)
# zProfile, _ = getWeather(np.array([S.lat, S.lon, S.elev]), np.array([stn.position.lat, stn.position.lon, stn.position.elev]), self.setup.weather_type, \
# [ref_pos.lat, ref_pos.lon, ref_pos.elev], copy.copy(sounding), convert=False)
# Travel time of the fragmentation wave
f_time, frag_azimuth, frag_takeoff, frag_err = cyscan(S.xyz, stn.metadata.position.xyz, sounding, \
wind=self.prefs.wind_en, h_tol=self.prefs.pso_min_ang, v_tol=self.prefs.pso_min_dist, processes=1)
try:
self.ref_stn_canvas[ii].addItem(pg.InfiniteLine(pos=(f_time, 0), angle=90, pen=colors[jj]))
# self.setColortoRow(self.ref_table, jj, QColor("blue"))
for j in range(5):
self.ref_table.item(jj, j).setBackground(colors[jj])
except:
# no f_time
pass
# else:
# P_upper = self.ref_traj.findGeo(f[0] + U)
# T_upper = self.ref_traj.findTime(f[0] + U)
# P_lower = self.ref_traj.findGeo(f[0] - U)
# T_lower = self.ref_traj.findTime(f[0] - U)
# for ii, stn in enumerate(self.stn_list):
# stn.position.pos_loc(ref_pos)
# # Cut down atmospheric profile to the correct heights, and interp
# zProfile_u, _ = getWeather(np.array([P_upper.lat, P_upper.lon, P_upper.elev]), np.array([stn.position.lat, stn.position.lon, stn.position.elev]), self.setup.weather_type, \
# [ref_pos.lat, ref_pos.lon, ref_pos.elev], copy.copy(sounding), convert=False)
# # Cut down atmospheric profile to the correct heights, and interp
# zProfile_l, _ = getWeather(np.array([P_lower.lat, P_lower.lon, P_lower.elev]), np.array([stn.position.lat, stn.position.lon, stn.position.elev]), self.setup.weather_type, \
# [ref_pos.lat, ref_pos.lon, ref_pos.elev], copy.copy(sounding), convert=False)
# # Travel time of the fragmentation wave
# f_time_u, frag_azimuth, frag_takeoff, frag_err = cyscan(P_upper.xyz, stn.position.xyz, zProfile_u, wind=True, \
# n_theta=self.setup.n_theta, n_phi=self.setup.n_phi, h_tol=self.setup.h_tol, v_tol=self.setup.v_tol)
# f_time_l, frag_azimuth, frag_takeoff, frag_err = cyscan(P_lower.xyz, stn.position.xyz, zProfile_l, wind=True, \
# n_theta=self.setup.n_theta, n_phi=self.setup.n_phi, h_tol=self.setup.h_tol, v_tol=self.setup.v_tol)
# try:
# self.ref_stn_canvas[ii].addItem(pg.LinearRegionItem(values=(f_time_l, f_time_u), pen=colors[jj], brush=colors_alpha[jj], movable=False))
# # self.ref_stn_canvas[ii].addItem(pg.InfiniteLine(pos=(f_time, 0), angle=90, pen=colors[jj]))
# # self.setColortoRow(self.ref_table, jj, QColor("blue"))
# for j in range(5):
# self.ref_table.item(jj, j).setBackground(colors[jj])
# except:
# # no f_time
# pass
SolutionGUI.update(self)
def discountDrawWaveform(self, setup, station_no, canvas):
# Extract current station
stn = self.stn_list[station_no]
st, resp, gap_times = procStream(stn, ref_time=self.bam.setup.fireball_datetime, merge=False)
st = findChn(st, "*")
self.current_waveform_raw = st.data
waveform_data, time_data = procTrace(st, ref_datetime=self.bam.setup.fireball_datetime,\
resp=resp, bandpass=[2, 8])
# Plot the waveform
for ii in range(len(waveform_data)):
self.waveform_data[station_no] = pg.PlotDataItem(x=time_data[ii], y=waveform_data[ii], pen='w')
canvas.addItem(self.waveform_data[station_no])
ref_pos = Position(self.setup.lat_centre, self.setup.lon_centre, 0)
try:
t_arrival = stn.metadata.position.pos_distance(ref_pos)/self.prefs.avg_sp_sound
canvas.setXRange(t_arrival-100, t_arrival+100, padding=1)
except TypeError:
t_arrival = None
#canvas.setLabel('bottom', "Time after {:}".format(setup.fireball_datetime), units='s')
# canvas.setLabel('left', "Signal Response")
return np.min(waveform_data), np.max(waveform_data)
def loadFrags(self):
frag_list = []
frag_name = fileSearch(['CSV (*.csv)'], None)
try:
with open(frag_name, 'r') as f:
for line in f:
a = line.strip('\n').split(',')
a = [float(i) for i in a]
frag_list.append(a)
f.close()
except IsADirectoryError as e:
errorMessage('Invalid CSV File!', 2, info='Directory given when .csv file was expected!', detail='{:}'.format(e))
return None
toTable(self.fragmentation_point, frag_list)
def addIniDockWidgets(self):
self.all_vars = QGroupBox(objectName="all_vars")
self.all_vars.setStyleSheet("QGroupBox#all_vars{color: white; background: rgb(0, 0, 0);}")
self.ini_dock.setWidget(self.all_vars)
dock_layout = QGridLayout()
self.all_vars.setLayout(dock_layout)
ini_tabs = QTabWidget()
dock_layout.addWidget(ini_tabs, 0, 0, 1, 2)
self.load_button = QPushButton('Load')
dock_layout.addWidget(self.load_button, 2, 0)
#self.load_button.clicked.connect(partial(loadGUI, self))
self.load_button.clicked.connect(partial(load, self))
self.save_button = QPushButton('Save')
dock_layout.addWidget(self.save_button, 2, 1)
# self.save_button.clicked.connect(partial(saveGUI, self, True))
self.save_button.clicked.connect(partial(save, self, True))
tab1 = QWidget()
tab1_content = QGridLayout()
tab1.setLayout(tab1_content)
ini_tabs.addTab(tab1, "General")
data_groupbox = QGroupBox("Fireball Data")
tab1_content.addWidget(data_groupbox, 1, 0, 1, 3)
data_groupbox_content = QGridLayout()
data_groupbox.setLayout(data_groupbox_content)
file_groupbox = QGroupBox("Input Files")
tab1_content.addWidget(file_groupbox, 3, 0, 1, 3)
file_groupbox_content = QGridLayout()
file_groupbox.setLayout(file_groupbox_content)
self.fireball_name_label, self.fireball_name_edits = createLabelEditObj('Fireball Name:', data_groupbox_content, 1, tool_tip='fireball_name')
self.lat_centre_label, self.lat_centre_edits = createLabelEditObj('Latitude Center:', data_groupbox_content, 2, tool_tip='lat_centre', validate='float')
self.lon_centre_label, self.lon_centre_edits = createLabelEditObj('Longitude Center:', data_groupbox_content, 3, tool_tip='lon_centre', validate='float')
self.deg_radius_label, self.deg_radius_edits = createLabelEditObj('Degrees in Search Radius:', data_groupbox_content, 4, tool_tip='deg_radius', validate='float')
self.fireball_datetime_label, self.fireball_datetime_edits = createLabelDateEditObj("Fireball Datetime", data_groupbox_content, 5, tool_tip='fireball_datetime')
self.light_curve_label, self.light_curve_edits, self.light_curve_buton = createFileSearchObj('Light Curve File: ', file_groupbox_content, 1, width=1, h_shift=0)
self.light_curve_buton.clicked.connect(partial(fileSearch, ['CSV (*.csv)', 'Text File (*.txt)'], self.light_curve_edits))
self.light_curve_buton.clicked.connect(partial(save, self, True))
self.contour_file_label, self.contour_file_edits, self.contour_file_buton = createFileSearchObj('Contour File: ', file_groupbox_content, 2, width=1, h_shift=0)
self.contour_file_buton.clicked.connect(partial(fileSearch, ['NPY (*.npy)'], self.contour_file_edits))
self.contour_file_buton.clicked.connect(partial(save, self, True))
self.station_picks_label, self.station_picks_edits, self.station_picks_buton = createFileSearchObj('Station Picks File: ', file_groupbox_content, 3, width=1, h_shift=0, tool_tip='station_picks_file')
self.station_picks_buton.clicked.connect(partial(fileSearch, ['CSV (*.csv)', 'Text File (*.txt)'], self.station_picks_edits))
self.station_picks_buton.clicked.connect(partial(save, self, True))
tab2 = QWidget()
tab2_content = QGridLayout()
tab2.setLayout(tab2_content)
ini_tabs.addTab(tab2, "Sources")
self.manual_label = QLabel("Manual Fragmentation Search:")
tab2_content.addWidget(self.manual_label, 4, 1, 1, 4)
self.manual_label.setToolTip("manual_fragmentation_search")
self.lat_frag_label = QLabel("Latitude:")
self.lat_frag_edits = QLineEdit("")
tab2_content.addWidget(self.lat_frag_label, 5, 1)
tab2_content.addWidget(self.lat_frag_edits, 6, 1)
self.lon_frag_label = QLabel("Longitude:")
self.lon_frag_edits = QLineEdit("")
tab2_content.addWidget(self.lon_frag_label, 5, 2)
tab2_content.addWidget(self.lon_frag_edits, 6, 2)
self.elev_frag_label = QLabel("Elevation:")
self.elev_frag_edits = QLineEdit("")
tab2_content.addWidget(self.elev_frag_label, 5, 3)
tab2_content.addWidget(self.elev_frag_edits, 6, 3)
self.time_frag_label = QLabel("Time:")
self.time_frag_edits = QLineEdit("")
tab2_content.addWidget(self.time_frag_label, 5, 4)
tab2_content.addWidget(self.time_frag_edits, 6, 4)
self.v_fixed_label, self.v_fixed_edits = createLabelEditObj('v_fixed:', tab2_content, 7, width=3, tool_tip='v_fixed', validate='float')
self.restricted_time_check = QCheckBox("Enable Restricted Time: ")
tab2_content.addWidget(self.restricted_time_check, 9, 4, 1, 1)
self.restricted_time_label, self.restricted_time_edits = createLabelDateEditObj("Restricted Time: ", tab2_content, 9, width=2, tool_tip='restricted_time')
self.azimuth_min_label, self.azimuth_min_edits = createLabelEditObj('azimuth_min:', tab2_content, 10, tool_tip='azimuth_min', validate='float')
self.azimuth_max_label, self.azimuth_max_edits = createLabelEditObj('azimuth_max:', tab2_content, 10, h_shift=2, tool_tip='azimuth_max', validate='float')
self.zangle_min_label, self.zangle_min_edits = createLabelEditObj('zangle_min:', tab2_content, 11, tool_tip='zenith_min', validate='float')
self.zangle_max_label, self.zangle_max_edits = createLabelEditObj('zangle_max:', tab2_content, 11, h_shift=2, tool_tip='zenith_max', validate='float')
self.lat_min_label, self.lat_min_edits = createLabelEditObj('lat_min', tab2_content, 12, tool_tip='x_min', validate='float')
self.lat_max_label, self.lat_max_edits = createLabelEditObj('lat_max', tab2_content, 12, h_shift=2, tool_tip='x_max', validate='float')
self.lon_min_label, self.lon_min_edits = createLabelEditObj('lon_min', tab2_content, 13, tool_tip='y_min', validate='float')
self.lon_max_label, self.lon_max_edits = createLabelEditObj('lon_max', tab2_content, 13, h_shift=2, tool_tip='y_max', validate='float')
self.elev_min_label, self.elev_min_edits = createLabelEditObj('elev_min:', tab2_content, 14, tool_tip='z_min', validate='float')
self.elev_max_label, self.elev_max_edits = createLabelEditObj('elev_max:', tab2_content, 14, h_shift=2, tool_tip='z_max', validate='float')
self.t_min_label, self.t_min_edits = createLabelEditObj('t_min:', tab2_content, 15, tool_tip='t_min', validate='float')
self.t_max_label, self.t_max_edits = createLabelEditObj('t_max:', tab2_content, 15, h_shift=2, tool_tip='t_max', validate='float')
self.v_min_label, self.v_min_edits = createLabelEditObj('v_min:', tab2_content, 16, tool_tip='v_min', validate='float')
self.v_max_label, self.v_max_edits = createLabelEditObj('v_max:', tab2_content, 16, h_shift=2, tool_tip='v_max', validate='float')
def atmPlotProfile(self, lat, lon, var_typ='t', perturb='none'):
consts = Constants()
if self.setup.weather_type == 'none':
errorMessage('Weather type is set to "none", no weather can be displayed', 1)
return None
dataset = parseWeather(self.setup)
if self.setup.weather_type == 'ecmwf':
sounding = findECMWFSound(lat, lon, dataset)
elif self.setup.weather_type == 'binary':
sounding = findAus(lat, lon, dataset)
elif self.setup.weather_type == 'custom':
sounding = dataset
self.var_typ = var_typ
self.atm_canvas.setLabel('left', "Height", units='m')
if self.var_typ == 't':
#(consts.GAMMA*consts.R/consts.M_0*temperature[:])**0.5
X = sounding[:, 1]
self.atm_canvas.setLabel('bottom', "Speed of Sound", units='m/s')
elif self.var_typ == 'm':
X = sounding[:, 2]
self.atm_canvas.setLabel('bottom', "Wind Magnitude", units='m/s')
elif self.var_typ == 'd':
X = sounding[:, 3]
self.atm_canvas.setLabel('bottom', "Wind Direction", units='deg E from N')
else:
errorMessage('Error reading var_typ in atmPlotProfile', 2)
return None
Y = sounding[:, 0]
self.atm_canvas.clear()
self.atm_canvas.plot(x=X, y=Y, pen='w')
SolutionGUI.update(self)
if self.setup.perturb_method == 'temporal':
# sounding data one hour later
sounding_u = parseWeather(self.setup, consts, time=1)
# sounding data one hour earlier
sounding_l = parseWeather(self.setup, consts, time=-1)
else:
sounding_u = []
sounding_l = []
if self.setup.perturb_method == 'ensemble':
ensemble_file = self.setup.perturbation_spread_file
else:
ensemble_file = ''
if self.setup.perturb_method != 'none':
for ptb_n in range(1, self.setup.perturb_times):
if self.prefs.debug:
print(printMessage("status"), "Perturbation {:}".format(ptb_n))
# generate a perturbed sounding profile
sounding_p = perturbation_method(self.setup, dataset, self.setup.perturb_method, \
sounding_u=sounding_u, sounding_l=sounding_l, \
spread_file=self.setup.perturbation_spread_file, lat=self.setup.lat_centre, lon=self.setup.lon_centre, ensemble_file=ensemble_file, ensemble_no=ptb_n)
sounding_p = findECMWFSound(lat, lon, sounding_p)
if self.var_typ == 't':
X = sounding_p[:, 1]
elif self.var_typ == 'm':
X = sounding_p[:, 2]
elif self.var_typ == 'd':
X = sounding_p[:, 3]
else:
print(printMessage("error"), 'atmPlotProfile')
Y = sounding_p[:, 0]
self.atm_canvas.plot(x=X, y=Y, pen='g')
SolutionGUI.update(self)
def atmValueChange(self, obj, slider):
if obj == self.atm_lat_label:
obj.setText('Latitude: {:8.2f}'.format(slider.value()*self.slider_scale))
elif obj == self.atm_lon_label:
obj.setText('Longitude: {:8.2f}'.format(slider.value()*self.slider_scale))
else:
errorMessage('Bad atm slider pass in atmValueChange', 2)
self.atmPlotProfile(self.atm_lat_slide.value()*self.slider_scale, self.atm_lon_slide.value()*self.slider_scale, self.var_typ)
def makeValueChange(self, obj, slider):
if obj == self.low_bandpass_label:
obj.setText('Low: {:8.2f} Hz'.format(slider.value()*self.bandpass_scale))
elif obj == self.high_bandpass_label:
obj.setText('High: {:8.2f} Hz'.format(slider.value()*self.bandpass_scale))
else:
errorMessage('Bad atm slider pass in makeValueChange', 2)
self.updatePlot()
def makeStationObj(self, lst):
new_lst = []
for line in lst:
pos = Position(line[2], line[3], line[4])
stn = Station(line[0], line[1], pos, line[5], line[6], line[7])
new_lst.append(stn)
return new_lst
def makePicks(self):
self.launch.setEnabled(False)
if not self.checkForWorkDir():
return None
if self.bam.setup.lat_centre is None or \
self.bam.setup.lon_centre is None or \
self.bam.setup.deg_radius is None:
errorMessage("Warning: Reference latitude, longitude, and/or search radius is not defined!", 1)
return None
# Init the constants
self.bam.setup.search_area = [self.bam.setup.lat_centre - self.bam.setup.deg_radius,
self.bam.setup.lat_centre + self.bam.setup.deg_radius,
self.bam.setup.lon_centre - self.bam.setup.deg_radius,
self.bam.setup.lon_centre + self.bam.setup.deg_radius]
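# search_area is stored as [lat_min, lat_max, lon_min, lon_max], centred on the
# reference coordinates and extending one search radius in each direction.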
# try:
# # turn coordinates into position objects
# self.bam.setup.traj_i = Position(self.bam.setup.lat_i, self.bam.setup.lon_i, self.bam.setup.elev_i)
# self.bam.setup.traj_f = Position(self.bam.setup.lat_f, self.bam.setup.lon_f, self.bam.setup.elev_f)
# except:
# self.bam.setup.traj_i = Position(0, 0, 0)
# self.bam.setup.traj_f = Position(0, 0, 0)
# errorMessage("Warning: Unable to build trajectory points", 1)
self.waveformPicker()
def waveformPicker(self, waveform_window=600):
"""
Arguments:
data_list: [list]
Keyword arguments:
waveform_window: [int] Number of seconds for the waveform window.
difference_filter_all: [bool] If True, the Kalenda et al. (2014) difference filter will be applied
on the data plotted in the overview plot of all waveforms.
"""
self.v_sound = self.prefs.avg_sp_sound
# self.t0 = self.bam.setup.t0
# Filter out all stations for which the mseed file does not exist
filtered_stn_list = []
names = []
lats = []
lons = []
self.lat_centre = self.bam.setup.lat_centre
self.lon_centre = self.bam.setup.lon_centre
self.waveform_window = waveform_window
self.current_station = 0
self.current_waveform_raw = None
self.current_waveform_delta = None
self.current_waveform_processed = None
# List of picks
self.pick_list = []
self.pick_group = 0
# Define a list of colors for groups
self.pick_group_colors = ['w', 'g', 'm', 'c', 'y']
# Current station map handle
self.current_station_scat = None
# Station waveform marker handle
self.current_station_all_markers = None
# Picks on all waveform plot handle
self.all_waves_picks_handle = None
# Handle for pick text
self.pick_text_handle = None
self.pick_markers_handles = []
# handle for pick marker on the waveform
self.pick_waveform_handle = None
# Default bandpass values
self.bandpass_low_default = 2.0
self.bandpass_high_default = 8.0
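# These defaults match the 2-8 Hz band hard-coded in discountDrawWaveform() above.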
### Sort stations by distance from source ###
# Calculate distances of station from source
self.source_dists = []
for stn in self.bam.stn_list:
stat_name, stat_lat, stat_lon = stn.metadata.code, stn.metadata.position.lat, stn.metadata.position.lon
names.append(stat_name)
lats.append(stat_lat)
lons.append(stat_lon)
# Calculate the distance in kilometers
dist = greatCircleDistance(np.radians(self.bam.setup.lat_centre), np.radians(self.bam.setup.lon_centre), \
np.radians(stat_lat), np.radians(stat_lon))
self.source_dists.append(dist)
# Get sorted arguments
dist_sorted_args = np.argsort(self.source_dists)
# Sort the stations by distance
self.bam.stn_list = [self.bam.stn_list[i] for i in dist_sorted_args]
self.source_dists = [self.source_dists[i] for i in dist_sorted_args]
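# The co-sorting above follows the usual argsort pattern; as a minimal standalone
# sketch with hypothetical distances:
#   dists = [300.0, 120.0, 210.0]
#   order = np.argsort(dists)            # -> array([1, 2, 0])
#   stations = [stations[i] for i in order]
#   dists    = [dists[i] for i in order]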
#############################################
# Init the plot framework
self.initPlot()
BASEMAP_SCALE = 2
### Create Basemap
# resolution c, l, i, h, f
self.m = Basemap(projection='merc', \
llcrnrlat=np.ceil(self.bam.setup.lat_centre - BASEMAP_SCALE*self.bam.setup.deg_radius),\
urcrnrlat=np.floor(self.bam.setup.lat_centre + BASEMAP_SCALE*self.bam.setup.deg_radius), \
llcrnrlon=np.ceil(self.bam.setup.lon_centre - 1.5*BASEMAP_SCALE*self.bam.setup.deg_radius), \
urcrnrlon=np.floor(self.bam.setup.lon_centre + 1.5*BASEMAP_SCALE*self.bam.setup.deg_radius), \
lat_ts=1, \
resolution='l', ax=self.make_picks_map_graph_view.ax)
self.m.fillcontinents(color='grey', lake_color='aqua')
self.m.drawcountries(color='black')
self.m.drawlsmask(ocean_color='aqua')
self.m.drawparallels(np.arange(self.bam.setup.lat_centre - BASEMAP_SCALE*self.bam.setup.deg_radius, \
self.bam.setup.lat_centre + BASEMAP_SCALE*self.bam.setup.deg_radius + 1, 1), labels=[1,0,0,1], textcolor="white", fmt="%.1f")
meridians = self.m.drawmeridians(np.arange(self.bam.setup.lon_centre - 1.5*BASEMAP_SCALE*self.bam.setup.deg_radius, \
self.bam.setup.lon_centre + 1.5*BASEMAP_SCALE*self.bam.setup.deg_radius + 1, 1), labels=[1,0,0,1], textcolor="white", rotation="horizontal", fmt="%.1f")
self.m.drawmapscale(self.bam.setup.lon_centre - self.bam.setup.deg_radius - 0.25, \
self.bam.setup.lat_centre + self.bam.setup.deg_radius, \
self.bam.setup.lon_centre, self.bam.setup.lat_centre, 200, \
barstyle='fancy', units='km', fontsize=9, yoffset=None, labelstyle='simple', fontcolor='k', \
fillcolor1='w', fillcolor2='k', ax=None, format='%d', zorder=None)
#self.make_picks_map_graph_view.ax.set_xticklabels(self.make_picks_map_graph_view.ax.get_xticks(), rotation=45)
# for the poor grad student who is looking at this code, basemap uses TEXT objects
# instead of matplotlib xticks, so you have to do it THIS way for some reason
for m in meridians:
try:
meridians[m][1][0].set_rotation(45)
except:
pass
if hasattr(self.bam.setup, "contour_file"):
if self.bam.setup.contour_file is not None:
try:
A = np.load(self.bam.setup.contour_file)
lat, lon, Z = A[0], A[1], A[2]
Z /= 60
x, y = self.m(lat, lon)
# print(x, y, Z)
sc = self.make_picks_map_graph_view.ax.tricontourf(x, y, Z, cmap="viridis_r", zorder=2, alpha=0.4)
cb = plt.colorbar(sc)
cb.set_label("Travel Time [min]")
# try:
# self.make_picks_map_graph_view.ax.tricontourf(x, y, Z, levels=14, cmap="viridis_r", zorder=2, alpha=0.3)
# except TypeError as e:
# print(printMessage("error"), "Contour error in creating tricontourf! {:}".format(e))
# a = self.make_picks_map_graph_view.ax.colorbar(cntr)
# a.set_label("Time of Arrival [s]")
except FileNotFoundError:
print(printMessage("warning"), "Contour File not found!")
else:
print(printMessage("warning"), "No Contour found!")
# if not hasattr(self, 'make_picks_gmap_view'):
# self.make_picks_gmap_view = QWebView()
# self.make_picks_top_graphs.addWidget(self.make_picks_gmap_view)
# self.make_picks_gmap_view.sizeHint = lambda: pg.QtCore.QSize(100, 100)
# # Extract coordinates of the reference station
# gmap_filename = htmlBuilder(self.bam.setup, self.prefs, self.bam.stn_list)
# if self.prefs.debug:
# print(printMessage("status"), "HTML map generated: {:}".format(gmap_filename))
# self.make_picks_gmap_view.load(QUrl().fromLocalFile(gmap_filename))
# self.make_picks_map_graph_canvas.setLabel('bottom', "Longitude", units='deg E')
# self.make_picks_map_graph_canvas.setLabel('left', "Latitude", units='deg N')
self.drawStats(0)
# self.make_picks_map_graph_canvas.setXRange(self.bam.setup.lon_centre - self.bam.setup.deg_radius, \
# self.bam.setup.lon_centre + self.bam.setup.deg_radius)
# self.make_picks_map_graph_canvas.setYRange(self.bam.setup.lat_centre - self.bam.setup.deg_radius, \
# self.bam.setup.lat_centre + self.bam.setup.deg_radius)
###
# Plot reasonably close CTBTO stations (no waveforms)
#####
# with open(os.path.join("supra", "Misc", "CTBTO_stats.csv"), "r+") as f:
# a = f.readlines()
# for stat in a:
# stat_dat = stat.strip().split(',')
# stat_name = stat_dat[0]
# stat_lat = float(stat_dat[1])
# stat_lon = float(stat_dat[2])
# approx_dis = np.sqrt((stat_lat - self.bam.setup.lat_centre)**2 + (stat_lon - self.bam.setup.lon_centre)**2)
# if approx_dis <= 2*self.bam.setup.deg_radius:
# # marker = pg.ScatterPlotItem()
# # marker.setPoints(x=[stat_lon], y=[stat_lat], pen=(255, 0, 255), brush=(255, 0, 255), symbol='d')
# txt = "{:}".format(stat_name)
# # txt.setPos(stat_lon, stat_lat)
# # self.make_picks_map_graph_canvas.addItem(marker, update=True)
# # self.make_picks_map_graph_canvas.addItem(txt)
# x, y = self.m(stat_lon, stat_lat)
# self.make_picks_map_graph_view.ax.scatter(x, y, 32, marker='d', color='m', zorder=3)
# self.make_picks_map_graph_view.ax.annotate(txt, xy=(x, y), fontsize=12, color="white")
if self.prefs.frag_en:
if not hasattr(self.bam.setup, 'fragmentation_point'):
self.bam.setup.fragmentation_point = []
errorMessage('"Show fragmentations" is checked, but could not find any fragmentation sources', 0)
# Fragmentation plot
# for i, line in enumerate(self.bam.setup.fragmentation_point):
# x, y = self.m(float(line.position.lon), float(line.position.lat))
# self.make_picks_map_graph_view.ax.scatter(x, y, marker='+', color='g', zorder=3)
# self.make_picks_map_graph_canvas.scatterPlot(x=[float(line.position.lon)], y=[float(line.position.lat)],\
# pen=(0 + i*255/len(self.bam.setup.fragmentation_point), 255 - i*255/len(self.bam.setup.fragmentation_point), 0), symbol='+')
# Plot source location
x, y = self.m(self.bam.setup.lon_centre, self.bam.setup.lat_centre)
self.make_picks_map_graph_view.ax.scatter(x, y, marker='+', color='y', zorder=3)
x, y = self.m(-80.77209, 43.26420)
self.make_picks_map_graph_view.ax.scatter(x, y, marker='H', color='r', zorder=3)
x, y = self.m(-82.2225, 41.2928)
self.make_picks_map_graph_view.ax.scatter(x, y, marker='H', color='r', zorder=3)
x, y = self.m(-81.1450, 41.3111)
self.make_picks_map_graph_view.ax.scatter(x, y, marker='H', color='r', zorder=3)
# self.make_picks_map_graph_canvas.scatterPlot(x=[self.bam.setup.lon_centre], y=[self.bam.setup.lat_centre], symbol='+', pen=(255, 255, 0))
# Manual trajectory search
if self.prefs.ballistic_en:
# try:
if hasattr(self.bam.setup, "trajectory"):
if self.bam.setup.trajectory is None:
errorMessage('Trajectory is not defined!', 1, info='If not defining a trajectory, then turn off show ballistic waveform')
elif self.bam.setup.trajectory.pos_i.isNone():
errorMessage('Trajectory initial position is not defined!', 1, info='If not defining a trajectory, then turn off show ballistic waveform')
else:
points = self.bam.setup.trajectory.trajInterp2(div=100, \
min_p=self.bam.setup.trajectory.pos_f.elev, max_p=self.bam.setup.trajectory.pos_i.elev)
b_lats = []
b_lons = []
for pt in points:
b_lats.append(pt[0])
b_lons.append(pt[1])
# Plot the trajectory with the bottom point known
x, y = self.m(b_lons, b_lats)
self.make_picks_map_graph_view.ax.arrow(x[0], y[0], x[-1]-x[0], y[-1]-y[0], color='b', zorder=3, width=4000)
# self.make_picks_map_graph_canvas.plot(b_lons,\
# b_lats,\
# pen=(0, 0, 255))
# Plot intersection with the ground
# x, y = self.m(self.bam.setup.trajectory.pos_f.lon, self.bam.setup.trajectory.pos_f.lat)
# self.make_picks_map_graph_view.ax.scatter(x, y, color='b', marker='+')
# except (TypeError, AttributeError) as e:
# errorMessage('Trajectory is not defined!', 1, info='If not defining a trajectory, then turn off show ballistic waveform', detail='{:}'.format(e))
# self.prefs.ballistic_en = False
self.make_picks_map_graph_view.show()
self.bam.stn_list = calcAllTimes(self, self.bam, self.prefs)
# self.bam.stn_list = calcAllSigs(self.bam, self.prefs)
save(self, True)
SolutionGUI.update(self)
self.updatePlot()
def chooseFilter(self, obj):
if obj.currentText() == 'Bandpass Filter':
self.filterBandpass()
elif obj.currentText() == 'Spectrogram of Raw Data':
self.showSpectrogram()
elif obj.currentText() == 'Difference Filter':
self.filterConvolution()
else:
pass
def navStats(self):
self.current_station = self.make_picks_station_choice.currentIndex()
self.updatePlot()
def refPosChanged(self):
self.drawStats(self.current_station)
def initPlot(self):
""" Initializes the plot framework. """
### Init the basic grid ###
# Register a mouse press event on the waveform axis
# plt.gca().figure.canvas.mpl_connect('button_press_event', self.onWaveMousePress)
# Register window resize
#plt.gca().figure.canvas.mpl_connect('resize_event', self.onResize)
self.make_picks_station_choice.clear()
self.pick_list = []
self.filter_combo_box.addItem('Raw Data')
self.filter_combo_box.addItem('Bandpass Filter')
self.filter_combo_box.addItem('Spectrogram of Raw Data')
self.filter_combo_box.addItem('Difference Filter')
self.filter_combo_box.currentTextChanged.connect(partial(self.chooseFilter, self.filter_combo_box))
self.export_to_csv.clicked.connect(self.exportCSV)
self.export_to_all_times.clicked.connect(self.exportToAllTimes)
self.station_marker = [None]*len(self.bam.stn_list)
self.station_waveform = [None]*len(self.bam.stn_list)
for ii, stn in enumerate(self.bam.stn_list):
# Check for infrasound channels in the station data
infra = False
for i in range(len(stn.stream)):
if "DF" in stn.stream[i].stats.channel:
infra = True
break
# Add a star to the station name if it contains an infrasound channel
if infra:
self.make_picks_station_choice.addItem("{:}-{:}*".format(stn.metadata.network, stn.metadata.code))
else:
self.make_picks_station_choice.addItem("{:}-{:}".format(stn.metadata.network, stn.metadata.code))
self.station_marker[ii] = pg.ScatterPlotItem()
self.station_waveform[ii] = pg.PlotCurveItem()
self.make_picks_ref_pos_choice.addItem("Lat/Lon Center")
if hasattr(self.bam, "source_list"):
for src in self.bam.source_list:
self.make_picks_ref_pos_choice.addItem("{:}: {:}".format(src.source_type, src.title))
self.make_picks_ref_pos_choice.activated.connect(self.refPosChanged)
self.make_picks_station_choice.activated.connect(self.navStats)
plt.style.use('dark_background')
fig = plt.figure(figsize=plt.figaspect(0.5))
fig.set_size_inches(8, 5)
self.station_ax = fig.add_subplot(1, 1, 1)
self.make_picks_waveform_canvas.scene().sigMouseClicked.connect(self.mouseClicked)
pg.QtWidgets.QApplication.processEvents()
# Plot all waveforms
######################################################################################
max_wave_value = 0
min_wave_value = np.inf
min_time = np.inf
max_time = 0
lats = []
lons = []
for i in range(len(self.bam.stn_list)):
lats.append(self.bam.stn_list[i].metadata.position.lat)
lons.append(self.bam.stn_list[i].metadata.position.lon)
self.ballistic_idx = []
self.fragmentation_idx = []
# Go though all stations and waveforms
bad_stats = []
for idx, stn in enumerate(self.bam.stn_list):
sys.stdout.write('\rPlotting: {:} {:} '.format(stn.metadata.network, stn.metadata.code))
sys.stdout.flush()
time.sleep(0.001)
print('')
SolutionGUI.update(self)
def deleteStation(self):
# Station deletion did not work previously, so this is currently a no-op
pass
# stn = self.bam.stn_list[self.current_station]
# if self.prefs.debug:
# print(printMessage("debug"), "Deleting Station {:}-{:}".format(stn.metadata.network, stn.metadata.code))
# self.bam.stn_list.pop(self.current_station)
# save(self)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Alt:
self.alt_pressed = True
if event.key() == QtCore.Qt.Key_D:
self.incrementStation()
if event.key() == QtCore.Qt.Key_A:
self.decrementStation()
if event.key() == QtCore.Qt.Key_W:
try:
self.make_picks_waveform_canvas.clear()
self.filterBandpass(event=event)
except:
pass
if event.key() == QtCore.Qt.Key_S:
self.showSpectrogram(event=event)
if event.key() == QtCore.Qt.Key_C:
try:
self.make_picks_waveform_canvas.clear()
self.filterConvolution(event=event)
except:
pass
if event.key() == QtCore.Qt.Key_I:
try:
self.invertGraph()
except:
pass
if event.key() == QtCore.Qt.Key_X:
self.deleteStation()
if event.key() == QtCore.Qt.Key_Up:
self.group_no += 1
self.group_no = self.group_no%2
if self.group_no == 0:
self.tog_picks.changeImg(1)
else:
self.tog_picks.changeImg(5)
print("Current Pick Group: {:}".format(self.group_no))
if event.key() == QtCore.Qt.Key_Down:
self.group_no -= 1
self.group_no = self.group_no%2
if self.group_no == 0:
self.tog_picks.changeImg(1)
else:
self.tog_picks.changeImg(5)
print("Current Pick Group: {:}".format(self.group_no))
def keyReleaseEvent(self, event):
if event.key() == QtCore.Qt.Key_Alt:
self.alt_pressed = False
def incrementStation(self, event=None):
""" Increments the current station index. """
self.next_stat.setState(True)
qApp.processEvents()
self.make_picks_waveform_canvas.clear()
self.current_station += 1
if self.current_station >= len(self.bam.stn_list):
self.current_station = 0
# while self.checkExists() == False:
# self.current_station += 1
# if self.current_station >= len(self.bam.stn_list):
# self.current_station = 0
self.updatePlot(stn_changed=True)
self.next_stat.setState(False)
qApp.processEvents()
def decrementStation(self, event=None):
""" Decrements the current station index. """
self.prev_stat.setState(True)
qApp.processEvents()
self.make_picks_waveform_canvas.clear()
self.current_station -= 1
if self.current_station < 0:
self.current_station = len(self.bam.stn_list) - 1
# while self.checkExists() == False:
# self.current_station -= 1
# if self.current_station < 0:
# self.current_station = len(self.bam.stn_list) - 1
self.updatePlot(stn_changed=True)
self.prev_stat.setState(False)
qApp.processEvents()
def checkExists(self):
"""
Checks if the current waveform is readable
"""
# Extract current station
stn = self.bam.stn_list[self.current_station]
# Get the miniSEED file path
mseed_file_path = os.path.join(self.dir_path, stn.file_name)
try:
if os.path.isfile(mseed_file_path):
pass
else:
if self.prefs.debug:
print('File {:s} does not exist!'.format(mseed_file_path))
return False
except TypeError as e:
if self.prefs.debug:
print('Opening file {:s} failed with error: {:s}'.format(mseed_file_path, str(e)))
return False
try:
obspy.read(mseed_file_path)
except TypeError:
if self.prefs.debug:
print('mseed file could not be read:', mseed_file_path)
return False
return True
def mouseClicked(self, evt):
############################
# Conditional Clicks HERE
############################
stn = self.bam.stn_list[self.current_station]
channel = self.make_picks_channel_choice.currentText()
if self.tog_picks.isChecked():
mousePoint = self.make_picks_waveform_canvas.vb.mapToView(evt.pos())
self.make_picks_waveform_canvas.scatterPlot(x=[mousePoint.x()], y=[0], pen=self.colors[self.group_no], brush=self.colors[self.group_no], update=True)
pick = Pick(mousePoint.x(), stn, self.current_station, stn, self.group_no)
self.pick_list.append(pick)
print(printMessage('info'), "New pick object made: {:}-{:} {:.4f} s".format(stn.metadata.network, stn.metadata.code, mousePoint.x()))
if self.show_height.isChecked():
stat_picks = []
for pick in self.pick_list:
if pick.stn == stn:
stat_picks.append(pick)
self.w = FragmentationStaff(self.bam.setup, [stn, self.current_station, stat_picks, channel], self.bam)
self.w.setGeometry(QRect(100, 100, 1200, 900))
self.w.show()
# elif self.solve_height.isChecked():
# ref_pos = Position(self.setup.lat_centre, self.setup.lon_centre, 0)
# P = self.setup.trajectory.trajInterp(div=HEIGHT_SOLVER_DIV)
# stn = self.stn_list[self.current_station]
# stn.position.pos_loc(ref_pos)
# dataset = parseWeather(self.setup)
# C = []
# max_steps = len(P)*self.setup.perturb_times + 1
# count = 0
# loadingBar("Trying Heights", 0, max_steps)
# A = self.setup.trajectory.pos_i
# B = self.setup.trajectory.pos_f
# A.pos_loc(B)
# B.pos_loc(B)
# # Get prediction of time of the meteor, so the timing of each fragmentation can be known
# length_of_meteor = np.sqrt((A.x - B.x)**2 + (A.y - B.y)**2 + (A.z - B.z)**2)
# time_of_meteor = length_of_meteor/self.setup.trajectory.v
# for ii, point in enumerate(P):
# point.pos_loc(ref_pos)
# for ptb_n in range(self.setup.perturb_times):
# self.sounding = self.perturbGenerate(ptb_n, dataset, self.perturbSetup())
# zProfile, _ = getWeather(np.array([point.lat, point.lon, point.elev]), np.array([stn.position.lat, stn.position.lon, stn.position.elev]), self.setup.weather_type, \
# [ref_pos.lat, ref_pos.lon, ref_pos.elev], self.sounding, convert=False)
# #zProfile = zInterp(stn.position.z, point.z, zProfile, div=37)
# f_time, _, _, _ = cyscan(np.array([point.x, point.y, point.z]), np.array([stn.position.x, stn.position.y, stn.position.z]), zProfile, wind=True, \
# n_theta=self.setup.n_theta, n_phi=self.setup.n_phi, h_tol=self.setup.h_tol, v_tol=self.setup.v_tol)
# correction = time_of_meteor - A.z/self.setup.trajectory.pos_i.elev*(time_of_meteor)
# C.append(f_time + correction)
# count += 1
# loadingBar("Trying Heights", count, max_steps)
# C = np.array(C)
# idx = np.nanargmin(np.abs(C - pick.time))
# print("Error in time: {:.2f} s".format(np.abs(C - pick.time)[idx]))
# height_idx = idx//self.setup.perturb_times
# pert_idx = idx%self.setup.perturb_times
# self.position.append(P[height_idx])
# self.x = AllWaveformViewer(self.setup, self.stn_list, self.position, pert_idx)
# self.x.setGeometry(QRect(100, 100, 900, 900))
# self.x.show()
elif self.tog_rm_picks.isChecked():
self.make_picks_waveform_canvas.clear()
for ii, pick in enumerate(self.pick_list):
if pick.stn_no == self.current_station:
self.pick_list.pop(ii)
if self.prefs.debug:
print('Pick removed!')
self.make_picks_waveform_canvas.scatterPlot(x=[pick.time], y=[0], pen=self.colors[self.group_no], brush=self.colors[self.group_no], update=True)
self.drawWaveform(station_no=self.current_station)
# elif self.gnd_mot_picks.isChecked():
# # Open ground motion Dialog
# current_chn_start = channel[0:2]
# channel_opts = [self.make_picks_channel_choice.itemText(i) for i in range(self.make_picks_channel_choice.count())]
# # Check if zne/z12 is available
# count = 0
# for chn in channel_opts:
# if current_chn_start in chn:
# count += 1
# if count == 3:
# self.gr = ParticleMotion(self.make_picks_map_graph_canvas, self.bam, stn, channel, t_arrival=self.source_dists[self.current_station]/(310/1000), group_no=self.group_no)
# self.gr.setGeometry(QRect(100, 100, 1600, 700))
# self.gr.show()
# elif count < 3:
# errorMessage("Not enough channel data for particle motion!", 2, \
# detail="Three orthogonal stations are needed to do particle motion!")
# else:
# errorMessage("If you are seeing this, then somehow more than 3 channels have been selected",\
# 2, detail="")
save(self, True)
# save(self)
# elif self.bandpass_picks.isChecked():
# # Open bandpass GUI
# self.bp = BandpassWindow(self.bam, stn, channel, t_arrival=self.source_dists[self.current_station]/(310/1000))
# self.bp.setGeometry(QRect(100, 100, 1200, 700))
# self.bp.show()
# elif self.polmap_picks.isChecked():
# ref_pos = Position(self.bam.setup.lat_centre, self.bam.setup.lon_centre, 0)
# points = []
# # Calculate all points here
# for stn in self.bam.stn_list:
# if not hasattr(stn, "polarization"):
# stn.polarization = Polarization()
# if len(stn.polarization.azimuth) > 0:
# D = propegateBackwards(ref_pos, stn, self.bam)
# for line in D:
# if not np.isnan(line[0]):
# P = Position(0, 0, 0)
# P.x = line[0]
# P.y = line[1]
# P.z = line[2]
# P.pos_geo(ref_pos)
# S = Supracenter(P, line[3])
# points.append([S, stn.color])
# elif self.traj_space.isChecked():
# self.ts = TrajSpace(self.bam)
# self.ts.setGeometry(QRect(100, 100, 1200, 700))
# self.ts.show()
# self.traj_space.setState(False)
elif self.save_picks.isChecked():
# Turn this off once its been clicked
self.save_picks.setState(False)
# folder of event and station
dir_path = os.path.join(self.prefs.workdir, self.bam.setup.fireball_name, \
"{:}-{:}.{:}".format(stn.metadata.network, stn.metadata.code, channel))
# Make directory if needed
mkdirP(dir_path)
# save plot
file_name = os.path.join(dir_path, "Amplitude-time_series.png")
exporter = pg.exporters.ImageExporter(self.make_picks_waveform_view.scene())
exporter.export(file_name)
self.make_picks_map_graph_view.figure.savefig(os.path.join(dir_path, "Contour_map.png"))
lines = "#"*20 + "\n"
# select() returns a Stream; take the first matching Trace for the metadata dump below
tr = stn.stream.select(channel=channel)[0]
with open(os.path.join(dir_path, "Station_Metadata.txt"), 'w+') as f:
f.write(lines)
f.write("Station {:}\n".format(stn.metadata.network, stn.metadata.code))
f.write("{:}\n".format(stn.metadata.name))
f.write(lines)
f.write("Latitude {:.4f} °N\n".format(stn.metadata.position.lat))
f.write("Longitude {:.4f} °E\n".format(stn.metadata.position.lon))
f.write("Elevation {:.2f} m\n".format(stn.metadata.position.elev))
f.write(lines)
f.write("Response Attached: {:}\n".format(printTrue(stn.hasResponse())))
f.write("Seismic Available: {:}\n".format(printTrue(stn.hasSeismic())))
f.write("Infrasound Available: {:}\n".format(printTrue(stn.hasInfrasound())))
f.write(lines)
f.write("Channel Trace Stats:")
f.write("\t Channel: {:}".format(channel))
f.write("\t Sampling Rate: {:} Hz".format(tr.sampling_rate))
f.write("\t Delta: {:} s".format(tr.delta))
f.write("\t Calibration Factor {:}".format(tr.calib))
f.write("\t Npts {:}".format(tr.npts))
f.write("\t Start time {:}".format(tr.starttime))
f.write("\t End time {:}".format(tr.endtime))
errorMessage("Station Waveform Saved!", 0, title="Saved!", detail="Waveform saved in project folder {:}".format(dir_path))
# elif self.rotatepol.isChecked():
# rng = self.make_picks_waveform_canvas.getAxis('bottom').range
# start_time = self.bam.setup.fireball_datetime
# lside = start_time + datetime.timedelta(seconds=rng[0])
# rside = start_time + datetime.timedelta(seconds=rng[1])
# self.rpol = RotatePolWindow(self.bam.stn_list[self.current_station], channel, lside, rside, start_time)
# self.rpol.setGeometry(QRect(200, 300, 1600, 800))
# self.rpol.show()
# ### Annotations
# elif self.annote_picks.isChecked():
# # Pass grid to polmap
# self.pm = Polmap(self.bam, points)
# self.pm.setGeometry(QRect(100, 100, 1200, 700))
# self.pm.show()
# ### Annotations
elif self.annote_picks.isChecked():
# Create annotation
mousePoint = self.make_picks_waveform_canvas.vb.mapToView(evt.pos())
# pick = Pick(mousePoint.x(), self.stn_list[self.current_station], self.current_station, self.stn_list[self.current_station], self.group_no)
self.a = AnnoteWindow(mousePoint.x(), self.bam.stn_list[self.current_station], self.bam, mode="new", an=None, current_channel=channel)
self.a.setGeometry(QRect(200, 300, 1600, 800))
self.a.show()
# self.drawWaveform()
# self.alt_pressed = False
def addAnnotes(self):
TOP = self.make_picks_waveform_canvas.getAxis('left').range[1]
BOT = self.make_picks_waveform_canvas.getAxis('left').range[0]
stn = self.bam.stn_list[self.current_station]
for an in stn.annotation.annotation_list:
# line = pg.InfiniteLine(pos=(an.time, 0), angle=90, pen=an.color, label=an.title)
# line2 = pg.InfiniteLine(pos=(an.time+an.length, 0), angle=90, pen=an.color)
# self.make_picks_waveform_canvas.addItem(line, update=True)
# self.make_picks_waveform_canvas.addItem(line2, update=True)
length = an.length
if length == 0: length = 0.5
annote_area = pg.ROI((an.time, BOT), size=(length, TOP-BOT), pen=pg.mkPen(an.color),\
movable=False, \
rotatable=False, resizable=False)
annote_area.setAcceptedMouseButtons(QtCore.Qt.LeftButton)
annote_area.sigClicked.connect(partial(self.onAnnoteClick, an))
self.make_picks_waveform_canvas.addItem(annote_area, update=True)
def onAnnoteClick(self, an):
if not self.annote_picks.isChecked():
stn = self.bam.stn_list[self.current_station]
annote_list = stn.annotation.annotation_list
channel = self.make_picks_channel_choice.currentText()
self.a = AnnoteWindow(an.time, stn, self.bam, mode="edit", an=an, current_channel=channel)
self.a.setGeometry(QRect(200, 300, 1600, 800))
self.a.show()
def psdPlot(self):
if self.psd.isChecked():
print(printMessage("status"), "Calculating PSD")
stn = self.bam.stn_list[self.current_station]
mseed = stn.stream.copy()
current_channel = self.make_picks_channel_choice.currentIndex()
chn_selected = self.make_picks_channel_choice.currentText()
# A second stream containing channels with the response
resp = stn.response
st = mseed.select(inventory=resp.select(channel=chn_selected))
# Narrow the stream down to the selected channel
st = st.select(channel=chn_selected)
# Unpack the miniSEED data and remove the instrument response (displacement output)
st = st[0].remove_response(inventory=resp, output="DISP")
st.detrend()
delta = st.stats.delta
start_datetime = st.stats.starttime.datetime
end_datetime = st.stats.endtime.datetime
waveform_data = st.data
self.current_waveform_time = np.arange(0, mseed[current_channel].stats.npts / mseed[current_channel].stats.sampling_rate, \
delta)
# self.current_waveform_time = np.arange(0, (end_datetime - start_datetime).total_seconds(), \
# delta)
# Construct time array, 0 is at start_datetime
time_data = np.copy(self.current_waveform_time)
# Cut the waveform data length to match the time data
waveform_data = waveform_data[:len(time_data)]
time_data = time_data[:len(waveform_data)] + stn.offset
sps = st.stats.sampling_rate
dt = 1/st.stats.sampling_rate
length = len(waveform_data)
freq = np.linspace(1/length, (sps/2), length)*sps/length
FAS = abs(fft(waveform_data))
# FAS_n = abs(fft(z_n))
# fas_data = pg.PlotDataItem()
plt.semilogx(freq, FAS)
# fas_noise_data = pg.PlotDataItem()
# fas_noise_data.setData(x=freq, y=FAS_n, pen=(255, 255, 255))
# fas_diff_data = pg.PlotDataItem()
# fas_diff_data.setData(x=freq, y=np.abs(FAS/FAS_n), pen=(0, 125, 255))
plt.xlabel("Frequency [Hz]")
plt.ylabel("Response")
plt.show()
else:
print(printMessage("debug"), "Turning off PSD")
def drawStats(self, current_stat):
self.make_picks_station_graph_view.ax.clear()
toa_line_time = np.linspace(0, 1000, 3)
# Plot constant sound speed reference lines (270, 310 and 350 m/s), assuming the release happened at t = 0
self.make_picks_station_graph_view.ax.plot((toa_line_time)*310/1000, toa_line_time, c='m', linestyle='--')
self.make_picks_station_graph_view.ax.plot((toa_line_time)*350/1000, toa_line_time, c='m', linestyle='--')
self.make_picks_station_graph_view.ax.plot((toa_line_time)*270/1000, toa_line_time, c='m', linestyle='--')
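# Worked example for the reference lines: on the 310 m/s line, a station 62 km from
# the reference point plots at t = 62/0.310 = 200 s.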
self.make_picks_station_graph_view.ax.set_xlabel("Distance from Reference [km]")
self.make_picks_station_graph_view.ax.set_ylabel("Time from Reference [s]")
group_list = []
groups = set()
src_title = self.make_picks_ref_pos_choice.currentText()
if src_title == "Lat/Lon Center":
ref_pos = Position(self.bam.setup.lat_centre, self.bam.setup.lon_centre, 0)
else:
for src in self.bam.source_list:
# Forgive me python :!
if "{:}: {:}".format(src.source_type, src.title) == src_title:
if src.source_type == "Ballistic":
ref_pos = src.source.pos_f
elif src.source_type == "Fragmentation":
ref_pos = src.source.position
# The times here will be off by a few seconds since the source time offset is not applied here
print("Reference Position: {:}".format(ref_pos))
for ii, stn in enumerate(self.bam.stn_list):
txt = "{:}".format(stn.metadata.code)
if txt == "ZACC": txt = "QZAG"
if txt == "AR_ZG": txt = "QARH"
if txt == "KASN": txt = "QKAS"
x, y = self.m(stn.metadata.position.lon, stn.metadata.position.lat)
# Calculate the distance from the source point to this station (kilometers)
station_dist = ref_pos.pos_distance(stn.metadata.position)/1000
toa = station_dist/(310/1000)
## ENABLE THIS TO HAVE MOVING CURSOR RED STATIONS
# if ii == current_stat:
# self.make_picks_map_graph_view.ax.scatter(x, y, 32, marker='^', color='red', zorder=3)
# self.make_picks_station_graph_view.ax.scatter(station_dist, 0, c='r', marker="^")
# self.make_picks_station_graph_view.ax.axvline(x=station_dist, c='r')
# self.make_picks_map_graph_view.ax.annotate(txt, xy=(x, y), fontsize=12, color="white")
# else:
self.make_picks_map_graph_view.ax.scatter(x, y, 32, marker='^', color='w', zorder=3)
self.make_picks_station_graph_view.ax.scatter(station_dist, 0, c='w', marker="^")
self.make_picks_station_graph_view.ax.axvline(x=station_dist, c='w')
self.make_picks_map_graph_view.ax.annotate(txt, xy=(x, y), fontsize=12, color="w")
if not hasattr(stn, 'annotation'):
stn.annotation = AnnotationList()
annotes_list = stn.annotation.annotation_list
for an in annotes_list:
groups.add(an.group)
group_list.append([an.group, an.time, an.length, stn])
# put a sample mark on the map
# LON = 174.4
# LAT = -41.25
# x, y = self.m(LON, LAT)
# self.make_picks_map_graph_view.ax.scatter(x, y, 48, marker='*', color='red', zorder=4)
if len(groups) > 0:
groups = list(groups)
for gr in groups:
gr_data_x = []
gr_data_y = []
for an in group_list:
if gr == an[0]:
gr_dis = ref_pos.pos_distance(an[3].metadata.position)/1000
gr_time = an[1]
gr_data_x.append(gr_dis)
gr_data_y.append(gr_time)
self.make_picks_station_graph_view.ax.scatter(gr_data_x, gr_data_y, s=32, label="{:}".format(gr))
self.make_picks_station_graph_view.ax.legend()
self.make_picks_map_graph_view.show()
self.make_picks_station_graph_view.show()
def drawWaveform(self, channel_changed=0, waveform_data=None, station_no=0, bandpass=None, stn_changed=False):
""" Draws the current waveform from the current station in the waveform window. Custom waveform
can be given an drawn, which is used when bandpass filtering is performed.
"""
loadSourcesIntoBam(self.bam)
station_no = self.current_station
# Clear waveform axis
self.make_picks_waveform_canvas.clear()
# Extract current station
stn = self.bam.stn_list[station_no]
# Get the miniSEED file path
# Try reading the mseed file, if it doesn't work, skip to the next frame
if channel_changed == 0:
# Populate channel list
self.make_picks_channel_choice.blockSignals(True)
self.make_picks_channel_choice.clear()
for i in range(len(stn.stream)):
self.make_picks_channel_choice.addItem(stn.stream[i].stats.channel)
self.make_picks_channel_choice.blockSignals(False)
current_channel = self.make_picks_channel_choice.currentIndex()
chn_selected = self.make_picks_channel_choice.currentText()
st, resp, gap_times = procStream(stn, ref_time=self.bam.setup.fireball_datetime, merge=False)
# if channel_changed == 0:
# # Populate channel list
# self.make_picks_channel_choice.blockSignals(True)
# self.make_picks_channel_choice.clear()
# for i in range(len(st)):
# self.make_picks_channel_choice.addItem(st.stats.channel)
# self.make_picks_channel_choice.blockSignals(False)
# nominal way to get trace metadata
# stn_id = mseed[current_channel].get_id()
# print(resp.get_channel_metadata(stn_id))
# A second stream containing channels with the response
# If a channel is built up of multiple sections with gaps between
# Merge the files without gaps
st = findChn(st, chn_selected)
self.current_waveform_raw = st.data
waveform_data, time_data = procTrace(st, ref_datetime=self.bam.setup.fireball_datetime,\
resp=resp, bandpass=bandpass)
# Calculate the time of arrival assuming constant propagation with the given speed of sound
try:
t_arrival = self.source_dists[self.current_station]/(self.v_sound/1000) + self.t0
except:
t_arrival = self.source_dists[self.current_station]/(310/1000)
# Plot the waveform
# self.current_station_waveform = pg.PlotDataItem(x=time_data, y=waveform_data, pen='w')
# 2.370980392E10
def getDeriv(tt, ww):
ddt = tt[1] - tt[0]
dw = []
dt = []
for w in range(len(ww) - 1):
dl = ww[w + 1] - ww[w]
dw.append(dl/ddt)
dt.append(tt[w])
return np.array(dt), np.array(dw)
for ii in range(len(waveform_data)):
## Derivatives
# dt, dw = getDeriv(time_data[ii], waveform_data[ii])
# print(len(dt), len(dw))
# self.current_station_waveform = pg.PlotDataItem(x=dt, y=dw, pen='r')
# self.make_picks_waveform_canvas.addItem(self.current_station_waveform)
##
self.current_station_waveform = pg.PlotDataItem(x=time_data[ii], y=waveform_data[ii], pen='w')
self.make_picks_waveform_canvas.addItem(self.current_station_waveform)
for gap in gap_times:
gap_plot = pg.LinearRegionItem(values=gap, orientation='vertical', brush='r', pen='r', hoverBrush='r', hoverPen='r', movable=False)
# gap_plot = pg.PlotDataItem(x=gap, y=[0, 0], pen='r')
self.make_picks_waveform_canvas.addItem(gap_plot)
# self.make_picks_waveform_canvas.plot(x=[t_arrival, t_arrival], y=[np.min(waveform_data), np.max(waveform_data)], pen=pg.mkPen(color=(255, 0, 0), width=2))
self.make_picks_waveform_canvas.setXRange(t_arrival-100, t_arrival+100, padding=1)
self.make_picks_waveform_canvas.setLabel('bottom', "Time after {:} s".format(self.bam.setup.fireball_datetime))
if resp is not None:
if chn_selected not in ["BDF", "HDF"]:
self.make_picks_waveform_canvas.setLabel('left', "Ground Motion", units='m')
else:
self.make_picks_waveform_canvas.setLabel('left', "Overpressure", units='Pa')
else:
self.make_picks_waveform_canvas.setLabel('left', "Response")
self.make_picks_waveform_canvas.plot(x=[-10000, 10000], y=[0, 0], pen=pg.mkPen(color=(100, 100, 100)))
font=QtGui.QFont()
font.setPixelSize(20)
self.make_picks_waveform_canvas.getAxis("bottom").tickFont = font
self.make_picks_waveform_canvas.getAxis("left").tickFont = font
self.make_picks_waveform_canvas.getAxis('bottom').setPen(self.color.WHITE)
self.make_picks_waveform_canvas.getAxis('left').setPen(self.color.WHITE)
for pick in self.pick_list:
if pick.stn_no == self.current_station:
self.make_picks_waveform_canvas.scatterPlot(x=[pick.time], y=[0], pen='r', update=True)
SolutionGUI.update(self)
# Initialize variables
b_time = 0
# Extract coordinates of the reference station
ref_pos = Position(self.bam.setup.lat_centre, self.bam.setup.lon_centre, 0)
# Calculate ground distances
try:
stn.stn_ground_distance(self.bam.setup.trajectory.pos_f)
except:
stn.stn_ground_distance(ref_pos)
available_channels = []
for i in range(len(st)):
available_channels.append(st.stats.channel)
expected_arrival_time = stn.ground_distance/330
apx_arrival = pg.InfiniteLine(pos=expected_arrival_time, angle=90, pen='m', movable=False, bounds=None, hoverPen=None, label=None, labelOpts=None, span=(0, 1), markers=None, name=None)
# self.make_picks_waveform_canvas.addItem(apx_arrival)
# If showing computer-found signals
# if self.show_sigs.isChecked() and stn.signals is not None:
# offset = (st.stats.starttime.datetime - self.bam.setup.fireball_datetime).total_seconds()
# for sig in stn.signals:
# start = sig[0] + offset
# finish = sig[1] + offset
# roi_box = pg.LinearRegionItem(values=(start, finish))
# roi_box.setMovable(False)
# self.make_picks_waveform_canvas.addItem(roi_box)
# If manual ballistic search is on
if self.prefs.ballistic_en and self.show_ball.getState() == True:
src = self.bam.setup.traj_metadata[0]
b_time = stn.times.ballistic[0][0][0]
if not np.isnan(b_time):
b_arrival = pg.InfiniteLine(pos=b_time, angle=90, pen=pg.mkPen(color=src.color, width=2), movable=False, bounds=None, hoverPen=None, label=None, labelOpts=None, span=(0, 1), markers=None, name=None)
self.make_picks_waveform_canvas.addItem(b_arrival)
# print(printMessage("ballistic"), "Nominal Arrival: {:.3f} s".format(b_time))
# else:
# print(printMessage("ballistic"), "No Nominal Arrival")
if self.prefs.pert_en:
data, remove = chauvenet(stn.times.ballistic[0][1][0])
# try:
# print(printMessage("ballistic"), 'Perturbation Arrival Range: {:.3f} - {:.3f}s'.format(np.nanmin(data), np.nanmax(data)))
# print('Removed points {:}'.format(remove))
# except ValueError:
# print(printMessage("ballistic"), 'No Perturbation Arrivals')
for i in range(len(data)):
if self.show_perts.isChecked():
try:
self.make_picks_waveform_canvas.plot(x=[data[i]]*2, \
y=[np.min(waveform_data), np.max(waveform_data)], pen=pg.mkPen(color=src.color, style=QtCore.Qt.DotLine) )
except:
pass
# Fragmentation Prediction
### SHOW PRESSURE LINE
if ENERGY is not None:
R = FRAG_LOC.pos_distance(stn.metadata.position)
W = (ENERGY/4.184e6)**(1/3)  # cube root of the TNT-equivalent yield [kg], as used in the scaled distance
f_d = 1  # transmissionFactor(FRAG_LOC.elev)
Z = f_d*R/W
P_0 = 101325
P_a = 100
del_p = P_0*808*(1 + (Z/4.5)**2)/(1 + (Z/0.048)**2)**0.5/(1 + (Z/0.32)**2)**0.5/(1 + (Z/1.35)**2)**0.5
pres_fact = (P_0/P_a)**(1/6)
del_p = del_p/pres_fact
del_p_line = pg.InfiniteLine(pos=del_p, angle=0, pen=pg.mkPen(color="r", width=2), movable=False, bounds=None, hoverPen=None, label=None, labelOpts=None, span=(0, 1), markers=None, name=None)
self.make_picks_waveform_canvas.addItem(del_p_line)
del_p_line = pg.InfiniteLine(pos=-del_p, angle=0, pen=pg.mkPen(color="r", width=2), movable=False, bounds=None, hoverPen=None, label=None, labelOpts=None, span=(0, 1), markers=None, name=None)
self.make_picks_waveform_canvas.addItem(del_p_line)
# If manual fragmentation search is on
if self.prefs.frag_en and self.show_frags.getState() == True:
for i, frag in enumerate(self.bam.setup.fragmentation_point):
src = self.bam.setup.frag_metadata[i]
f_time = stn.times.fragmentation[i][0][0]
v_time = (frag.position.elev - stn.metadata.position.elev)/310
h_time = frag.position.ground_distance(stn.metadata.position)/1000
p_time = h_time + v_time
# print('++++++++++++++++')
# print(printMessage("fragmentation"), '({:}) ({:6.2f} km)'.format(i+1, frag.position.elev/1000))
frag.position.pos_loc(stn.metadata.position)
stn.metadata.position.pos_loc(stn.metadata.position)
xyz_range = np.sqrt((frag.position.x - stn.metadata.position.x)**2 + \
(frag.position.y - stn.metadata.position.y)**2 + \
(frag.position.z - stn.metadata.position.z)**2)
# print('Range {:7.3f} km'.format(xyz_range/1000))
if not np.isnan(f_time):
# Plot Fragmentation Prediction
#pen=pg.mkPen(color=src.color, width=2)
self.make_picks_waveform_canvas.plot(x=[f_time]*2, y=[np.min(waveform_data), np.max(waveform_data)], pen=(0, 255, 0), label='Fragmentation')
# if self.show_prec.isChecked():
# # Plot Precursor Arrivals
# self.make_picks_waveform_canvas.plot(x=[p_time]*2, y=[np.min(waveform_data), np.max(waveform_data)], pen=(210, 235, 52), label='Fragmentation')
stn.stn_distance(frag.position)
#print("Range: {:7.3f} km".format(stn.distance/1000))
# print('Arrival: {:.3f} s'.format(f_time))
# else:
# pass
# print(printMessage("fragmentation"), '({:}) ({:6.2f} km) No Arrival'.format(i+1, frag.position.elev/1000))
# print("### BREAKDOWN OF TIMES for a 40 km Event ###")
# print("Reference Time: {:}".format(self.bam.setup.fireball_datetime))
# pt = traj.trajInterp2(div=100,\
# min_p=39000,\
# max_p=41000)[0]
# print("Time along Trajectory: {:} s".format(pt[3]))
if self.prefs.pert_en:
data, remove = self.obtainPerts(stn.times.fragmentation, i)
# try:
# print(printMessage("fragmentation"), 'Perturbation Arrival Range: {:.3f} - {:.3f}s'.format(np.nanmin(data), np.nanmax(data)))
# print('Removed points {:}'.format(remove))
# except ValueError:
# print(printMessage("fragmentation"), 'No Perturbation Arrivals')
for j in range(len(data)):
if self.show_perts.isChecked():
try:
if not np.isnan(data[j]):
self.make_picks_waveform_canvas.plot(x=[data[j]]*2, y=[np.min(waveform_data),\
np.max(waveform_data)], alpha=0.3,\
pen=pg.mkPen(color=(0, 255, 0), style=QtCore.Qt.DotLine), zorder=3)
except IndexError:
errorMessage("Error in Arrival Times Index", 2, detail="Check that the arrival times file being used aligns with stations and perturbation times being used. A common problem here is that more perturbation times were selected than are available in the given Arrival Times Fireball. Try setting perturbation_times = 0 as a first test. If that doesn't work, try not using the Arrival Times file selected in the toolbar.")
return None
if stn_changed:
stationFormat(stn, self.bam.setup, ref_pos, chn_selected)
self.addAnnotes()
def obtainPerts(self, data, frag):
data_new = []
for i in range(len(data[frag][1])):
data_new.append(data[frag][1][i][0])
data, remove = chauvenet(data_new)
return data, remove
def showSpectrogram(self, event=None):
""" Show the spectrogram of the waveform in the current window. """
# ### Show the spectrogram ###
wave_arr = self.current_waveform_raw
fig = plt.figure()
ax_spec = fig.add_subplot(111)
ax_spec.specgram(wave_arr, Fs=20, cmap=plt.cm.inferno)
ax_spec.set_xlabel('Time (s)')
ax_spec.set_ylabel('Frequency (Hz)')
fig.show()
def filterBandpass(self, event=None):
""" Run bandpass filtering using values set on sliders. """
# Get bandpass filter values
bandpass_low = float(self.low_bandpass_edits.text())
bandpass_high = float(self.high_bandpass_edits.text())
# Limit the high frequency to be lower than the Nyquist frequency
# max_freq = (1.0/self.current_waveform_delta)/2
# if bandpass_high > max_freq:
# bandpass_high = max_freq - 0.1
# self.high_bandpass_slider.setValue(bandpass_high*self.bandpass_scale)
# # Init the butterworth bandpass filter
# butter_b, butter_a = butterworthBandpassFilter(bandpass_low, bandpass_high, \
# 1.0/self.current_waveform_delta, order=6)
# # Filter the data
# waveform_data = scipy.signal.filtfilt(butter_b, butter_a, np.copy(self.current_waveform_raw))
# Plot the updated waveform
self.drawWaveform(channel_changed=2, \
station_no=self.current_station, bandpass=[bandpass_low, bandpass_high])
def filterConvolution(self, event=None):
""" Apply the convolution filter on data as suggested in Kalenda et al. (2014). """
waveform_data = convolutionDifferenceFilter(self.current_waveform_raw)
self.drawWaveform(channel_changed=2, waveform_data=waveform_data, station_no=self.current_station)
def updatePlot(self, draw_waveform=True, stn_changed=False):
""" Update the plot after changes. """
if errorCodes(self, 'current_station', debug=self.prefs.debug):
return None
if len(self.bam.stn_list) == 0:
errorMessage('No stations were found!', 1, detail='Stations were not found. On the "Stations" tab, stations may be downloaded automatically through the "Download Stations" button, or added manually.')
return None
stn = self.bam.stn_list[self.current_station]
# if not hasattr(stn, "annotation"):
# stn.annotation = AnnotationList()
# print(stn.annotation)
self.make_picks_waveform_canvas.clear()
# Mark the position of the current station on the map
self.make_picks_station_choice.setCurrentIndex(self.current_station)
self.drawStats(self.current_station)
# for stn_mk in (i for i in self.station_marker if i is not None):
# if stn_mk == self.station_marker[self.current_station]:
# stn_mk.setPen((255, 0, 0))
# stn_mk.setBrush((255, 0, 0))
# stn_mk.setZValue(1)
# elif stn_mk in [self.station_marker[i] for i in self.ballistic_idx]:
# stn_mk.setPen((0, 0, 255))
# stn_mk.setBrush((0, 0, 255))
# stn_mk.setZValue(0)
# elif stn_mk in [self.station_marker[i] for i in self.fragmentation_idx]:
# stn_mk.setPen((0, 255, 0))
# stn_mk.setBrush((0, 255, 0))
# stn_mk.setZValue(0)
# else:
# stn_mk.setPen((255, 255, 255))
# stn_mk.setBrush((255, 255, 255))
# stn_mk.setZValue(0)
# for stn_mk in (i for i in self.station_waveform if i is not None):
# if stn_mk != self.station_waveform[self.current_station]:
# stn_mk.setPen((255, 255, 255))
# stn_mk.setZValue(0)
# else:
# stn_mk.setPen((255, 0, 0))
# stn_mk.setZValue(1)
# Plot the waveform from the current station
if draw_waveform:
self.drawWaveform(station_no=self.current_station, stn_changed=stn_changed)
# self.showTitle()
SolutionGUI.update(self)
def exportCSV(self, event):
""" Save picks to a CSV file. """
self.export_to_csv.setState(True)
qApp.processEvents()
dlg = QFileDialog.getSaveFileName(self, 'Save File')
file_name = checkExt(dlg[0], '.csv')
# Open the output CSV
try:
with open(os.path.join(file_name), 'w') as f:
# Write the header
f.write('Pick group, Network, Code, Lat, Lon, Elev, Pick JD, Pick time, station_number \n')
# Go through all picks
for pick in self.pick_list:
# Calculate Julian date of the pick time
pick_jd = datetime2JD(self.bam.setup.fireball_datetime + datetime.timedelta(seconds=pick.time))
stn = pick.stn
# Write the CSV entry
f.write("{:d}, {:s}, {:s}, {:.6f}, {:.6f}, {:.2f}, {:.8f}, {:}, {:}\n".format(0, stn.metadata.network, \
stn.metadata.code, stn.metadata.position.lat, stn.metadata.position.lon, stn.metadata.position.elev, pick_jd, pick.time, pick.stn_no))
except FileNotFoundError as e:
errorMessage('Could not find file!', 2, detail='{:}'.format(e))
self.export_to_csv.setState(False)
qApp.processEvents()
return None
errorMessage('Output to CSV!', 0, title='Exported!')
self.export_to_csv.setState(False)
qApp.processEvents()
def exportToAllTimes(self):
self.export_to_all_times.setState(True)
qApp.processEvents()
dlg = QFileDialog.getSaveFileName(self, 'Save File')
file_name = dlg[0]
np.save(file_name, self.arrTimes)
errorMessage("Saved Output File", 0)
self.export_to_all_times.setState(False)
qApp.processEvents()
def exportImage(self):
dlg = QFileDialog.getSaveFileName(self, 'Save File')
file_name = dlg[0]
exporter = pg.exporters.SVGExporter(self.make_picks_waveform_view.scene())
file_name = file_name + '.svg'
exporter.export(file_name)
def select(self, inverted):
if errorCodes(self, 'current_station_waveform'):
return None
if inverted:
self.make_picks_waveform_view.setBackground(self.color.BLACK)
self.current_station_waveform.setPen(self.color.WHITE)
self.make_picks_waveform_canvas.getAxis('bottom').setPen(self.color.WHITE)
self.make_picks_waveform_canvas.getAxis('left').setPen(self.color.WHITE)
else:
self.make_picks_waveform_view.setBackground(self.color.WHITE)
self.current_station_waveform.setPen(self.color.BLACK)
self.make_picks_waveform_canvas.getAxis('bottom').setPen(self.color.BLACK)
self.make_picks_waveform_canvas.getAxis('left').setPen(self.color.BLACK)
def invertGraph(self):
self.select(self.inverted)
self.inverted = not self.inverted
self.invert_picks.setState(self.inverted)
def showTitle(self):
if errorCodes(self, 'current_station_waveform'):
return None
stn = self.stn_list[self.current_station]
if self.showtitled:
self.make_picks_waveform_canvas.setTitle('')
else:
if self.inverted:
self.make_picks_waveform_canvas.setTitle('{:}-{:} [{:}] {:}'.format(stn.metadata.network, stn.metadata.code, stn.metadata.channel, stn.metadata.position), color=(0, 0, 0))
else:
self.make_picks_waveform_canvas.setTitle('{:}-{:} [{:}] {:}'.format(stn.metadata.network, stn.metadata.code, stn.metadata.channel, stn.metadata.position), color=(255, 255, 255))
self.showtitled = not self.showtitled
def perturbSetup(self):
""" Pulls the correct file names for the perturbing function to read, depending on the perturbation type
"""
if self.bam.setup.perturb_method == 'temporal':
# sounding data one hour later
sounding_u = parseWeather(self.bam.setup, time= 1)
# sounding data one hour earlier
sounding_l = parseWeather(self.bam.setup, time=-1)
else:
sounding_u = []
sounding_l = []
if self.setup.perturb_method == 'ensemble':
ensemble_file = self.setup.perturbation_spread_file
else:
ensemble_file = ''
if self.setup.perturb_times == 0: self.setup.perturb_times = 1
if not self.setup.perturb:
self.setup.perturb_times = 1
return np.array([sounding_l, sounding_u, ensemble_file])
def perturbGenerate(self, ptb_n, dataset, perturb_data, line=False):
""" Generates a perturbed cubic atmospheric profile (similar to 'dataset') based off of the perturb data
"""
sounding_l, sounding_u, ensemble_file = perturb_data[0], perturb_data[1], perturb_data[2]
# Perturbed soundings
if ptb_n > 0:
sounding_p = perturbation_method(self.setup, dataset, self.setup.perturb_method, \
sounding_u=sounding_u, sounding_l=sounding_l, \
spread_file=self.setup.perturbation_spread_file, lat=self.setup.lat_centre, lon=self.setup.lon_centre, \
ensemble_file=ensemble_file, ensemble_no=ptb_n, line=line)
# Nominal sounding
else:
sounding_p = dataset
return sounding_p
def supraSearch(self):
# Error Parsing
###############
if self.setup.lat_centre is None or self.setup.lon_centre is None:
errorMessage("Lat center or Lon center are not defined!", 1, info="Please define both to use this function")
return None
# if self.setup.search_min
# Parse the picked station data
s_info, s_name, weights, ref_pos = self.getStationData()
if s_info is None:
return None
# Check if manual search is defined
if self.setup.manual_fragmentation_search is None:
errorMessage("No manual fragmentation point defined!", 2, detail="Please specify a Manual Fragmentation Search in the Sources section of Variables")
return None
# Collect Results
#################
ref_pos = Position(ref_pos[0], ref_pos[1], ref_pos[2])
# Generate weather profile (cubic)
dataset = parseWeather(self.setup)
# Initialize results array
results = [None]*(self.setup.perturb_times + 1)
# Run through all perturbations
for ptb_n in range(self.setup.perturb_times + 1):
# for ptb_n == 0, nominal sounding, ptb_n > 1, perturbed sounding
sounding_p = self.perturbGenerate(ptb_n, dataset, self.perturbSetup())
# Return results for a specific atmosphere
results[ptb_n] = psoSearch(s_info, weights, s_name, self.setup, sounding_p, ref_pos, manual=True)
print("Error Function: {:5.2f} (Perturbation {:})".format(results[ptb_n].f_opt, ptb_n))
print("Opt: Latitude: {:.4f} Longitude: {:.4f} Elevation: {:.2f} Mean Error: {:.4f}"\
.format(results[ptb_n].x_opt.lat, results[ptb_n].x_opt.lon, results[ptb_n].x_opt.elev, results[ptb_n].motc))
n_stations = len(s_info)
xstn = s_info[:n_stations, :3]
# Display Results
#################
self.scatterPlot(self.setup, results, n_stations, xstn, s_name, dataset)
self.residPlot(results, s_name, xstn, self.prefs.workdir, n_stations)
defTable(self.tableWidget, n_stations + 1, 5, headers=['Station Name', "Latitude", "Longitude", "Elevation", "Residuals"])
setTableRow(self.tableWidget, 0, terms=["Total", results[0].x_opt.lat, results[0].x_opt.lon, results[0].x_opt.elev, results[0].f_opt])
for i in range(n_stations):
setTableRow(self.tableWidget, i + 1, terms=[s_name[i], xstn[i][0], xstn[i][1], xstn[i][2], results[0].r[i]])
if __name__ == '__main__':
pass
# app = QApplication(sys.argv)
# splash_pix = QPixmap(os.path.join('supra', 'Fireballs','docs', '_images', 'wmpl.png'))
# splash = QSplashScreen(splash_pix, Qt.WindowStaysOnTopHint)
# splash.setMask(splash_pix.mask())
# splash.show()
# app.processEvents()
# gui = SolutionGUI()
# gui.showFullScreen()
# gui.showMaximized()
# gui.show()
# splash.finish(gui)
# sys.exit(app.exec_())
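# --- Hedged sketch (not part of the original GUI code) ----------------------
# The FAS panel earlier in this file builds its frequency axis by hand. As a
# point of comparison, a minimal, self-contained amplitude spectrum with the
# standard one-sided frequency axis looks like this; the trace array and
# sampling rate below are hypothetical stand-ins, not values taken from the
# application.
def _fas_sketch(trace, sampling_rate):
    """Return (freq, amplitude) of the one-sided Fourier amplitude spectrum."""
    import numpy as np
    amplitude = np.abs(np.fft.rfft(trace))
    freq = np.fft.rfftfreq(len(trace), d=1.0/sampling_rate)
    return freq, amplitude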
|
wmpgREPO_NAMESupracenterPATH_START.@Supracenter_extracted@Supracenter-master@supra@GUI@[email protected]@.PATH_END.py
|
{
"filename": "testPythonExtension.py",
"repo_name": "CRPropa/CRPropa3",
"repo_path": "CRPropa3_extracted/CRPropa3-master/test/testPythonExtension.py",
"type": "Python"
}
|
import sys
try:
import unittest
except:
print("***********************************************************")
print("* WARNING!! Couldn't import python unittesting framework! *")
print("* No python tests have been executed *")
print("***********************************************************")
sys.exit(0)
try:
import crpropa as crp
except Exception as e:
print("*** CRPropa import failed")
print(type(e), str(e))
sys.exit(-1)
import numpy as np
class testCrossLanguagePolymorphism(unittest.TestCase):
def test_module(self):
class CountingModule(crp.Module):
def __init__(self):
crp.Module.__init__(self)
self.count = 0
def process(self, c):
self.count += 1
count_accept = CountingModule()
count_reject = CountingModule()
filter = crp.ParticleFilter([-1, 1])
filter.onAccept(count_accept)
filter.onReject(count_reject)
c = crp.Candidate()
for id in [-1, 1, 6, 9, -19, 23, 100010001]:
c.current.setId(id)
filter.process(c)
def test_ParticleCollector(self):
c = crp.Candidate()
p = crp.ParticleCollector()
p.process(c)
c_out = p[0]
for c_i in p:
c_out = c_i
def test_ObserverFeature(self):
class CountingFeature(crp.ObserverFeature):
def __init__(self):
crp.ObserverFeature.__init__(self)
self.value = 0
def checkDetection(self, candidate):
self.value += 1
return crp.DETECTED
obs = crp.Observer()
counter = CountingFeature()
obs.add(counter)
for i in range(5):
candidate = crp.Candidate()
obs.process(candidate)
self.assertEqual(i + 1, counter.value)
def testCustomMagneticField(self):
class CustomMagneticField(crp.MagneticField):
def __init__(self, val):
crp.MagneticField.__init__(self)
self.val = val
def getField(self, position):
return crp.Vector3d(self.val)
def getField(self, position, z):
return crp.Vector3d(self.val)
field = CustomMagneticField(crp.gauss)
propBP = crp.PropagationBP(field, 1e-4, 1*crp.Mpc, 1*crp.Mpc)
propCK = crp.PropagationCK(field, 1e-4, 1*crp.Mpc, 1*crp.Mpc)
propSDE = crp.DiffusionSDE(field)
pos = crp.Vector3d(-1, 0, 0)
z = 0
fieldAtPos = field.getField(pos, z)
self.assertEqual(fieldAtPos, propBP.getFieldAtPosition(pos, z))
self.assertEqual(fieldAtPos, propCK.getFieldAtPosition(pos, z))
self.assertEqual(fieldAtPos, propSDE.getMagneticFieldAtPosition(pos, z))
def testCustomAdvectionField(self):
class CustomAdvectionField(crp.AdvectionField):
def __init__(self, val):
crp.AdvectionField.__init__(self)
self.val = val
def getField(self, position):
return crp.Vector3d(self.val)
def getDivergence(self, position):
return 0.0
constMagVec = crp.Vector3d(0*crp.nG,0*crp.nG,1*crp.nG)
magField = crp.UniformMagneticField(constMagVec)
advField = CustomAdvectionField(1)
propSDE = crp.DiffusionSDE(magField, advField)
pos = crp.Vector3d(1, 0, 0)
advFieldAtPos = advField.getField(pos)
self.assertEqual(advFieldAtPos, propSDE.getAdvectionFieldAtPosition(pos))
def testCustomMassDensity(self):
class CustomMassDensity(crp.Density):
def __init__(self, density, HIDensity, HIIDensity, H2Density, nucleonDensity):
crp.Density.__init__(self)
self.density = density
self.HIDensity = HIDensity
self.HIIDensity = HIIDensity
self.H2Density = H2Density
self.nucleonDensity = nucleonDensity
def getDensity(self, position):
return self.density
def getHIDensity(self, position):
return self.HIDensity
def getHIIDensity(self, position):
return self.HIIDensity
def getH2Density(self, position):
return self.H2Density
def NucleonDensity(self, position):
return self.nucleonDensity
density = 10
HIDensity = 5
HIIDensity = 2
H2Density = 1
nucleonDensity = 0.5
massDensity = CustomMassDensity(density, HIDensity, HIIDensity, H2Density, nucleonDensity)
pos = crp.Vector3d(1, 0, 0)
self.assertEqual(density, massDensity.getDensity(pos))
self.assertEqual(HIDensity, massDensity.getHIDensity(pos))
self.assertEqual(HIIDensity, massDensity.getHIIDensity(pos))
self.assertEqual(H2Density, massDensity.getH2Density(pos))
def testCustomPhotonField(self):
class CustomPhotonField(crp.PhotonField):
def __init__(self, density):
crp.PhotonField.__init__(self)
self.density = density
self.fieldName = 'testCustomPhotonField'
self.isRedshiftDependent = True
def getPhotonDensity(self, energy, z):
return self.density
def getFieldName(self):
return self.fieldName
def hasRedshiftDependence(self):
return self.isRedshiftDependent
photonDensity = 10
photonField = CustomPhotonField(photonDensity)
energy = 10*crp.GeV
z = 0
self.assertEqual(photonDensity, photonField.getPhotonDensity(energy, z))
self.assertEqual('testCustomPhotonField', photonField.getFieldName())
self.assertEqual(True, photonField.hasRedshiftDependence())
def testCustomPhotonFieldInPhotoPionProduction(self):
class CustomPhotonField(crp.PhotonField):
def __init__(self, val):
crp.PhotonField.__init__(self)
self.val = val
def getFieldName(self):
return 'CMB'
def getPhotonDensity(self, ePhoton, z):
return self.val
constDensity = 1
photonField = CustomPhotonField(constDensity)
ppp = crp.PhotoPionProduction(photonField)
pppPhotonField = ppp.getPhotonField()
self.assertEqual(constDensity, pppPhotonField.getPhotonDensity(0))
class testCandidatePropertymap(unittest.TestCase):
def setUp(self):
self.candidate = crp.Candidate()
def __propertySetGet(self, value):
self.candidate.setProperty('Foo', value)
self.assertEqual(value, self.candidate.getProperty('Foo'))
def testString(self):
self.__propertySetGet('Bar')
def testUnicode(self):
self.__propertySetGet(u'Bar')
def testUnicodeName(self):
self.candidate.setProperty(u'Foo', 23)
self.assertEqual(23, self.candidate.getProperty(u'Foo'))
def testBool(self):
self.__propertySetGet(True)
self.__propertySetGet(False)
def testInt(self):
self.__propertySetGet(42)
def testFloat(self):
self.__propertySetGet(3.14)
v = np.array([2.])
self.__propertySetGet(v[0])
class testKeywordArguments(unittest.TestCase):
def testExceptionOnNonExistingArgument(self):
with self.assertRaises(Exception, msg="This is likely due to a swig bug. Please try to disable the builtin option by compiling crpropa with cmake .. -DENABLE_SWIG_BUILTIN=OFF"):
p = crp.PhotoDisintegration(nonExistingKeywordArgumentShouldRaiseException=True)
def testDisablingOfKwargs(self):
with self.assertRaises(Exception, msg="This is likely due to a swig bug. Please try to disable the builtin option by compiling crpropa with cmake .. -DENABLE_SWIG_BUILTIN=OFF"):
p = crp.PhotoDisintegration(photonField=crp.IRB_Dominguez11)
# swig currently does not support kwargs in overloaded functions - we should
# thus disable them.
#def testKeywordArgument(self):
# p = crp.PhotoDisintegration(photonField=crp.IRB_Dominguez11)
# self.assertTrue('IRB_Dominguez11' in p.getDescription())
class testVector3(unittest.TestCase):
def testPublicReferenceAccess(self):
v = crp.Vector3d(1., 2., 3.)
self.assertEqual(v.x, 1.)
self.assertEqual(v.y, 2.)
self.assertEqual(v.z, 3.)
v.x = 23.
self.assertEqual(v.x, 23.)
## this test fails in some systems
# def testArrayInterface(self):
# # this test fails for some combinations of Python version and system
# v = crp.Vector3d(1., 2., 3.)
# self.assertEqual(2., np.mean(v) )
# x = np.ones(3)
# self.assertEqual(6., sum(v * x) )
def testRepr(self):
v = crp.Vector3d(1., 2., 3.)
if sys.version_info >= (3, 3):
import unittest.mock
import io
with unittest.mock.patch('sys.stdout', new = io.StringIO()) as fake_out:
print(v)
else:
import StringIO
fake_out = StringIO.StringIO()
sys.stdout = fake_out
print(v)
sys.stdout = sys.__stdout__
self.assertEqual(fake_out.getvalue().rstrip(), v.getDescription())
def testOutOfBound(self):
v = crp.Vector3d(1., 2., 3.)
self.assertRaises(IndexError, v.__getitem__, 3)
self.assertRaises(IndexError, v.__setitem__, 3, 10)
class testParticleCollector(unittest.TestCase):
def testParticleCollectorIterator(self):
collector = crp.ParticleCollector()
lengths = [1*crp.pc, 10*crp.pc, 100*crp.pc]
for l in lengths:
c = crp.Candidate()
c.setTrajectoryLength(l)
collector.process(c)
self.assertEqual(len(collector), len(lengths))
for c, l in zip(collector, lengths):
self.assertEqual(c.getTrajectoryLength(), l)
def testParticleCollectorAsModuleListInput(self):
sim = crp.ModuleList()
sim.add(crp.MaximumTrajectoryLength(3.14))
sim.add(crp.SimplePropagation(0.001, 0.001))
collector = crp.ParticleCollector()
c1 = crp.Candidate()
c2 = crp.Candidate()
collector.process(c1)
collector.process(c2)
sim.run(collector.getContainer())
for c in collector:
self.assertAlmostEqual(
c.getTrajectoryLength(), 3.14, places=2)
def testParticleCollectorAsModuleListOutput(self):
sim = crp.ModuleList()
sim.add(crp.MaximumTrajectoryLength(3.14))
sim.add(crp.SimplePropagation(0.001, 0.001))
collector = crp.ParticleCollector()
sim.add(collector)
c = crp.Candidate()
sim.run(c)
self.assertAlmostEqual(
collector[0].getTrajectoryLength(),
3.14, places=2)
class testGrid(unittest.TestCase):
def testGridPropertiesConstructor(self):
N = 32
gp = crp.GridProperties(crp.Vector3d(0), N, 0.1)
grid = crp.Grid1f(gp)
self.assertEqual(grid.getNx(), 32)
if hasattr(crp, 'GridTurbulence'):
class testTurbulentField(unittest.TestCase):
#check problems brought up in https://github.com/CRPropa/CRPropa3/issues/322
def testTurbulenceSpectrum(self):
spectrum = crp.TurbulenceSpectrum(1., 1., 10.)
self.assertEqual(spectrum.getBrms(), 1.)
self.assertEqual(spectrum.getLmin(), 1.)
self.assertEqual(spectrum.getLmax(), 10.)
self.assertEqual(spectrum.getLbendover(), 1.)
self.assertEqual(spectrum.getSindex(), 5./3.)
self.assertEqual(spectrum.getQindex(), 4.)
def testGridTurbulence(self):
N = 64
boxSize = 1*crp.Mpc
l_bo = boxSize/8
spacing = boxSize / N
tf = crp.GridTurbulence(
crp.TurbulenceSpectrum(1.0, 2*spacing, boxSize, l_bo),
crp.GridProperties(crp.Vector3d(0), N, spacing)
)
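# --- Hedged usage sketch (not one of the unit tests above) ------------------
# The cross-language polymorphism exercised by testCrossLanguagePolymorphism
# boils down to subclassing a CRPropa module in Python and adding it to a
# ModuleList next to native modules. A minimal sketch, assuming the bindings
# import as tested above; the step sizes are arbitrary illustration values.
def _counting_module_sketch():
    class Counter(crp.Module):
        def __init__(self):
            crp.Module.__init__(self)
            self.count = 0
        def process(self, candidate):
            self.count += 1
    sim = crp.ModuleList()
    sim.add(crp.SimplePropagation(0.1 * crp.Mpc, 0.1 * crp.Mpc))
    sim.add(crp.MaximumTrajectoryLength(1 * crp.Mpc))
    counter = Counter()
    sim.add(counter)
    sim.run(crp.Candidate())
    return counter.count  # number of propagation steps the candidate took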
if __name__ == '__main__':
unittest.main()
|
CRPropaREPO_NAMECRPropa3PATH_START.@CRPropa3_extracted@CRPropa3-master@[email protected]@.PATH_END.py
|
{
"filename": "test_html.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/cosmology/_io/tests/test_html.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import astropy.units as u
from astropy.cosmology._io.html import _FORMAT_TABLE, read_html_table, write_html_table
from astropy.table import QTable, Table, vstack
from astropy.utils.compat.optional_deps import HAS_BS4
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
###############################################################################
class ReadWriteHTMLTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Read/Write] with ``format="ascii.html"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_to_html_table_bad_index(self, read, write, tmp_path):
"""Test if argument ``index`` is incorrect"""
fp = tmp_path / "test_to_html_table_bad_index.html"
write(fp, format="ascii.html")
# single-row table and has a non-0/None index
with pytest.raises(IndexError, match="index 2 out of range"):
read(fp, index=2, format="ascii.html")
# string index where doesn't match
with pytest.raises(KeyError, match="No matches found for key"):
read(fp, index="row 0", format="ascii.html")
# -----------------------
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_to_html_table_failed_cls(self, write, tmp_path):
"""Test failed table type."""
fp = tmp_path / "test_to_html_table_failed_cls.html"
with pytest.raises(TypeError, match="'cls' must be"):
write(fp, format="ascii.html", cls=list)
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_to_html_table_cls(self, write, tbl_cls, tmp_path):
fp = tmp_path / "test_to_html_table_cls.html"
write(fp, format="ascii.html", cls=tbl_cls)
# -----------------------
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_readwrite_html_table_instance(
self, cosmo_cls, cosmo, read, write, tmp_path, add_cu
):
"""Test cosmology -> ascii.html -> cosmology."""
fp = tmp_path / "test_readwrite_html_table_instance.html"
# ------------
# To Table
write(fp, format="ascii.html")
# some checks on the saved file
tbl = QTable.read(fp)
# assert tbl.meta["cosmology"] == cosmo_cls.__qualname__ # metadata read not implemented
assert tbl["name"] == cosmo.name
# ------------
# From Table
tbl["mismatching"] = "will error"
tbl.write(fp, format="ascii.html", overwrite=True)
# tests are different if the last argument is a **kwarg
if cosmo._init_has_kwargs:
got = read(fp, format="ascii.html")
assert got.__class__ is cosmo_cls
assert got.name == cosmo.name
# assert "mismatching" not in got.meta # metadata read not implemented
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
read(fp, format="ascii.html")
# unless mismatched are moved to meta
got = read(fp, format="ascii.html", move_to_meta=True)
assert got == cosmo
# assert got.meta["mismatching"] == "will error" # metadata read not implemented
# it won't error if everything matches up
tbl.remove_column("mismatching")
tbl.write(fp, format="ascii.html", overwrite=True)
got = read(fp, format="ascii.html")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``write``.
# tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]] #
# metadata read not implemented
got = read(fp, format="ascii.html")
assert got == cosmo
got = read(fp)
assert got == cosmo
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_rename_html_table_columns(self, read, write, tmp_path):
"""Tests renaming columns"""
fp = tmp_path / "test_rename_html_table_columns.html"
write(fp, format="ascii.html", latex_names=True)
tbl = QTable.read(fp)
# asserts each column name has not been reverted yet
# For now, Cosmology class and name are stored in first 2 slots
for column_name in tbl.colnames[2:]:
assert column_name in _FORMAT_TABLE.values()
cosmo = read(fp, format="ascii.html")
converted_tbl = cosmo.to_format("astropy.table")
# asserts each column name has been reverted
# cosmology name is still stored in first slot
for column_name in converted_tbl.colnames[1:]:
assert column_name in _FORMAT_TABLE.keys()
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
@pytest.mark.parametrize("latex_names", [True, False])
def test_readwrite_html_subclass_partial_info(
self, cosmo_cls, cosmo, read, write, latex_names, tmp_path, add_cu
):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
fp = tmp_path / "test_read_html_subclass_partial_info.html"
# test write
write(fp, format="ascii.html", latex_names=latex_names)
# partial information
tbl = QTable.read(fp)
# tbl.meta.pop("cosmology", None) # metadata not implemented
cname = "$$T_{0}$$" if latex_names else "Tcmb0"
del tbl[cname] # format is not converted to original units
tbl.write(fp, overwrite=True)
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.read(fp, format="ascii.html")
got2 = read(fp, format="ascii.html", cosmology=cosmo_cls)
got3 = read(fp, format="ascii.html", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
# assert got.meta == cosmo.meta # metadata read not implemented
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_readwrite_html_multirow(self, cosmo, read, write, tmp_path, add_cu):
"""Test if table has multiple rows."""
fp = tmp_path / "test_readwrite_html_mutlirow.html"
# Make
cosmo1 = cosmo.clone(name="row 0")
cosmo2 = cosmo.clone(name="row 2")
table = vstack(
[c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
metadata_conflicts="silent",
)
cosmo_cls = type(cosmo)
assert cosmo is not None
for n, col in zip(table.colnames, table.itercols()):
if n not in cosmo_cls.parameters:
continue
param = cosmo_cls.parameters[n]
if param.unit in (None, u.one):
continue
# Replace column with unitless version
table.replace_column(n, (col << param.unit).value, copy=False)
table.write(fp, format="ascii.html")
# ------------
# From Table
# it will error on a multi-row table
with pytest.raises(ValueError, match="need to select a specific row"):
read(fp, format="ascii.html")
# unless the index argument is provided
got = cosmo_cls.read(fp, index=1, format="ascii.html")
# got = read(fp, index=1, format="ascii.html")
assert got == cosmo
# the index can be a string
got = cosmo_cls.read(fp, index=cosmo.name, format="ascii.html")
assert got == cosmo
# it's better if the table already has an index
# this will be identical to the previous ``got``
table.add_index("name")
got2 = cosmo_cls.read(fp, index=cosmo.name, format="ascii.html")
assert got2 == cosmo
class TestReadWriteHTML(ReadWriteDirectTestBase, ReadWriteHTMLTestMixin):
"""
Directly test ``read/write_html``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="ascii.html")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_html_table, "write": write_html_table}
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_rename_direct_html_table_columns(self, read, write, tmp_path):
"""Tests renaming columns"""
fp = tmp_path / "test_rename_html_table_columns.html"
write(fp, format="ascii.html", latex_names=True)
tbl = QTable.read(fp)
# asserts each column name has not been reverted yet
for column_name in tbl.colnames[2:]:
# for now, the Cosmology class and name are stored in the first 2 slots
assert column_name in _FORMAT_TABLE.values()
cosmo = read(fp, format="ascii.html")
converted_tbl = cosmo.to_format("astropy.table")
# asserts each column name has been reverted
for column_name in converted_tbl.colnames[1:]:
# for now, metadata is still stored in the first slot
assert column_name in _FORMAT_TABLE.keys()
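# --- Hedged usage sketch (not a collected test) ------------------------------
# The helpers above back the public entry point ``Cosmology.read/write`` with
# ``format="ascii.html"``. A minimal round trip, assuming beautifulsoup4 is
# installed and ``tmp_path`` is any writable directory:
def _readwrite_public_api_sketch(tmp_path):
    from astropy.cosmology import Cosmology, Planck18
    fp = tmp_path / "planck18.html"
    Planck18.write(fp, format="ascii.html", overwrite=True)
    return Cosmology.read(fp, format="ascii.html") == Planck18  # expected True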
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@cosmology@_io@tests@[email protected]_END.py
|
{
"filename": "numpy_binary.py",
"repo_name": "CEA-COSMIC/pysap",
"repo_path": "pysap_extracted/pysap-master/pysap/base/loaders/numpy_binary.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
##########################################################################
# pySAP - Copyright (C) CEA, 2017 - 2018
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
import numpy as np
# Package import
from .loader_base import LoaderBase
from pysap.base.image import Image
class npBinary(LoaderBase):
""" Define the numpy binary loader.
"""
allowed_extensions = [".npy"]
def load(self, path):
""" A method that load the image data and associated metadata.
Parameters
----------
path: str
the path to the image to be loaded.
Returns
-------
image: Image
the loaded image.
"""
cube = np.load(path)
return Image(data_type="scalar",
data=cube)
def save(self, image, outpath, clobber=True):
""" A method that save the image data and associated metadata.
Parameters
----------
image: Image
the image to be saved.
outpath: str
the path where the image will be saved.
"""
np.save(outpath, image.data)
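# --- Hedged usage sketch (not part of the loader) ----------------------------
# A minimal round trip through this loader; the file name is a hypothetical
# example and the array content is arbitrary. It assumes an Image can be built
# the same way ``load`` builds one above.
def _npbinary_roundtrip_sketch(path="example.npy"):
    loader = npBinary()
    loader.save(Image(data_type="scalar", data=np.zeros((4, 4))), path)
    return loader.load(path)  # pysap Image wrapping the saved array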
|
CEA-COSMICREPO_NAMEpysapPATH_START.@pysap_extracted@pysap-master@pysap@base@loaders@[email protected]_END.py
|
{
"filename": "stats.py",
"repo_name": "ebachelet/pyLIMA",
"repo_path": "pyLIMA_extracted/pyLIMA-master/pyLIMA/fits/stats.py",
"type": "Python"
}
|
import numpy as np
import scipy.stats as ss
def normal_Kolmogorov_Smirnov(sample):
"""The moon illumination expressed as a percentage.
:param astropy sun: the sun ephemeris
:param astropy moon: the moon ephemeris
:return: a numpy array like indicated the moon illumination.
:rtype: array_like
"""
mu, sigma = ss.norm.fit(sample)
# use mu sigma for anomaly, 0,1 for rescaling???
KS_stat, KS_pvalue = ss.kstest(sample, 'norm', args=(0, 1))
# the sample is likely Gaussian-like if KS_stat (~ maximum distance between
# sample and theoretical distribution) -> 0
# the null hypothesis cannot be rejected (i.e. the sample is drawn from a Gaussian)
# if KS_pvalue -> 1
KS_judgement = 0
if KS_pvalue > 0.01:
KS_judgement = 1
if KS_pvalue > 0.05:
KS_judgement = 2
return KS_stat, KS_pvalue, KS_judgement
def normal_Anderson_Darling(sample):
"""Compute a Anderson-Darling tests on the sample versus a normal distribution
with mu = 0, sigma = 1
:param array_like sample: the sample you want to check the "Gaussianity"
:returns: the Anderson-Darling statistic, the Anderson-Darling critical
value associated to the significance
level of 1 % and the Anderson-Darling judgement
:rtype: float, float, int
"""
AD_stat, AD_critical_values, AD_significance_levels = ss.anderson(sample)
# the sample is likely Gaussian-like if AD_stat (~ maximum distance between
# sample and theoretical distribution) -> 0
# the null hypothesis cannot be rejected (i.e. the sample is drawn from a Gaussian)
# if AD_stat stays below the critical value
AD_judgement = 0
if AD_stat < 2 * AD_critical_values[-1]:
AD_judgement = 1
if AD_stat < AD_critical_values[-1]:
AD_judgement = 2
return AD_stat, AD_critical_values[-1], AD_judgement
def normal_Shapiro_Wilk(sample):
"""Compute a Shapiro-Wilk tests on the sample versus a normal distribution with
mu = 0, sigma = 1
:param array_like sample: the sample you want to check the "Gaussianity"
:returns: the Shapiro-Wilk statistic and its related p_value
:rtype: float, float
"""
SW_stat, SW_pvalue = ss.shapiro(sample)
# the null hypothesis cannot be rejected (i.e. the sample is drawn from a Gaussian)
# if SW_stat -> 1
# the null hypothesis cannot be rejected (i.e. the sample is drawn from a Gaussian)
# if SW_pvalue -> 1
# Judgement made on the STATISTIC because "the W test statistic is accurate but the
# p-value may not be" (see scipy doc)
SW_judgement = 0
if SW_pvalue > 0.01:
SW_judgement = 1
if SW_pvalue > 0.05:
SW_judgement = 2
return SW_stat, SW_pvalue, SW_judgement
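# --- Hedged usage sketch ------------------------------------------------------
# The three checks above are typically applied to standardised fit residuals;
# the residual array below is simulated, purely for illustration.
def _normality_checks_sketch(n_residuals=500):
    residuals = np.random.default_rng(0).standard_normal(n_residuals)
    ks = normal_Kolmogorov_Smirnov(residuals)
    ad = normal_Anderson_Darling(residuals)
    sw = normal_Shapiro_Wilk(residuals)
    # each judgement is 0 (rejected), 1 (marginal) or 2 (consistent with a normal)
    return ks[-1], ad[-1], sw[-1]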
### Statistics fit quality metrics
def normalized_chi2(chi2, n_data, n_parameters):
"""Compute the chi^2/dof
:param float chi2: the chi^2
:param int n_data: the number of data_points
:param int n_parameters: the number of model parameters
:returns: the chi^2/dof and the chi2dof_judgement
:rtype: float
"""
chi2_sur_dof = chi2 / (n_data - n_parameters)
chi2dof_judgement = 0
if chi2_sur_dof < 2:
chi2dof_judgement = 2
return chi2_sur_dof, chi2dof_judgement
def Bayesian_Information_Criterion(chi2, n_data, n_parameters):
"""Compute the BIC statistic.
:param float chi2: the chi^2
:param int n_data: the number of data_points
:param int n_parameters: the number of model parameters
:returns: the Bayesian Information Criterion (BIC)
:rtype: float
"""
BIC = chi2 + n_parameters * np.log(n_data)
return BIC
def Akaike_Information_Criterion(chi2, n_parameters):
"""Compute the BIC statistic.
:param float chi2: the chi^2
:param int n_parameters: the number of model parameters
:returns: the chi^2/dof
:rtype: float
"""
AIC = chi2 + 2 * n_parameters
return AIC
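# --- Hedged usage sketch ------------------------------------------------------
# Comparing two fits of the same light curve with the metrics above; the chi^2
# values and parameter counts are made-up numbers, purely for illustration.
def _model_comparison_sketch():
    n_data = 1200
    chi2_simple, k_simple = 1315.0, 5
    chi2_complex, k_complex = 1262.0, 7
    red_chi2, _ = normalized_chi2(chi2_complex, n_data, k_complex)
    delta_bic = (Bayesian_Information_Criterion(chi2_simple, n_data, k_simple)
                 - Bayesian_Information_Criterion(chi2_complex, n_data, k_complex))
    delta_aic = (Akaike_Information_Criterion(chi2_simple, k_simple)
                 - Akaike_Information_Criterion(chi2_complex, k_complex))
    # positive deltas favour the more complex model
    return red_chi2, delta_bic, delta_aic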
|
ebacheletREPO_NAMEpyLIMAPATH_START.@pyLIMA_extracted@pyLIMA-master@pyLIMA@[email protected]@.PATH_END.py
|
{
"filename": "transformer_deformable.py",
"repo_name": "Nikhel1/Gal-DINO",
"repo_path": "Gal-DINO_extracted/Gal-DINO-main/models/dino/transformer_deformable.py",
"type": "Python"
}
|
# ------------------------------------------------------------------------
# DINO
# Copyright (c) 2022 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
import copy
import os
from typing import Optional, List
import math
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from util.misc import inverse_sigmoid
from .ops.modules import MSDeformAttn
from .utils import sigmoid_focal_loss, MLP, _get_activation_fn, gen_sineembed_for_position
class DeformableTransformer(nn.Module):
def __init__(self, d_model=256, nhead=8,
num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.1,
activation="relu", return_intermediate_dec=False,
num_feature_levels=4, dec_n_points=4, enc_n_points=4,
two_stage=False, two_stage_num_proposals=300,
use_dab=False, high_dim_query_update=False, no_sine_embed=False):
super().__init__()
self.d_model = d_model
self.nhead = nhead
self.two_stage = two_stage
self.two_stage_num_proposals = two_stage_num_proposals
self.use_dab = use_dab
encoder_layer = DeformableTransformerEncoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, enc_n_points)
self.encoder = DeformableTransformerEncoder(encoder_layer, num_encoder_layers)
decoder_layer = DeformableTransformerDecoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, dec_n_points)
self.decoder = DeformableTransformerDecoder(decoder_layer, num_decoder_layers, return_intermediate_dec,
use_dab=use_dab, d_model=d_model, high_dim_query_update=high_dim_query_update, no_sine_embed=no_sine_embed)
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
if two_stage:
self.enc_output = nn.Linear(d_model, d_model)
self.enc_output_norm = nn.LayerNorm(d_model)
self.pos_trans = nn.Linear(d_model * 2, d_model * 2)
self.pos_trans_norm = nn.LayerNorm(d_model * 2)
else:
if not self.use_dab:
self.reference_points = nn.Linear(d_model, 2)
self.high_dim_query_update = high_dim_query_update
if high_dim_query_update:
assert self.use_dab, "use_dab must be True"
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MSDeformAttn):
m._reset_parameters()
if not self.two_stage and not self.use_dab:
xavier_uniform_(self.reference_points.weight.data, gain=1.0)
constant_(self.reference_points.bias.data, 0.)
normal_(self.level_embed)
def get_proposal_pos_embed(self, proposals):
num_pos_feats = 128
temperature = 10000
scale = 2 * math.pi
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device)
dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
# N, L, 4
proposals = proposals.sigmoid() * scale
# N, L, 4, 128
pos = proposals[:, :, :, None] / dim_t
# N, L, 4, 64, 2
pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)
return pos
def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):
N_, S_, C_ = memory.shape
base_scale = 4.0
proposals = []
_cur = 0
for lvl, (H_, W_) in enumerate(spatial_shapes):
mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)
proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
proposals.append(proposal)
_cur += (H_ * W_)
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
output_proposals = torch.log(output_proposals / (1 - output_proposals))
output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))
output_memory = memory
output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
output_memory = self.enc_output_norm(self.enc_output(output_memory))
return output_memory, output_proposals
def get_valid_ratio(self, mask):
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
def forward(self, srcs, masks, pos_embeds, query_embed=None):
"""
Input:
- srcs: List([bs, c, h, w])
- masks: List([bs, h, w])
"""
assert self.two_stage or query_embed is not None
# prepare input for encoder
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
bs, c, h, w = src.shape
spatial_shape = (h, w)
spatial_shapes.append(spatial_shape)
src = src.flatten(2).transpose(1, 2) # bs, hw, c
mask = mask.flatten(1) # bs, hw
pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
mask_flatten.append(mask)
src_flatten = torch.cat(src_flatten, 1) # bs, \sum{hxw}, c
mask_flatten = torch.cat(mask_flatten, 1) # bs, \sum{hxw}
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
# encoder
memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)
# prepare input for decoder
bs, _, c = memory.shape
if self.two_stage:
output_memory, output_proposals = self.gen_encoder_output_proposals(memory, mask_flatten, spatial_shapes)
# hack implementation for two-stage Deformable DETR
enc_outputs_class = self.decoder.class_embed[self.decoder.num_layers](output_memory)
enc_outputs_coord_unact = self.decoder.bbox_embed[self.decoder.num_layers](output_memory) + output_proposals
topk = self.two_stage_num_proposals
topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]
topk_coords_unact = torch.gather(enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4))
topk_coords_unact = topk_coords_unact.detach()
reference_points = topk_coords_unact.sigmoid()
init_reference_out = reference_points
pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact)))
query_embed, tgt = torch.split(pos_trans_out, c, dim=2)
elif self.use_dab:
reference_points = query_embed[..., self.d_model:].sigmoid()
tgt = query_embed[..., :self.d_model]
tgt = tgt.unsqueeze(0).expand(bs, -1, -1)
init_reference_out = reference_points
else:
query_embed, tgt = torch.split(query_embed, c, dim=1)
query_embed = query_embed.unsqueeze(0).expand(bs, -1, -1)
tgt = tgt.unsqueeze(0).expand(bs, -1, -1)
reference_points = self.reference_points(query_embed).sigmoid()
# bs, num_queries, 2
init_reference_out = reference_points
# decoder
hs, inter_references = self.decoder(tgt, reference_points, memory,
spatial_shapes, level_start_index, valid_ratios,
query_pos=query_embed if not self.use_dab else None,
src_padding_mask=mask_flatten)
inter_references_out = inter_references
if self.two_stage:
return hs, init_reference_out, inter_references_out, enc_outputs_class, enc_outputs_coord_unact
return hs, init_reference_out, inter_references_out, None, None
class DeformableTransformerEncoderLayer(nn.Module):
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0.1, activation="relu",
n_levels=4, n_heads=8, n_points=4,
add_channel_attention=False,
use_deformable_box_attn=False,
box_attn_type='roi_align',
):
super().__init__()
# self attention
if use_deformable_box_attn:
self.self_attn = MSDeformableBoxAttention(d_model, n_levels, n_heads, n_boxes=n_points, used_func=box_attn_type)
else:
self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation, d_model=d_ffn)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# channel attention
self.add_channel_attention = add_channel_attention
if add_channel_attention:
self.activ_channel = _get_activation_fn('dyrelu', d_model=d_model)
self.norm_channel = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, src):
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, key_padding_mask=None):
# self attention
src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, key_padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.forward_ffn(src)
# channel attn
if self.add_channel_attention:
src = self.norm_channel(src + self.activ_channel(src))
return src
class DeformableTransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
if num_layers > 0:
self.layers = _get_clones(encoder_layer, num_layers)
else:
self.layers = []
del encoder_layer
self.num_layers = num_layers
self.norm = norm
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
reference_points_list = []
for lvl, (H_, W_) in enumerate(spatial_shapes):
ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):
"""
Input:
- src: [bs, sum(hi*wi), 256]
- spatial_shapes: h,w of each level [num_level, 2]
- level_start_index: [num_level] start point of level in sum(hi*wi).
- valid_ratios: [bs, num_level, 2]
- pos: pos embed for src. [bs, sum(hi*wi), 256]
- padding_mask: [bs, sum(hi*wi)]
Intermediate:
- reference_points: [bs, sum(hi*wi), num_level, 2]
"""
output = src
# bs, sum(hi*wi), 256
if self.num_layers > 0:
reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
for _, layer in enumerate(self.layers):
output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)
if self.norm is not None:
output = self.norm(output)
return output
class DeformableTransformerDecoderLayer(nn.Module):
def __init__(self, d_model=256, d_ffn=1024,
dropout=0.1, activation="relu",
n_levels=4, n_heads=8, n_points=4,
use_deformable_box_attn=False,
box_attn_type='roi_align',
key_aware_type=None,
decoder_sa_type='ca',
module_seq=['sa', 'ca', 'ffn'],
):
super().__init__()
self.module_seq = module_seq
assert sorted(module_seq) == ['ca', 'ffn', 'sa']
# cross attention
# self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
if use_deformable_box_attn:
self.cross_attn = MSDeformableBoxAttention(d_model, n_levels, n_heads, n_boxes=n_points, used_func=box_attn_type)
else:
self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation, d_model=d_ffn, batch_dim=1)
self.dropout3 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout4 = nn.Dropout(dropout)
self.norm3 = nn.LayerNorm(d_model)
self.key_aware_type = key_aware_type
self.key_aware_proj = None
self.decoder_sa_type = decoder_sa_type
assert decoder_sa_type in ['sa', 'ca_label', 'ca_content']
if decoder_sa_type == 'ca_content':
self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
def rm_self_attn_modules(self):
self.self_attn = None
self.dropout2 = None
self.norm2 = None
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt):
tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_sa(self,
# for tgt
tgt: Optional[Tensor], # nq, bs, d_model
tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))
tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)
tgt_key_padding_mask: Optional[Tensor] = None,
tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4
# for memory
memory: Optional[Tensor] = None, # hw, bs, d_model
memory_key_padding_mask: Optional[Tensor] = None,
memory_level_start_index: Optional[Tensor] = None, # num_levels
memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
memory_pos: Optional[Tensor] = None, # pos for memory
# sa
self_attn_mask: Optional[Tensor] = None, # mask used for self-attention
cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention
):
# self attention
if self.self_attn is not None:
if self.decoder_sa_type == 'sa':
q = k = self.with_pos_embed(tgt, tgt_query_pos)
tgt2 = self.self_attn(q, k, tgt, attn_mask=self_attn_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
elif self.decoder_sa_type == 'ca_label':
# q = self.with_pos_embed(tgt, tgt_query_pos)
bs = tgt.shape[1]
k = v = self.label_embedding.weight[:, None, :].repeat(1, bs, 1)
tgt2 = self.self_attn(tgt, k, v, attn_mask=self_attn_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
elif self.decoder_sa_type == 'ca_content':
tgt2 = self.self_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),
tgt_reference_points.transpose(0, 1).contiguous(),
memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index, memory_key_padding_mask).transpose(0, 1)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
else:
raise NotImplementedError("Unknown decoder_sa_type {}".format(self.decoder_sa_type))
return tgt
def forward_ca(self,
# for tgt
tgt: Optional[Tensor], # nq, bs, d_model
tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))
tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)
tgt_key_padding_mask: Optional[Tensor] = None,
tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4
# for memory
memory: Optional[Tensor] = None, # hw, bs, d_model
memory_key_padding_mask: Optional[Tensor] = None,
memory_level_start_index: Optional[Tensor] = None, # num_levels
memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
memory_pos: Optional[Tensor] = None, # pos for memory
# sa
self_attn_mask: Optional[Tensor] = None, # mask used for self-attention
cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention
):
# cross attention
if self.key_aware_type is not None:
if self.key_aware_type == 'mean':
tgt = tgt + memory.mean(0, keepdim=True)
elif self.key_aware_type == 'proj_mean':
tgt = tgt + self.key_aware_proj(memory).mean(0, keepdim=True)
else:
raise NotImplementedError("Unknown key_aware_type: {}".format(self.key_aware_type))
tgt2 = self.cross_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),
tgt_reference_points.transpose(0, 1).contiguous(),
memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index, memory_key_padding_mask).transpose(0, 1)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
return tgt
def forward(self,
# for tgt
tgt: Optional[Tensor], # nq, bs, d_model
tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))
tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)
tgt_key_padding_mask: Optional[Tensor] = None,
tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4
# for memory
memory: Optional[Tensor] = None, # hw, bs, d_model
memory_key_padding_mask: Optional[Tensor] = None,
memory_level_start_index: Optional[Tensor] = None, # num_levels
memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
memory_pos: Optional[Tensor] = None, # pos for memory
# sa
self_attn_mask: Optional[Tensor] = None, # mask used for self-attention
cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention
):
for funcname in self.module_seq:
if funcname == 'ffn':
tgt = self.forward_ffn(tgt)
elif funcname == 'ca':
tgt = self.forward_ca(tgt, tgt_query_pos, tgt_query_sine_embed, \
tgt_key_padding_mask, tgt_reference_points, \
memory, memory_key_padding_mask, memory_level_start_index, \
memory_spatial_shapes, memory_pos, self_attn_mask, cross_attn_mask)
elif funcname == 'sa':
tgt = self.forward_sa(tgt, tgt_query_pos, tgt_query_sine_embed, \
tgt_key_padding_mask, tgt_reference_points, \
memory, memory_key_padding_mask, memory_level_start_index, \
memory_spatial_shapes, memory_pos, self_attn_mask, cross_attn_mask)
else:
raise ValueError('unknown funcname {}'.format(funcname))
return tgt
class DeformableTransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, return_intermediate=False, use_dab=False, d_model=256, query_dim=4):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.return_intermediate = return_intermediate
assert return_intermediate
# hack implementation for iterative bounding box refinement and two-stage Deformable DETR
self.bbox_embed = None
self.class_embed = None
self.use_dab = use_dab
self.d_model = d_model
self.query_dim = query_dim
if use_dab:
self.query_scale = MLP(d_model, d_model, d_model, 2)
self.ref_point_head = MLP(2 * d_model, d_model, d_model, 2)
def forward(self, tgt, reference_points, src, src_spatial_shapes,
src_level_start_index, src_valid_ratios,
query_pos=None, src_padding_mask=None):
output = tgt
if self.use_dab:
assert query_pos is None
intermediate = []
intermediate_reference_points = [reference_points]
for layer_id, layer in enumerate(self.layers):
if reference_points.shape[-1] == 4:
reference_points_input = reference_points[:, :, None] \
* torch.cat([src_valid_ratios, src_valid_ratios], -1)[:, None] # bs, nq, 4, 4
else:
assert reference_points.shape[-1] == 2
reference_points_input = reference_points[:, :, None] * src_valid_ratios[:, None]
if self.use_dab:
query_sine_embed = gen_sineembed_for_position(reference_points_input[:, :, 0, :]) # bs, nq, 256*2
raw_query_pos = self.ref_point_head(query_sine_embed) # bs, nq, 256
pos_scale = self.query_scale(output) if layer_id != 0 else 1
query_pos = pos_scale * raw_query_pos
output = layer(output, query_pos, reference_points_input, src, src_spatial_shapes, src_level_start_index, src_padding_mask)
# hack implementation for iterative bounding box refinement
if self.bbox_embed is not None:
box_holder = self.bbox_embed(output)
box_holder[..., :self.query_dim] += inverse_sigmoid(reference_points)
new_reference_points = box_holder[..., :self.query_dim].sigmoid()
reference_points = new_reference_points.detach()
if layer_id != self.num_layers - 1:
intermediate_reference_points.append(new_reference_points)
intermediate.append(output)
return torch.stack(intermediate), torch.stack(intermediate_reference_points)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_deforamble_transformer(args):
return DeformableTransformer(
d_model=args.hidden_dim,
nhead=args.nheads,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
dim_feedforward=args.dim_feedforward,
dropout=args.dropout,
activation="relu",
return_intermediate_dec=True,
num_feature_levels=args.ddetr_num_feature_levels,
dec_n_points=args.ddetr_dec_n_points,
enc_n_points=args.ddetr_enc_n_points,
two_stage=args.ddetr_two_stage,
two_stage_num_proposals=args.num_queries,
use_dab=args.ddetr_use_dab,
high_dim_query_update=args.ddetr_high_dim_query_update,
no_sine_embed=args.ddetr_no_sine_embed)
|
Nikhel1REPO_NAMEGal-DINOPATH_START.@Gal-DINO_extracted@Gal-DINO-main@models@dino@[email protected]_END.py
|
{
"filename": "tutorial.ipynb",
"repo_name": "joe-antognini/kozai",
"repo_path": "kozai_extracted/kozai-master/docs/tutorial.ipynb",
"type": "Jupyter Notebook"
}
|
# A stroll through the `kozai` python package
## Installation
The `kozai` package is available on PyPI and can be installed with `pip` like so:
```
pip install kozai
```
If you don't have the right permissions, try installing it like this:
```
pip install --user kozai
```
If you run into problems with the package tests, please let me know at `[email protected]` so we can work to fix them. Note that the `kozai` package only works on Python 2 at the moment.
## Making your first triple
The `kozai` package makes it easy to create and evolve triples. The package comes with two classes which represent triples in different ways: `TripleDelaunay` and `TripleVectorial`. The `TripleDelaunay` class represents the triple using the Delaunay orbital elements and the `TripleVectorial` class represents the triple using the eccentricity and angular momentum vectors. At the moment, the `TripleVectorial` class only supports triples with a test particle secondary, whereas `TripleDelaunay` requires that all three stars have mass.
The `TripleDelaunay` class currently has more features than `TripleVectorial`, so I recommend using the `TripleDelaunay` class wherever possible. If you need to evolve a triple in the test particle approximation you can simply set the secondary mass to be something very small (e.g., $10^{-5}$ $M_{\odot}$).
To start with we will use the `TripleDelaunay` class and explore the `TripleVectorial` class later. We first have to import the class from the `kozai.delaunay` module:
```python
from kozai.delaunay import TripleDelaunay
```
To create a triple we just call `TripleDelaunay`:
```python
triple = TripleDelaunay()
```
This triple has some default orbital parameters. We can see what the inner semi-major axis is:
```python
triple.a1
```
1.0
Note that the semi-major axes are all in AU. Let's take a look at the inclination:
```python
print('{0:.0f}'.format(triple.inc))
```
80
Note that the angles are given in degrees.
## Evolving a triple
Now that we have a high-inclination triple we can evolve it in time and see Kozai-Lidov oscillations. To start with, let's turn the octupole term off so that we are just looking at quadrupole oscillations:
```python
triple.octupole = False
```
Now let's evolve the triple for $3 \times 10^4$ yr:
```python
evolution = triple.evolve(3e4)
```
`evolution` is a numpy array where each entry is a different step in time. Each entry has the following format (a short unpacking sketch is shown after this list):
- $t$ (yr)
- $a_1$ (AU)
- $e_1$
- $g_1$ ($^{\circ}$)
- $a_2$ (AU)
- $e_2$
- $g_2$ ($^{\circ}$)
- $i$ ($^{\circ}$)
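Each column can be pulled out by index. A minimal sketch, assuming the eight-column layout listed above:
```python
# Unpack the evolution array by column, following the order listed above.
t, a1, e1, g1, a2, e2, g2, inc = evolution.T
```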
Let's now see how the eccentricity changes in time.
```python
%pylab inline
rcParams.update({'font.size': 22})
```
Populating the interactive namespace from numpy and matplotlib
```python
t = evolution[:, 0]
```
```python
e = evolution[:, 2]
```
```python
plot(t, 1 - e)
yscale('log')
xlim([0, 3e4])
xlabel('t (yr)')
ylabel('1 - e');
```

We see here some nice Kozai-Lidov oscillations. Let's look at how the inclination changes in time:
```python
inc = evolution[:, -1]
```
```python
plot(t, inc)
xlim([0, 3e4])
ylim([0, 90])
xlabel('t (yr)')
ylabel('inclination (degrees)');
```

We see that we start out at $80^{\circ}$ and then move down toward the Kozai critical angle of $39.2^{\circ}$.
## The Eccentric KL Mechanism
Let's now take a look at what happens when we turn on the octupole order term. First we have to reset our triple:
```python
triple.reset()
```
Now the triple has been reset to its initial conditions. So the time is set to 0:
```python
triple.t
```
0.0
And the eccentricity and inclination are both at their original values:
```python
triple.e1
```
0.1
```python
print('{0:.0f}'.format(triple.inc))
```
80
Let's now turn the octupole term on:
```python
triple.octupole = True
```
Since we're looking for longer-term changes to the KL cycles we'll evolve this system for $10^5$ yr.
```python
evolution = triple.evolve(1e5)
```
Now let's take a look at the eccentricity evolution:
```python
t = evolution[:, 0]
e = evolution[:, 2]
```
```python
plot(t, 1 - e)
yscale('log')
xlim([0, 1e5])
xlabel('t (yr)')
ylabel('1 - e');
```

It's exactly the same as before! Why is that? Let's take a look at the masses of our stars:
```python
triple.m1
```
1.0
```python
triple.m2
```
1.0
By default our triple has an equal mass inner binary, so the octupole order term is zero! We can see this explicitly by looking at the $C_3$ coefficient:
```python
triple.C3
```
0.0
But the $C_2$ coefficient is not zero:
```python
triple.C2
```
7.941780708528179e+33
Note that the unit here is Joules.
In order to see the eccentric Kozai mechanism we need to have an unequal mass inner binary, so let's reset the triple and set the mass of the secondary to be something small, like $10^{-5}$ $M_{\odot}$.
```python
triple.reset()
```
```python
triple.m2 = 1e-5
```
```python
evolution = triple.evolve(1e5)
```
And now we'll take a look at the eccentricity evolution:
```python
t = evolution[:, 0]
e = evolution[:, 2]
```
```python
plot(t, 1 - e)
yscale('log')
xlim([0, 1e5])
xlabel('t (yr)')
ylabel('1 - e');
```

We can see the characteristic increase in the maximum eccentricity during each KL oscillation as the orbit of the inner binary flips from prograde to retrograde. Let's see what's happening to the orbital inclination:
```python
inc = evolution[:, -1]
```
```python
plot(t, inc)
xlim([0, 1e5])
xlabel('t (yr)')
ylabel('inclination (degrees)')
plot(linspace(0, 1e5, num=100), 90 * np.ones(100), ls=':');
```

## Some other features of the `TripleDelaunay` class
### Setting the orbital parameters
The `TripleDelaunay` class comes with a number of other features to make it easy to work with. You can set the orbital parameters of the triple from the call to the class:
```python
triple = TripleDelaunay(a1=1, a2=10, e1=.01, e2=.15, inc=85, m2=.5)
```
### Printing the triple parameters
It is also easy to get general information about the triple. If `print` is called the class returns the various parameters of the triple in JSON format:
```python
print(triple)
```
{
"a1": 1.0,
"a2": 10.0,
"algo": "vode",
"atol": 1e-09,
"collision": false,
"cputstop": 300,
"e1": 0.01,
"e2": 0.15,
"epsoct": 0.015345268542199487,
"g1": 0.0,
"g2": 0.0,
"gr": false,
"hexadecapole": false,
"inc": 84.99999999999997,
"m1": 1.0,
"m2": 0.5,
"m3": 1.0,
"maxoutput": 1000000,
"octupole": true,
"outfreq": 1,
"quadrupole": true,
"r1": 0.0,
"r2": 0.0,
"rtol": 1e-09,
"tstop": null
}
If you're working with many triples this makes it easy to keep track of the parameters of each one.
### Getting the eccentricity extrema
If you're evolving a triple over a long period of time you may not be interested in most of the evolution. Instead you may just want the points at which the eccentricity is at its largest or smallest. To only get the eccentricity extrema the `TripleDelaunay` class has an `extrema` method:
```python
triple = TripleDelaunay(a1=1, a2=20, e1=.1, e2=.3, inc=80, m2=.01)
extrema = triple.extrema(1e5)
```
Let's compare the extrema to the full evolution:
```python
triple = TripleDelaunay(a1=1, a2=20, e1=.1, e2=.3, inc=80, m2=.01)
evolution = triple.evolve(1e5)
```
```python
plot(evolution[:, 0], 1 - evolution[:, 2])
yscale('log')
scatter(extrema[:, 0], 1 - extrema[:, 2], c='r', s=100)
xlabel('t (yr)')
ylabel('1 - e');
```

```python
plot(evolution[:, 0], evolution[:, -1])
scatter(extrema[:, 0], extrema[:, -1], c='r', s=100)
xlabel('t (yr)')
ylabel('inclination (degrees)');
```

## Finding flips
Over even longer timescales we may just be interested in the times at which the inner orbit flips from prograde to retrograde or vice versa. To find these moments `TripleDelaunay` has the `find_flips` method:
```python
triple = TripleDelaunay(m2=1e-5)
flips = triple.find_flips(1e6)
```
Let's compare to the full evolution.
```python
triple = TripleDelaunay(m2=1e-5)
evolution = triple.evolve(1e6)
```
```python
plot(evolution[:, 0], 1 - evolution[:, 2], c='lightgray')
xlim([0, 1e6])
yscale('log')
for elem in flips:
plot(elem[0] * np.ones(50), np.logspace(-8, 0, num=50), ls=':', c='k')
xlabel('t (yr)')
ylabel('1 - e');
```

Note that this method just looks for a change in the sign of $\cos i$. For massive secondaries the "pole" in the Hamiltonian at which arbitrarily large eccentricities are possible occurs at inclinations slightly larger than $90^{\circ}$. `find_flips` will just find when the orbit crosses $90^{\circ}$, not when the arbitrarily large eccentricities occur.
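If you prefer to locate flips by hand from a full evolution, a minimal sketch is to look for sign changes of $\cos i$ between successive outputs (this assumes the evolution array layout described earlier, with time in the first column and the inclination in degrees in the last column):
```python
import numpy as np

# A flip is a sign change of cos(i) between successive output steps.
cos_i = np.cos(np.radians(evolution[:, -1]))
flip_idx = np.where(np.diff(np.sign(cos_i)) != 0)[0]
flip_times = evolution[flip_idx, 0]
print(flip_times)
```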
## The `vectorial` module
As mentioned at the beginning, the `kozai` package offers two classes to evolve hierarchical triples, of which `TripleDelaunay` is currently more powerful. But in cases where the test particle approximation is used the `TripleVectorial` class may be preferable. The two classes have been designed to have APIs as similar to each other as possible. Thus the methods between the two classes are mostly the same. The major differences are as follows:
- The mass of the secondary cannot be set in `TripleVectorial` --- it is always 0. Similarly, the mass of the secondary cannot be set to 0 in `TripleDelaunay` (although it can be set to some very small non-zero value).
- Post-Newtonian terms are not supported in `TripleVectorial` yet.
- The hexadecapole term is not included in `TripleVectorial` yet.
- `TripleVectorial` has an additional method called `flip_period` which can be used to numerically calculate the time between flips. This will be described in more detail a little later.
### Evolving a triple in `vectorial`
Triples can be made in `TripleVectorial` in much the same way as in `TripleDelaunay`:
```python
from kozai.vectorial import TripleVectorial
```
```python
triple = TripleVectorial()
```
```python
evolution = triple.evolve(1e5)
```
We can examine the eccentricity evolution in the same way as before:
```python
plot(evolution[:, 0], 1 - evolution[:, 2])
yscale('log')
xlim([0, 1e5])
xlabel('t (yr)')
ylabel('1 - e');
```

And similarly we can examine the evolution of the inclination:
```python
plot(evolution[:, 0], evolution[:, -1])
xlim([0, 1e5])
xlabel('t (yr)')
ylabel('inclination (degrees)')
plot(np.linspace(0, 1e5, num=50), 90 * np.ones(50), ls=':');
```

And just as with `TripleDelaunay` we can just take the eccentricity maxima:
```python
triple.reset()
```
```python
extrema = triple.extrema(1e5)
```
```python
plot(evolution[:, 0], 1 - evolution[:, 2])
yscale('log')
xlim([0, 1e5])
xlabel('t (yr)')
ylabel('1 - e')
scatter(extrema[:, 0], 1 - extrema[:, 2], c='r', s=75);
```

```python
plot(evolution[:, 0], evolution[:, -1])
xlim([0, 1e5])
xlabel('t (yr)')
ylabel('inclination (degrees)')
plot(np.linspace(0, 1e5, num=50), 90 * np.ones(50), ls=':')
scatter(extrema[:, 0], extrema[:, -1], c='r', s=75);
```

### The `flip_period` method
The `TripleVectorial` class has one unique method: `flip_period`. This method integrates the triple until some specified number of flips has occurred (three by default) and returns the average time between flips. This method was not included in `TripleDelaunay` because arbitrarily large eccentricities occur when the triple passes through an inclination slightly above $90^{\circ}$ that depends in a non-trivial way on the orbital and physical parameters of the system. In the test particle case this critical inclination is always $90^{\circ}$, so it is easy to know when arbitrarily large eccentricities are possible.
The method can be used very easily:
```python
triple = TripleVectorial()
p = triple.flip_period()
print(p)
```
98594.8567867995
Let's compare this to evolving the triple directly.
```python
triple.reset()
evolution = triple.evolve(4e5)
```
```python
plot(evolution[:, 0], 1 - evolution[:, 2], c='lightgray')
locator_params(nbins=4)
yscale('log')
xlim([0, 4e5])
xlabel('t (yr)')
ylabel('1 - e')
for i in range(1, 5):
plot(i * p * np.ones(50), np.logspace(-5, 0, num=50), ls=':', c='k');
```

So the flip period looks to have been calculated correctly!
## The `kl_period` module
The `kl_period` module has a number of utilities to calculate the timescales in hierarchical triples. To start, let's import the module, create a triple, and calculate the orbital times:
```python
import kozai.kl_period as klp
```
```python
triple = TripleVectorial(a1=1, a2=10, e1=.05, e2=.1, inc=80)
```
```python
klp.P_in(triple)
```
1.0
```python
klp.P_out(triple)
```
22.360679774997898
We can also use the standard KL timescale to estimate the period of KL oscillations:
$$t_{\textrm{KL}} \sim \frac{8}{15 \pi} \left(1 + \frac{m_1}{m_3}\right) \left(\frac{P_{\textrm{out}}^2}{P_{\textrm{in}}}\right)\left(1 - e_2^2\right)^{3/2}$$
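As a quick check, this estimate can be evaluated directly from the expression above. A minimal sketch, assuming the triple exposes `m1`, `m3`, and `e2` attributes (as `TripleDelaunay` does) and reusing the orbital periods computed above:
```python
import numpy as np

# Order-of-magnitude KL timescale evaluated straight from the expression above.
t_kl = (8.0 / (15.0 * np.pi)) * (1.0 + triple.m1 / triple.m3) \
       * (klp.P_out(triple)**2 / klp.P_in(triple)) \
       * (1.0 - triple.e2**2)**1.5
print(t_kl)
```
The `kl_period_oom` helper gives this same kind of estimate: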
```python
klp.kl_period_oom(triple)
```
167.2251703899268
Let's compare this to the evolution of the triple.
```python
evolution = triple.evolve(1e3)
```
```python
plot(evolution[:, 0], 1 - evolution[:, 2])
xlim([0, 1e3])
xlabel('t (yr)')
ylabel('1 - e')
yscale('log');
```

So the timescale is off by a factor of a few. For `TripleVectorial` objects we can calculate the period exactly, however, using equation (33) of Antognini (2015), which is implemented in `kl_period`:
```python
triple.reset()
klp.kl_period(triple)
```
/home/joe/code/kozai/kozai/kl_period.py:75: IntegrationWarning: The algorithm does not converge. Roundoff error is detected
in the extrapolation table. It is assumed that the requested tolerance
cannot be achieved, and that the returned result (if full_output = 1) is
the best which can be obtained.
return quad(
716.4024387862976
This matches the observed period much more closely. We can verify this by evolving the triple and calculating the mean time between eccentricity maxima using the `numeric_kl_period` function:
```python
triple.reset()
klp.numeric_kl_period(triple)
```
673.5476431427444
Evidently the numeric period does not match the analytic period very closely. This is because the length of a KL oscillation varies from one oscillation to the next due to the octupole-order term. If the octupole term is turned off, these variations should vanish and we should match the result from `kl_period` much more closely:
```python
triple.reset()
triple.octupole = False
klp.numeric_kl_period(triple)
```
715.9436182420778
One final feature of the `kl_period` module is the `is_librating` function. As its name implies, it returns `True` if the triple librates and `False` if it rotates:
```python
klp.is_librating(triple)
```
False
## The `ekm` module
The last module we'll explore is the `ekm` module. This module presents a new class called `TripleOctupole`. This class makes it possible to evolve a hierarchical triple in the test particle limit by averaging over individual KL oscillations so as to capture only the contribution from the octupole term.
```python
from kozai.ekm import TripleOctupole
triple = TripleOctupole(e1=.1, Omega=180, inc=80, epsoct=.01)
```
We can evolve this triple in time. The `evolve` method returns the following:
- $t$ (in units of the secular timescale)
- $j_z$
- $\Omega$ (in degrees)
- $\left< f_j \right>$
- $\left< f_{\Omega} \right>$
- $x$
See Katz et al. (2011) for definitions of these quantities.
```python
evolution = triple.evolve(400)
```
We can now see the change in the $z$-component of the angular momentum as the inner binary undergoes a few flips:
```python
plot(evolution[:, 0], evolution[:, 1])
xlim([0, 400])
xlabel('t (yr)')
ylabel('j_z');
```

As with `TripleDelaunay` and `TripleVectorial` we can print out the properties of the triple in JSON format:
```python
print(triple)
```
{
"CKL": 0.01,
"Omega": 180.0,
"algo": "vode",
"atol": 1e-09,
"chi": 0.022720067311553332,
"cputstop": 300,
"e1": 0.1,
"g1": 0.0,
"inc": 80.0,
"jz": 0.1727777552550541,
"maxoutput": 1000000,
"outfreq": 1,
"phiq": 0.024926076355487686,
"rtol": 1e-09,
"tstop": 400
}
Finally, we can calculate the time between flips two different ways. We can either do it analytically using equation (67) of Antognini (2015) using the `flip_period` method:
```python
triple.reset()
triple.flip_period()
```
/home/joe/code/kozai/kozai/ekm.py:297: IntegrationWarning: The algorithm does not converge. Roundoff error is detected
in the extrapolation table. It is assumed that the requested tolerance
cannot be achieved, and that the returned result (if full_output = 1) is
the best which can be obtained.
P = quad(
123.6079642426734
We can also calculate the flip period numerically by evolving the triple directly using `numeric_flip_period`:
```python
triple.numeric_flip_period()
```
123.53423155777053
As we would hope, these two calculations yield very similar results.
|
joe-antogniniREPO_NAMEkozaiPATH_START.@kozai_extracted@kozai-master@[email protected]@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/jax/_src/third_party/README.md",
"type": "Markdown"
}
|
This sub-directory contains third-party code for which Google does not have
copyright. Each sub-directory should correspond to a third-party library and
must contain the appropriate LICENSE file.
See [instructions](https://opensource.google/docs/releasing/preparing/#third-party-components).
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@_src@[email protected]@.PATH_END.py
|
{
"filename": "compute_second_order_aperture_mass_correlations_MS_shapenoise.py",
"repo_name": "sheydenreich/threepoint",
"repo_path": "threepoint_extracted/threepoint-main/python_scripts/old/compute_second_order_aperture_mass_correlations_MS_shapenoise.py",
"type": "Python"
}
|
from matplotlib import use
from file_loader import get_gamma_millennium_shapenoise
from utility import aperture_mass_computer,extract_second_order_aperture_masses_of_field
import numpy as np
import sys
from tqdm import tqdm
import multiprocessing.managers
from multiprocessing import Pool
from astropy.io import fits
import os
shapenoise=1
class MyManager(multiprocessing.managers.BaseManager):
pass
MyManager.register('np_zeros', np.zeros, multiprocessing.managers.ArrayProxy)
process_parallel=False
startpath = '/home/laila/OneDrive/1_Work/5_Projects/02_3ptStatistics/Map3_Covariances/MS/' # '/vol/euclid6/euclid6_ssd/sven/threepoint_with_laila/Map3_Covariances/MS/'
def compute_aperture_masses_of_field(los,theta_ap_array,save_map=None,use_polynomial_filter=False):
fieldsize = 4.*60
npix = 4096
field = get_gamma_millennium_shapenoise(los, shapenoise)
result = extract_second_order_aperture_masses_of_field(field,npix,theta_ap_array,fieldsize,compute_mcross=False,save_map=save_map,use_polynomial_filter=use_polynomial_filter)
return result
def compute_aperture_masses_of_field_kernel(kwargs):
result, los, theta_ap_array, save_map, use_polynomial_filter, realisation = kwargs
map2=compute_aperture_masses_of_field(los, theta_ap_array, save_map=save_map, use_polynomial_filter=use_polynomial_filter)
result[:,realisation]=map2
def compute_all_aperture_masses(all_los,savepath,aperture_masses = [1.17,2.34,4.69,9.37],n_processes = 64,use_polynomial_filter=False):
n_files = len(all_los)
n_thetas=len(aperture_masses)
if(process_parallel):
m = MyManager()
m.start()
results=m.np_zeros((n_thetas, n_files))
with Pool(processes=n_processes) as p:
args=[[results, all_los[i], aperture_masses, None, use_polynomial_filter, i] for i in range(n_files)]
for i in tqdm(p.imap_unordered(compute_aperture_masses_of_field_kernel, args), total=n_files):
pass
np.savetxt(savepath+f'map_squared_sigma_{shapenoise}',results)
else:
for los in all_los:
print(f"Processing {los}")
map2=compute_aperture_masses_of_field(los, aperture_masses, save_map=None, use_polynomial_filter=use_polynomial_filter)
np.savetxt(savepath+f"map_squared_{los}_sigma_{shapenoise}.dat", map2)
if(__name__=='__main__'):
# print("Computing test aperture mass maps:")
# path_kappa_dustgrain = "/vol/euclid7/euclid7_2/llinke/HOWLS/convergence_maps/DUSTGRAIN_COSMO_128/kappa_noise_0_LCDM_Om02_ks_nomask_shear.fits"
all_los = range(64)
# if not 'SLICS' in dirpath:
# dir_end_path = dirpath.split('/')[-1]
savepath = startpath + 'map_squared_our_thetas'
print('Writing summary statistics to ',savepath)
if not os.path.exists(savepath):
os.makedirs(savepath)
compute_all_aperture_masses(all_los,savepath+'/',n_processes=10,aperture_masses = [2,4,8,16])
# for (dirpath,_,_filenames) in os.walk(startpath+"shear_catalogues/"):
# if(len(_filenames)>2):
# filenames = np.sort([filename for filename in _filenames if '.fits' in filename])
# # dir_end_path = dirpath.split('/')[-1]
# savepath = dirpath.split('shear_catalogues')[0]+'map_cubed_our_thetas'+dirpath.split('shear_catalogues')[1]
# print('Reading shear catalogues from ',dirpath)
# print('Writing summary statistics to ',savepath)
# if not os.path.exists(savepath):
# os.makedirs(savepath)
# compute_all_aperture_masses(dirpath+'/',filenames,savepath+'/',aperture_masses = [0.5,1,2,4,8,16,32],n_processes=32)
# for (dirpath,_,_filenames) in os.walk(startpath+"shear_catalogues/"):
# if(len(_filenames)>2):
# filenames = np.sort([filename for filename in _filenames if '.fits' in filename])
# # dir_end_path = dirpath.split('/')[-1]
# savepath = dirpath.split('shear_catalogues')[0]+'map_cubed_1_to_8_arcmin'+dirpath.split('shear_catalogues')[1]
# print('Reading shear catalogues from ',dirpath)
# print('Writing summary statistics to ',savepath)
# if not os.path.exists(savepath):
# os.makedirs(savepath)
# compute_all_aperture_masses(dirpath+'/',filenames,savepath+'/',aperture_masses = [1,2,4,8],n_processes=16)
# for (dirpath,_,_filenames) in os.walk(startpath+"shear_catalogues/"):
# if(len(_filenames)>2):
# filenames = [filename for filename in _filenames if '.fits' in filename]
# # dir_end_path = dirpath.split('/')[-1]
# savepath = dirpath.split('shear_catalogues')[0]+'map_cubed_lower_resolution_intermediate_thetas'+dirpath.split('shear_catalogues')[1]
# print('Reading shear catalogues from ',dirpath)
# print('Writing summary statistics to ',savepath)
# if not os.path.exists(savepath):
# os.makedirs(savepath)
# compute_all_aperture_masses(dirpath+'/',filenames,savepath+'/',aperture_masses = [1.085,1.085*2,1.085*4,1.085*8],n_processes=32)
|
sheydenreichREPO_NAMEthreepointPATH_START.@threepoint_extracted@threepoint-main@python_scripts@old@compute_second_order_aperture_mass_correlations_MS_shapenoise.py@.PATH_END.py
|
{
"filename": "_namelengthsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram2d/hoverlabel/_namelengthsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="namelengthsrc",
parent_name="histogram2d.hoverlabel",
**kwargs,
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@histogram2d@hoverlabel@[email protected]_END.py
|
{
"filename": "_text.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/title/_text.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="text",
parent_name="scatter3d.marker.colorbar.title",
**kwargs,
):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@scatter3d@marker@colorbar@title@[email protected]_END.py
|
{
"filename": "ovd_360.py",
"repo_name": "dazhiUBC/SCUBA2_MF",
"repo_path": "SCUBA2_MF_extracted/SCUBA2_MF-main/blank/ovd_360.py",
"type": "Python"
}
|
import pandas as pd
from MF import *
from astropy.coordinates import SkyCoord
ntrials = 10000 # number of mock maps
def read_ca(fname):
cata = pd.read_csv(fname,delimiter= ' ' )
f = cata['flux'] # flux
e = cata['err'] # err
c = SkyCoord(ra=cata['ra'],dec=cata['dec'],unit=(u.hourangle, u.deg)) # coordinate
return f,e,c
def read_sim(fname):
cata = pd.read_csv(fname,delimiter= ' ' )
f = cata['flux']
c = SkyCoord(ra=cata['ra'],dec=cata['dec'],unit=(u.hourangle, u.deg))
return f,c
def read_spu(fname):
cata = pd.read_csv(fname,delimiter= ' ' )
f = cata['spu_flux']
c = SkyCoord(ra=cata['ra'],dec=cata['dec'],unit=(u.hourangle, u.deg))
return f,c
# read the actual catalog
f_my, e_my, c_my = read_ca('../sources_4C23_850_cal_crop_MF.dat')
# define the position of the HzRGs
rg = SkyCoord(ra=316.811766,dec=23.529172, unit= u.deg)
# finally, plot the overdensity as a function of angular position (in 30 degree bins)
# of course, this can be combined with the radial and flux analysis to reduce the run time.
# This script is based on multiple scripts that I used in the past and I am too lazy to modify it.
# Hopefully it won't take too much time... (first rule: if it works, don't touch it)
# as others suggested, it is plotted as a histogram
nca = np.zeros(12) # number count for different angular position
nca_bright = np.zeros(12)
nca_faint = np.zeros(12)
for i in range(12):
for j in range(len(c_my)):
if 30*i<=c_my[j].position_angle(rg).degree<30*(i+1):
nca[i] = nca[i]+1
if f_my[j]>=5:
nca_bright[i] = nca_bright[i]+1
else:
nca_faint[i] = nca_faint[i]+1
na_sim = []
nab_sim = []
naf_sim = []
for i in range(ntrials):
nca_sim = np.zeros(12)
ncab_sim = np.zeros(12) # bright sources > 5 mJy
ncaf_sim = np.zeros(12)
f_sim, c_sim = read_sim('mock_850/mock_map'+str(i)+'_rec.dat')
for j in range(len(c_sim)):
for k in range(12):
if 30*k<=c_sim[j].position_angle(rg).degree<30*(k+1):
nca_sim[k] = nca_sim[k]+1
if f_sim[j]>=5:
ncab_sim[k] = ncab_sim[k]+1
else:
ncaf_sim[k] = ncaf_sim[k]+1
na_sim.append(nca_sim)
nab_sim.append(ncab_sim)
naf_sim.append(ncaf_sim)
na_sim = np.array(na_sim)
nab_sim = np.array(nab_sim)
naf_sim = np.array(naf_sim)
np.save('angular360/all.npy', np.array(na_sim))
np.save('angular360/bright.npy', np.array(nab_sim))
np.save('angular360/faint.npy', np.array(naf_sim))
# calculate the mean for every 20 maps
ova = []
ova_bright = []
ova_med = []
ova_bright_med = []
ova_84 = []
ova_bright_84 = []
ova_16 = []
ova_bright_16 = []
for i in range(int(ntrials/20)):
na_sim_mean = np.nanmean(na_sim[i*20:(i+1)*20],axis=0) # each annulus mean value
nab_sim_mean = np.nanmean(nab_sim[i*20:(i+1)*20],axis=0)
ova.append((nca-na_sim_mean)/na_sim_mean)
ova_bright.append((nca_bright-nab_sim_mean)/nab_sim_mean)
ova = np.array(ova)
ova_bright = np.array(ova_bright)
for i in range(12):
ova_med.append(np.median(ova[:,i]))
ova_bright_med.append(np.median(ova_bright[:,i]))
ova_84.append(np.nanpercentile(ova[:,i],84.1))
ova_bright_84.append(np.nanpercentile(ova_bright[:,i],84.1))
ova_16.append(np.nanpercentile(ova[:,i],15.9))
ova_bright_16.append(np.nanpercentile(ova_bright[:,i],15.9))
# plot as a function of angle
a = np.linspace(15,345,12)
plt.plot(a,ova_med, color='tab:blue',label="Overdensity")
plt.plot(a,ova_bright_med,color= 'tab:red',label="Overdensity(>5mJy)" )
plt.scatter(a,ova_med, color='tab:blue')#,label="Overdensity(<4')")
plt.scatter(a,ova_bright_med,color= 'tab:red')#,label="Overdensity(>4')" )
plt.fill_between(a,ova_16,ova_84,color='tab:blue',alpha=0.1 )
plt.fill_between(a,ova_bright_16,ova_bright_84,color='tab:red',alpha=0.1 )
plt.legend()
plt.xlabel('Position angle (degree)')
plt.ylabel('Overdensity')
plt.savefig('plot/360ovd_a.pdf',bbox_inches='tight')
plt.savefig('plot/360ovd_a.eps',bbox_inches='tight') # traditional visualization
plt.close()
plt.bar(a-7,ova_med,14,color='tab:blue',label="Overdensity") # bar demonstration
plt.bar(a+7,ova_bright_med,14,color= 'tab:red',label="Overdensity(>5mJy)" )
plt.legend()
plt.xlabel('Position angle (degree)')
plt.ylabel('Overdensity')
plt.savefig('plot/360ovd_a_bar.pdf',bbox_inches='tight')
plt.savefig('plot/360ovd_a_bar.eps',bbox_inches='tight')
plt.close()
|
dazhiUBCREPO_NAMESCUBA2_MFPATH_START.@SCUBA2_MF_extracted@SCUBA2_MF-main@blank@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "ratt-ru/eidos",
"repo_path": "eidos_extracted/eidos-master/eidos/__init__.py",
"type": "Python"
}
|
ratt-ruREPO_NAMEeidosPATH_START.@eidos_extracted@eidos-master@eidos@[email protected]_END.py
|
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/barpolar/hoverlabel/font/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="barpolar.hoverlabel.font", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@barpolar@hoverlabel@font@[email protected]_END.py
|
{
"filename": "_tickformatstopdefaults.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/polar/angularaxis/_tickformatstopdefaults.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickformatstopdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self,
plotly_name="tickformatstopdefaults",
parent_name="layout.polar.angularaxis",
**kwargs,
):
super(TickformatstopdefaultsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
data_docs=kwargs.pop(
"data_docs",
"""
""",
),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@polar@angularaxis@[email protected]_END.py
|
{
"filename": "test_overwrite.py",
"repo_name": "aewallin/allantools",
"repo_path": "allantools_extracted/allantools-master/tests/functional_tests/test_overwrite.py",
"type": "Python"
}
|
#!/usr/bin/python
import sys
sys.path.append("..")
import allantools as at
import numpy
import pytest
def test_overwrite():
""" Tests if data is overwritten as pointed out in issue
https://github.com/aewallin/allantools/issues/76
"""
x1 = at.noise.white(num_points=1024)
x2 = at.noise.pink(num_points=1024)
ds1 = at.Dataset(x1)
ds2 = at.Dataset(x2)
r1 = ds1.compute('oadev')
r2 = ds2.compute('oadev')
# If both are identical, something is wrong...
assert(not numpy.allclose(r1['stat'], r2['stat']))
if __name__ == "__main__":
test_overwrite()
|
aewallinREPO_NAMEallantoolsPATH_START.@allantools_extracted@allantools-master@tests@functional_tests@[email protected]_END.py
|
{
"filename": "test_Smoothing.py",
"repo_name": "sambit-giri/tools21cm",
"repo_path": "tools21cm_extracted/tools21cm-master/tests/test_Smoothing.py",
"type": "Python"
}
|
import numpy as np
import tools21cm as t2c
data = np.zeros((9,9,9))
data[4,4,4] = 1
def test_smooth_coeval():
'''
With this, smooth_coeval_gauss, smooth_coeval_tophat, gauss_kernel and tophat_kernel
are also tested.
'''
smt = t2c.smooth_coeval(data, 9, box_size_mpc=90)
assert smt[4,4,4]<1
def test_interpolate2d():
sl = data[:,:,4]
out1 = t2c.interpolate2d(sl, np.array([4]), np.array([4.5]), order=1).squeeze()
out2 = t2c.interpolate2d(sl, np.array([4]), np.array([4]), order=1).squeeze()
assert out1==0.5
assert out2==1.0
def test_interpolate3d():
out1 = t2c.interpolate3d(data, np.array([4]), np.array([4]), np.array([4.5]), order=1).squeeze()
out2 = t2c.interpolate3d(data, np.array([4]), np.array([4]), np.array([4]), order=1).squeeze()
assert out1==0.5
assert out2==1.0
def test_tophat_kernel_3d():
kernel = t2c.tophat_kernel_3d(5,5)
assert np.all((kernel-1/5**3)<0.001)
|
sambit-giriREPO_NAMEtools21cmPATH_START.@tools21cm_extracted@tools21cm-master@tests@[email protected]_END.py
|
{
"filename": "test_trig.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/special/tests/test_trig.py",
"type": "Python"
}
|
import numpy as np
from numpy.testing import assert_equal, assert_allclose, suppress_warnings
from scipy.special._ufuncs import _sinpi as sinpi
from scipy.special._ufuncs import _cospi as cospi
def test_integer_real_part():
x = np.arange(-100, 101)
y = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10)))
x, y = np.meshgrid(x, y)
z = x + 1j*y
# In the following we should be *exactly* right
res = sinpi(z)
assert_equal(res.real, 0.0)
res = cospi(z)
assert_equal(res.imag, 0.0)
def test_half_integer_real_part():
x = np.arange(-100, 101) + 0.5
y = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10)))
x, y = np.meshgrid(x, y)
z = x + 1j*y
# In the following we should be *exactly* right
res = sinpi(z)
assert_equal(res.imag, 0.0)
res = cospi(z)
assert_equal(res.real, 0.0)
def test_intermediate_overlow():
# Make sure we avoid overflow in situations where cosh/sinh would
# overflow but the product with sin/cos would not
sinpi_pts = [complex(1 + 1e-14, 227),
complex(1e-35, 250),
complex(1e-301, 445)]
# Data generated with mpmath
sinpi_std = [complex(-8.113438309924894e+295, -np.inf),
complex(1.9507801934611995e+306, np.inf),
complex(2.205958493464539e+306, np.inf)]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in multiply")
for p, std in zip(sinpi_pts, sinpi_std):
assert_allclose(sinpi(p), std)
# Test for cosine, less interesting because cos(0) = 1.
p = complex(0.5 + 1e-14, 227)
std = complex(-8.113438309924894e+295, -np.inf)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in multiply")
assert_allclose(cospi(p), std)
def test_zero_sign():
y = sinpi(-0.0)
assert y == 0.0
assert np.signbit(y)
y = sinpi(0.0)
assert y == 0.0
assert not np.signbit(y)
y = cospi(0.5)
assert y == 0.0
assert not np.signbit(y)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@special@tests@[email protected]_END.py
|
{
"filename": "test_physical_numpyfuncs.py",
"repo_name": "juliotux/astropop",
"repo_path": "astropop_extracted/astropop-main/tests/test_physical_numpyfuncs.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# flake8: noqa: F403, F405
import numpy as np
import pytest
from astropop.math.physical import QFloat, UnitsError, units
from packaging import version
from astropop.testing import *
# Testing qfloat compatibility with Numpy ufuncs and array functions.
class TestQFloatNumpyArrayFuncs:
"""Test numpy array functions for numpy comatibility."""
def test_error_not_handled(self):
# handled QFloat must be ok
qf = QFloat([1.0, 2.0, 3.0], [0.1, 0.2, 0.3], "m")
res = np.sqrt(qf)
# not handled QFloat must raise
with pytest.raises(TypeError):
np.frexp(qf)
def test_error_only_call_method(self):
qf = QFloat([1.0, 2.0, 3.0], [0.1, 0.2, 0.3], "m")
with pytest.raises(TypeError):
np.sin.at(qf, 0)
def test_qfloat_np_append(self):
qf1 = QFloat([1.0, 2.0, 3.0], [0.1, 0.2, 0.3], unit="m")
qf2 = QFloat([1.0], [0.1], unit="km")
qf3 = QFloat(1.0, 0.1, unit="km")
qf4 = QFloat(0, 0)
qf = np.append(qf1, qf1)
assert_equal(qf.nominal, [1.0, 2.0, 3.0, 1.0, 2.0, 3.0])
assert_equal(qf.std_dev, [0.1, 0.2, 0.3, 0.1, 0.2, 0.3])
assert_equal(qf.unit, qf1.unit)
# This should work and convert the unit.
qf = np.append(qf1, qf2)
assert_equal(qf.nominal, [1.0, 2.0, 3.0, 1000.0])
assert_equal(qf.std_dev, [0.1, 0.2, 0.3, 100.0])
assert_equal(qf.unit, qf1.unit)
# Also this should work and convert the unit in the same way.
qf = np.append(qf1, qf3)
assert_equal(qf.nominal, [1.0, 2.0, 3.0, 1000.0])
assert_equal(qf.std_dev, [0.1, 0.2, 0.3, 100.0])
assert_equal(qf.unit, qf1.unit)
# This should fail due to unit
with pytest.raises(UnitsError):
qf = np.append(qf1, qf4)
# Testing with axis
qf1 = QFloat([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], "m",)
qf = np.append(qf1, QFloat([[8.0], [9.0]], [[0.8], [0.9]], "m"),
axis=1)
assert_equal(qf.nominal, [[1.0, 2.0, 3.0, 8.0], [4.0, 5.0, 6.0, 9.0]])
assert_equal(qf.std_dev, [[0.1, 0.2, 0.3, 0.8], [0.4, 0.5, 0.6, 0.9]])
qf = np.append(qf1, QFloat([[7.0, 8.0, 9.0]], [[0.7, 0.8, 0.9]], "m"),
axis=0)
assert_equal(qf.nominal, [[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0]])
assert_equal(qf.std_dev, [[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6],
[0.7, 0.8, 0.9]])
def test_qfloat_np_around(self):
# single case
qf = np.around(QFloat(1.02549, 0.135964))
assert_equal(qf.nominal, 1)
assert_equal(qf.std_dev, 0)
qf = np.around(QFloat(1.02549, 0.135964), decimals=2)
assert_equal(qf.nominal, 1.03)
assert_equal(qf.std_dev, 0.14)
# just check array too
qf = np.around(QFloat([1.03256, 2.108645], [0.01456, 0.594324]),
decimals=2)
assert_equal(qf.nominal, [1.03, 2.11])
assert_equal(qf.std_dev, [0.01, 0.59])
def test_qfloat_np_atleast_1d(self):
# This function is not implemented, so should raise
with pytest.raises(TypeError):
np.atleast_1d(QFloat([1.0, 2.0], [0.1, 0.2], "m"))
def test_qfloat_np_atleast_2d(self):
# This function is not implemented, so should raise
with pytest.raises(TypeError):
np.atleast_2d(QFloat([1.0, 2.0], [0.1, 0.2], "m"))
def test_qfloat_np_atleast_3d(self):
# This function is not implemented, so should raise
with pytest.raises(TypeError):
np.atleast_3d(QFloat([1.0, 2.0], [0.1, 0.2], "m"))
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_broadcast(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_broadcast_to(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_ceil(self):
raise NotImplementedError
def test_qfloat_np_clip(self):
arr = np.arange(10)
qf = QFloat(arr, arr * 0.1, "m")
res = np.clip(qf, 2, 8)
tgt = [2, 2, 2, 3, 4, 5, 6, 7, 8, 8]
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, arr * 0.1)
assert_equal(qf.unit, res.unit)
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_columnstack(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_concatenate(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_copyto(self):
raise NotImplementedError
def test_qfloat_np_copysign(self):
arr = np.arange(10)
qf = QFloat(arr, arr * 0.1, "m")
res = np.copysign(qf, -1)
assert_almost_equal(res.nominal, -arr)
assert_almost_equal(res.std_dev, arr * 0.1)
assert_equal(qf.unit, res.unit)
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_cross(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_cumprod(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_cumsum(self):
raise NotImplementedError
def test_qfloat_np_delete(self):
a = np.array([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]])
qf = QFloat(a, a * 0.1, "m")
res1 = np.delete(qf, 1, axis=0)
assert_almost_equal(res1.nominal, [[1.0, 2.0, 3.0, 4.0],
[9.0, 10.0, 11.0, 12.0]])
assert_almost_equal(res1.std_dev, [[0.1, 0.2, 0.3, 0.4],
[0.9, 1.0, 1.1, 1.2]])
assert_equal(res1.unit, qf.unit)
res2 = np.delete(qf, 1, axis=1)
assert_almost_equal(res2.nominal, [[1.0, 3.0, 4.0],
[5.0, 7.0, 8.0],
[9.0, 11.0, 12.0]])
assert_almost_equal(res2.std_dev, [[0.1, 0.3, 0.4],
[0.5, 0.7, 0.8],
[0.9, 1.1, 1.2]])
assert_equal(res2.unit, qf.unit)
res3 = np.delete(qf, np.s_[::2], 1)
assert_almost_equal(res3.nominal,
[[2.0, 4.0], [6.0, 8.0], [10.0, 12.0]])
assert_almost_equal(res3.std_dev,
[[0.2, 0.4], [0.6, 0.8], [1.0, 1.2]])
assert_equal(res3.unit, qf.unit)
res4 = np.delete(qf, [1, 3, 5])
assert_almost_equal(res4.nominal,
[1.0, 3.0, 5.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
assert_almost_equal(res4.std_dev,
[0.1, 0.3, 0.5, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2])
assert_equal(res4.unit, qf.unit)
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_diff(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_dstack(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_ediff1d(self):
raise NotImplementedError
def test_qfloat_np_expand_dims(self):
qf = QFloat(1.0, 0.1, "m")
res1 = np.expand_dims(qf, axis=0)
assert_almost_equal(res1.nominal, [1.0])
assert_almost_equal(res1.std_dev, [0.1])
assert_equal(res1.unit, qf.unit)
assert_equal(res1.shape, (1,))
qf = QFloat([1.0, 2.0], [0.1, 0.2], "m")
res2 = np.expand_dims(qf, axis=0)
assert_almost_equal(res2.nominal, [[1.0, 2.0]])
assert_almost_equal(res2.std_dev, [[0.1, 0.2]])
assert_equal(res2.unit, qf.unit)
assert_equal(res2.shape, (1, 2))
res3 = np.expand_dims(qf, axis=1)
assert_almost_equal(res3.nominal, [[1.0], [2.0]])
assert_almost_equal(res3.std_dev, [[0.1], [0.2]])
assert_equal(res3.unit, qf.unit)
assert_equal(res3.shape, (2, 1))
if version.parse(np.version.full_version) >= version.parse('1.18.0'):
res4 = np.expand_dims(qf, axis=(2, 0))
assert_almost_equal(res4.nominal, [[[1.0], [2.0]]])
assert_almost_equal(res4.std_dev, [[[0.1], [0.2]]])
assert_equal(res4.unit, qf.unit)
assert_equal(res4.shape, (1, 2, 1))
def test_qfloat_np_flip(self):
a = np.arange(8).reshape((2, 2, 2))
qf = QFloat(a, a * 0.1, "m")
res1 = np.flip(qf)
assert_equal(res1.nominal, a[::-1, ::-1, ::-1])
assert_equal(res1.std_dev, a[::-1, ::-1, ::-1] * 0.1)
assert_equal(res1.unit, qf.unit)
res2 = np.flip(qf, 0)
assert_equal(res2.nominal, a[::-1, :, :])
assert_equal(res2.std_dev, a[::-1, :, :] * 0.1)
assert_equal(res2.unit, qf.unit)
res3 = np.flip(qf, 1)
assert_equal(res3.nominal, a[:, ::-1, :])
assert_equal(res3.std_dev, a[:, ::-1, :] * 0.1)
assert_equal(res3.unit, qf.unit)
res4 = np.flip(qf, 2)
assert_equal(res4.nominal, a[:, :, ::-1])
assert_equal(res4.std_dev, a[:, :, ::-1] * 0.1)
assert_equal(res4.unit, qf.unit)
# just some static check
qf = QFloat([[1, 2], [3, 4]], [[0.1, 0.2], [0.3, 0.4]], "m")
res5 = np.flip(qf)
assert_equal(res5.nominal, [[4, 3], [2, 1]])
assert_equal(res5.std_dev, [[0.4, 0.3], [0.2, 0.1]])
assert_equal(res5.unit, qf.unit)
res6 = np.flip(qf, 0)
assert_equal(res6.nominal, [[3, 4], [1, 2]])
assert_equal(res6.std_dev, [[0.3, 0.4], [0.1, 0.2]])
assert_equal(res6.unit, qf.unit)
res7 = np.flip(qf, 1)
assert_equal(res7.nominal, [[2, 1], [4, 3]])
assert_equal(res7.std_dev, [[0.2, 0.1], [0.4, 0.3]])
assert_equal(res7.unit, qf.unit)
def test_qfloat_np_fliplr(self):
a = np.arange(8).reshape((2, 2, 2))
qf = QFloat(a, a * 0.1, "m")
res = np.fliplr(qf)
assert_equal(res.nominal, a[:, ::-1, :])
assert_equal(res.std_dev, a[:, ::-1, :] * 0.1)
assert_equal(res.unit, qf.unit)
qf = QFloat([[1, 2], [3, 4]], [[0.1, 0.2], [0.3, 0.4]], "m")
res = np.fliplr(qf)
assert_equal(res.nominal, [[2, 1], [4, 3]])
assert_equal(res.std_dev, [[0.2, 0.1], [0.4, 0.3]])
assert_equal(res.unit, qf.unit)
def test_qfloat_np_flipud(self):
a = np.arange(8).reshape((2, 2, 2))
qf = QFloat(a, a * 0.1, "m")
res = np.flipud(qf)
assert_equal(res.nominal, a[::-1, :, :])
assert_equal(res.std_dev, a[::-1, :, :] * 0.1)
assert_equal(res.unit, qf.unit)
qf = QFloat([[1, 2], [3, 4]], [[0.1, 0.2], [0.3, 0.4]], "m")
res = np.flipud(qf)
assert_equal(res.nominal, [[3, 4], [1, 2]])
assert_equal(res.std_dev, [[0.3, 0.4], [0.1, 0.2]])
assert_equal(res.unit, qf.unit)
def test_qfloat_np_insert(self):
a = np.array([[1, 2], [3, 4], [5, 6]])
qf = QFloat(a, a * 0.1, "m")
res = np.insert(qf, 5, QFloat(999, 0.1, unit="m"))
assert_almost_equal(res.nominal, [1, 2, 3, 4, 5, 999, 6])
assert_almost_equal(res.std_dev, [0.1, 0.2, 0.3, 0.4, 0.5, 0.1, 0.6])
assert_equal(res.unit, qf.unit)
res = np.insert(qf, 1, QFloat(999, 0.1, unit="m"), axis=1)
assert_almost_equal(res.nominal,
[[1, 999, 2], [3, 999, 4], [5, 999, 6]])
assert_almost_equal(res.std_dev, [[0.1, 0.1, 0.2],
[0.3, 0.1, 0.4],
[0.5, 0.1, 0.6]])
assert_equal(res.unit, qf.unit)
def test_qfloat_np_mean(self):
a = np.arange(8).reshape((2, 4))
qf = QFloat(a, a * 0.1, "m")
res = np.mean(qf)
assert_almost_equal(res.nominal, np.mean(a))
assert_almost_equal(res.std_dev, np.std(a)/np.sqrt(a.size))
assert_equal(res.unit, qf.unit)
res = np.mean(qf, axis=0)
assert_almost_equal(res.nominal, np.mean(a, axis=0))
assert_almost_equal(res.std_dev, np.std(a, axis=0)/np.sqrt(2))
assert_equal(res.unit, qf.unit)
def test_qfloat_np_median(self):
a = np.arange(8).reshape((2, 4))
qf = QFloat(a, a * 0.1, "m")
res = np.median(qf)
assert_almost_equal(res.nominal, np.median(a))
assert_almost_equal(res.std_dev, np.std(a)/np.sqrt(a.size))
assert_equal(res.unit, qf.unit)
res = np.median(qf, axis=0)
assert_almost_equal(res.nominal, np.median(a, axis=0))
assert_almost_equal(res.std_dev, np.std(a, axis=0)/np.sqrt(2))
assert_equal(res.unit, qf.unit)
def test_qfloat_np_moveaxis(self):
arr = np.zeros((3, 4, 5))
qf = QFloat(arr, unit='m')
res = np.moveaxis(qf, 0, -1)
assert_equal(res.shape, (4, 5, 3))
assert_equal(res.unit, qf.unit)
res = np.moveaxis(qf, -1, 0)
assert_equal(res.shape, (5, 3, 4))
assert_equal(res.unit, qf.unit)
res = np.moveaxis(qf, (0, 1), (-1, -2))
assert_equal(res.shape, (5, 4, 3))
assert_equal(res.unit, qf.unit)
res = np.moveaxis(qf, [0, 1, 2], [-1, -2, -3])
assert_equal(res.shape, (5, 4, 3))
assert_equal(res.unit, qf.unit)
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_nancumprod(self):
raise NotImplementedError
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_nancumsum(self):
raise NotImplementedError
def test_qfloat_np_nanmean(self):
arr = np.array([1, 2, 1, np.nan, 1, 2, np.nan, 2])
qf = QFloat(arr, uncertainty=arr*0.1, unit="m")
res = np.nanmean(qf)
assert_almost_equal(res.nominal, 1.5)
assert_almost_equal(res.std_dev,
np.nanstd(qf.nominal)/np.sqrt(qf.size-2))
def test_qfloat_np_nanmedian(self):
arr = np.array([1, 2, 1, np.nan, 1, 2, np.nan, 2])
qf = QFloat(arr, uncertainty=arr*0.1, unit="m")
res = np.nanmedian(qf)
assert_almost_equal(res.nominal, 1.5)
assert_almost_equal(res.std_dev,
np.nanstd(qf.nominal)/np.sqrt(qf.size-2))
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_nanprod(self):
raise NotImplementedError
def test_qfloat_np_nanstd(self):
arr = np.array([1, 2, 1, np.nan, 1, 2, np.nan, 2])
qf = QFloat(arr, uncertainty=arr*0.1, unit="m")
res = np.nanstd(qf)
assert_almost_equal(res, np.nanstd(arr))
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_nansum(self):
raise NotImplementedError
def test_qfloat_np_nanvar(self):
arr = np.array([1, 2, 1, np.nan, 1, 2, np.nan, 2])
qf = QFloat(arr, uncertainty=arr*0.1, unit="m")
res = np.nanvar(qf)
assert_almost_equal(res, np.nanvar(arr))
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_prod(self):
raise NotImplementedError
def test_qfloat_np_ravel(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
qf = QFloat(arr, arr * 0.1, "m")
res = np.ravel(qf)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
def test_qfloat_np_repeat(self):
arr = np.array([1, 2, 3])
tgt = np.array([1, 1, 2, 2, 3, 3])
qf = QFloat(arr, arr * 0.1, "m")
res = np.repeat(qf, 2)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
def test_qfloat_np_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = np.array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]])
qf = QFloat(arr, arr * 0.1, "m")
res = np.reshape(qf, (2, 6))
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
assert_equal(res.shape, (2, 6))
def test_qfloat_np_resize(self):
arr = np.array([[1, 2], [3, 4]])
qf = QFloat(arr, arr * 0.1, "m")
shp = (2, 4)
tgt = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
res = np.resize(qf, shp)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
assert_equal(res.shape, shp)
shp = (4, 2)
tgt = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
res = np.resize(qf, shp)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
assert_equal(res.shape, shp)
shp = (4, 3)
tgt = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
res = np.resize(qf, shp)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
assert_equal(res.shape, shp)
shp = (0,)
# must fail: resizing to an empty (size 0) array is not supported
with pytest.raises(TypeError):
res = np.resize(qf, shp)
def test_qfloat_np_roll(self):
arr = np.arange(10)
qf = QFloat(arr, arr * 0.01, "m")
off = 2
tgt = np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
res = np.roll(qf, off)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.01)
assert_equal(res.unit, qf.unit)
off = -2
tgt = np.array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
res = np.roll(qf, off)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.01)
assert_equal(res.unit, qf.unit)
arr = np.arange(12).reshape((4, 3))
qf = QFloat(arr, arr * 0.01, "m")
ax = 0
off = 1
tgt = np.array([[9, 10, 11], [0, 1, 2], [3, 4, 5], [6, 7, 8]])
res = np.roll(qf, off, axis=ax)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.01)
assert_equal(res.unit, qf.unit)
ax = 1
off = 1
tgt = np.array([[2, 0, 1], [5, 3, 4], [8, 6, 7], [11, 9, 10]])
res = np.roll(qf, off, axis=ax)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.01)
assert_equal(res.unit, qf.unit)
def test_qfloat_np_rollaxis(self):
arr = np.ones((3, 4, 5, 6))
qf = QFloat(arr, arr * 0.01, "m")
res = np.rollaxis(qf, 3, 1)
assert_equal(res.shape, (3, 6, 4, 5))
res = np.rollaxis(qf, 2)
assert_equal(res.shape, (5, 3, 4, 6))
res = np.rollaxis(qf, 1, 4)
assert_equal(res.shape, (3, 5, 6, 4))
def test_qfloat_np_round(self):
# single case
qf = np.round(QFloat(1.02549, 0.135964))
assert_equal(qf.nominal, 1)
assert_equal(qf.std_dev, 0)
qf = np.round(QFloat(1.02549, 0.135964), decimals=2)
assert_equal(qf.nominal, 1.03)
assert_equal(qf.std_dev, 0.14)
# just check array too
qf = np.round(QFloat([1.03256, 2.108645], [0.01456, 0.594324]),
decimals=2)
assert_equal(qf.nominal, [1.03, 2.11])
assert_equal(qf.std_dev, [0.01, 0.59])
def test_qfloat_np_rot90(self):
arr = np.array([[0, 1, 2], [3, 4, 5]])
b1 = np.array([[2, 5], [1, 4], [0, 3]])
b2 = np.array([[5, 4, 3], [2, 1, 0]])
b3 = np.array([[3, 0], [4, 1], [5, 2]])
b4 = np.array([[0, 1, 2], [3, 4, 5]])
qf = QFloat(arr, arr * 0.1, "m")
for k in range(-3, 13, 4):
res = np.rot90(qf, k=k)
assert_equal(res.nominal, b1)
assert_equal(res.std_dev, b1 * 0.1)
assert_equal(res.unit, qf.unit)
for k in range(-2, 13, 4):
res = np.rot90(qf, k=k)
assert_equal(res.nominal, b2)
assert_equal(res.std_dev, b2 * 0.1)
assert_equal(res.unit, qf.unit)
for k in range(-1, 13, 4):
res = np.rot90(qf, k=k)
assert_equal(res.nominal, b3)
assert_equal(res.std_dev, b3 * 0.1)
assert_equal(res.unit, qf.unit)
for k in range(0, 13, 4):
res = np.rot90(qf, k=k)
assert_equal(res.nominal, b4)
assert_equal(res.std_dev, b4 * 0.1)
assert_equal(res.unit, qf.unit)
arr = np.arange(8).reshape((2, 2, 2))
qf = QFloat(arr, arr * 0.1, "m")
ax = (0, 1)
tgt = np.array([[[2, 3], [6, 7]], [[0, 1], [4, 5]]])
res = np.rot90(qf, axes=ax)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
ax = (1, 2)
tgt = np.array([[[1, 3], [0, 2]], [[5, 7], [4, 6]]])
res = np.rot90(qf, axes=ax)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
ax = (2, 0)
tgt = np.array([[[4, 0], [6, 2]], [[5, 1], [7, 3]]])
res = np.rot90(qf, axes=ax)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
ax = (1, 0)
tgt = np.array([[[4, 5], [0, 1]], [[6, 7], [2, 3]]])
res = np.rot90(qf, axes=ax)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
def test_qfloat_np_shape(self):
for shp in [(10,), (11, 12), (11, 12, 13)]:
qf = QFloat(np.ones(shp), np.ones(shp), "m")
assert_equal(np.shape(qf), shp)
def test_qfloat_np_size(self):
for shp in [(10,), (11, 12), (11, 12, 13)]:
qf = QFloat(np.ones(shp), np.ones(shp), "m")
assert_equal(np.size(qf), np.prod(shp))
def test_qfloat_np_squeeze(self):
arr = np.array([[[0], [1], [2]]])
qf = QFloat(arr, arr * 0.01, "m")
res = np.squeeze(qf)
assert_equal(res.shape, (3,))
assert_almost_equal(res.nominal, [0, 1, 2])
assert_almost_equal(res.std_dev, [0, 0.01, 0.02])
assert_equal(res.unit, qf.unit)
res = np.squeeze(qf, axis=0)
assert_equal(res.shape, (3, 1))
assert_almost_equal(res.nominal, [[0], [1], [2]])
assert_almost_equal(res.std_dev, [[0], [0.01], [0.02]])
assert_equal(res.unit, qf.unit)
with pytest.raises(ValueError):
np.squeeze(qf, axis=1)
res = np.squeeze(qf, axis=2)
assert_equal(res.shape, (1, 3))
assert_almost_equal(res.nominal, [[0, 1, 2]])
assert_almost_equal(res.std_dev, [[0, 0.01, 0.02]])
assert_equal(res.unit, qf.unit)
def test_qfloat_np_std(self):
qf = QFloat(np.arange(10), uncertainty=np.arange(10)*0.1)
assert_almost_equal(np.std(qf), 2.87228, decimal=4)
# the per-element uncertainties do not affect the sample standard deviation
qf = QFloat(np.arange(10), uncertainty=np.arange(10))
assert_almost_equal(np.std(qf), 2.87228, decimal=4)
def test_qfloat_np_sum(self):
arr = np.ones(10).reshape((2, 5))
qf = QFloat(arr, arr*0.1, "m")
res = np.sum(qf)
assert_equal(res.nominal, 10)
assert_equal(res.std_dev, 0.1*np.sqrt(10))
assert_equal(res.unit, qf.unit)
res = np.sum(qf, axis=0)
assert_equal(res.shape, [5])
assert_almost_equal(res.nominal, np.ones(5)*2)
assert_almost_equal(res.std_dev, np.ones(5)*np.sqrt(2)*0.1)
assert_equal(res.unit, qf.unit)
def test_qfloat_np_swapaxes(self):
arr = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
tgt = np.array([[[0, 4], [2, 6]], [[1, 5], [3, 7]]])
qf = QFloat(arr, arr * 0.1, "m")
res = np.swapaxes(qf, 0, 2)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
def test_qfloat_np_take(self):
arr = np.array([1, 2, 3, 4, 5])
tgt = np.array([2, 3, 5])
ind = [1, 2, 4]
qf = QFloat(arr, arr * 0.1, "m")
res = np.take(qf, ind)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
def test_qfloat_np_tile(self):
arr = np.array([0, 1, 2])
qf = QFloat(arr, arr * 0.1)
tile = 2
tgt = np.array([0, 1, 2, 0, 1, 2])
res = np.tile(qf, tile)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
tile = (2, 2)
tgt = np.array([[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]])
res = np.tile(qf, tile)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
# More checking
arr = np.array([[1, 2], [3, 4]])
qf = QFloat(arr, arr * 0.1)
tile = 2
tgt = np.array([[1, 2, 1, 2], [3, 4, 3, 4]])
res = np.tile(qf, tile)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
tile = (2, 1)
tgt = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
res = np.tile(qf, tile)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
def test_qfloat_np_transpose(self):
arr = np.array([[1, 2], [3, 4], [5, 6]])
tgt = np.array([[1, 3, 5], [2, 4, 6]])
qf = QFloat(arr, arr * 0.1, "m")
res = np.transpose(qf)
assert_almost_equal(res.nominal, tgt)
assert_almost_equal(res.std_dev, tgt * 0.1)
assert_equal(res.unit, qf.unit)
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_trunc(self):
raise NotImplementedError
def test_qfloat_np_var(self):
qf = QFloat(np.arange(10), uncertainty=np.arange(10)*0.1)
assert_almost_equal(np.var(qf), 8.25)
# the per-element uncertainties do not affect the sample variance
qf = QFloat(np.arange(10), uncertainty=np.arange(10))
assert_almost_equal(np.var(qf), 8.25)
class TestQFloatNumpyUfuncs:
"""Test numpy array functions for numpy comatibility."""
@pytest.mark.parametrize('func', [np.abs, np.absolute])
def test_qfloat_np_absolute(self, func):
qf1 = QFloat(1.0, 0.1, 'm')
qf2 = QFloat(-1.0, 0.1, 'm')
qf3 = QFloat(-5.0, 0.1)
qf4 = QFloat(-6)
qf5 = QFloat([1, -1, 2, -2])
assert_equal(func(qf1), QFloat(1.0, 0.1, 'm'))
assert_equal(func(qf2), QFloat(1.0, 0.1, 'm'))
assert_equal(func(qf3), QFloat(5.0, 0.1))
assert_equal(func(qf4), QFloat(6))
assert_equal(func(qf5), [1, 1, 2, 2])
with pytest.raises(NotImplementedError):
# out argument should fail
func(qf1, out=[])
def test_qfloat_np_add(self):
qf1 = QFloat(2.0, 0.2, 'm')
qf2 = QFloat(1.0, 0.1, 'm')
qf3 = QFloat([1, 2, 3], [0.1, 0.2, 0.3], 'm')
qf4 = QFloat(1.0, 0.1, 's')
qf5 = QFloat(1.0)
res = np.add(qf1, qf2)
assert_equal(res.nominal, 3.0)
assert_almost_equal(res.std_dev, 0.223606797749979)
assert_equal(res.unit, units.Unit('m'))
res = np.add(qf1, qf3)
assert_equal(res.nominal, [3, 4, 5])
assert_almost_equal(res.std_dev, [0.2236068, 0.28284271, 0.36055513])
assert_equal(res.unit, units.Unit('m'))
res = np.add(qf3, qf1)
assert_equal(res.nominal, [3, 4, 5])
assert_almost_equal(res.std_dev, [0.2236068, 0.28284271, 0.36055513])
assert_equal(res.unit, units.Unit('m'))
with pytest.raises(UnitsError):
np.add(qf1, qf4)
with pytest.raises(UnitsError):
np.add(qf1, qf5)
with pytest.raises(UnitsError):
np.add(qf1, 1.0)
with pytest.raises(NotImplementedError):
# out argument should fail
np.add(qf1, qf2, out=[])
def test_qfloat_np_ceil(self):
assert_equal(np.ceil(QFloat(1.5, 0.1, 'm')), QFloat(2.0, 0.0, 'm'))
assert_equal(np.ceil(QFloat(-1.5, 0.1, 'm')), QFloat(-1.0, 0.0, 'm'))
assert_equal(np.ceil(QFloat(0.2, 0.1, 'm')), QFloat(1.0, 0.0, 'm'))
assert_equal(np.ceil(QFloat(-0.2, 0.1, 'm')), QFloat(0.0, 0.0, 'm'))
@pytest.mark.parametrize('func', [np.divide, np.true_divide])
def test_qfloat_np_divide(self, func):
qf1 = QFloat(2.0, 0.2, 'm')
qf2 = QFloat(1.0, 0.1, 'm')
qf3 = QFloat([1, 2, 4], [0.1, 0.2, 0.4], 'cm')
qf4 = QFloat(1.0, 0.1, 's')
qf5 = QFloat(1.0)
res = func(qf1, qf2)
assert_equal(res.nominal, 2)
assert_almost_equal(res.std_dev, 0.28284271)
assert_equal(res.unit, units.dimensionless_unscaled)
res = func(qf1, qf3)
assert_equal(res.nominal, [2, 1, 0.5])
assert_almost_equal(res.std_dev, [0.28284271, 0.14142136, 0.07071068])
assert_equal(res.unit, units.Unit('m/cm'))
res = func(qf3, qf1)
assert_equal(res.nominal, [0.5, 1, 2])
assert_almost_equal(res.std_dev, [0.0707107, 0.1414214, 0.2828427])
assert_equal(res.unit, units.Unit('cm/m'))
res = func(qf1, qf4)
assert_equal(res.nominal, 2.0)
assert_almost_equal(res.std_dev, 0.28284271247461906)
assert_equal(res.unit, units.Unit('m/s'))
res = func(qf1, qf5)
assert_equal(res.nominal, 2.0)
assert_almost_equal(res.std_dev, 0.2)
assert_equal(res.unit, units.Unit('m'))
with pytest.raises(NotImplementedError):
# out argument should fail
func(qf1, qf2, out=[])
def test_qfloat_np_divmod(self):
qf1 = QFloat(2.0, 0.2, 'm')
qf2 = QFloat(1.0, 0.1, 'm')
qf3 = QFloat([1, 2, 4], [0.1, 0.2, 0.4], 'cm')
qf4 = QFloat(1.0, 0.1, 's')
res = np.divmod(qf1, qf2)
assert_equal(res[0], np.floor_divide(qf1, qf2))
assert_equal(res[1], np.remainder(qf1, qf2))
res = np.divmod(qf1, qf3)
assert_equal(res[0], np.floor_divide(qf1, qf3))
assert_equal(res[1], np.remainder(qf1, qf3))
res = np.divmod(qf3, qf1)
assert_equal(res[0], np.floor_divide(qf3, qf1))
assert_equal(res[1], np.remainder(qf3, qf1))
res = np.divmod(qf1, qf4)
assert_equal(res[0], np.floor_divide(qf1, qf4))
assert_equal(res[1], np.remainder(qf1, qf4))
def test_qfloat_np_exp(self):
qf1 = QFloat(2.0, 0.2)
qf2 = QFloat(1.0, 0.1)
qf3 = QFloat([1, 2, 4], [0.1, 0.2, 0.4])
qf4 = QFloat(1.0, 0.1, 's')
qf5 = QFloat(1.0)
res = np.exp(qf1)
assert_equal(res.nominal, np.exp(2.0))
assert_almost_equal(res.std_dev, 0.2*np.exp(2.0))
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.exp(qf2)
assert_equal(res.nominal, np.exp(1.0))
assert_almost_equal(res.std_dev, 0.1*np.exp(1.0))
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.exp(qf3)
assert_equal(res.nominal, np.exp(qf3.nominal))
assert_almost_equal(res.std_dev, np.exp(qf3.nominal)*qf3.std_dev)
assert_equal(res.unit, units.dimensionless_unscaled)
with pytest.raises(UnitsError, match='log is only defined for '
'dimensionless quantities.'):
res = np.log(qf4)
res = np.exp(qf5)
assert_equal(res.nominal, np.exp(1.0))
assert_almost_equal(res.std_dev, 0.0)
assert_equal(res.unit, units.dimensionless_unscaled)
def test_qfloat_np_exp2(self):
qf1 = QFloat(2.0, 0.2)
qf2 = QFloat(1.0, 0.1)
qf3 = QFloat([1, 2, 4], [0.1, 0.2, 0.4])
qf4 = QFloat(1.0, 0.1, 's')
qf5 = QFloat(1.0)
res = np.exp2(qf1)
assert_equal(res.nominal, np.exp2(2.0))
assert_almost_equal(res.std_dev, 0.2*np.exp2(2.0)*np.log(2))
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.exp2(qf2)
assert_equal(res.nominal, np.exp2(1.0))
assert_almost_equal(res.std_dev, 0.1*np.exp2(1.0)*np.log(2))
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.exp2(qf3)
assert_equal(res.nominal, np.exp2(qf3.nominal))
assert_almost_equal(res.std_dev,
np.exp2(qf3.nominal)*qf3.std_dev*np.log(2))
assert_equal(res.unit, units.dimensionless_unscaled)
with pytest.raises(UnitsError, match='exp2 is only defined for '
'dimensionless quantities.'):
res = np.exp2(qf4)
res = np.exp2(qf5)
assert_equal(res.nominal, np.exp2(1.0))
assert_almost_equal(res.std_dev, 0.0)
assert_equal(res.unit, units.dimensionless_unscaled)
def test_qfloat_np_expm1(self):
qf1 = QFloat(2.0, 0.2)
qf2 = QFloat(1.0, 0.1)
qf3 = QFloat([1, 2, 4], [0.1, 0.2, 0.4])
qf4 = QFloat(1.0, 0.1, 's')
qf5 = QFloat(1.0)
res = np.expm1(qf1)
assert_equal(res.nominal, np.expm1(2.0))
assert_almost_equal(res.std_dev, 0.2*np.exp(2.0))
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.expm1(qf2)
assert_equal(res.nominal, np.expm1(1.0))
assert_almost_equal(res.std_dev, 0.1*np.exp(1.0))
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.expm1(qf3)
assert_equal(res.nominal, np.expm1(qf3.nominal))
assert_almost_equal(res.std_dev, np.exp(qf3.nominal)*qf3.std_dev)
assert_equal(res.unit, units.dimensionless_unscaled)
with pytest.raises(UnitsError, match='log is only defined for '
'dimensionless quantities.'):
res = np.log(qf4)
res = np.expm1(qf5)
assert_equal(res.nominal, np.expm1(1.0))
assert_almost_equal(res.std_dev, 0.0)
assert_equal(res.unit, units.dimensionless_unscaled)
def test_qfloat_np_floor(self):
assert_equal(np.floor(QFloat(1.5, 0.1, 'm')), QFloat(1.0, 0.0, 'm'))
assert_equal(np.floor(QFloat(-1.5, 0.1, 'm')), QFloat(-2.0, 0.0, 'm'))
assert_equal(np.floor(QFloat(0.2, 0.1, 'm')), QFloat(0.0, 0.0, 'm'))
assert_equal(np.floor(QFloat(-0.2, 0.1, 'm')), QFloat(-1.0, 0.0, 'm'))
def test_qfloat_np_floor_divide(self):
qf1 = QFloat(2.0, 0.2, 'm')
qf2 = QFloat(1.0, 0.1, 'm')
qf3 = QFloat([1, 2, 4], [0.1, 0.2, 0.4], 'cm')
qf4 = QFloat(1.0, 0.1, 's')
qf5 = QFloat(1.0)
res = np.floor_divide(qf1, qf2)
assert_equal(res.nominal, 2)
assert_almost_equal(res.std_dev, 0)
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.floor_divide(qf1, qf3)
assert_equal(res.nominal, [2, 1, 0])
assert_almost_equal(res.std_dev, [0, 0, 0])
assert_equal(res.unit, units.Unit('m/cm'))
res = np.floor_divide(qf3, qf1)
assert_equal(res.nominal, [0, 1, 2])
assert_almost_equal(res.std_dev, [0, 0, 0])
assert_equal(res.unit, units.Unit('cm/m'))
res = np.floor_divide(qf1, qf4)
assert_equal(res.nominal, 2)
assert_almost_equal(res.std_dev, 0)
assert_equal(res.unit, units.Unit('m/s'))
res = np.floor_divide(qf1, qf5)
assert_equal(res.nominal, 2)
assert_almost_equal(res.std_dev, 0)
assert_equal(res.unit, units.Unit('m'))
with pytest.raises(NotImplementedError):
# out argument should fail
np.floor_divide(qf1, qf2, out=[])
def test_qfloat_np_hypot(self):
qf1 = QFloat(3, 0.3, 'm')
qf2 = QFloat(4, 0.4, 'm')
qf3 = QFloat(3*np.ones((5, 5)), unit='m')
qf4 = QFloat(4*np.ones((5, 5)), unit='m')
res = np.hypot(qf1, qf2)
assert_equal(res.nominal, 5)
assert_almost_equal(res.std_dev, 0.36715119501371646)
assert_equal(res.unit, units.Unit('m'))
res = np.hypot(qf3, qf4)
assert_equal(res.nominal, 5*np.ones((5, 5)))
assert_almost_equal(res.std_dev, np.zeros((5, 5)))
assert_equal(res.unit, units.Unit('m'))
res = np.hypot(qf1, qf4)
assert_equal(res.nominal, 5*np.ones((5, 5)))
assert_almost_equal(res.std_dev, 0.18*np.ones((5, 5)))
assert_equal(res.unit, units.Unit('m'))
with pytest.raises(UnitsError):
np.hypot(qf1, 1)
with pytest.raises(UnitsError):
np.hypot(qf1, QFloat(1, unit='s'))
with pytest.raises(NotImplementedError):
# out argument should fail
np.multiply(qf1, qf2, out=[])
def test_qfloat_np_isfinite(self):
assert_false(np.isfinite(QFloat(np.inf)))
assert_false(np.isfinite(QFloat(np.nan)))
assert_true(np.isfinite(QFloat(1)))
assert_true(np.isfinite(QFloat(1, unit='m')))
assert_true(np.isfinite(QFloat(1, 0.1, unit='m/s')))
assert_equal(np.isfinite(QFloat([np.inf, np.nan, 1.0], unit='m')),
[False, False, True])
def test_qfloat_np_isinf(self):
assert_true(np.isinf(QFloat(np.inf)))
assert_false(np.isinf(QFloat(np.nan)))
assert_false(np.isinf(QFloat(1)))
assert_false(np.isinf(QFloat(1, unit='m')))
assert_false(np.isinf(QFloat(1, 0.1, unit='m/s')))
assert_equal(np.isinf(QFloat([np.inf, np.nan, 1.0], unit='m')),
[True, False, False])
def test_qfloat_np_isnan(self):
assert_false(np.isnan(QFloat(np.inf)))
assert_true(np.isnan(QFloat(np.nan)))
assert_false(np.isnan(QFloat(1)))
assert_false(np.isnan(QFloat(1, unit='m')))
assert_false(np.isnan(QFloat(1, 0.1, unit='m/s')))
assert_equal(np.isnan(QFloat([np.inf, np.nan, 1.0], unit='m')),
[False, True, False])
def test_qfloat_np_log(self):
qf1 = QFloat(2.0, 0.2)
qf2 = QFloat(1.0, 0.1)
qf3 = QFloat([1, 2, 4], [0.1, 0.2, 0.4])
qf4 = QFloat(1.0, 0.1, 's')
qf5 = QFloat(1.0)
res = np.log(qf1)
assert_equal(res.nominal, np.log(2.0))
assert_almost_equal(res.std_dev, 0.2/2.0)
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.log(qf2)
assert_equal(res.nominal, np.log(1.0))
assert_almost_equal(res.std_dev, 0.1/1.0)
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.log(qf3)
assert_equal(res.nominal, np.log([1, 2, 4]))
assert_almost_equal(res.std_dev,
qf3.std_dev/qf3.nominal)
assert_equal(res.unit, units.dimensionless_unscaled)
with pytest.raises(UnitsError, match='log is only defined for '
'dimensionless quantities.'):
res = np.log(qf4)
res = np.log(qf5)
assert_equal(res.nominal, np.log(1.0))
assert_almost_equal(res.std_dev, 0.0)
assert_equal(res.unit, units.dimensionless_unscaled)
def test_qfloat_np_log2(self):
qf1 = QFloat(2.0, 0.2)
qf2 = QFloat(1.0, 0.1)
qf3 = QFloat([1, 2, 4], [0.1, 0.2, 0.4])
qf4 = QFloat(1.0, 0.1, 's')
qf5 = QFloat(1.0)
res = np.log2(qf1)
assert_equal(res.nominal, np.log2(2.0))
assert_almost_equal(res.std_dev, 0.2/(2.0*np.log(2)))
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.log2(qf2)
assert_equal(res.nominal, np.log2(1.0))
assert_almost_equal(res.std_dev, 0.1/np.log(2))
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.log2(qf3)
assert_equal(res.nominal, np.log2([1, 2, 4]))
assert_almost_equal(res.std_dev,
qf3.std_dev/(qf3.nominal*np.log(2)))
assert_equal(res.unit, units.dimensionless_unscaled)
with pytest.raises(UnitsError, match='log2 is only defined for '
'dimensionless quantities.'):
res = np.log2(qf4)
res = np.log2(qf5)
assert_equal(res.nominal, np.log2(1.0))
assert_almost_equal(res.std_dev, 0.0)
assert_equal(res.unit, units.dimensionless_unscaled)
def test_qfloat_np_log10(self):
qf1 = QFloat(2.0, 0.2)
qf2 = QFloat(1.0, 0.1)
qf3 = QFloat([1, 2, 4], [0.1, 0.2, 0.4])
qf4 = QFloat(1.0, 0.1, 's')
qf5 = QFloat(1.0)
res = np.log10(qf1)
assert_equal(res.nominal, np.log10(2.0))
assert_almost_equal(res.std_dev, 0.2/(2.0*np.log(10)))
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.log10(qf2)
assert_equal(res.nominal, np.log10(1.0))
assert_almost_equal(res.std_dev, 0.1/np.log(10))
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.log10(qf3)
assert_equal(res.nominal, np.log10([1, 2, 4]))
assert_almost_equal(res.std_dev,
qf3.std_dev/(qf3.nominal*np.log(10)))
assert_equal(res.unit, units.dimensionless_unscaled)
with pytest.raises(UnitsError, match='log10 is only defined for '
'dimensionless quantities.'):
res = np.log10(qf4)
res = np.log10(qf5)
assert_equal(res.nominal, np.log10(1.0))
assert_almost_equal(res.std_dev, 0.0)
assert_equal(res.unit, units.dimensionless_unscaled)
def test_qfloat_np_log1p(self):
qf1 = QFloat(2.0, 0.2)
qf2 = QFloat(1.0, 0.1)
qf3 = QFloat([1, 2, 4], [0.1, 0.2, 0.4])
qf4 = QFloat(1.0, 0.1, 's')
qf5 = QFloat(1.0)
res = np.log(qf1)
assert_equal(res.nominal, np.log(2.0))
assert_almost_equal(res.std_dev, 0.2/2.0)
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.log(qf2)
assert_equal(res.nominal, np.log(1.0))
assert_almost_equal(res.std_dev, 0.1/1.0)
assert_equal(res.unit, units.dimensionless_unscaled)
res = np.log(qf3)
assert_equal(res.nominal, np.log([1, 2, 4]))
assert_almost_equal(res.std_dev,
qf3.std_dev/qf3.nominal)
assert_equal(res.unit, units.dimensionless_unscaled)
with pytest.raises(UnitsError, match='log is only defined for '
'dimensionless quantities.'):
res = np.log(qf4)
res = np.log(qf5)
assert_equal(res.nominal, np.log(1.0))
assert_almost_equal(res.std_dev, 0.0)
assert_equal(res.unit, units.dimensionless_unscaled)
def test_qfloat_np_multiply(self):
qf1 = QFloat(2.0, 0.2, 'm')
qf2 = QFloat(1.2, 0.1, 'm')
qf3 = QFloat([1, 2, 4], [0.1, 0.2, 0.4], 'cm')
res = np.multiply(qf1, 2)
assert_equal(res.nominal, 4)
assert_equal(res.std_dev, 0.4)
assert_equal(res.unit, units.Unit('m'))
res = np.multiply(qf1, qf2)
assert_equal(res.nominal, 2.4)
assert_almost_equal(res.std_dev, 0.3124099870362662)
assert_equal(res.unit, units.Unit('m2'))
res = np.multiply(qf1, qf3)
assert_equal(res.nominal, [2, 4, 8])
assert_almost_equal(res.std_dev, [0.28284271, 0.56568542, 1.13137085])
assert_equal(res.unit, units.Unit('m*cm'))
with pytest.raises(NotImplementedError):
# out argument should fail
np.multiply(qf1, qf2, out=[])
def test_qfloat_np_negative(self):
qf1 = QFloat(1.0, 0.1, 'm')
qf2 = QFloat(-1.0, 0.1, 'm')
qf3 = QFloat(-5.0, 0.1)
qf4 = QFloat(6)
qf5 = QFloat([1, -1, 2, -2])
assert_equal(np.negative(qf1), QFloat(-1.0, 0.1, 'm'))
assert_equal(np.negative(qf2), QFloat(1.0, 0.1, 'm'))
assert_equal(np.negative(qf3), QFloat(5.0, 0.1))
assert_equal(np.negative(qf4), QFloat(-6))
assert_equal(np.negative(qf5), QFloat([-1, 1, -2, 2]))
with pytest.raises(NotImplementedError):
# out argument should fail
np.negative(qf1, out=[])
def test_qfloat_np_positive(self):
qf1 = QFloat(1.0, 0.1, 'm')
qf2 = QFloat(-1.0, 0.1, 'm')
qf3 = QFloat(-5.0, 0.1)
qf4 = QFloat(6)
qf5 = QFloat([1, -1, 2, -2])
assert_equal(np.positive(qf1), QFloat(1.0, 0.1, 'm'))
assert_equal(np.positive(qf2), QFloat(-1.0, 0.1, 'm'))
assert_equal(np.positive(qf3), QFloat(-5.0, 0.1))
assert_equal(np.positive(qf4), QFloat(6))
assert_equal(np.positive(qf5), QFloat([1, -1, 2, -2]))
with pytest.raises(NotImplementedError):
# out argument should fail
np.positive(qf1, out=[])
@pytest.mark.parametrize('func', [np.power, np.float_power])
def test_qfloat_np_power(self, func):
qf1 = QFloat(2.0, 0.1, 'm')
qf2 = QFloat([2, 3, 4], [0.1, 0.2, 0.3], 'm')
qf3 = QFloat(2.0, 0.1)
qf4 = QFloat([2, 3, 4])
res = func(qf1, 2)
assert_equal(res.nominal, 4)
assert_equal(res.std_dev, 0.4)
assert_equal(res.unit, units.Unit('m2'))
res = func(qf1, 1.5)
assert_almost_equal(res.nominal, 2.8284271247461903)
assert_almost_equal(res.std_dev, 0.2121320343559643)
assert_equal(res.unit, units.Unit('m(3/2)'))
res = func(qf2, 2)
assert_equal(res.nominal, [4, 9, 16])
assert_almost_equal(res.std_dev, [0.4, 1.2, 2.4])
assert_equal(res.unit, units.Unit('m2'))
res = func(qf2, 1.5)
assert_almost_equal(res.nominal, [2.82842712, 5.19615242, 8])
assert_almost_equal(res.std_dev, [0.21213203, 0.51961524, 0.9])
assert_equal(res.unit, units.Unit('m(3/2)'))
res = func(qf1, qf3)
assert_equal(res.nominal, 4)
assert_almost_equal(res.std_dev, 0.4866954717550927)
assert_equal(res.unit, units.Unit('m2'))
with pytest.raises(ValueError):
func(qf1, qf4)
with pytest.raises(ValueError):
func(qf2, qf4)
with pytest.raises(ValueError):
func(qf4, qf1)
with pytest.raises(NotImplementedError):
# out argument should fail
func(qf1, 2, out=[])
@pytest.mark.parametrize('func', [np.mod, np.remainder])
def test_qfloat_np_remainder(self, func):
qf1 = QFloat(5.0, 0.1, 'm')
qf2 = QFloat(3.5, 0.1, 'm')
qf3 = QFloat(1.0, 0.1, 's')
qf4 = QFloat([1, 2, 3])
res = func(qf1, 2)
assert_equal(res.nominal, 1)
assert_equal(res.std_dev, 0.1)
assert_equal(res.unit, units.Unit('m'))
res = func(qf1, qf2)
assert_equal(res.nominal, 1.5)
assert_equal(res.std_dev, 0.14142135623730953)
assert_equal(res.unit, units.Unit('m'))
res = func(qf1, qf3)
assert_equal(res.nominal, 0)
assert_equal(res.std_dev, np.inf)
assert_equal(res.unit, units.Unit('m'))
res = func(qf1, qf4)
assert_equal(res.nominal, [0, 1, 2])
assert_equal(res.std_dev, [np.nan, 0.1, 0.1])
assert_equal(res.unit, units.Unit('m'))
res = func(qf4, 1.5)
assert_equal(res.nominal, [1, 0.5, 0])
assert_equal(res.std_dev, [0, 0, np.nan])
assert_equal(res.unit, units.Unit(''))
with pytest.raises(NotImplementedError):
# out argument should fail
func(qf1, qf2, out=[])
@pytest.mark.skip(reason="Not Implemented Yet")
def test_qfloat_np_rint(self):
raise NotImplementedError
def test_qfloat_np_sign(self):
assert_equal(np.sign(QFloat(1.5, 0.1, 'm')), 1)
assert_equal(np.sign(QFloat(-1.5, 0.1, 'm')), -1)
assert_equal(np.sign(QFloat(0.0, 0.1, 'm')), 0)
def test_qfloat_np_signbit(self):
assert_equal(np.sign(QFloat(1.5, 0.1, 'm')), 1)
assert_equal(np.sign(QFloat(-1.5, 0.1, 'm')), -1)
assert_equal(np.sign(QFloat(0.0, 0.1, 'm')), 0)
assert_equal(np.sign(QFloat(-0.0, 0.1, 'm')), -0)
def test_qfloat_np_sqrt(self):
qf1 = QFloat(4, 0.1, 'm2')
qf2 = QFloat([9, 100], [0.1, 0.1], 's2')
res = np.sqrt(qf1)
assert_equal(res.nominal, 2)
assert_equal(res.std_dev, 0.025)
assert_equal(res.unit, units.Unit('m'))
res = np.sqrt(qf2)
assert_equal(res.nominal, [3, 10])
assert_almost_equal(res.std_dev, [0.01666667, 0.005])
assert_equal(res.unit, units.Unit('s'))
with pytest.raises(NotImplementedError):
# out argument should fail
np.sqrt(qf1, out=[])
def test_qfloat_np_square(self):
qf1 = QFloat(2.0, 0.1, 'm')
qf2 = QFloat([1, 2, 3], [0.1, 0.2, 0.3], 'cm')
res = np.square(qf1)
assert_equal(res.nominal, 4)
assert_almost_equal(res.std_dev, 0.28284271247461906)
assert_equal(res.unit, units.Unit('m2'))
res = np.square(qf2)
assert_equal(res.nominal, [1, 4, 9])
assert_almost_equal(res.std_dev, [0.14142136, 0.56568542, 1.27279221])
assert_equal(res.unit, units.Unit('cm2'))
with pytest.raises(NotImplementedError):
# out argument should fail
np.square(qf1, out=[])
def test_qfloat_np_subtract(self):
qf1 = QFloat(2.0, 0.2, 'm')
qf2 = QFloat(1.0, 0.1, 'm')
qf3 = QFloat([1, 2, 3], [0.1, 0.2, 0.3], 'm')
qf4 = QFloat(1.0, 0.1, 's')
qf5 = QFloat(1.0)
res = np.subtract(qf1, qf2)
assert_equal(res.nominal, 1.0)
assert_almost_equal(res.std_dev, 0.223606797749979)
assert_equal(res.unit, units.Unit('m'))
res = np.subtract(qf1, qf3)
assert_equal(res.nominal, [1, 0, -1])
assert_almost_equal(res.std_dev, [0.2236068, 0.28284271, 0.36055513])
assert_equal(res.unit, units.Unit('m'))
res = np.subtract(qf3, qf1)
assert_equal(res.nominal, [-1, 0, 1])
assert_almost_equal(res.std_dev, [0.2236068, 0.28284271, 0.36055513])
assert_equal(res.unit, units.Unit('m'))
with pytest.raises(UnitsError):
np.subtract(qf1, qf4)
with pytest.raises(UnitsError):
np.subtract(qf1, qf5)
with pytest.raises(UnitsError):
np.subtract(qf1, 1.0)
with pytest.raises(NotImplementedError):
# out argument should fail
np.subtract(qf1, qf2, out=[])
def test_qfloat_np_trunc(self):
assert_equal(np.trunc(QFloat(1.5, 0.1, 'm')), QFloat(1, 0.0, 'm'))
assert_equal(np.trunc(QFloat(-1.5, 0.1, 'm')), QFloat(-1, 0.0, 'm'))
class TestQFloatNumpyUfuncTrigonometric:
"""Test the numpy trigonometric and inverse trigonometric functions."""
# Both radians and deg2rad must work in the same way
@pytest.mark.parametrize('func', [np.radians, np.deg2rad])
def test_qfloat_np_radians(self, func):
qf = QFloat(180, 0.1, 'degree')
res = func(qf)
assert_almost_equal(res.nominal, 3.141592653589793)
assert_almost_equal(res.std_dev, 0.001745329251994)
assert_equal(res.unit, units.Unit('rad'))
qf = QFloat(-180, 0.1, 'degree')
res = func(qf)
assert_almost_equal(res.nominal, -3.141592653589793)
assert_almost_equal(res.std_dev, 0.001745329251994)
assert_equal(res.unit, units.Unit('rad'))
qf = QFloat([0, 30, 45, 60, 90], [0.1, 0.2, 0.3, 0.4, 0.5], 'degree')
res = func(qf)
assert_almost_equal(res.nominal, [0, 0.52359878, 0.78539816,
1.04719755, 1.57079633])
assert_almost_equal(res.std_dev, [0.00174533, 0.00349066, 0.00523599,
0.00698132, 0.00872665])
assert_equal(res.unit, units.Unit('rad'))
# radian input should not change
qf = QFloat(1.0, 0.1, 'radian')
res = func(qf)
assert_equal(res.nominal, 1.0)
assert_equal(res.std_dev, 0.1)
assert_equal(res.unit, units.Unit('rad'))
# Invalid units
for unit in ('m', None, 'm/s'):
with pytest.raises(UnitsError):
func(QFloat(1.0, 0.1, unit))
with pytest.raises(NotImplementedError):
# out argument should fail
func(qf, out=[])
# Both degrees and rad2deg must work in the same way
@pytest.mark.parametrize('func', [np.degrees, np.rad2deg])
def test_qfloat_np_degrees(self, func):
qf = QFloat(np.pi, 0.05, 'radian')
res = func(qf)
assert_almost_equal(res.nominal, 180.0)
assert_almost_equal(res.std_dev, 2.8647889756541165)
assert_equal(res.unit, units.Unit('deg'))
qf = QFloat(-np.pi, 0.05, 'radian')
res = func(qf)
assert_almost_equal(res.nominal, -180.0)
assert_almost_equal(res.std_dev, 2.8647889756541165)
assert_equal(res.unit, units.Unit('deg'))
qf = QFloat([np.pi, np.pi/2, np.pi/4, np.pi/6],
[0.01, 0.02, 0.03, 0.04], 'rad')
res = func(qf)
assert_almost_equal(res.nominal, [180.0, 90.0, 45.0, 30.0])
assert_almost_equal(res.std_dev, [0.5729578, 1.14591559,
1.71887339, 2.29183118])
assert_equal(res.unit, units.Unit('deg'))
# degree input should not change
qf = QFloat(1.0, 0.1, 'deg')
res = func(qf)
assert_equal(res.nominal, 1.0)
assert_equal(res.std_dev, 0.1)
assert_equal(res.unit, units.Unit('deg'))
# Invalid units
for unit in ('m', None, 'm/s'):
with pytest.raises(UnitsError):
func(QFloat(1.0, 0.1, unit))
with pytest.raises(NotImplementedError):
# out argument should fail
func(qf, out=[])
def test_qfloat_np_sin(self):
qf = QFloat(np.pi, 0.05, 'radian')
res = np.sin(qf)
assert_almost_equal(res.nominal, 0.0)
assert_almost_equal(res.std_dev, 0.05)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat(90, 0.05, 'deg')
res = np.sin(qf)
assert_almost_equal(res.nominal, 1.0)
assert_almost_equal(res.std_dev, 0.0)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat([30, 45, 60], [0.1, 0.2, 0.3], 'deg')
res = np.sin(qf)
assert_almost_equal(res.nominal, [0.5, 0.70710678, 0.8660254])
assert_almost_equal(res.std_dev, [0.0015115, 0.00246827, 0.00261799])
assert_equal(res.unit, units.dimensionless_unscaled)
for unit in ['m', 'm/s', None]:
with pytest.raises(UnitsError):
np.sin(QFloat(1.0, unit=unit))
with pytest.raises(NotImplementedError):
# out argument should fail
np.sin(qf, out=[])
def test_qfloat_np_cos(self):
qf = QFloat(180, 0.05, 'deg')
res = np.cos(qf)
assert_almost_equal(res.nominal, -1.0)
assert_almost_equal(res.std_dev, 0.0)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat(np.pi/2, 0.05, 'rad')
res = np.cos(qf)
assert_almost_equal(res.nominal, 0.0)
assert_almost_equal(res.std_dev, 0.05)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat([30, 45, 60], [0.1, 0.2, 0.3], 'deg')
res = np.cos(qf)
assert_almost_equal(res.nominal, [0.8660254, 0.70710678, 0.5])
assert_almost_equal(res.std_dev, [0.00087266, 0.00246827, 0.0045345])
assert_equal(res.unit, units.dimensionless_unscaled)
for unit in ['m', 'm/s', None]:
with pytest.raises(UnitsError):
np.cos(QFloat(1.0, unit=unit))
with pytest.raises(NotImplementedError):
# out argument should fail
np.cos(qf, out=[])
def test_qfloat_np_tan(self):
qf = QFloat(45, 0.05, 'deg')
res = np.tan(qf)
assert_almost_equal(res.nominal, 1.0)
assert_almost_equal(res.std_dev, 0.0017453292519943294)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat(np.pi/4, 0.05, 'rad')
res = np.tan(qf)
assert_almost_equal(res.nominal, 1.0)
assert_almost_equal(res.std_dev, 0.1)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat([0, 30, 60], [0.1, 0.2, 0.3], 'deg')
res = np.tan(qf)
assert_almost_equal(res.nominal, [0, 0.57735027, 1.73205081])
assert_almost_equal(res.std_dev, [0.00174533, 0.00465421, 0.02094395])
assert_equal(res.unit, units.dimensionless_unscaled)
for unit in ['m', 'm/s', None]:
with pytest.raises(UnitsError):
np.tan(QFloat(1.0, unit=unit))
with pytest.raises(NotImplementedError):
# out argument should fail
np.tan(qf, out=[])
def test_qfloat_np_sinh(self):
qf = QFloat(0, 0.05, 'radian')
res = np.sinh(qf)
assert_almost_equal(res.nominal, 0.0)
assert_almost_equal(res.std_dev, 0.05)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat(np.pi, 0.05, 'radian')
res = np.sinh(qf)
assert_almost_equal(res.nominal, 11.548739357257748)
assert_almost_equal(res.std_dev, 0.5795976637760759)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat(90, 0.05, 'deg')
res = np.sinh(qf)
assert_almost_equal(res.nominal, 2.3012989023072947)
assert_almost_equal(res.std_dev, 0.002189671298638268)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat([30, 45, 60], [0.1, 0.2, 0.3], 'deg')
res = np.sinh(qf)
assert_almost_equal(res.nominal, [0.5478535, 0.86867096, 1.24936705])
assert_almost_equal(res.std_dev, [0.0019901, 0.0046238, 0.0083791])
assert_equal(res.unit, units.dimensionless_unscaled)
for unit in ['m', 'm/s', None]:
with pytest.raises(UnitsError):
np.sinh(QFloat(1.0, unit=unit))
with pytest.raises(NotImplementedError):
# out argument should fail
np.sinh(qf, out=[])
def test_qfloat_np_cosh(self):
qf = QFloat(0, 0.05, 'radian')
res = np.cosh(qf)
assert_almost_equal(res.nominal, 1.0)
assert_almost_equal(res.std_dev, 0.0)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat(np.pi, 0.05, 'radian')
res = np.cosh(qf)
assert_almost_equal(res.nominal, 11.591953275521519)
assert_almost_equal(res.std_dev, 0.5774369678628875)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat(90, 0.05, 'deg')
res = np.cosh(qf)
assert_almost_equal(res.nominal, 2.5091784786580567)
assert_almost_equal(res.std_dev, 0.0020082621458896)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat([30, 45, 60], [0.1, 0.2, 0.3], 'deg')
res = np.cosh(qf)
assert_almost_equal(res.nominal, [1.14023832, 1.32460909, 1.60028686])
assert_almost_equal(res.std_dev, [0.00095618, 0.00303223, 0.00654167])
assert_equal(res.unit, units.dimensionless_unscaled)
for unit in ['m', 'm/s', None]:
with pytest.raises(UnitsError):
np.cosh(QFloat(1.0, unit=unit))
with pytest.raises(NotImplementedError):
# out argument should fail
np.cosh(qf, out=[])
def test_qfloat_np_tanh(self):
qf = QFloat(0, 0.05, 'radian')
res = np.tanh(qf)
assert_almost_equal(res.nominal, 0.0)
assert_almost_equal(res.std_dev, 0.05)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat(np.pi, 0.05, 'radian')
res = np.tanh(qf)
assert_almost_equal(res.nominal, 0.99627207622075)
assert_almost_equal(res.std_dev, 0.00037209750714)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat(90, 0.05, 'deg')
res = np.tanh(qf)
assert_almost_equal(res.nominal, 0.9171523356672744)
assert_almost_equal(res.std_dev, 0.0001386067128590)
assert_equal(res.unit, units.dimensionless_unscaled)
qf = QFloat([30, 45, 60], [0.1, 0.2, 0.3], 'deg')
res = np.tanh(qf)
assert_almost_equal(res.nominal, [0.48047278, 0.6557942, 0.78071444])
assert_almost_equal(res.std_dev, [0.00134241, 0.00198944, 0.00204457])
assert_equal(res.unit, units.dimensionless_unscaled)
for unit in ['m', 'm/s', None]:
with pytest.raises(UnitsError):
np.tanh(QFloat(1.0, unit=unit))
with pytest.raises(NotImplementedError):
# out argument should fail
np.tanh(qf, out=[])
def test_qfloat_np_arcsin(self):
qf = QFloat(np.sqrt(2)/2, 0.01)
res = np.arcsin(qf)
assert_almost_equal(res.nominal, 0.7853981633974484)
assert_almost_equal(res.std_dev, 0.0141421356237309)
assert_equal(res.unit, units.Unit('rad'))
qf = QFloat([0, 0.5, 1], [0.01, 0.2, 0.3])
res = np.arcsin(qf)
assert_almost_equal(res.nominal, [0, 0.52359878, 1.57079633])
assert_almost_equal(res.std_dev, [0.01, 0.23094011, np.inf])
assert_equal(res.unit, units.Unit('rad'))
# Invalid units
for unit in ['m', 'm/s', 'rad', 'deg']:
with pytest.raises(UnitsError):
np.arcsin(QFloat(1.0, unit=unit))
with pytest.raises(NotImplementedError):
# out argument should fail
np.arcsin(qf, out=[])
def test_qfloat_np_arccos(self):
qf = QFloat(np.sqrt(2)/2, 0.01)
res = np.arccos(qf)
assert_almost_equal(res.nominal, 0.7853981633974484)
assert_almost_equal(res.std_dev, 0.0141421356237309)
assert_equal(res.unit, units.Unit('rad'))
qf = QFloat([0, 0.5, 1], [0.01, 0.2, 0.3])
res = np.arccos(qf)
assert_almost_equal(res.nominal, [1.57079633, 1.04719755, 0])
assert_almost_equal(res.std_dev, [0.01, 0.23094011, np.inf])
assert_equal(res.unit, units.Unit('rad'))
# Invalid units
for unit in ['m', 'm/s', 'rad', 'deg']:
with pytest.raises(UnitsError):
np.arccos(QFloat(1.0, unit=unit))
with pytest.raises(NotImplementedError):
# out argument should fail
np.arccos(qf, out=[])
def test_qfloat_np_arctan(self):
qf = QFloat(1.0, 0.01)
res = np.arctan(qf)
assert_almost_equal(res.nominal, 0.7853981633974484)
assert_almost_equal(res.std_dev, 0.005)
assert_equal(res.unit, units.Unit('rad'))
qf = QFloat([0, 0.5, 1], [0.01, 0.2, 0.3])
res = np.arctan(qf)
assert_almost_equal(res.nominal, [0, 0.4636476, 0.7853982])
assert_almost_equal(res.std_dev, [0.01, 0.16, 0.15])
assert_equal(res.unit, units.Unit('rad'))
# Invalid units
for unit in ['m', 'm/s', 'rad', 'deg']:
with pytest.raises(UnitsError):
np.arctan(QFloat(1.0, unit=unit))
with pytest.raises(NotImplementedError):
# out argument should fail
np.arctan(qf, out=[])
def test_qfloat_np_arcsinh(self):
qf = QFloat(0.0, 0.01)
res = np.arcsinh(qf)
assert_almost_equal(res.nominal, 0.0)
assert_almost_equal(res.std_dev, 0.01)
assert_equal(res.unit, units.Unit('rad'))
qf = QFloat([0.5, 1.0, 10], [0.01, 0.2, 0.3])
res = np.arcsinh(qf)
assert_almost_equal(res.nominal, [0.4812118, 0.8813736, 2.998223])
assert_almost_equal(res.std_dev, [0.0089443, 0.1414214, 0.0298511])
assert_equal(res.unit, units.Unit('rad'))
# Invalid units
for unit in ['m', 'm/s', 'rad', 'deg']:
with pytest.raises(UnitsError):
np.arcsinh(QFloat(1.0, unit=unit))
with pytest.raises(NotImplementedError):
# out argument should fail
np.arcsinh(qf, out=[])
def test_qfloat_np_arccosh(self):
qf = QFloat(1.0, 0.01)
res = np.arccosh(qf)
assert_almost_equal(res.nominal, 0.0)
# assert_almost_equal(res.std_dev, np.inf)
assert_equal(res.unit, units.Unit('rad'))
qf = QFloat([1.5, 5.0, 10], [0.01, 0.2, 0.3])
res = np.arccosh(qf)
assert_almost_equal(res.nominal, [0.9624237, 2.2924317, 2.9932228])
assert_almost_equal(res.std_dev, [0.0089443, 0.0408248, 0.0301511])
assert_equal(res.unit, units.Unit('rad'))
# Invalid units
for unit in ['m', 'm/s', 'rad', 'deg']:
with pytest.raises(UnitsError):
np.arccosh(QFloat(1.0, unit=unit))
with pytest.raises(NotImplementedError):
# out argument should fail
np.arccosh(qf, out=[])
def test_qfloat_np_arctanh(self):
qf = QFloat(0.0, 0.01)
res = np.arctanh(qf)
assert_almost_equal(res.nominal, 0.0)
assert_almost_equal(res.std_dev, 0.01)
assert_equal(res.unit, units.Unit('rad'))
qf = QFloat([0.1, 0.5, 1.0], [0.01, 0.2, 0.3])
res = np.arctanh(qf)
assert_almost_equal(res.nominal, [0.1003353, 0.5493061, np.inf])
assert_almost_equal(res.std_dev, [0.010101, 0.2666667, np.inf])
assert_equal(res.unit, units.Unit('rad'))
# Invalid units
for unit in ['m', 'm/s', 'rad', 'deg']:
with pytest.raises(UnitsError):
np.arctanh(QFloat(1.0, unit=unit))
with pytest.raises(NotImplementedError):
# out argument should fail
np.arctanh(qf, out=[])
def test_qfloat_np_arctan2(self):
qf1 = QFloat(1.0, 0.01)
qf2 = QFloat(0.0, 0.01)
res = np.arctan2(qf1, qf2)
assert_almost_equal(res.nominal, 1.57079633)
assert_almost_equal(res.std_dev, 0.01)
assert_equal(res.unit, units.Unit('rad'))
qf1 = QFloat([0.5, 1.0, 10], [0.01, 0.2, 0.3])
qf2 = QFloat([0.1, 0.5, 1.0], [0.01, 0.2, 0.3])
res = np.arctan2(qf1, qf2)
assert_almost_equal(res.nominal, [1.373401, 1.107149, 1.471128])
assert_almost_equal(res.std_dev, [0.019612, 0.178885, 0.029851])
assert_equal(res.unit, units.Unit('rad'))
|
juliotuxREPO_NAMEastropopPATH_START.@astropop_extracted@astropop-main@tests@[email protected]_END.py
|
{
"filename": "test_logger.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/crossbar/test/test_logger.py",
"type": "Python"
}
|
#####################################################################################
#
# Copyright (c) typedef int GmbH
# SPDX-License-Identifier: EUPL-1.2
#
#####################################################################################
import json
from io import StringIO as NativeStringIO
from io import StringIO
from mock import Mock
from twisted.logger import formatTime
from twisted.python.failure import Failure
from crossbar.test import TestCase
from crossbar._logging import (LogCapturer, make_stdout_observer, make_JSON_observer, record_separator,
make_stderr_observer)
from txaio import make_logger, get_global_log_level, set_global_log_level
from txaio.tx import Logger, LogLevel
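# Passing ``logger=Mock`` below gives each Logger a Mock child logger, so the
# tests can inspect what was emitted via ``log._logger.emit`` (and
# ``log._logger.failure``) without wiring up the real logging machinery.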
_log = make_logger("info", logger=Mock)
def _makelog():
log = make_logger("info", logger=Mock)
return log
class _InitLoggerMaker(object):
def __init__(self):
self.log = make_logger("info", logger=Mock)
class _ClassDefLoggerMaker(object):
log = make_logger("info", logger=Mock)
class LoggerModuleTests(TestCase):
def setUp(self):
self.existing_level = get_global_log_level()
return super(LoggerModuleTests, self).setUp()
def tearDown(self):
set_global_log_level(self.existing_level)
def test_set_global(self):
"""
Setting the global log level via the function changes it.
"""
set_global_log_level("warn")
self.assertEqual(get_global_log_level(), "warn")
def test_set_global_changes_loggers(self):
"""
Setting the global log level changes the level of all loggers that were
not instantiated with a level.
"""
log = make_logger()
self.assertEqual(log._log_level, "info")
set_global_log_level("warn")
self.assertEqual(log._log_level, "warn")
def test_set_global_does_not_change_explicit_loggers(self):
"""
Setting the global log level does not change loggers that have an
explicit level set.
"""
log = make_logger("info")
self.assertEqual(log._log_level, "info")
set_global_log_level("warn")
self.assertEqual(log._log_level, "info")
class CrossbarLoggerTests(TestCase):
def test_disallow_direct_instantiation(self):
"""
The developer shouldn't call Logger directly, but use
make_logger.
"""
with self.assertRaises(AssertionError):
Logger("warn")
def test_set_level(self):
"""
The log level needs to be one of the accepted log levels.
"""
with self.assertRaises(ValueError):
make_logger("not a suitable level")
def test_logger_emits(self):
"""
A Logger emits messages through to its child logger.
"""
log = make_logger("trace", logger=Mock)
log.error("Foo happened!!!")
log._logger.emit.assert_called_with(LogLevel.error, "Foo happened!!!")
log.warn("Stuff", foo="bar")
log._logger.emit.assert_called_with(LogLevel.warn, "Stuff", foo="bar")
log.trace("Stuff that's trace", foo="bar")
log._logger.emit.assert_called_with(LogLevel.debug, "Stuff that's trace", foo="bar", txaio_trace=1)
def test_logger_emits_if_higher(self):
"""
A Logger that has a log level of a higher severity will not emit
messages of a lower severity.
"""
log = make_logger("info", logger=Mock)
log.error("Error!")
log.debug("Debug!")
log.info("Info!")
log.trace("Trace!")
calls = {}
for x in log._logger.emit.call_args_list:
calls[x[0][0]] = calls.get(x[0][0], 0) + 1
self.assertEqual(calls.get(LogLevel.critical, 0), 0)
self.assertEqual(calls.get(LogLevel.error, 0), 1)
self.assertEqual(calls.get(LogLevel.warn, 0), 0)
self.assertEqual(calls.get(LogLevel.info, 0), 1)
self.assertEqual(calls.get(LogLevel.debug, 0), 0)
def test_logger_namespace_init(self):
"""
The namespace of the Logger is of the creator when using __init__.
"""
lm = _InitLoggerMaker()
self.assertEqual(lm.log._logger.namespace, "crossbar.test.test_logger._InitLoggerMaker")
def test_logger_namespace_classdef(self):
"""
The namespace of the Logger is of the creator when using it in a class
definition.
"""
lm = _ClassDefLoggerMaker()
self.assertEqual(lm.log._logger.namespace, "crossbar.test.test_logger._ClassDefLoggerMaker")
def test_logger_namespace_moduledef(self):
"""
The namespace of the Logger is the creator module when it is made in a
module.
"""
self.assertEqual(_log._logger.namespace, "crossbar.test.test_logger")
def test_logger_namespace_function(self):
"""
The namespace of the Logger is the creator function when it is made in
a function outside of a class.
"""
log = _makelog()
self.assertEqual(log._logger.namespace, "crossbar.test.test_logger._makelog")
def test_logger_failure(self):
"""
The failure method catches the in-flight exception.
"""
log = make_logger("info", logger=Mock)
try:
1 / 0
except:
log.failure("Failure happened!")
self.assertEqual(log._logger.failure.call_count, 1)
def test_logger_failure_not_called(self):
"""
The failure method isn't called under 'none'.
"""
log = make_logger("none", logger=Mock)
try:
1 / 0
except:
log.failure("Failure happened!")
self.assertEqual(log._logger.failure.call_count, 0)
class JSONObserverTests(TestCase):
def test_basic(self):
"""
The JSON observer outputs a stream of log events.
"""
stream = StringIO()
observer = make_JSON_observer(stream)
log = make_logger(observer=observer)
log.info("Hello")
result = stream.getvalue()
log_entry = json.loads(result[:-1])
self.assertEqual(result[-1], record_separator)
self.assertEqual(len(log_entry.keys()), 4)
self.assertEqual(log_entry["level"], "info")
self.assertEqual(log_entry["text"], "Hello")
def test_failure(self):
"""
Failures include the stacktrace.
"""
stream = StringIO()
observer = make_JSON_observer(stream)
log = make_logger(observer=observer)
try:
1 / 0
except:
log.failure("Oh no {0}".format("!"))
result = stream.getvalue()
log_entry = json.loads(result[:-1])
self.assertEqual(result[-1], record_separator)
self.assertEqual(len(log_entry.keys()), 4)
self.assertIn("ZeroDivisionError", log_entry["text"])
self.assertIn("Oh no !", log_entry["text"])
self.assertEqual(log_entry["level"], "critical")
def test_not_json_serialisable(self):
"""
Non-JSON-serialisable parameters are repr()'d.
"""
stream = StringIO()
observer = make_JSON_observer(stream)
log = make_logger(observer=observer)
try:
1 / 0
except:
log.failure("Oh no", obj=observer)
result = stream.getvalue()
log_entry = json.loads(result[:-1])
self.assertEqual(result[-1], record_separator)
self.assertEqual(len(log_entry.keys()), 5)
self.assertIn("ZeroDivisionError", log_entry["text"])
self.assertIn("Oh no", log_entry["text"])
self.assertIn("<function ", log_entry["obj"])
self.assertEqual(log_entry["level"], "critical")
def test_repr_formatting(self):
"""
Non-JSON-serialisable parameters are repr()'d, and any curly brackets
in the result are escaped.
"""
stream = StringIO()
observer = make_JSON_observer(stream)
log = make_logger(observer=observer)
class BracketThing(object):
def __repr__(self):
return "<BracketThing kwargs={}>"
log.info("hi {obj}", obj=BracketThing())
result = stream.getvalue()
log_entry = json.loads(result[:-1])
self.assertEqual(result[-1], record_separator)
self.assertEqual(len(log_entry.keys()), 5)
self.assertEqual("hi <BracketThing kwargs={{}}>", log_entry["text"])
self.assertEqual(log_entry["level"], "info")
def test_raising_during_encoding(self):
"""
Non-JSON-serialisable parameters are repr()'d, and if that's impossible
then the message is lost.
"""
stream = StringIO()
observer = make_JSON_observer(stream)
log = make_logger(observer=observer)
class BadThing(object):
def __repr__(self):
raise Exception()
log.info("hi {obj}", obj=BadThing())
result = stream.getvalue()
log_entry = json.loads(result[:-1])
self.assertEqual(result[-1], record_separator)
self.assertEqual(len(log_entry.keys()), 3)
self.assertIn("MESSAGE LOST", log_entry["text"])
self.assertEqual(log_entry["level"], "error")
def test_unicode_logs(self):
"""
Unicode is JSON serialised correctly.
"""
stream = StringIO()
observer = make_JSON_observer(stream)
log = make_logger(observer=observer)
try:
raise Exception("\u2603")
except:
log.failure("Oh no")
result = stream.getvalue()
log_entry = json.loads(result[:-1])
self.assertEqual(result[-1], record_separator)
self.assertEqual(len(log_entry.keys()), 4)
self.assertIn("\u2603", log_entry["text"])
self.assertEqual(log_entry["level"], "critical")
class StdoutObserverTests(TestCase):
def test_basic(self):
stream = NativeStringIO()
observer = make_stdout_observer(_file=stream)
log = make_logger(observer=observer)
log.info("Hi!", log_system="foo")
result = stream.getvalue()
self.assertIn("[foo]", result)
def test_output_standard(self):
"""
The output format is the time, the system in square brackets, and the
message.
"""
stream = NativeStringIO()
observer = make_stdout_observer(_file=stream, format="standard")
event = {
'log_level': LogLevel.info,
'log_namespace': 'crossbar.test.test_logger.StdoutObserverTests',
'log_source': None,
'log_format': 'Hi there!',
'log_system': 'foo',
'log_time': 1434099813.77449
}
observer(event)
result = stream.getvalue()
self.assertEqual(result[:-1], formatTime(event["log_time"]) + " [foo] Hi there!")
def test_output_syslogd(self):
"""
The syslogd output format is the system in square brackets, and the
message.
"""
stream = NativeStringIO()
observer = make_stdout_observer(_file=stream, format="syslogd")
event = {
'log_level': LogLevel.info,
'log_namespace': 'crossbar.test.test_logger.StdoutObserverTests',
'log_source': None,
'log_format': 'Hi there!',
'log_system': 'foo',
'log_time': 1434099813.77449
}
observer(event)
result = stream.getvalue()
self.assertEqual(result[:-1], "[foo] Hi there!")
def test_format_log_category(self):
"""
A log category in the event will mean the format is replaced with the
format string referencing it.
"""
stream = NativeStringIO()
observer = make_stdout_observer(_file=stream, format="syslogd")
event = {
'log_level': LogLevel.info,
'log_namespace': 'crossbar.test.test_logger.StdoutObserverTests',
'log_category': "DBG100",
'x': 'x~',
'y': 'z',
'z': 'a',
'log_source': None,
'log_system': 'foo',
'log_time': 1434099813.77449
}
observer(event)
result = stream.getvalue()
self.assertEqual(result[:-1], "[foo] DEBUG x~ z a")
class StderrObserverTests(TestCase):
def test_basic(self):
stream = NativeStringIO()
observer = make_stderr_observer(_file=stream)
log = make_logger(observer=observer)
log.error("Hi!", log_system="foo")
result = stream.getvalue()
self.assertIn("[foo]", result)
def test_output_standard(self):
"""
The output format is the time, the system in square brackets, and the
message.
"""
stream = NativeStringIO()
observer = make_stderr_observer(_file=stream, format="standard")
event = {
'log_level': LogLevel.error,
'log_namespace': 'crossbar.test.test_logger.StdoutObserverTests',
'log_source': None,
'log_format': 'Hi there!',
'log_system': 'foo',
'log_time': 1434099813.77449
}
observer(event)
result = stream.getvalue()
self.assertEqual(result[:-1], formatTime(event["log_time"]) + " [foo] Hi there!")
def test_output_syslogd(self):
"""
The syslogd output format is the system in square brackets, and the
message.
"""
stream = NativeStringIO()
observer = make_stderr_observer(_file=stream, format="syslogd")
event = {
'log_level': LogLevel.error,
'log_namespace': 'crossbar.test.test_logger.StdoutObserverTests',
'log_source': None,
'log_format': 'Hi there!',
'log_system': 'foo',
'log_time': 1434099813.77449
}
observer(event)
result = stream.getvalue()
self.assertEqual(result[:-1], "[foo] Hi there!")
def test_format_log_category(self):
"""
A log category in the event will mean the format is replaced with the
format string referencing it.
"""
stream = NativeStringIO()
observer = make_stderr_observer(_file=stream, format="syslogd")
event = {
'log_level': LogLevel.error,
'log_namespace': 'crossbar.test.test_logger.StdoutObserverTests',
'log_category': "DBG100",
'x': 'x~',
'y': 'z',
'z': 'a',
'log_source': None,
'log_system': 'foo',
'log_time': 1434099813.77449
}
observer(event)
result = stream.getvalue()
self.assertEqual(result[:-1], "[foo] DEBUG x~ z a")
def test_format_failure(self):
"""
A traceback will print.
"""
stream = NativeStringIO()
observer = make_stderr_observer(_file=stream, format="syslogd")
try:
raise ValueError("noooo {0}".format("!!"))
except:
err = Failure()
event = {
'log_level': LogLevel.error,
'log_namespace': 'crossbar.test.test_logger.StdoutObserverTests',
'log_format': None,
'log_source': None,
'log_failure': err,
'log_system': 'foo',
'log_time': 1434099813.77449
}
observer(event)
result = stream.getvalue()
self.assertIn("noooo {0}", result)
class LogCapturerTests(TestCase):
def test_capturer(self):
"""
The log capturer is a context manager that captures the logs emitted
inside it.
"""
log = make_logger("info")
with LogCapturer() as l:
log.info("Whee!", log_category="CB500", foo="bar")
self.assertEqual(len(l.get_category("CB500")), 1)
self.assertEqual(l.get_category("CB500")[0]["foo"], "bar")
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@crossbar@test@[email protected]_END.py
|
{
"filename": "_colorscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/marker/line/_colorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorscaleValidator(_plotly_utils.basevalidators.ColorscaleValidator):
def __init__(
self, plotly_name="colorscale", parent_name="scatter3d.marker.line", **kwargs
):
super(ColorscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"autocolorscale": False}),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter3d@marker@line@[email protected]_END.py
|
{
"filename": "ex_univar_kde.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/examples/ex_univar_kde.py",
"type": "Python"
}
|
"""
This example tests the nonparametric estimator
for several popular univariate distributions with different
bandwidth selection methods - CV-ML; CV-LS; Scott's rule of thumb.
Produces one plot for each of the six distributions:
1) Beta
2) f
3) Pareto
4) Laplace
5) Weibull
6) Poisson
"""
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
KDEMultivariate = sm.nonparametric.KDEMultivariate
np.random.seed(123456)
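# For every distribution below, KDEMultivariate is fitted with three bandwidth
# selectors and the estimates are plotted against the true pdf:
# 'normal_reference' (Scott's rule of thumb), 'cv_ls' (least-squares
# cross-validation) and 'cv_ml' (maximum-likelihood cross-validation).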
# Beta distribution
# Parameters
a = 2
b = 5
nobs = 250
support = np.random.beta(a, b, size=nobs)
rv = stats.beta(a, b)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(1)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Beta Distributed " \
"Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# f distribution
df = 100
dn = 100
nobs = 250
support = np.random.f(dn, df, size=nobs)
rv = stats.f(dn, df)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(2)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of f Distributed " \
"Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Pareto distribution
a = 2
nobs = 150
support = np.random.pareto(a, size=nobs)
rv = stats.pareto(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(3)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Pareto " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Laplace Distribution
mu = 0
s = 1
nobs = 250
support = np.random.laplace(mu, s, size=nobs)
rv = stats.laplace(mu, s)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(4)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Laplace " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Weibull Distribution
a = 1
nobs = 250
support = np.random.weibull(a, size=nobs)
rv = stats.weibull_min(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(5)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Weibull " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Poisson Distribution
a = 2
nobs = 250
support = np.random.poisson(a, size=nobs)
rv = stats.poisson(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='o', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='o', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='o', bw='cv_ml')
plt.figure(6)
plt.plot(support[ix], rv.pmf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Poisson " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
plt.show()
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@examples@[email protected]_END.py
|
{
"filename": "extract_message.py",
"repo_name": "PX4/pyulog",
"repo_path": "pyulog_extracted/pyulog-main/pyulog/extract_message.py",
"type": "Python"
}
|
"""
Extract values from a ULog file message to use in scripting
"""
import numpy as np
from .core import ULog
def extract_message(ulog_file_name: str, message: str,
time_s: "int | None" = None, time_e: "int | None" = None,
disable_str_exceptions: bool = False) -> list[dict]:
"""
Extract values from a ULog file
:param ulog_file_name: (str) The ULog filename to open and read
:param message: (str) A ULog message to return values from
:param time_s: (int) Offset time for conversion in seconds
:param time_e: (int) Limit until time for conversion in seconds
:return: (list[dict]) A list of each record from the ULog as key-value pairs
"""
if not isinstance(message, str):
raise AttributeError("Must provide a message to pull from ULog file")
ulog = ULog(ulog_file_name, message, disable_str_exceptions)
try:
data = ulog.get_dataset(message)
except Exception as exc:
raise AttributeError("Provided message is not in the ULog file") from exc
values = []
# use same field order as in the log, except for the timestamp
data_keys = [f.field_name for f in data.field_data]
data_keys.remove('timestamp')
data_keys.insert(0, 'timestamp') # we want timestamp at first position
#get the index for row where timestamp exceeds or equals the required value
time_s_i = np.where(data.data['timestamp'] >= time_s * 1e6)[0][0] \
if time_s else 0
#get the index for row upto the timestamp of the required value
time_e_i = np.where(data.data['timestamp'] >= time_e * 1e6)[0][0] \
if time_e else len(data.data['timestamp'])
# write the data
for i in range(time_s_i, time_e_i):
row = {}
for key in data_keys:
row[key] = data.data[key][i]
values.append(row)
return values
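# Illustrative usage sketch (not part of the original module): 'flight.ulg' and the
# message/field names below are hypothetical and depend on the log being read.
#
#     rows = extract_message('flight.ulg', 'vehicle_gps_position', time_s=10, time_e=20)
#     for row in rows:
#         print(row['timestamp'], row.get('lat'), row.get('lon'))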
|
PX4REPO_NAMEpyulogPATH_START.@pyulog_extracted@pyulog-main@pyulog@[email protected]_END.py
|
{
"filename": "followup.py",
"repo_name": "gwastro/pycbc",
"repo_path": "pycbc_extracted/pycbc-master/pycbc/results/followup.py",
"type": "Python"
}
|
# Copyright (C) 2014 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This module provides functions to generate followup plots and trigger
time series.
"""
import numpy, matplotlib
# Only if a backend is not already set ... This should really *not* be done
# here, but in the executables you should set matplotlib.use()
# This matches the check that matplotlib does internally, but this *may* be
# version dependent. If this is a problem then remove this and control from
# the executables directly.
import sys
if 'matplotlib.backends' not in sys.modules:
matplotlib.use('agg')
import pylab, mpld3, mpld3.plugins
from ligo.segments import segment
from pycbc.io.hdf import HFile
def columns_from_file_list(file_list, columns, ifo, start, end):
""" Return columns of information stored in single detector trigger
files.
Parameters
----------
file_list : list
The list of single detector trigger files.
columns : list of strings
The list of columns to read from the trigger files.
ifo : string
The ifo to return triggers for.
start : int
The start time to get triggers from
end : int
The end time to get triggers from
Returns
-------
trigger_dict : dict
A dictionary of column vectors with column names as keys.
"""
file_list = file_list.find_output_with_ifo(ifo)
file_list = file_list.find_all_output_in_range(ifo, segment(start, end))
trig_dict = {}
for trig_file in file_list:
f = HFile(trig_file.storage_path, 'r')
time = f['end_time'][:]
pick = numpy.logical_and(time < end, time > start)
pick_loc = numpy.where(pick)[0]
for col in columns:
if col not in trig_dict:
trig_dict[col] = []
trig_dict[col] = numpy.concatenate([trig_dict[col], f[col][:][pick_loc]])
return trig_dict
ifo_color = {'H1': 'blue', 'L1':'red', 'V1':'green'}
def coinc_timeseries_plot(coinc_file, start, end):
fig = pylab.figure()
f = HFile(coinc_file, 'r')
stat1 = f['foreground/stat1']
stat2 = f['foreground/stat2']
time1 = f['foreground/time1']
time2 = f['foreground/time2']
ifo1 = f.attrs['detector_1']
ifo2 = f.attrs['detector_2']
pylab.scatter(time1, stat1, label=ifo1, color=ifo_color[ifo1])
pylab.scatter(time2, stat2, label=ifo2, color=ifo_color[ifo2])
fmt = '.12g'
mpld3.plugins.connect(fig, mpld3.plugins.MousePosition(fmt=fmt))
pylab.legend()
pylab.xlabel('Time (s)')
pylab.ylabel('NewSNR')
pylab.grid()
return mpld3.fig_to_html(fig)
def trigger_timeseries_plot(file_list, ifos, start, end):
fig = pylab.figure()
for ifo in ifos:
trigs = columns_from_file_list(file_list,
['snr', 'end_time'],
ifo, start, end)
print(trigs)
pylab.scatter(trigs['end_time'], trigs['snr'], label=ifo,
color=ifo_color[ifo])
fmt = '.12g'
mpld3.plugins.connect(fig, mpld3.plugins.MousePosition(fmt=fmt))
pylab.legend()
pylab.xlabel('Time (s)')
pylab.ylabel('SNR')
pylab.grid()
return mpld3.fig_to_html(fig)
def times_to_urls(times, window, tag):
base = '/../followup/%s/%s/%s'
return times_to_links(times, window, tag, base=base)
def times_to_links(times, window, tag, base=None):
if base is None:
base = "<a href='/../followup/%s/%s/%s' target='_blank'>followup</a>"
urls = []
for time in times:
start = time - window
end = time + window
urls.append(base % (tag, start, end))
return urls
def get_gracedb_search_link(time):
# Set up a search string for a 3s window around the coincidence
gdb_search_query = '%.0f+..+%.0f' % (numpy.floor(time) - 1,
numpy.ceil(time) + 1)
gdb_search_url = ('https://gracedb.ligo.org/search/?query='
'{}&query_type=S'.format(gdb_search_query))
gdb_search_link = '<a href="' + gdb_search_url + '">Search</a>'
return gdb_search_link
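# Illustrative sketch of the link helpers above (the GPS time, window and tag are
# hypothetical; the output shown is what the default base format string would produce):
#
#     >>> times_to_links([1126259462.4], window=10, tag='H1')
#     ["<a href='/../followup/H1/1126259452.4/1126259472.4' target='_blank'>followup</a>"]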
|
gwastroREPO_NAMEpycbcPATH_START.@pycbc_extracted@pycbc-master@pycbc@[email protected]@.PATH_END.py
|
{
"filename": "octoai_endpoint.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/llms/octoai_endpoint.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms import OctoAIEndpoint
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"OctoAIEndpoint": "langchain_community.llms"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"OctoAIEndpoint",
]
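# Illustrative note (not part of the original module): importing the deprecated name
# from this module resolves it dynamically through __getattr__ above, which may emit a
# deprecation warning pointing at langchain_community.llms, e.g.
#
#     from langchain.llms.octoai_endpoint import OctoAIEndpoint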
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@llms@[email protected]_END.py
|
{
"filename": "README.md",
"repo_name": "dealii/dealii",
"repo_path": "dealii_extracted/dealii-master/bundled/README.md",
"type": "Markdown"
}
|
This folder contains third party projects bundled with deal.II
==============================================================
**Please note that these projects are copyrighted by others than the deal.II
authors, but are included by permission. For details, consult the stated
licenses below.**
Below is a detailed list of the content of each subdirectory, and the
licenses that apply.
boost-*
-------
Contains parts of the boost c++ libraries copyrighted by the boost authors
and licensed under the Boost Software License Version 1.0. See
`boost-*/LICENSE_1_0.txt` or http://www.boost.org/LICENSE_1_0.txt
A full version of the library can be downloaded at http://www.boost.org/.
kokkos-*
--------
Contains the Kokkos project licensed under the 3-clause BSD license.
A full version of the Kokkos project can be downloaded at
https://github.com/kokkos/kokkos.
taskflow-*
--------------
Contains the taskflow project licensed under the MIT license.
A full version of the taskflow project can be downloaded at
https://taskflow.github.io/.
muparser_*
----------
Contains the MuParser project licensed under the MIT license.
A full version of the MuParser project can be downloaded at
http://muparser.beltoforion.de/.
tbb*
----
Contains parts of the Intel Threading Building Blocks library copyrighted
by the respective authors and licensed under the Apache License Version 2.0.
See `tbb*/README.md` or http://threadingbuildingblocks.org/.
A full version of the tbb project can be downloaded at
http://threadingbuildingblocks.org/.
umfpack (UMFPACK 5.0.2, AMD 2.2, UFCONFIG)
-------------------------------------------
Contains parts of the UMFPACK, AMD and UFCONFIG libraries copyrighted by
Timothy A. Davis, Patrick R. Amestoy, and Iain S. Duff and licensed under
the GNU Lesser General Public License version 2.1 or later. Or alternative
licenses as explained in `umfpack/UMFPACK/Doc/License` and
`umfpack/AMD/Doc/License`. See `umfpack/README.txt` and
`umfpack/lgpl-2.1.txt`.
A full version of UMFPACK can be downloaded at
http://faculty.cse.tamu.edu/davis/suitesparse.html
|
dealiiREPO_NAMEdealiiPATH_START.@dealii_extracted@dealii-master@[email protected]@.PATH_END.py
|
{
"filename": "mocklc.py",
"repo_name": "hpparvi/MuSCAT2_transit_pipeline",
"repo_path": "MuSCAT2_transit_pipeline_extracted/MuSCAT2_transit_pipeline-master/muscat2ta/mocklc.py",
"type": "Python"
}
|
# MuSCAT2 photometry and transit analysis pipeline
# Copyright (C) 2019 Hannu Parviainen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import warnings
import exodata
import numpy as np
import matplotlib.pyplot as pl
from os.path import join, split
from numpy import array, linspace, zeros, zeros_like, cos, atleast_2d, diag, arange, tile, newaxis, arccos
from numpy.random import multivariate_normal, seed
from exodata.astroquantities import Quantity as Qty
from pytransit import MandelAgol as MA
from pytransit.orbits_f import orbits as of
from tpc import SpectrumTool, Instrument, TabulatedFilter
from tpc.filter import *
warnings.filterwarnings("ignore", category=UserWarning)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
exocat = exodata.OECDatabase(join(split(__file__)[0], '../ext/oec/systems/'))
try:
from george import GP
from george.kernels import ExpKernel, Matern32Kernel
with_george = True
except ImportError:
with_george = False
class MockLC:
pb_names = 'g r i z'.split()
pb_centers = 1e-9*array([470, 640, 780, 900])
npb = len(pb_names)
def __init__(self, planet_name='wasp-80b', **kwargs):
self.t_exposure_d = Qty(kwargs.get('exptime', 60), 's').rescale('d')
self.t_baseline_d = Qty(kwargs.get('bltime', 0.5), 'h').rescale('d')
self.ldcs = kwargs.get('ldcs', array([[0.80,0.02], [0.61,0.16], [0.45,0.20], [0.36,0.20]]))
self.filters = 'g r i z'.split()
self.npb = len(self.filters)
self.planet = planet = exocat.searchPlanet(planet_name)
self.star = star = planet.star
self.p = kwargs.get('p', None) or float(planet.P)
self.k = kwargs.get('k', None) or float(planet.R.rescale('R_s') / star.R)
self.a = kwargs.get('a', None) or float(planet.getParam('semimajoraxis').rescale('R_s') / star.R)
self.i = float(planet.getParam('inclination').rescale('rad'))
self.b = kwargs.get('b', self.a * cos(self.i))
self.i = arccos(self.b / self.a)
self.duration_d = Qty(of.duration_eccentric_f(self.p, self.k, self.a, self.i, 0, 0, 1), 'd')
# Contamination
# -------------
qe_be = TabulatedFilter('1024B_eXcelon',
[300, 325, 350, 400, 450, 500, 700, 800, 850, 900, 950, 1050, 1150],
[0.0, 0.1, 0.25, 0.60, 0.85, 0.92, 0.96, 0.85, 0.70, 0.50, 0.30, 0.05, 0.0])
qe_b = TabulatedFilter('2014B',
[300, 350, 500, 550, 700, 800, 1000, 1050],
[0.10, 0.20, 0.90, 0.96, 0.90, 0.75, 0.11, 0.05])
qes = qe_be, qe_b, qe_be, qe_be
instrument = Instrument('MuSCAT2', (sdss_g, sdss_r, sdss_i, sdss_z), qes)
self.contaminator = SpectrumTool(instrument, "i'")
self.i_contamination = kwargs.get('i_contamination', 0.0)
self.cnteff = kwargs.get('contaminant_temperature', None) or float(star.T)
self.k0 = self.k/np.sqrt(1-self.i_contamination)
self.contamination = self.contaminator.contamination(self.i_contamination, float(star.T), self.cnteff)
@property
def t_total_d(self):
return self.duration_d + 2*self.t_baseline_d
@property
def duration_h(self):
return self.duration_d.rescale('h')
@property
def n_exp(self):
return int(self.t_total_d // self.t_exposure_d)
def __call__(self, rseed=0, ldcs=None, wnsigma=None, rnsigma=None, rntscale=0.5):
return self.create(rseed, ldcs, wnsigma, rnsigma, rntscale)
def create(self, rseed=0, ldcs=None, wnsigma=None, rnsigma=None, rntscale=0.5, nights=1):
ldcs = ldcs if ldcs is not None else self.ldcs
seed(rseed)
self.time = linspace(-0.5*float(self.t_total_d), 0.5*float(self.t_total_d), self.n_exp)
self.time = (tile(self.time, [nights, 1]) + (self.p*arange(nights))[:,newaxis]).ravel()
self.npt = self.time.size
self.transit = zeros([self.npt, 4])
for i, (ldc, c) in enumerate(zip(ldcs, self.contamination)):
self.transit[:, i] = MA().evaluate(self.time, self.k0, ldc, 0, self.p, self.a, self.i, c=c)
# White noise
# -----------
if wnsigma is not None:
self.wnoise = multivariate_normal(zeros(atleast_2d(self.transit).shape[1]), diag(wnsigma)**2, self.npt)
else:
self.wnoise = zeros_like(self.transit)
# Red noise
# ---------
if rnsigma and with_george:
self.gp = GP(rnsigma**2 * ExpKernel(rntscale))
self.gp.compute(self.time)
self.rnoise = self.gp.sample(self.time, self.npb).T
self.rnoise -= self.rnoise.mean(0)
else:
self.rnoise = zeros_like(self.transit)
# Final light curve
# -----------------
self.time_h = Qty(self.time, 'd').rescale('h')
self.flux = self.transit + self.wnoise + self.rnoise
return self.time_h, self.flux
def plot(self, figsize=(13,4)):
fig,axs = pl.subplots(1,3, figsize=figsize, sharex=True, sharey=True)
yshift = 0.01*arange(4)
axs[0].plot(self.time_h, self.flux + yshift)
axs[1].plot(self.time_h, self.transit + yshift)
axs[2].plot(self.time_h, 1 + self.rnoise + yshift)
pl.setp(axs, xlabel='Time [h]', xlim=self.time_h[[0,-1]])
pl.setp(axs[0], ylabel='Normalised flux')
[pl.setp(ax, title=title) for ax,title in
zip(axs, 'Transit model + noise, Transit model, Red noise'.split(', '))]
fig.tight_layout()
return fig, axs
def plot_color_difference(self, figsize=(13,4)):
fig, axs = pl.subplots(2, 3, figsize=figsize, sharex=True, sharey=True)
[ax.plot(self.time_h, 100*(fl - self.transit[:, -1])) for ax, fl in zip(axs[0], self.transit[:, :-1].T)]
[ax.plot(self.time_h, 100*(fl - self.flux[:, -1])) for ax, fl in zip(axs[1], self.flux[:, :-1].T)]
[pl.setp(ax, title='F$_{}$ - F$_z$'.format(pb)) for ax,pb in zip(axs[0], self.pb_names[:-1])]
pl.setp(axs[:, 0], ylabel='$\Delta F$ [%]')
pl.setp(axs[1, :], xlabel='Time [h]')
pl.setp(axs, xlim=self.time_h[[0, -1]])
fig.tight_layout()
return fig
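# Illustrative usage sketch (not part of the original module); the noise levels below are
# hypothetical and the class needs the bundled OEC catalogue plus pytransit/tpc installed:
#
#     lc = MockLC('wasp-80b', exptime=60, bltime=0.5)
#     time_h, flux = lc.create(wnsigma=[1e-3, 1e-3, 1e-3, 1e-3], rnsigma=5e-4, rntscale=0.5)
#     fig, axs = lc.plot()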
|
hpparviREPO_NAMEMuSCAT2_transit_pipelinePATH_START.@MuSCAT2_transit_pipeline_extracted@MuSCAT2_transit_pipeline-master@[email protected]@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "pennucci/PulsePortraiture",
"repo_path": "PulsePortraiture_extracted/PulsePortraiture-master/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='PulsePortraiture',
version='0.0',
description='Data analysis package for wideband pulsar timing',
author='Tim Pennucci',
author_email='[email protected]',
url='http://github.com/pennucci/PulsePortraiture',
py_modules=['ppalign', 'ppgauss', 'pplib', 'ppspline', 'pptoas', 'pptoaslib', 'ppzap','telescope_codes'],
scripts=['ppalign.py','ppgauss.py', 'ppspline.py', 'pptoas.py',
'ppzap.py']
)
|
pennucciREPO_NAMEPulsePortraiturePATH_START.@PulsePortraiture_extracted@[email protected]@.PATH_END.py
|
{
"filename": "test_image_data_probe.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/integrationtests/mayavi/test_image_data_probe.py",
"type": "Python"
}
|
"""Simple test for the ImageDataProbe filter.
"""
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import abspath
from io import BytesIO
import copy
# Local imports.
from common import TestCase, get_example_data
class TestImageDataProbe(TestCase):
def check(self, saved=False):
"""Does the checking, if saved is True it does not change the
properties at first to see how those behave and only tests the
final unpickled state."""
script = self.script
e = script.engine
scene = e.current_scene
src = scene.children[0]
idp = src.children[0]
mm = idp.children[0]
if not saved:
assert src.get_output_dataset().is_a('vtkUnstructuredGrid')
assert idp.get_output_dataset().is_a('vtkImageData')
sc = idp.get_output_dataset().point_data.scalars
assert sc.name == 't'
assert mm.scalar_lut_manager.data_name == 't'
assert abs(sc.range[0]) < 1.0
assert abs(sc.range[1] - 626.0) < 1.0
idp.rescale_scalars = True
idp.dimensions = (41, 19, 19)
sc = idp.get_output_dataset().point_data.scalars
assert sc.name == idp.rescaled_scalar_name
assert mm.scalar_lut_manager.data_name == idp.rescaled_scalar_name
assert abs(sc.range[0]) < 1e-2
assert abs(sc.range[1] - 65535.0) < 1.e-2
assert (idp.get_output_dataset().dimensions == (41, 19, 19)).all()
def test(self):
self.main()
def do(self):
############################################################
# Imports.
from mayavi.filters.image_data_probe import ImageDataProbe
from mayavi.modules.api import ContourGridPlane
from mayavi.sources.vtk_xml_file_reader import VTKXMLFileReader
############################################################
# Create a new scene and set up the visualization.
s = self.new_scene()
script = mayavi = self.script
# Read a VTK (old style) data file.
r = VTKXMLFileReader()
r.initialize(get_example_data('fire_ug.vtu'))
script.add_source(r)
# Create the filters.
idp = ImageDataProbe()
script.add_filter(idp)
cgp = ContourGridPlane(enable_contours=False)
script.add_module(cgp)
cgp.grid_plane.axis = 'z'
cgp.grid_plane.position = 2
s.scene.isometric_view()
# Check.
self.check(saved=False)
############################################################
# Test if saving a visualization and restoring it works.
# Save visualization.
f = BytesIO()
f.name = abspath('test.mv2') # We simulate a file.
script.save_visualization(f)
f.seek(0) # So we can read this saved data.
# Remove existing scene.
engine = script.engine
engine.close_scene(s)
# Load visualization
script.load_visualization(f)
s = engine.current_scene
s.scene.isometric_view()
# Now do the check.
self.check(saved=True)
############################################################
# Test if the Mayavi2 visualization can be deep-copied.
# Pop the source object.
source = s.children.pop()
# Add it back to see if that works without error.
s.children.append(source)
# Now do the check.
s.scene.isometric_view()
self.check(saved=True)
# Now deepcopy the source and replace the existing one with
# the copy. This basically simulates cutting/copying the
# object from the UI via the right-click menu on the tree
# view, and pasting the copy back.
source1 = copy.deepcopy(source)
s.children[0] = source1
# Now do the check.
s.scene.isometric_view()
self.check(saved=True)
# If we have come this far, we are golden!
if __name__ == "__main__":
t = TestImageDataProbe()
t.test()
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@integrationtests@mayavi@[email protected]_END.py
|
{
"filename": "numpycompat.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/utils/compat/numpycompat.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ...utils import minversion
__all__ = ['NUMPY_LT_1_9_1', 'NUMPY_LT_1_10', 'NUMPY_LT_1_10_4',
'NUMPY_LT_1_11', 'NUMPY_LT_1_12', 'NUMPY_LT_1_13', 'NUMPY_LT_1_14']
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_9_1 = not minversion('numpy', '1.9.1')
NUMPY_LT_1_10 = not minversion('numpy', '1.10.0')
NUMPY_LT_1_10_4 = not minversion('numpy', '1.10.4')
NUMPY_LT_1_11 = not minversion('numpy', '1.11.0')
NUMPY_LT_1_12 = not minversion('numpy', '1.12')
NUMPY_LT_1_13 = not minversion('numpy', '1.13')
NUMPY_LT_1_14 = not minversion('numpy', '1.14dev')
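# Typical call-site sketch (hypothetical, not part of this module): the flags above are
# meant to guard version-specific code paths elsewhere in astropy, e.g.
#
#     from astropy.utils.compat.numpycompat import NUMPY_LT_1_14
#     if NUMPY_LT_1_14:
#         pass  # fall back to pre-1.14 behaviour here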
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@[email protected]@site-packages@astropy@utils@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "spacetelescope/imexam",
"repo_path": "imexam_extracted/imexam-master/imexam/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
An Astropy affiliated package to help perform image examination through a
viewing tool, like DS9
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import * # noqa
# ----------------------------------------------------------------------------
try:
from . import imexamxpa # noqa
_have_xpa = True
except ImportError:
_have_xpa = False
# import high level functions into the imexam namespace
if _have_xpa:
from .util import list_active_ds9, find_path # noqa
from .util import display_help, display_xpa_help # noqa
from .util import set_logging # noqa
from . import connect as _connect
connect = _connect.Connect
|
spacetelescopeREPO_NAMEimexamPATH_START.@imexam_extracted@imexam-master@imexam@[email protected]_END.py
|
{
"filename": "setup.py",
"repo_name": "theonefromnowhere/FitCov",
"repo_path": "FitCov_extracted/FitCov-main/setup.py",
"type": "Python"
}
|
import setuptools
import os
import sys
long_description = 'FitCov'
package_basedir = os.path.abspath(os.path.dirname(__file__))
package_basename = 'FitCov'
sys.path.insert(0, os.path.join(package_basedir, package_basename))
if __name__ == '__main__':
setuptools.setup(
name="FitCov",
version="1.0",
author="Svyatoslav Trusov",
author_email="[email protected]",
description="Codes for fitted jackknife covariance",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/theonefromnowhere/ClusteringLibs",
packages=[package_basename],
#setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=['numpy','pycorr','iminuit'],
)
|
theonefromnowhereREPO_NAMEFitCovPATH_START.@FitCov_extracted@[email protected]@.PATH_END.py
|
{
"filename": "testdata.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/sandbox/nonparametric/testdata.py",
"type": "Python"
}
|
"""
Created on Fri Mar 04 07:36:28 2011
@author: Mike
"""
import numpy as np
class kdetest:
Hpi = np.matrix([[ 0.05163034, 0.5098923 ],
[0.50989228, 8.8822365 ]])
faithfulData = dict(
eruptions=[
3.6, 1.8, 3.333, 2.283, 4.533, 2.883, 4.7, 3.6, 1.95, 4.35, 1.833, 3.917,
4.2, 1.75, 4.7, 2.167, 1.75, 4.8, 1.6, 4.25, 1.8, 1.75, 3.45, 3.067, 4.533,
3.6, 1.967, 4.083, 3.85, 4.433, 4.3, 4.467, 3.367, 4.033, 3.833, 2.017, 1.867,
4.833, 1.833, 4.783, 4.35, 1.883, 4.567, 1.75, 4.533, 3.317, 3.833, 2.1, 4.633,
2, 4.8, 4.716, 1.833, 4.833, 1.733, 4.883, 3.717, 1.667, 4.567, 4.317, 2.233, 4.5,
1.75, 4.8, 1.817, 4.4, 4.167, 4.7, 2.067, 4.7, 4.033, 1.967, 4.5, 4, 1.983, 5.067,
2.017, 4.567, 3.883, 3.6, 4.133, 4.333, 4.1, 2.633, 4.067, 4.933, 3.95, 4.517, 2.167,
4, 2.2, 4.333, 1.867, 4.817, 1.833, 4.3, 4.667, 3.75, 1.867, 4.9, 2.483, 4.367, 2.1, 4.5,
4.05, 1.867, 4.7, 1.783, 4.85, 3.683, 4.733, 2.3, 4.9, 4.417, 1.7, 4.633, 2.317, 4.6,
1.817, 4.417, 2.617, 4.067, 4.25, 1.967, 4.6, 3.767, 1.917, 4.5, 2.267, 4.65, 1.867,
4.167, 2.8, 4.333, 1.833, 4.383, 1.883, 4.933, 2.033, 3.733, 4.233, 2.233, 4.533,
4.817, 4.333, 1.983, 4.633, 2.017, 5.1, 1.8, 5.033, 4, 2.4, 4.6, 3.567, 4, 4.5, 4.083,
1.8, 3.967, 2.2, 4.15, 2, 3.833, 3.5, 4.583, 2.367, 5, 1.933, 4.617, 1.917, 2.083,
4.583, 3.333, 4.167, 4.333, 4.5, 2.417, 4, 4.167, 1.883, 4.583, 4.25, 3.767, 2.033,
4.433, 4.083, 1.833, 4.417, 2.183, 4.8, 1.833, 4.8, 4.1, 3.966, 4.233, 3.5, 4.366,
2.25, 4.667, 2.1, 4.35, 4.133, 1.867, 4.6, 1.783, 4.367, 3.85, 1.933, 4.5, 2.383,
4.7, 1.867, 3.833, 3.417, 4.233, 2.4, 4.8, 2, 4.15, 1.867, 4.267, 1.75, 4.483, 4,
4.117, 4.083, 4.267, 3.917, 4.55, 4.083, 2.417, 4.183, 2.217, 4.45, 1.883, 1.85,
4.283, 3.95, 2.333, 4.15, 2.35, 4.933, 2.9, 4.583, 3.833, 2.083, 4.367, 2.133, 4.35,
2.2, 4.45, 3.567, 4.5, 4.15, 3.817, 3.917, 4.45, 2, 4.283, 4.767, 4.533, 1.85, 4.25,
1.983, 2.25, 4.75, 4.117, 2.15, 4.417, 1.817, 4.467],
waiting=[
79, 54, 74, 62, 85, 55, 88, 85, 51, 85, 54, 84, 78, 47, 83, 52,
62, 84, 52, 79, 51, 47, 78, 69, 74, 83, 55, 76, 78, 79, 73, 77,
66, 80, 74, 52, 48, 80, 59, 90, 80, 58, 84, 58, 73, 83, 64, 53,
82, 59, 75, 90, 54, 80, 54, 83, 71, 64, 77, 81, 59, 84, 48, 82,
60, 92, 78, 78, 65, 73, 82, 56, 79, 71, 62, 76, 60, 78, 76, 83,
75, 82, 70, 65, 73, 88, 76, 80, 48, 86, 60, 90, 50, 78, 63, 72,
84, 75, 51, 82, 62, 88, 49, 83, 81, 47, 84, 52, 86, 81, 75, 59,
89, 79, 59, 81, 50, 85, 59, 87, 53, 69, 77, 56, 88, 81, 45, 82,
55, 90, 45, 83, 56, 89, 46, 82, 51, 86, 53, 79, 81, 60, 82, 77,
76, 59, 80, 49, 96, 53, 77, 77, 65, 81, 71, 70, 81, 93, 53, 89,
45, 86, 58, 78, 66, 76, 63, 88, 52, 93, 49, 57, 77, 68, 81, 81,
73, 50, 85, 74, 55, 77, 83, 83, 51, 78, 84, 46, 83, 55, 81, 57,
76, 84, 77, 81, 87, 77, 51, 78, 60, 82, 91, 53, 78, 46, 77, 84,
49, 83, 71, 80, 49, 75, 64, 76, 53, 94, 55, 76, 50, 82, 54, 75,
78, 79, 78, 78, 70, 79, 70, 54, 86, 50, 90, 54, 54, 77, 79, 64,
75, 47, 86, 63, 85, 82, 57, 82, 67, 74, 54, 83, 73, 73, 88, 80,
71, 83, 56, 79, 78, 84, 58, 83, 43, 60, 75, 81, 46, 90, 46, 74]
)
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@sandbox@[email protected]@.PATH_END.py
|
{
"filename": "04_B_train_on_data.py",
"repo_name": "gully/blase",
"repo_path": "blase_extracted/blase-main/experiments/04_full_bandwidth_HPF/04_B_train_on_data.py",
"type": "Python"
}
|
import os
import torch
from torch import nn
from tqdm import trange
import torch.optim as optim
from blase.emulator import SparsePhoenixEmulator
import matplotlib.pyplot as plt
from gollum.phoenix import PHOENIXSpectrum
import numpy as np
from muler.hpf import HPFSpectrumList, HPFSpectrum
import copy
import astropy.units as u
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
from astropy.io import fits
# Fetch the real HPF data
# We will demo on WASP 69
hdus = fits.open("../../data/WASP_69_hpf_stack.fits")
# Numpy arrays: 1 x N_pix
data = HPFSpectrum(
flux=hdus[1].data["flux"] * u.dimensionless_unscaled,
spectral_axis=hdus[1].data["wavelength"] * u.Angstrom,
)
data = data.remove_nans()
# Pre-process the model as described in the paper
spectrum = PHOENIXSpectrum(teff=4700, logg=4.5)
spectrum = spectrum.divide_by_blackbody()
spectrum = spectrum.normalize()
continuum_fit = spectrum.fit_continuum(polyorder=5)
spectrum = spectrum.divide(continuum_fit, handle_meta="ff")
wl_native = spectrum.wavelength.value
flux_native = spectrum.flux.value
# Create the emulator and load a pretrained model
prominence = 0.01
emulator = SparsePhoenixEmulator(
wl_native, flux_native, prominence=prominence, wing_cut_pixels=1000
)
emulator.to(device)
wl_native = emulator.wl_native.clone().detach().to(device)
wl_active = wl_native.to("cpu")[emulator.active_mask.to("cpu").numpy()]
target = (
emulator.flux_native.clone().detach().to(device)[emulator.active_mask.cpu().numpy()]
)
state_dict_post = torch.load("emulator_T4700g4p5_prom0p01_HPF.pt")
emulator.load_state_dict(state_dict_post)
emulator.radial_velocity = nn.Parameter(torch.tensor(-9.628, device=device))
emulator.radial_velocity.requires_grad = True
emulator.lam_centers.requires_grad = False
emulator.amplitudes.requires_grad = True
emulator.sigma_widths.requires_grad = True
emulator.gamma_widths.requires_grad = True
from blase.emulator import EchelleModel
model = EchelleModel(
data.spectral_axis.bin_edges.value.astype(np.float64), wl_native.cpu()
)
model.to(device)
model.ln_vsini = nn.Parameter(torch.log(torch.tensor(1.0, device=device)))
data_target = torch.tensor(
data.flux.value.astype(np.float64), device=device, dtype=torch.float64
)
data_wavelength = torch.tensor(
data.wavelength.value.astype(np.float64), device=device, dtype=torch.float64
)
loss_fn = nn.MSELoss(reduction="mean")
optimizer = optim.Adam(
list(filter(lambda p: p.requires_grad, model.parameters()))
+ list(filter(lambda p: p.requires_grad, emulator.parameters())),
0.01,
amsgrad=True,
)
n_epochs = 1000
losses = []
t_iter = trange(n_epochs, desc="Training", leave=True)
for epoch in t_iter:
model.train()
emulator.train()
high_res_model = emulator.forward()
yhat = model.forward(high_res_model)
loss = loss_fn(yhat, data_target)
loss.backward()
optimizer.step()
optimizer.zero_grad()
t_iter.set_description("Training Loss: {:0.8f}".format(loss.item()))
torch.save(emulator.state_dict(), "emulator_T4700g4p5_prom0p01_HPF_MAP.pt")
torch.save(model.state_dict(), "extrinsic_MAP.pt")
|
gullyREPO_NAMEblasePATH_START.@blase_extracted@blase-main@experiments@04_full_bandwidth_HPF@[email protected]_END.py
|
{
"filename": "_weight.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/splom/hoverlabel/font/_weight.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="weight", parent_name="splom.hoverlabel.font", **kwargs
):
super(WeightValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
extras=kwargs.pop("extras", ["normal", "bold"]),
max=kwargs.pop("max", 1000),
min=kwargs.pop("min", 1),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@splom@hoverlabel@font@[email protected]_END.py
|
{
"filename": "fit_fhhe.py",
"repo_name": "cpiaulet/smint",
"repo_path": "smint_extracted/smint-master/smint/fit_fhhe.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 1 11:52:50 2020
@author: caroline
Estimate gas-to-core mass ratio based on the Lopez & Fortney 2014
models based on planet mass, radius, insolation and system age
Utilities functions
"""
# Import modules ----------
import numpy as np
from scipy.interpolate import RegularGridInterpolator
import emcee
import corner
from astropy.io import ascii as aioascii
from astropy import table
#%% utilities for interpolation
def find_radius_fenv(t=None, interp=None, met=1., age=1., log10_finc=0.,
log10_mass=1., fenv=10.):
"""
Given a metallicity, an age, an incident flux and a planet mass and envelope
mass fraction (%),
get the best-matching planet radius
"""
if interp is None:
interp = make_interpolator_LF14(t)
r_earth = interp((met,age,log10_finc,log10_mass,fenv), method='linear')
return r_earth
def make_interpolator_LF14(t, R_array, log_fenv_prior=False):
"""
make an interpolator for the planet radius as a function of
['metallicity_solar','age_Gyr', 'F_inc_oplus','Mass_oplus','f_env_pc']
using the Lopez & Fortney (2014) grid
the interpolation is linear with the log10 of the flux and planet mass
log_fenv_prior: if True, interpolate linearly in log space
"""
# make array of f_env_pc values
metallicity_solar = np.unique(np.array(t['metallicity_solar']))
age_Gyr = np.unique(np.array(t['age_Gyr']))
log10_F_inc_oplus = np.unique(np.log10(np.array(t['F_inc_oplus'])))
log10_Mass_oplus = np.unique(np.log10(np.array(t['Mass_oplus'])))
if log_fenv_prior:
f_env_pc = np.unique(np.log10(np.array(t['f_env_pc'])))
else:
f_env_pc = np.unique(np.array(t['f_env_pc']))
interpolator = RegularGridInterpolator((metallicity_solar, age_Gyr,
log10_F_inc_oplus, log10_Mass_oplus,
f_env_pc,),
R_array, bounds_error=False)
return interpolator
#%% emcee functions
def lnlike(theta, true_rad, true_rad_err, interp, met):
"""
Log-likelihood function for emcee fit
"""
# no distinction for log10: interpolator built accordingly (takes log10 as input if log_fenv_prior)
fenv, mass, age, finc = theta
# estimate interpolated radius for these params
radius = find_radius_fenv(t=None, interp=interp, met=met, age=age,
log10_finc=np.log10(finc), log10_mass=np.log10(mass),
fenv=fenv)
return -0.5*(((true_rad-radius)/true_rad_err)**2)
def lnprior(theta, mu, icovmat, flat_age, age_min, age_max, log_fenv_prior, grid_lim):
"""
Prior (using the known age, Finc and mass distributions, and flat in fenv
or log fenv)
"""
if log_fenv_prior:
log10_fenv, mass, age, finc = theta
fenv = 10**log10_fenv
else:
fenv, mass, age, finc = theta
if (fenv < grid_lim['fenv'][0]) or (fenv > grid_lim['fenv'][1]):
return -np.inf
if (mass < grid_lim['mass'][0]) or (mass > grid_lim['mass'][1]):
return -np.inf
if (age < grid_lim['age'][0]) or (age > grid_lim['age'][1]):
return -np.inf
if flat_age:
if (age < age_min) or (age > age_max):
return -np.inf
if (finc < grid_lim['finc'][0]) or (finc > grid_lim['finc'][1]):
return -np.inf
else:
if flat_age:
arr = np.array([mass, finc])
else:
arr = np.array([mass, age, finc])
diff = arr - mu
return -np.dot(diff, np.dot(icovmat, diff)) / 2.0
def lnprob(theta, true_rad, true_rad_err, interp, met, mu, icovmat, grid_lim,
flat_age=True, age_min=0.1, age_max=10., log_fenv_prior=True):
"""
Log-probability function
"""
lp = lnprior(theta, mu, icovmat, flat_age, age_min, age_max,
log_fenv_prior, grid_lim)
if not np.isfinite(lp):
return -np.inf
else:
return lp + lnlike(theta, true_rad, true_rad_err, interp, met)
#%% setup and run interpolator
def setup_priors(params):
"""
input: params of the fit
output: mean and covariance matrix for gaussian priors,
lower and upper bound on the flat prior on the age if not a gaussian prior
"""
if params["flat_age"]:
mu = np.array([params["Mp_earth"], params["Sinc_earth"]])
covmat = np.zeros((2,2))
covmat[0,0] = params["err_Mp_earth"]**2.
covmat[1,1] = params["err_Sinc_earth"]**2.
age_min = params["age_Gyr_inf"]
age_max = params["age_Gyr_sup"]
else:
mu = np.array([params["Mp_earth"], params["age_Gyr"], params["Sinc_earth"]])
covmat = np.zeros((3,3))
covmat[0,0] = params["err_Mp_earth"]**2.
covmat[1,1] = params["err_age_Gyr"]**2.
covmat[2,2] = params["err_Sinc_earth"]**2.
age_min = 0.1
age_max = 10.
params["icovmat"] = np.linalg.inv(covmat)
params["mu"] = mu
params["covmat"] = covmat
params["age_min"] = age_min
params["age_max"] = age_max
return params
def ini_fit(params, grid_lim=None):
"""
input: params of the fit
grid_lim: dict with the lower and upper bounds on the grid params
if None, uses the bounds from the Lopez & Fortney (2014) grid
output: initial positions of the walkers and labels for the fitted parameters
"""
if params["log_fenv_prior"]:
fenv_ini = 0.
fenv_unc = 1.
fenv_label = r"$\log_{10}$ f$_{env}$ [%]"
else:
fenv_ini = 10.
fenv_unc = 10.
fenv_label = r"f$_{env}$ [%]"
x0 = np.array([fenv_ini, params["Mp_earth"], params["age_Gyr"], params["Sinc_earth"]])
params["labels"] = [fenv_label, r"M$_p$ [M$_\oplus$]", "Age [Gyr]", r"S$_{inc}$ [S$_\oplus$]"]
params["pos0"] = [x0 + np.array([fenv_unc, params["err_Mp_earth"], 1., params["err_Sinc_earth"]])\
* np.random.randn(params["ndim"]) for i in range(params["nwalkers"])]
if grid_lim is None:
grid_lim = dict()
grid_lim['fenv'] = [0.01, 20.]
grid_lim['mass'] = [1.0, 20.]
grid_lim['age'] = [0.1, 10.]
grid_lim['finc'] = [0.1, 1000.]
params["grid_lim"] = grid_lim
return params
def run_fit(params, interpolator, met=1.):
"""
Run the emcee fit using the previously-set up priors and params
Interpolator: generated using make_interpolator_LF14()
returns the emcee sampler for met=1*solar or met=50*solar
"""
if met !=1 and met != 50:
raise ValueError("Metallicity has to be 1 or 50 * solar!")
print("\nSetting up the sampler...")
sampler = emcee.EnsembleSampler(params["nwalkers"], params["ndim"], lnprob,
args=(params["Rp_earth"],params["err_Rp_earth"],
interpolator, met, params["mu"],
params["icovmat"], params["grid_lim"],
params["flat_age"], params["age_min"],
params["age_max"], params["log_fenv_prior"]))
print("\nRunning the emcee fit...")
sampler.run_mcmc(params["pos0"], params["nsteps"])
if params["save"]:
print("\nSaving the results...")
np.save(params["outputdir"]+params["fname"]+'_chains_met'+str(int(met))+'.npy', sampler.chain)
return sampler
#%% post-processing
def calc_constraints(samples, params, more_percentiles=[15.9, 50., 84.1], suffix=""):
"""
Build astropy table of parameter constraints
samples: flattened samples generated by emcee sampler (shape=(nsamp, ndim))
params: fit params
more_percentiles: list of additional percentiles to calculate and return
The table is printed, returned and saved to a csv file if params["save"]
"""
medians = np.median(samples, axis=0)
upper_lims = np.percentile(samples, 84.1, axis=0)
lower_lims = np.percentile(samples, 15.9, axis=0)
p1sig = upper_lims - medians
m1sig = lower_lims - medians
t = table.Table([params["labels"], medians, p1sig, m1sig],
names=("Parameter", "Median", "+1sigma", "-1sigma"))
for perc in more_percentiles:
colname = str(int(perc*100.)/100.)+"th perc."
t[colname] = np.percentile(samples, perc, axis=0)
print("\nTable of parameter constraints:\n")
print(t)
if params["save"]:
aioascii.write(t, params["outputdir"]+params["fname"]+suffix+'_constraints.csv', overwrite=True)
return t
def plot_corner(samples, params, which="met1",
plot_datapoints=False, smooth=1.,
quantiles=[0.16, 0.5, 0.84], title_kwargs={'fontsize':14},
hist_kwargs={"linewidth":3}, rg=None,
show_titles=[True,False], **kwargs):
"""
Corner plot for an emcee fit of the envelope mass fraction that matches
the observed planet and system params
samples: generated by emcee sampler
params: fit params
which: "met1", "met50", "both" depending on what we want to show
other args: args for the corner function
Returns the figure with the corner plot
"""
print("\n** Plotting corner for", which)
if "met" in which:
if which == "met1":
color = params["met1_color"]
hist_kwargs["color"] = params["met1_color"]
elif which == "met50":
color = params["met50_color"]
hist_kwargs["color"] = params["met50_color"]
fig = corner.corner(samples, labels=params["labels"],
plot_datapoints=plot_datapoints, smooth=smooth,
show_titles=show_titles[0], quantiles=quantiles,
title_kwargs=title_kwargs, color=color,
hist_kwargs=hist_kwargs, range=rg, **kwargs)
if which == "both":
hist_kwargs["color"] = params["met50_color"]
fig = corner.corner(samples[1], labels=params["labels"],
plot_datapoints=plot_datapoints, smooth=smooth,
show_titles=show_titles[1], title_kwargs=title_kwargs,
color=params["met50_color"], hist_kwargs=hist_kwargs,
range=rg, **kwargs)
hist_kwargs["color"] = params["met1_color"]
corner.corner(samples[0], fig=fig, labels=params["labels"],
plot_datapoints=plot_datapoints, smooth=smooth,
show_titles=show_titles[0], title_kwargs=title_kwargs,
color=params["met1_color"], hist_kwargs=hist_kwargs,
range=rg, **kwargs)
return fig
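# Illustrative end-to-end sketch (not part of the original module): 'params' must carry the
# keys used by the functions above, 'interp' comes from make_interpolator_LF14 on the
# Lopez & Fortney (2014) grid, and 'nburn' is a hypothetical burn-in length.
#
#     params = setup_priors(params)
#     params = ini_fit(params)
#     sampler = run_fit(params, interp, met=1.)
#     samples = sampler.chain[:, nburn:, :].reshape((-1, params["ndim"]))
#     constraints = calc_constraints(samples, params)
#     fig = plot_corner(samples, params, which="met1")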
|
cpiauletREPO_NAMEsmintPATH_START.@smint_extracted@smint-master@smint@[email protected]_END.py
|
{
"filename": "_colorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/box/hoverlabel/font/_colorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_name="box.hoverlabel.font", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@box@hoverlabel@font@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "lucabaldini/ixpeobssim",
"repo_path": "ixpeobssim_extracted/ixpeobssim-main/ixpeobssim/targets/__init__.py",
"type": "Python"
}
|
lucabaldiniREPO_NAMEixpeobssimPATH_START.@ixpeobssim_extracted@ixpeobssim-main@ixpeobssim@targets@[email protected]_END.py
|
|
{
"filename": "_scatterpolar.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/layout/template/data/_scatterpolar.py",
"type": "Python"
}
|
from plotly.graph_objs import Scatterpolar
|
[email protected][email protected]@packages@python@plotly@plotly@graph_objs@layout@template@data@[email protected]_END.py
|
{
"filename": "retrieval_result.py",
"repo_name": "ideasrule/platon",
"repo_path": "platon_extracted/platon-master/platon/retrieval_result.py",
"type": "Python"
}
|
import numpy as np
class RetrievalResult:
def __init__(self, results, retrieval_type, best_fit_params,
transit_bins=None, transit_depths=None, transit_errors=None,
eclipse_bins=None, eclipse_depths=None, eclipse_errors=None,
best_fit_transit_depths=None, best_fit_transit_dict=None,
best_fit_eclipse_depths=None, best_fit_eclipse_dict=None,
fit_info=None, divisors=None, labels=None):
self.best_fit_params = best_fit_params
self.retrieval_type = retrieval_type
self.transit_bins = transit_bins
self.transit_depths = transit_depths
self.transit_errors = transit_errors
if transit_bins is not None:
transit_bins = np.array(transit_bins)
self.transit_wavelengths = (transit_bins[:,0] + transit_bins[:,1]) / 2
self.transit_chi_sqr = np.sum((transit_depths - best_fit_transit_depths)**2 / transit_errors**2)
print("Transit chi sqr", self.transit_chi_sqr)
self.eclipse_bins = eclipse_bins
self.eclipse_depths = eclipse_depths
self.eclipse_errors = eclipse_errors
if eclipse_bins is not None:
eclipse_bins = np.array(eclipse_bins)
self.eclipse_wavelengths = (eclipse_bins[:,0] + eclipse_bins[:,1]) / 2
self.eclipse_chi_sqr = np.sum((eclipse_depths - best_fit_eclipse_depths)**2 / eclipse_errors**2)
self.best_fit_transit_depths = best_fit_transit_depths
self.best_fit_transit_dict = best_fit_transit_dict
self.best_fit_eclipse_depths = best_fit_eclipse_depths
self.best_fit_eclipse_dict = best_fit_eclipse_dict
self.fit_info = fit_info
self.__dict__.update(results)
if "logz" in results:
self.final_logz = results["logz"][-1]
self.divisors = divisors
self.labels = labels
def __getitem__(self, key):
return getattr(self, key)
def __setitem__(self, key, value):
setattr(self, key, value)
def __delitem__(self, key):
delattr(self, key)
def keys(self):
return list(self.__dict__.keys())
def values(self):
return list(self.__dict__.values())
def items(self):
return list(self.__dict__.items())
def __repr__(self):
return str(self.__dict__)
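# Illustrative sketch of the dict-style access defined above (arguments are hypothetical):
#
#     result = RetrievalResult(results, "dynesty", best_fit_params, fit_info=fit_info)
#     result["retrieval_type"]        # equivalent to result.retrieval_type
#     "logz" in result.keys()         # True if the sampler results included 'logz'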
|
ideasruleREPO_NAMEplatonPATH_START.@platon_extracted@platon-master@platon@[email protected]_END.py
|
{
"filename": "test_reordering.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/sparse/csgraph/tests/test_reordering.py",
"type": "Python"
}
|
import numpy as np
from numpy.testing import assert_equal
from scipy.sparse.csgraph import reverse_cuthill_mckee, structural_rank
from scipy.sparse import csc_array, csr_array, coo_array
def test_graph_reverse_cuthill_mckee():
A = np.array([[1, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 1],
[0, 1, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0],
[1, 0, 1, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 1, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 1]], dtype=int)
graph = csr_array(A)
perm = reverse_cuthill_mckee(graph)
correct_perm = np.array([6, 3, 7, 5, 1, 2, 4, 0])
assert_equal(perm, correct_perm)
# Test int64 indices input
graph.indices = graph.indices.astype('int64')
graph.indptr = graph.indptr.astype('int64')
perm = reverse_cuthill_mckee(graph, True)
assert_equal(perm, correct_perm)
def test_graph_reverse_cuthill_mckee_ordering():
data = np.ones(63,dtype=int)
rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2,
2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9,
9, 10, 10, 10, 10, 10, 11, 11, 11, 11,
12, 12, 12, 13, 13, 13, 13, 14, 14, 14,
14, 15, 15, 15, 15, 15])
cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2,
7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13,
15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13,
1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11,
4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14,
5, 7, 10, 13, 15])
graph = csr_array((data, (rows,cols)))
perm = reverse_cuthill_mckee(graph)
correct_perm = np.array([12, 14, 4, 6, 10, 8, 2, 15,
0, 13, 7, 5, 9, 11, 1, 3])
assert_equal(perm, correct_perm)
def test_graph_structural_rank():
# Test square matrix #1
A = csc_array([[1, 1, 0],
[1, 0, 1],
[0, 1, 0]])
assert_equal(structural_rank(A), 3)
# Test square matrix #2
rows = np.array([0,0,0,0,0,1,1,2,2,3,3,3,3,3,3,4,4,5,5,6,6,7,7])
cols = np.array([0,1,2,3,4,2,5,2,6,0,1,3,5,6,7,4,5,5,6,2,6,2,4])
data = np.ones_like(rows)
B = coo_array((data,(rows,cols)), shape=(8,8))
assert_equal(structural_rank(B), 6)
#Test non-square matrix
C = csc_array([[1, 0, 2, 0],
[2, 0, 4, 0]])
assert_equal(structural_rank(C), 2)
#Test tall matrix
assert_equal(structural_rank(C.T), 2)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@sparse@csgraph@tests@[email protected]_END.py
|
{
"filename": "fit.py",
"repo_name": "veusz/veusz",
"repo_path": "veusz_extracted/veusz-master/veusz/widgets/fit.py",
"type": "Python"
}
|
# fitting plotter
# Copyright (C) 2005 Jeremy S. Sanders
# Email: Jeremy Sanders <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
import re
import sys
import numpy as N
from .. import document
from .. import setting
from .. import utils
from .. import qtall as qt
from .function import FunctionPlotter
from . import widget
# try importing iminuit first, then minuit, then None
try:
import iminuit as minuit
except ImportError:
try:
import minuit
except ImportError:
minuit = None
# Check whether iminuit version is old (1.x)
if minuit is not None:
if minuit.__version__[0:1] == '1':
isiminuit1 = True
else:
isiminuit1 = False
def _(text, disambiguation=None, context='Fit'):
"""Translate text."""
return qt.QCoreApplication.translate(context, text, disambiguation)
def minuitFit(evalfunc, params, names, values, xvals, yvals, yserr):
"""Do fitting with minuit (if installed)."""
def chi2(params):
"""generate a lambda function to impedance-match between PyMinuit's
use of multiple parameters versus our use of a single numpy vector."""
c = ((evalfunc(params, xvals) - yvals)**2 / yserr**2).sum()
if chi2.runningFit:
chi2.iters += 1
p = [chi2.iters, c] + params.tolist()
str = ("%5i " + "%8g " * (len(params)+1)) % tuple(p)
print(str)
return c
namestr = ', '.join(names)
fnstr = 'lambda %s: chi2(N.array([%s]))' % (namestr, namestr)
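# e.g. for names ['a', 'b'] this builds "lambda a, b: chi2(N.array([a, b]))",
# so Minuit sees individually named parameters while chi2 keeps taking one vector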
# this is safe because the only user-controlled variable is len(names)
fn = eval(fnstr, {'chi2' : chi2, 'N' : N})
print(_('Fitting via Minuit:'))
m = minuit.Minuit(fn, **values)
# set errordef explicitly (least-squares: 1.0 or log-likelihood: 0.5)
m.errordef = 1.0
# run the fit
chi2.runningFit = True
chi2.iters = 0
m.migrad()
# do some error analysis
have_symerr, have_err = False, False
try:
chi2.runningFit = False
m.hesse()
have_symerr = True
m.minos()
have_err = True
except Exception as e:
print(e)
if str(e).startswith('Discovered a new minimum'):
# the initial fit really failed
raise
# print the results
retchi2 = m.fval
dof = len(yvals) - len(params)
redchi2 = retchi2 / dof
if have_err:
if isiminuit1:
results = [" %s = %g \u00b1 %g (+%g / %g)" % (
n, m.values[n], m.errors[n], m.merrors[(n, 1.0)],
m.merrors[(n, -1.0)]) for n in names]
else:
results = [" %s = %g \u00b1 %g (+%g / %g)" % (
n, m.values[n], m.errors[n], m.merrors[n].upper,
m.merrors[n].lower) for n in names]
print(_('Fit results:\n') + "\n".join(results))
elif have_symerr:
print(_('Fit results:\n') + "\n".join([
" %s = %g \u00b1 %g" % (n, m.values[n], m.errors[n])
for n in names]))
print(_('MINOS error estimate not available.'))
else:
print(_('Fit results:\n') + "\n".join([
' %s = %g' % (n, m.values[n]) for n in names]))
print(_('No error analysis available: fit quality uncertain'))
print("chi^2 = %g, dof = %i, reduced-chi^2 = %g" % (retchi2, dof, redchi2))
vals = {name:m.values[name] for name in names}
return vals, retchi2, dof
class Fit(FunctionPlotter):
"""A plotter to fit a function to data."""
typename='fit'
allowusercreation=True
description=_('Fit a function to data')
def __init__(self, parent, name=None):
FunctionPlotter.__init__(self, parent, name=name)
self.addAction( widget.Action(
'fit', self.actionFit,
descr=_('Fit function'),
usertext=_('Fit function')) )
@classmethod
def addSettings(klass, s):
"""Construct list of settings."""
FunctionPlotter.addSettings(s)
s.add( setting.FloatDict(
'values',
{'a': 0.0, 'b': 1.0},
descr=_('Variables and fit values'),
usertext=_('Parameters')), 1 )
s.add( setting.DatasetExtended(
'xData', 'x',
descr=_('X data to fit (dataset name, list of values or expression)'),
usertext=_('X data')), 2 )
s.add( setting.DatasetExtended(
'yData', 'y',
descr=_('Y data to fit (dataset name, list of values or expression)'),
usertext=_('Y data')), 3 )
s.add( setting.Choice(
'defErrType', ['absolute', 'relative'], 'absolute',
descr=_('Default error type'),
usertext=_('Def. error type')) )
s.add( setting.Float(
'defErr', 0.05,
descr = 'Default absolute/relative error value for data',
usertext=_('Default error')))
s.add(setting.FloatOrAuto(
'fitMin', 'Auto',
descr=_('Minimum value at which to fit function'),
usertext=_('Min. fit range')))
s.add(setting.FloatOrAuto(
'fitMax', 'Auto',
descr=_('Maximum value at which to fit function'),
usertext=_('Max. fit range')))
s.add( setting.Bool(
'fitRange', False,
descr=_(
'Fit only the data between the minimum and maximum '
'of the axis for the function variable'),
usertext=_('Fit only range')), 4 )
s.add( setting.WidgetChoice(
'outLabel', '',
descr=_('Write best fit parameters to this text label after fitting'),
widgettypes=('label',),
usertext=_('Output label')), 5 )
s.add( setting.Str(
'outExpr', '',
descr=_('Output best fitting expression'),
usertext=_('Output expression')),
6, readonly=True )
s.add( setting.Float(
'chi2', -1,
descr='Output chi^2 from fitting',
usertext=_('Fit χ<sup>2</sup>')),
7, readonly=True )
s.add( setting.Int(
'dof', -1,
descr=_('Output degrees of freedom from fitting'),
usertext=_('Fit d.o.f.')),
8, readonly=True )
s.add( setting.Float(
'redchi2', -1,
descr=_('Output reduced-chi-squared from fitting'),
usertext=_('Fit reduced χ<sup>2</sup>')),
9, readonly=True )
f = s.get('function')
f.newDefault('a + b*x')
f.descr = _('Function to fit')
def affectsAxisRange(self):
"""This widget provides range information about these axes."""
s = self.settings
return ( (s.xAxis, 'sx'), (s.yAxis, 'sy') )
def getRange(self, axis, depname, axrange):
"""Update range with range of data."""
dataname = {'sx': 'xData', 'sy': 'yData'}[depname]
data = self.settings.get(dataname).getData(self.document)
if data:
drange = data.getRange()
if drange:
axrange[0] = min(axrange[0], drange[0])
axrange[1] = max(axrange[1], drange[1])
def initEnviron(self):
"""Copy data into environment."""
env = self.document.evaluate.context.copy()
env.update( self.settings.values )
return env
def updateOutputLabel(self, ops, vals, chi2, dof):
"""Use best fit parameters to update text label."""
s = self.settings
labelwidget = s.get('outLabel').findWidget()
if labelwidget is not None:
# build up a set of X=Y values
loc = self.document.locale
txt = []
for l, v in sorted(vals.items()):
val = utils.formatNumber(v, '%.4Vg', locale=loc)
txt.append( '%s = %s' % (l, val) )
# add chi2 output
txt.append( r'\chi^{2}_{\nu} = %s/%i = %s' % (
utils.formatNumber(chi2, '%.4Vg', locale=loc),
dof,
utils.formatNumber(chi2/dof, '%.4Vg', locale=loc) ))
# update label with text
text = r'\\'.join(txt)
ops.append( document.OperationSettingSet(
labelwidget.settings.get('label') , text ) )
def actionFit(self):
"""Fit the data."""
s = self.settings
# check and get compiled form of the function
compiled = self.document.evaluate.compileCheckedExpression(s.function)
if compiled is None:
return
# populate the input parameters
paramnames = sorted(s.values)
params = N.array( [s.values[p] for p in paramnames] )
# FIXME: loads of error handling!!
d = self.document
# choose dataset depending on fit variable
if s.variable == 'x':
xvals = s.get('xData').getData(d).data
ydata = s.get('yData').getData(d)
else:
xvals = s.get('yData').getData(d).data
ydata = s.get('xData').getData(d)
yvals = ydata.data
yserr = ydata.serr
# if there are no errors on data
if yserr is None:
if ydata.perr is not None and ydata.nerr is not None:
print("Warning: Symmeterising positive and negative errors")
yserr = N.sqrt( 0.5*(ydata.perr**2 + ydata.nerr**2) )
else:
err = s.defErr
if s.defErrType == 'absolute':
print(f"Warning: No errors on values. Assuming absolute {err} errors.")
yserr = err + yvals*0
else: # relative
print(f"Warning: No errors on values. Assuming fractional {err} errors.")
yserr = yvals*err
yserr[yserr < 1e-8] = 1e-8
# if the fitRange parameter is on, we chop out data outside the
# range of the axis
if s.fitRange:
# get ranges for axes
if s.variable == 'x':
drange = self.parent.getAxes((s.xAxis,))[0].getPlottedRange()
mask = N.logical_and(xvals >= drange[0], xvals <= drange[1])
else:
drange = self.parent.getAxes((s.yAxis,))[0].getPlottedRange()
mask = N.logical_and(yvals >= drange[0], yvals <= drange[1])
xvals, yvals, yserr = xvals[mask], yvals[mask], yserr[mask]
print("Fitting %s from %g to %g" % (
s.variable, drange[0], drange[1]))
evalenv = self.initEnviron()
def evalfunc(params, xvals):
# update environment with variable and parameters
evalenv[self.settings.variable] = xvals
evalenv.update( zip(paramnames, params) )
try:
return eval(compiled, evalenv) + xvals*0.
except Exception as e:
self.document.log(str(e))
return N.nan
# minimum set for fitting
if s.fitMin != 'Auto':
if s.variable == 'x':
mask = xvals >= s.fitMin
else:
mask = yvals >= s.fitMin
xvals, yvals, yserr = xvals[mask], yvals[mask], yserr[mask]
# maximum set for fitting
if s.fitMax != 'Auto':
if s.variable == 'x':
mask = xvals <= s.fitMax
else:
mask = yvals <= s.fitMax
xvals, yvals, yserr = xvals[mask], yvals[mask], yserr[mask]
if s.fitMin != 'Auto' or s.fitMax != 'Auto':
print(
"Fitting %s between %s and %s"
% (s.variable, s.fitMin, s.fitMax))
# various error checks
if len(xvals) != len(yvals) or len(xvals) != len(yserr):
sys.stderr.write(_('Fit data not equal in length. Not fitting.\n'))
return
if len(params) > len(xvals):
sys.stderr.write(_('No degrees of freedom for fit. Not fitting\n'))
return
# actually do the fit, either via Minuit or our own LM fitter
chi2 = 1
dof = 1
# only consider finite values
finite = N.isfinite(xvals) & N.isfinite(yvals) & N.isfinite(yserr)
xvals = xvals[finite]
yvals = yvals[finite]
yserr = yserr[finite]
# check length after excluding non-finite values
if len(xvals) == 0:
sys.stderr.write(_('No data values. Not fitting.\n'))
return
if minuit is not None:
vals, chi2, dof = minuitFit(
evalfunc, params, paramnames, s.values,
xvals, yvals, yserr)
else:
print(_('Minuit not available, falling back to simple L-M fitting:'))
retn, chi2, dof = utils.fitLM(
evalfunc, params, xvals, yvals, yserr)
vals = {}
for i, v in zip(paramnames, retn):
vals[i] = float(v)
# list of operations so we can undo the changes
operations = []
# populate the return parameters
operations.append( document.OperationSettingSet(s.get('values'), vals) )
# populate the read-only fit quality params
operations.append( document.OperationSettingSet(s.get('chi2'), float(chi2)) )
operations.append( document.OperationSettingSet(s.get('dof'), int(dof)) )
if dof <= 0:
print(_('No degrees of freedom in fit.\n'))
redchi2 = -1.
else:
redchi2 = float(chi2/dof)
operations.append( document.OperationSettingSet(s.get('redchi2'), redchi2) )
# expression for fit
expr = self.generateOutputExpr(vals)
operations.append( document.OperationSettingSet(s.get('outExpr'), expr) )
self.updateOutputLabel(operations, vals, chi2, dof)
# actually change all the settings
d.applyOperation(
document.OperationMultiple(operations, descr=_('fit')) )
def generateOutputExpr(self, vals):
"""Try to generate text form of output expression.
vals is a dict of variable: value pairs
returns the expression
"""
paramvals = dict(vals)
s = self.settings
# also substitute in data name for variable
if s.variable == 'x':
paramvals['x'] = s.xData
else:
paramvals['y'] = s.yData
# split expression up into parts of text and nums, separated
# by non-text/nums
parts = re.split('([^A-Za-z0-9.])', s.function)
# replace part by things in paramvals, if they exist
for i, p in enumerate(parts):
if p in paramvals:
parts[i] = str(paramvals[p])
return ''.join(parts)
# allow the factory to instantiate an x,y plotter
document.thefactory.register(Fit)
|
veuszREPO_NAMEveuszPATH_START.@veusz_extracted@veusz-master@veusz@widgets@fit.py@.PATH_END.py
|
{
"filename": "traverse_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/tools/common/traverse_test.py",
"type": "Python"
}
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python module traversal."""
from tensorflow.python.platform import googletest
from tensorflow.tools.common import test_module1
from tensorflow.tools.common import test_module2
from tensorflow.tools.common import traverse
class TestVisitor(object):
def __init__(self):
self.call_log = []
def __call__(self, path, parent, children):
self.call_log += [(path, parent, children)]
class TraverseTest(googletest.TestCase):
def test_cycle(self):
class Cyclist(object):
pass
Cyclist.cycle = Cyclist
visitor = TestVisitor()
traverse.traverse(Cyclist, visitor)
# We simply want to make sure we terminate.
def test_module(self):
visitor = TestVisitor()
traverse.traverse(test_module1, visitor)
called = [parent for _, parent, _ in visitor.call_log]
self.assertIn(test_module1.ModuleClass1, called)
self.assertIn(test_module2.ModuleClass2, called)
def test_class(self):
visitor = TestVisitor()
traverse.traverse(TestVisitor, visitor)
self.assertEqual(TestVisitor,
visitor.call_log[0][1])
# There are a bunch of other members, but make sure that the ones we know
# about are there.
self.assertIn('__init__', [name for name, _ in visitor.call_log[0][2]])
self.assertIn('__call__', [name for name, _ in visitor.call_log[0][2]])
# There are more classes descended into, at least __class__ and
# __class__.__base__, neither of which are interesting to us, and which may
# change as part of Python version etc., so we don't test for them.
def test_non_class(self):
integer = 5
visitor = TestVisitor()
traverse.traverse(integer, visitor)
self.assertEqual([], visitor.call_log)
if __name__ == '__main__':
googletest.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@tools@common@traverse_test.py@.PATH_END.py
|
{
"filename": "_binned_statistic.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/stats/_binned_statistic.py",
"type": "Python"
}
|
import builtins
import numpy as np
from numpy.testing import suppress_warnings
from operator import index
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'std' : compute the standard deviation within each bin. This
is implicitly calculated with ddof=0.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``. If the bin edges are specified, the number of bins will
be, (nx = len(bins)-1).
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber: 1-D ndarray of ints
Indices of the bins (corresponding to `bin_edges`) in which each value
of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
BinnedStatisticResult(statistic=array([4. , 4.5]),
bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
BinnedStatisticResult(statistic=array([[4. , 4.5],
[8. , 9. ]]), bin_edges=array([1., 4., 7.]),
binnumber=array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
BinnedStatisticResult(statistic=array([1., 2., 4.]),
bin_edges=array([1., 2., 3., 4.]),
binnumber=array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> rng = np.random.default_rng()
>>> windspeed = 8 * rng.random(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * rng.random(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, density=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'std' : compute the standard deviation within each bin. This
is implicitly calculated with ddof=0.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
If the bin edges are specified, the number of bins will be,
(nx = len(x_edge)-1, ny = len(y_edge)-1).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
Note that the returned linearized bin indices are used for an array with
extra bins on the outer binedges to capture values outside of the defined
bin bounds.
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny])
>>> ret.statistic
array([[2., 1.],
[1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
Which shows that the first three elements belong in the xbin 1, and the
fourth into xbin 2; and so on for y.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def _bincount(x, weights):
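# np.bincount does not accept complex weights, so the real and imaginary parts
# are accumulated separately and recombined. Illustration (not part of the
# original source): x=[0, 0, 1], weights=[1+2j, 3+4j, 5+6j] gives
# z=[4.+6.j, 5.+6.j].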
if np.iscomplexobj(weights):
a = np.bincount(x, np.real(weights))
b = np.bincount(x, np.imag(weights))
z = a + b*1j
else:
z = np.bincount(x, weights)
return z
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False,
binned_statistic_result=None):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of N arrays of length D, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `sample`, or a list of sequences - each with the
same shape as `sample`. If `values` is such a list, the statistic
will be computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'std' : compute the standard deviation within each bin. This
is implicitly calculated with ddof=0. If the number of values
within a given bin is 0 or 1, the computed standard deviation value
will be 0 for the bin.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or positive int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
binned_statistic_result : binnedStatisticddResult
Result of a previous call to the function in order to reuse bin edges
and bin numbers with new values and/or a different statistic.
To reuse bin numbers, `expand_binnumbers` must have been set to False
(the default)
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each two-dimensional bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
Examples
--------
>>> import numpy as np
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.mplot3d import Axes3D
Take an array of 600 (x, y) coordinates as an example.
`binned_statistic_dd` can handle arrays of higher dimension `D`. But a plot
of dimension `D+1` is required.
>>> mu = np.array([0., 1.])
>>> sigma = np.array([[1., -0.5],[-0.5, 1.5]])
>>> multinormal = stats.multivariate_normal(mu, sigma)
>>> data = multinormal.rvs(size=600, random_state=235412)
>>> data.shape
(600, 2)
Create bins and count how many arrays fall in each bin:
>>> N = 60
>>> x = np.linspace(-3, 3, N)
>>> y = np.linspace(-3, 4, N)
>>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y],
... statistic='count')
>>> bincounts = ret.statistic
Set the volume and the location of bars:
>>> dx = x[1] - x[0]
>>> dy = y[1] - y[0]
>>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2)
>>> z = 0
>>> bincounts = bincounts.ravel()
>>> x = x.ravel()
>>> y = y.ravel()
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> with np.errstate(divide='ignore'): # silence random axes3d warning
... ax.bar3d(x, y, z, dx, dy, bincounts)
Reuse bin numbers and bin edges with new values:
>>> ret2 = stats.binned_statistic_dd(data, -np.arange(600),
... binned_statistic_result=ret,
... statistic='mean')
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
if not callable(statistic) and statistic not in known_stats:
raise ValueError(f'invalid statistic {statistic!r}')
try:
bins = index(bins)
except TypeError:
# bins is not an integer
pass
# If bins was an integer-like object, now it is an actual Python int.
# NOTE: for _bin_edges(), see e.g. gh-11365
if isinstance(bins, int) and not np.isfinite(sample).all():
raise ValueError(f'{sample!r} contains non-finite values.')
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
if statistic != 'count' and Vlen != Dlen:
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
if binned_statistic_result is None:
nbin, edges, dedges = _bin_edges(sample, bins, range)
binnumbers = _bin_numbers(sample, nbin, edges, dedges)
else:
edges = binned_statistic_result.bin_edges
nbin = np.array([len(edges[i]) + 1 for i in builtins.range(Ndim)])
# +1 for outlier bins
dedges = [np.diff(edges[i]) for i in builtins.range(Ndim)]
binnumbers = binned_statistic_result.binnumber
# Avoid overflow with double precision. Complex `values` -> `complex128`.
result_type = np.result_type(values, np.float64)
result = np.empty([Vdim, nbin.prod()], dtype=result_type)
if statistic in {'mean', np.mean}:
result.fill(np.nan)
flatcount = _bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in builtins.range(Vdim):
flatsum = _bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic in {'std', np.std}:
result.fill(np.nan)
flatcount = _bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in builtins.range(Vdim):
flatsum = _bincount(binnumbers, values[vv])
delta = values[vv] - flatsum[binnumbers] / flatcount[binnumbers]
std = np.sqrt(
_bincount(binnumbers, delta*np.conj(delta))[a] / flatcount[a]
)
result[vv, a] = std
result = np.real(result)
elif statistic == 'count':
result = np.empty([Vdim, nbin.prod()], dtype=np.float64)
result.fill(0)
flatcount = _bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic in {'sum', np.sum}:
result.fill(0)
for vv in builtins.range(Vdim):
flatsum = _bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic in {'median', np.median}:
result.fill(np.nan)
for vv in builtins.range(Vdim):
i = np.lexsort((values[vv], binnumbers))
_, j, counts = np.unique(binnumbers[i],
return_index=True, return_counts=True)
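# The lexsort above orders samples by bin number (and by value within a bin);
# `j` is the first index of each bin in that ordering and `counts` its size,
# so `mid` is the (possibly fractional) position of the median. Averaging the
# floor/ceil elements handles bins with an even count.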
mid = j + (counts - 1) / 2
mid_a = values[vv, i][np.floor(mid).astype(int)]
mid_b = values[vv, i][np.ceil(mid).astype(int)]
medians = (mid_a + mid_b) / 2
result[vv, binnumbers[i][j]] = medians
elif statistic in {'min', np.min}:
result.fill(np.nan)
for vv in builtins.range(Vdim):
i = np.argsort(values[vv])[::-1] # Reversed so the min is last
result[vv, binnumbers[i]] = values[vv, i]
elif statistic in {'max', np.max}:
result.fill(np.nan)
for vv in builtins.range(Vdim):
i = np.argsort(values[vv])
result[vv, binnumbers[i]] = values[vv, i]
elif callable(statistic):
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
sup.filter(RuntimeWarning)
try:
null = statistic([])
except Exception:
null = np.nan
if np.iscomplexobj(null):
result = result.astype(np.complex128)
result.fill(null)
try:
_calc_binned_statistic(
Vdim, binnumbers, result, values, statistic
)
except ValueError:
result = result.astype(np.complex128)
_calc_binned_statistic(
Vdim, binnumbers, result, values, statistic
)
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, nbin))
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = tuple([slice(None)] + Ndim * [slice(1, -1)])
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
if expand_binnumbers and Ndim > 1:
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
# Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
def _calc_binned_statistic(Vdim, bin_numbers, result, values, stat_func):
unique_bin_numbers = np.unique(bin_numbers)
for vv in builtins.range(Vdim):
bin_map = _create_binned_data(bin_numbers, unique_bin_numbers,
values, vv)
for i in unique_bin_numbers:
stat = stat_func(np.array(bin_map[i]))
if np.iscomplexobj(stat) and not np.iscomplexobj(result):
raise ValueError("The statistic function returns complex ")
result[vv, i] = stat
def _create_binned_data(bin_numbers, unique_bin_numbers, values, vv):
""" Create hashmap of bin ids to values in bins
key: bin number
value: list of binned data
"""
bin_map = dict()
for i in unique_bin_numbers:
bin_map[i] = []
for i in builtins.range(len(bin_numbers)):
bin_map[bin_numbers[i]].append(values[vv, i])
return bin_map
def _bin_edges(sample, bins=None, range=None):
""" Create edge arrays
"""
Dlen, Ndim = sample.shape
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
if len(range) != Ndim:
raise ValueError(
f"range given for {len(range)} dimensions; {Ndim} required")
smin = np.empty(Ndim)
smax = np.empty(Ndim)
for i in builtins.range(Ndim):
if range[i][1] < range[i][0]:
raise ValueError(
"In {}range, start must be <= stop".format(
f"dimension {i + 1} of " if Ndim > 1 else ""))
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in builtins.range(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Preserve sample floating point precision in bin edges
edges_dtype = (sample.dtype if np.issubdtype(sample.dtype, np.floating)
else float)
# Create edge arrays
for i in builtins.range(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1,
dtype=edges_dtype)
else:
edges[i] = np.asarray(bins[i], edges_dtype)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
return nbin, edges, dedges
def _bin_numbers(sample, nbin, edges, dedges):
"""Compute the bin number each sample falls into, in each dimension
"""
Dlen, Ndim = sample.shape
sampBin = [
np.digitize(sample[:, i], edges[i])
for i in range(Ndim)
]
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
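# Illustration (not from the original source): with edges[i]=[1., 2., 3., 4.]
# a sample equal to 4.0 is first digitized into the outlier bin 4 and then
# shifted back into bin 3, the last real bin.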
for i in range(Ndim):
# Find the rounding precision
dedges_min = dedges[i].min()
if dedges_min == 0:
raise ValueError('The smallest edge difference is numerically 0.')
decimal = int(-np.log10(dedges_min)) + 6
# Find which points are on the rightmost edge.
on_edge = np.where((sample[:, i] >= edges[i][-1]) &
(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal)))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
binnumbers = np.ravel_multi_index(sampBin, nbin)
return binnumbers
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@stats@_binned_statistic.py@.PATH_END.py
|
{
"filename": "iris_get_response.py",
"repo_name": "OfAaron3/irispreppy",
"repo_path": "irispreppy_extracted/irispreppy-main/irispreppy/radcal/iris_get_response.py",
"type": "Python"
}
|
import pickle
import urllib.error
import urllib.request
from datetime import datetime as dt
from glob import glob as ls
from os import path, remove
import numpy as np
from astropy.time import Time
from bs4 import BeautifulSoup
from scipy.interpolate import interp1d
from scipy.io import readsav
def iris_get_response(date=dt.strftime(dt.now(), '%Y-%m-%dT%H:%M:%S.%fZ'), version=0, response_file=None, pre_launch=False, full=False, angstrom=False, quiet=False):
'''Intended to use in place of iris_get_response.pro
Input Parameters:
date: Time or time list. Default is now.
version: Which version of the response file you want. Default is newest. Version numbers are 1 indexed, so the default 0 becomes -1.
response_file: Name of the response file you want to use. Must exactly match, including extension.
pre_launch: Not sure why this is in the original, but it is analogous to version=2. Default is False.
full: Full effective area structure is returned with cryptic coefficients. Default is False.
angstrom: If True, lambda is returned in angstroms. If False, lambda is returned in nm. Default is False.
quiet: If True, suppresses the messages about contacting hesperia. Default is False.
Notes:
1. version, response_file, and prelaunch all perform the same function, here is their precedence,
pre_launch>version>response_file
2. Code automatically checks https://hesperia.gsfc.nasa.gov/ssw/iris/response/ for new response files. If this url
changes in the future, do a search and replace. The files are assumed to be geny IDL structs.
3. All original comments will be preceeded by ;, as is convention in IDL
4. Translated from iris_get_response.pro. Originally by J.P.Weulser, S.L. Freeland, and G.Chintzoglou
History:
2021-12-14 - A.W.Peat - Translated from IDL and added QOL improvements
'''
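#A minimal, hypothetical usage sketch (assumes a cached response pickle is
#available and a version>=2 response, for which a list of dicts is returned;
#not part of the original module):
#    resp = iris_get_response(date='2017-05-01T00:00:00.00Z', quiet=True)
#    nuv_sg_area = resp[0]['AREA_SG'][1]  #NUV SG effective area on the resp[0]['LAMBDA'] grid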
toppath=path.dirname(path.realpath(__file__))
resppath=path.join(toppath, "responses")
resps=ls(path.join(resppath, "*.pkl"))
resps.sort()
#Checks for new responses every time it is run
try:
with urllib.request.urlopen("https://hesperia.gsfc.nasa.gov/ssw/iris/response/") as respurl:
htmlsoup=BeautifulSoup(respurl, 'html.parser')
for tags in htmlsoup.find_all('a'):
href=tags.get('href')
if "sra" in href and path.join(resppath, href[:-4]+'pkl') not in resps:
if not quiet:
print("New response file found, "+href+'.\nDownloading...')
urllib.request.urlretrieve("https://hesperia.gsfc.nasa.gov/ssw/iris/response/"+href, "temp.geny")
newgeny=readsav('temp.geny')
remove('temp.geny')
recgeny=newgeny[list(newgeny.keys())[0]][0]
with open(toppath+"/responses/"+href[:-4]+'pkl', "wb") as pklout:
pickle.dump(recgeny, pklout)
resps=ls(toppath+"/responses/*.*") #Needs to reload responses if a new one is found
resps.sort()
except urllib.error.URLError:
if not quiet:
print("You are not connected to the internet. Cannot check for new response files.")
except:
if not quiet:
print("Hesperia is reachable but not loading. Cannot check for new response files.")
#0a Opening correct file
if pre_launch:
response=resps[1] #0 indexing
elif version!=0:
if version<=0:
if not quiet:
print("No such version of response file. Defaulting to most recent version.")
response=resps[-1]
elif version<=len(resps):
response=resps[version-1]
else:
if not quiet:
print("Requested version of response file not found. Defaulting to most recent version.")
response=resps[-1]
elif response_file!=None:
if toppath+"/responses/"+response_file not in resps:
if not quiet:
print(response_file+" not found. Using most recent file.")
response=resps[-1]
else:
response="./responses/"+response_file
else:
response=resps[version-1]
with open(response, "rb") as pklin:
r=pickle.load(pklin) #Loading in the response file and calling it r
#0b Handling keywords
if int(r['version'])<2:
if angstrom:
r['lambda']=r['lambda']*10
return(r)
ntt=1 #ntt is always 1 since this will have a wrapper around it
#1. Output structure
if full:
o1=r #Output 1. Is temporary
else:
keys=['LAMBDA', 'AREA_SG', 'NAME_SG', 'DN2PHOT_SG', 'AREA_SJI', 'NAME_SJI', 'DN2PHOT_SJI', 'COMMENT', 'VERSION', 'VERSION_DATE']
o1={k:r[k] for k in keys}
o1['DATE-OBS']=date
o1['AREA_SG']=np.zeros_like(o1['AREA_SG'])
o1['AREA_SJI']=np.zeros_like(o1['AREA_SJI'])
o=[o1 for i in range(0, ntt)] #Output
del o1
#2. FUV SG Effective Areas
# ; Rough SG spectral ranges. Setting eff.area to 0 outside of these
lamran=[[133.1,135.9],[138.8,140.8]]
#Not entirley sure how the coeff array is organised, but the comment on the IDL version says,
# "; time dependent response for sz[3]=3 wavelengths". The index in Python is [0] though
sz=r['coeffs_fuv'].shape
rr=np.zeros((ntt, sz[0]))
for j in range(0, sz[0]):
rr[:,j]=fit_iris_xput_lite(date, r['c_f_time'], r['coeffs_fuv'][j])
#; interpolate onto lambda grid, separately for each of the two FUV CCDs
for j in range(0, 2):
w=np.where((r['lambda']>=lamran[j][0]) & (r['lambda']<=lamran[j][1]))
for k in range(0, ntt):
interp=interp1d(r['c_f_lambda'][j:j+2], rr[k, j:j+2], fill_value='extrapolate')
#If you feel uneasy about this extrapolation, this is how iris_get_response.pro works implicitly
o[k]['AREA_SG'][0,w]=interp(r['lambda'][w])
#3. NUV SG Effective Areas
# ; Rough SG spectral ranges. Setting eff.area to 0 outside of these
lamran=[278.2,283.5]
#Not entirely sure how the coeff array is organised, but the comment on the IDL version says,
# "; time dependent response for sz[3]=3 wavelengths". The index in Python is [0] though
sz=r['coeffs_nuv'].shape
rr=np.zeros((ntt, sz[0]))
for j in range(0, sz[0]):
rr[:,j]=fit_iris_xput_lite(date, r['c_n_time'], r['coeffs_nuv'][j])
#; interpolate onto lambda grid
w=np.where((r['lambda']>=lamran[0]) & (r['lambda']<=lamran[1]))
if int(r['version'])<3:
for k in range(0, ntt):
interp=interp1d(r['c_n_lambda'], rr[k], fill_value='extrapolate')
o[k]['AREA_SG'][1,w]=interp(r['lambda'][w])
else: #I guess for version>=3, len(r['c_n_lambda'])>2
for k in range(0, ntt):
interp=interp1d(r['c_n_lambda'], rr[k], fill_value='extrapolate', kind='quadratic')
o[k]['AREA_SG'][1,w]=interp(r['lambda'][w])
#4. SJI Effective Areas
if int(r['version'])<3:
sz=r['coeffs_sji'].shape
for j in range(0, sz[0]):
# ; calculate pre-launch area from the individual elements
pl_a=r['geom_area']
for k in range(0, len(r['index_el_sji'][0])):
pl_a=(pl_a*np.array([[r['elements'][r['index_el_sji'][0]][i][1]] for i in range(0, r['elements'][r['index_el_sji'][0]].shape[0])])).T
pl_a=pl_a[:,0,:] #Because of the way I get this to work, I introduce an extra 1-length axis
rr=fit_iris_xput_lite(date, r['c_s_time'][j], r['coeffs_sji'][j])
for k in range(0, ntt):
o[k]['AREA_SJI'][j]=pl_a*rr[k]
else:
for nuv in range(0, 2):
# ; calculate baseline SJI area curves
asji=r['geom_area']
for k in range(0, len(r['index_el_sji'][nuv*2])):
arr=np.array([[r['elements'][r['index_el_sji'][2:4, 3]][i]] for i in range(0, r['elements'][r['index_el_sji'][2:4, 3]].shape[0])])
asji=asji*(np.array([arr[0][0][1], arr[1][0][1]])).T
del arr
# ; apply time dependent profile shape adjustment to FUV SJI
if not nuv:
# ; FUV: apply FUV SG "slant", then normalize so that a weighted (2.4:1)
# ; sum at C II and Si IV gives constant response
wei=[2.4,1] # ; typical solar ratio CII : SiIV
wav=r['c_f_lambda']
nwv=len(wav)
wav=[wav[0], (wav[nwv-2]*2+wav[nwv-1])/3] # ; 2 wvlngts in nm
# ; calculate baseline SG area for scaling purposes
asg=r['geom_area']
for k in range(0, len(r['index_el_sg'][nuv])):
asg=asg*r['elements'][r['index_el_sg']][nuv, k][1].T
# ; SG and SJI areas at wav
interp=interp1d(r['lambda'], asg, fill_value='extrapolate')
asg2=interp(wav)
asj2=np.zeros((2,2))
for j in range(0,2):
interp=interp1d(r['lambda'], asji[:,j], fill_value='extrapolate')
asj2[:,j]=interp(wav)
# ; calculate the normalized slant function scal, apply to asji
for k in range(0, ntt):
# ; ; best-estimate slant, i.e., eff.area @ wav / baseline SG @ wav
interp=interp1d(o[k]['LAMBDA'], o[k]['AREA_SG'][0], fill_value='extrapolate')
sca2=interp(wav)/asg2
# ; normalize slant so that total(wei*asj2*sca2)/total(wei*asj2)=1
for j in range(0, 2):
sca2n=sca2*np.sum(wei*asj2[:,j], axis=None)/np.sum(wei*asj2[:,j]*sca2)
interp=interp1d(wav, sca2n, fill_value='extrapolate')
scaln=interp(r['lambda'])
o[k]['AREA_SJI'][j]=asji[:,j]*scaln
else:
# ; NUV: essentially same calculation as r.version=3
for k in range(0, ntt):
o[k]['AREA_SJI'][2:4]=asji
for j in range(0, 4):
# ; SJI specific time dependency
rr=fit_iris_xput_lite(date, r['c_s_time'][j], r['coeffs_sji'][j])
for k in range(0, ntt):
o[k]['AREA_SJI'][j]=o[k]['AREA_SJI'][j]*rr[k]
if angstrom:
for k in range(0, ntt):
o[k]['LAMBDA']=o[k]['LAMBDA']*10
return(o)
def fit_iris_xput_lite(tt0, tcc0, ccc):
'''
Stripped down form of fit_iris_xput.pro, using only the things
get_iris_response.pro uses.
I am so sorry, but I have no idea what any of these keywords are.
The previous documentation is very cryptic. I will include ALL of their comments.
Notes:
1. All original comments will be preceded by ;, as is convention in IDL
2. Based on fit_iris_xput.pro by JPW.
History:
2021-12-14 - A.W.Peat - Translated from IDL
'''
tex=1.5 # ; exponent for transition between exp.decay intervals
if tcc0.shape[1]!=2:
raise RuntimeError("Incorrect number of elements in tcoef (tcco)")
m=tcc0.size//2
#This is crazy. Originally here they did
#m=size(tcc0); if m[1] ne 2; return, 0; endif; m=m[m[0]+2]/2.
#; times in years since tcc[0,0]
#The original is using NASA's epoch of 1979. I'm using 1970, as is standard.
nasaLag=Time('1979-01-01T00:00:00.00Z').unix
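#nasaLag is 283996800.0 s: nine years (including the 1972 and 1976 leap days)
#between the Unix epoch and 1979-01-01.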
yr2sec=365.25*24*60*60
tcc=(tcc0+nasaLag)/yr2sec
tt=Time(tt0).unix/yr2sec
ntt=1
#Always going to be 1 in this instance. Script can, in theory, take more than one input.
if ccc.size!=3*m:
raise RuntimeError("Incorrect number of elements in tcoef (tcco)")
# ; calculation of output fit
ee=ccc[:,2]
a=np.zeros((ntt, 2*m)) #; base vector
#no need to reform in python, it's 2D by default here
for j in range(0, m):
# ; base vector for interval of constant multiplier
if j>0:
ww=np.where((tt>=tcc[j, 0]) & (tt<=tcc[j, 1]))
nww=len(ww)
else:
ww=np.where(tt<=tcc[j,1])
nww=len(ww)
if nww>0:
a[ww, 2*j]=1
a[ww, 2*j+1]=np.exp(ee[j]*(tt-tcc[j,0]))
# ; base vector for interval when multiplier is linear function of time
# Sometimes dtt<0, so have to NaN it before the power to stop a warning
if j>0:
ww=np.where((tt>tcc[j-1,1]) & (tt < tcc[j,0]))
nww=len(ww)
if nww>0:
dtt=(tt-tcc[j-1,1])/(tcc[j,0]-tcc[j-1, 1])
if dtt<0:
dtt=np.nan
a[ww, 2*j]=dtt**tex
a[ww, 2*j+1]=dtt**tex*np.exp(ee[j]*(tt-tcc[j,0]))
if j < (m-1):
ww=np.where((tt>tcc[j,1]) & (tt<tcc[j+1, 0]))
nww=len(ww)
if nww>0:
dtt=(tt-tcc[j,1])/(tcc[j+1,0]-tcc[j,1])
if dtt<0:
dtt=np.nan
a[ww, 2*j]=1-dtt**tex
a[ww, 2*j+1]=(1-dtt**tex)*np.exp(ee[j]*(tt-tcc[j,0]))
cc=ccc[:,:2].flatten()
f=a@cc
return(f)
if __name__=="__main__":
#When script is called directly, it just looks for new response files#
toppath=path.dirname(path.realpath(__file__))
resppath=path.join(toppath, "responses")
resps=ls(path.join(resppath, "*.pkl"))
resps.sort()
new=False
try:
with urllib.request.urlopen("https://hesperia.gsfc.nasa.gov/ssw/iris/response/") as respurl:
htmlsoup=BeautifulSoup(respurl, 'html.parser')
for tags in htmlsoup.find_all('a'):
href=tags.get('href')
if "sra" in href and path.join(resppath, href[:-4]+'pkl') not in resps:
print("New response file found, "+href+'.\nDownloading...')
urllib.request.urlretrieve("https://hesperia.gsfc.nasa.gov/ssw/iris/response/"+href, "temp.geny")
newgeny=readsav('temp.geny')
remove('temp.geny')
recgeny=newgeny[list(newgeny.keys())[0]][0]
with open(toppath+"/responses/"+href[:-4]+'pkl', "wb") as pklout:
pickle.dump(recgeny, pklout)
resps=ls(toppath+"/responses/*.*") #Needs to reload responses if a new one is found
resps.sort()
except urllib.error.URLError:
print("You are not connected to the internet. Cannot check for new response files.")
new=True
except:
print("Hesperia is reachable but not loading. Cannot check for new response files.")
new=True
if not new:
print("No new response files found.")
|
OfAaron3REPO_NAMEirispreppyPATH_START.@irispreppy_extracted@irispreppy-main@irispreppy@radcal@iris_get_response.py@.PATH_END.py
|
{
"filename": "example_gam_0.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/sandbox/examples/example_gam_0.py",
"type": "Python"
}
|
'''first examples for gam and PolynomialSmoother used for debugging
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Note: uncomment plt.show() to display graphs
'''
import time
import numpy as np
import numpy.random as R
import matplotlib.pyplot as plt
import scipy.stats
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod import families
example = 2 #3 # 1,2 or 3
#np.random.seed(987654)
standardize = lambda x: (x - x.mean()) / x.std()
demean = lambda x: (x - x.mean())
nobs = 500
lb, ub = -1., 1. #for Poisson
#lb, ub = -0.75, 2 #0.75 #for Binomial
x1 = R.uniform(lb, ub, nobs) #R.standard_normal(nobs)
x1 = np.linspace(lb, ub, nobs)
x1.sort()
x2 = R.uniform(lb, ub, nobs) #
#x2 = R.standard_normal(nobs)
x2.sort()
#x2 = np.cos(x2)
x2 = x2 + np.exp(x2/2.)
#x2 = np.log(x2-x2.min()+0.1)
y = 0.5 * R.uniform(lb, ub, nobs) #R.standard_normal((nobs,))
f1 = lambda x1: (2*x1 - 0.5 * x1**2 - 0.75 * x1**3) # + 0.1 * np.exp(-x1/4.))
f2 = lambda x2: (x2 - 1* x2**2) # - 0.75 * np.exp(x2))
z = standardize(f1(x1)) + standardize(f2(x2))
z = standardize(z) + 1 # 0.1
#try this
z = f1(x1) + f2(x2)
#z = demean(z)
z -= np.median(z)
print('z.std()', z.std())
#z = standardize(z) + 0.2
# with standardize I get better values, but I do not know what the true params are
print(z.mean(), z.min(), z.max())
#y += z #noise
y = z
d = np.array([x1,x2]).T
if example == 1:
print("normal")
m = AdditiveModel(d)
m.fit(y)
x = np.linspace(-2,2,50)
print(m)
if example == 2:
print("binomial")
mod_name = 'Binomial'
f = families.Binomial()
#b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(y)])
b = np.asarray([scipy.stats.bernoulli.rvs(p) for p in f.link.inverse(z)])
b.shape = y.shape
m = GAM(b, d, family=f)
toc = time.time()
m.fit(b)
tic = time.time()
print(tic-toc)
#for plotting
yp = f.link.inverse(y)
p = b
if example == 3:
print("Poisson")
f = families.Poisson()
#y = y/y.max() * 3
yp = f.link.inverse(z)
p = np.asarray([scipy.stats.poisson.rvs(val) for val in f.link.inverse(z)],
float)
p.shape = y.shape
m = GAM(p, d, family=f)
toc = time.time()
m.fit(p)
tic = time.time()
print(tic-toc)
if example > 1:
y_pred = m.results.mu# + m.results.alpha#m.results.predict(d)
plt.figure()
plt.subplot(2,2,1)
plt.plot(p, '.')
plt.plot(yp, 'b-', label='true')
plt.plot(y_pred, 'r-', label='GAM')
plt.legend(loc='upper left')
plt.title('gam.GAM ' + mod_name)
counter = 2
for ii, xx in zip(['z', 'x1', 'x2'], [z, x1, x2]):
sortidx = np.argsort(xx)
#plt.figure()
plt.subplot(2, 2, counter)
plt.plot(xx[sortidx], p[sortidx], '.')
plt.plot(xx[sortidx], yp[sortidx], 'b.', label='true')
plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='GAM')
plt.legend(loc='upper left')
plt.title('gam.GAM ' + mod_name + ' ' + ii)
counter += 1
# counter = 2
# for ii, xx in zip(['z', 'x1', 'x2'], [z, x1, x2]):
# #plt.figure()
# plt.subplot(2, 2, counter)
# plt.plot(xx, p, '.')
# plt.plot(xx, yp, 'b-', label='true')
# plt.plot(xx, y_pred, 'r-', label='GAM')
# plt.legend(loc='upper left')
# plt.title('gam.GAM Poisson ' + ii)
# counter += 1
plt.figure()
plt.plot(z, 'b-', label='true' )
plt.plot(np.log(m.results.mu), 'r-', label='GAM')
plt.title('GAM Poisson, raw')
plt.figure()
plt.plot(x1, standardize(m.smoothers[0](x1)), 'r')
plt.plot(x1, standardize(f1(x1)), linewidth=2)
plt.figure()
plt.plot(x2, standardize(m.smoothers[1](x2)), 'r')
plt.plot(x2, standardize(f2(x2)), linewidth=2)
##y_pred = m.results.predict(d)
##plt.figure()
##plt.plot(z, p, '.')
##plt.plot(z, yp, 'b-', label='true')
##plt.plot(z, y_pred, 'r-', label='AdditiveModel')
##plt.legend()
##plt.title('gam.AdditiveModel')
#plt.show()
## pylab.figure(num=1)
## pylab.plot(x1, standardize(m.smoothers[0](x1)), 'b')
## pylab.plot(x1, standardize(f1(x1)), linewidth=2)
## pylab.figure(num=2)
## pylab.plot(x2, standardize(m.smoothers[1](x2)), 'b')
## pylab.plot(x2, standardize(f2(x2)), linewidth=2)
## pylab.show()
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@sandbox@examples@example_gam_0.py@.PATH_END.py
|
{
"filename": "estimator.py",
"repo_name": "smsharma/mining-for-substructure-lens",
"repo_path": "mining-for-substructure-lens_extracted/mining-for-substructure-lens-master/inference/estimator.py",
"type": "Python"
}
|
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import numpy as np
from collections import OrderedDict
import torch
from inference.models.vgg import VGGRatioEstimator
from inference.models.resnet import ResNetRatioEstimator
from inference.trainer import RatioTrainer
from inference.utils import create_missing_folders, load_and_check, get_optimizer
from inference.utils import get_loss, clean_log_r, clean_t
from inference.utils import restrict_samplesize
logger = logging.getLogger(__name__)
class ParameterizedRatioEstimator(object):
theta_mean = np.array([0.1, -2.0])
theta_std = np.array([0.1, 0.5])
def __init__(
self,
resolution=64,
n_parameters=2,
n_aux=0,
architecture="resnet",
log_input=False,
rescale_inputs=True,
rescale_theta=True,
zero_bias=False,
):
self.resolution = resolution
self.n_parameters = n_parameters
self.n_aux = n_aux
self.log_input = log_input
self.rescale_inputs = rescale_inputs
self.rescale_theta = rescale_theta
self.architecture = architecture
self.x_scaling_mean = None
self.x_scaling_std = None
self.aux_scaling_mean = None
self.aux_scaling_std = None
self._create_model(zero_bias)
def train(
self,
method,
x,
theta,
theta_alt,
aux=None,
log_r_xz=None,
log_r_xz_alt=None,
t_xz=None,
t_xz_alt=None,
alpha=1.0,
optimizer="adam",
n_epochs=50,
batch_size=256,
initial_lr=0.001,
final_lr=0.0001,
nesterov_momentum=None,
validation_split=0.25,
validation_split_seed=None,
early_stopping=True,
limit_samplesize=None,
verbose="some",
update_input_rescaling=True,
validation_loss_before=None,
):
logger.info("Starting training")
logger.info(" Method: %s", method)
if method in ["cascal", "rascal", "alices"]:
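# (For these estimators the training loss is assumed to combine a likelihood-ratio
# term with a score-based term whose relative weight is alpha; see get_loss.)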
logger.info(" alpha: %s", alpha)
logger.info(" Batch size: %s", batch_size)
logger.info(" Optimizer: %s", optimizer)
logger.info(" Epochs: %s", n_epochs)
logger.info(
" Learning rate: %s initially, decaying to %s",
initial_lr,
final_lr,
)
if optimizer == "sgd":
logger.info(" Nesterov momentum: %s", nesterov_momentum)
logger.info(" Validation split: %s", validation_split)
logger.info(" Early stopping: %s", early_stopping)
if limit_samplesize is None:
logger.info(" Samples: all")
else:
logger.info(" Samples: %s", limit_samplesize)
logger.info(" Update x rescaling: %s", update_input_rescaling)
# Load training data
logger.info("Loading training data")
theta = load_and_check(theta, memmap=False)
theta_alt = load_and_check(theta_alt, memmap=False)
x = load_and_check(x, memmap=True)
log_r_xz = load_and_check(log_r_xz, memmap=False)
log_r_xz_alt = load_and_check(log_r_xz_alt, memmap=False)
t_xz = load_and_check(t_xz, memmap=False)
t_xz_alt = load_and_check(t_xz_alt, memmap=False)
aux = load_and_check(aux, memmap=False)
self._check_required_data(method, log_r_xz, log_r_xz_alt, t_xz, t_xz_alt)
if update_input_rescaling:
self._initialize_input_transform(x, aux)
# Clean up input data
if log_r_xz is not None:
log_r_xz = log_r_xz.reshape((-1, 1))
log_r_xz_alt = log_r_xz_alt.reshape((-1, 1))
theta = theta.reshape((-1, 2))
theta_alt = theta_alt.reshape((-1, 2))
log_r_xz = clean_log_r(log_r_xz)
log_r_xz_alt = clean_log_r(log_r_xz_alt)
t_xz = clean_t(t_xz)
t_xz_alt = clean_t(t_xz_alt)
# Rescale aux, theta, and t_xz
aux = self._transform_aux(aux)
theta = self._transform_theta(theta)
theta_alt = self._transform_theta(theta_alt)
if t_xz is not None:
t_xz = self._transform_t_xz(t_xz)
t_xz_alt = self._transform_t_xz(t_xz_alt)
# Infer dimensions of problem
n_samples = x.shape[0]
n_parameters = theta.shape[1]
resolution_x = x.shape[1]
resolution_y = x.shape[2]
n_aux = 0 if aux is None else aux.shape[1]
logger.info(
"Found %s samples with %s parameters, image resolution %s x %s, and %s auxiliary parameters",
n_samples,
n_parameters,
resolution_x,
resolution_y,
n_aux,
)
if resolution_x != resolution_y:
raise RuntimeError(
"Currently only supports square images, but found resolution {} x {}".format(
resolution_x, resolution_y
)
)
resolution = resolution_x
if n_aux != self.n_aux:
raise RuntimeError(
"Number of auxiliary variables found in data ({}) does not match number of"
"auxiliary variables in model ({})".format(n_aux, self.n_aux)
)
if aux is not None and aux.shape[0] != n_samples:
raise RuntimeError(
"Number of samples in auxiliary variables does not match number of"
"samples ({})".format(aux.shape[0], n_samples)
)
# Limit sample size
if limit_samplesize is not None and limit_samplesize < n_samples:
logger.info(
"Only using %s of %s training samples", limit_samplesize, n_samples
)
x, theta, theta_alt, log_r_xz, log_r_xz_alt, t_xz, t_xz_alt, aux = restrict_samplesize(
limit_samplesize, x, theta, theta_alt, log_r_xz, log_r_xz_alt, t_xz, t_xz_alt, aux
)
# Check consistency of input with model
if n_parameters != self.n_parameters:
raise RuntimeError(
"Number of parameters does not match model: {} vs {}".format(
n_parameters, self.n_parameters
)
)
if resolution != self.resolution:
raise RuntimeError(
"Number of observables does not match model: {} vs {}".format(
resolution, self.resolution
)
)
# Data
data = self._package_training_data(method, x, theta, theta_alt, log_r_xz, log_r_xz_alt, t_xz, t_xz_alt, aux)
# Losses
loss_functions, loss_labels, loss_weights = get_loss(method, alpha)
# Optimizer
opt, opt_kwargs = get_optimizer(optimizer, nesterov_momentum)
# Train model
logger.info("Training model")
trainer = RatioTrainer(self.model, run_on_gpu=True)
result = trainer.train(
data=data,
loss_functions=loss_functions,
loss_weights=loss_weights,
loss_labels=loss_labels,
epochs=n_epochs,
batch_size=batch_size,
optimizer=opt,
optimizer_kwargs=opt_kwargs,
initial_lr=initial_lr,
final_lr=final_lr,
validation_split=validation_split,
validation_split_seed=validation_split_seed,
early_stopping=early_stopping,
verbose=verbose,
validation_loss_before=validation_loss_before,
)
return result
def log_likelihood_ratio(
self,
x,
theta,
aux=None,
test_all_combinations=True,
evaluate_score=False,
evaluate_grad_x=False,
batch_size=1000,
grad_x_theta_index=0,
):
if self.model is None:
raise ValueError("No model -- train or load model before evaluating it!")
# Load training data
logger.debug("Loading evaluation data")
x = load_and_check(x, memmap=True)
aux = load_and_check(aux)
theta = load_and_check(theta)
# Rescale theta and aux
aux = self._transform_aux(aux)
theta = self._transform_theta(theta)
# Evaluate
if test_all_combinations:
logger.debug("Starting ratio evaluation for all combinations")
all_log_r_hat = []
all_t_hat = []
all_grad_x = None
for i, this_theta in enumerate(theta):
logger.debug(
"Starting ratio evaluation for thetas %s / %s: %s",
i + 1,
len(theta),
this_theta,
)
_, log_r_hat, t_hat, x_grad = self._evaluate(
theta0s=[this_theta],
xs=x,
auxs=aux,
evaluate_score=evaluate_score,
evaluate_grad_x=evaluate_grad_x,
batch_size=batch_size,
)
all_log_r_hat.append(log_r_hat)
all_t_hat.append(t_hat)
if x_grad is not None and i == grad_x_theta_index:
all_grad_x = x_grad
all_log_r_hat = np.array(all_log_r_hat)
all_t_hat = np.array(all_t_hat)
else:
logger.debug("Starting ratio evaluation")
_, all_log_r_hat, all_t_hat, all_grad_x = self._evaluate(
theta0s=theta,
xs=x,
auxs=aux,
evaluate_score=evaluate_score,
evaluate_grad_x=evaluate_grad_x,
batch_size=batch_size,
)
logger.debug("Evaluation done")
return all_log_r_hat, all_t_hat, all_grad_x
def _evaluate(
self,
theta0s,
xs,
auxs=None,
evaluate_score=False,
evaluate_grad_x=False,
run_on_gpu=True,
double_precision=False,
batch_size=1000,
):
# Batches
n_xs = len(xs)
n_batches = (n_xs - 1) // batch_size + 1
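# equivalent to ceil(n_xs / batch_size), e.g. 2500 inputs with batch_size=1000 -> 3 batches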
# results
all_s, all_log_r, all_t, all_x_grad = [], [], [], []
for i_batch in range(n_batches):
x_batch = np.asarray(
np.copy(xs[i_batch * batch_size : (i_batch + 1) * batch_size])
)
if len(theta0s) == n_xs:
theta_batch = np.copy(
theta0s[i_batch * batch_size : (i_batch + 1) * batch_size]
)
else:
theta_batch = np.repeat(
np.copy(theta0s).reshape(1, -1), x_batch.shape[0], axis=0
)
if auxs is not None:
aux_batch = np.copy(
auxs[i_batch * batch_size : (i_batch + 1) * batch_size]
)
else:
aux_batch = None
s, log_r, t, x_grad = self._evaluate_batch(
theta_batch,
x_batch,
aux_batch,
evaluate_score,
evaluate_grad_x,
run_on_gpu,
double_precision,
)
all_s.append(s)
all_log_r.append(log_r)
if t is not None:
all_t.append(t)
if x_grad is not None:
all_x_grad.append(x_grad)
# mash together
all_s = np.concatenate(all_s, 0)
all_log_r = np.concatenate(all_log_r, 0)
if len(all_t) > 0:
all_t = np.concatenate(all_t, 0)
else:
all_t = None
if len(all_x_grad) > 0:
all_x_grad = np.concatenate(all_x_grad, 0)
else:
all_x_grad = None
return all_s, all_log_r, all_t, all_x_grad
def _evaluate_batch(
self,
theta0s,
xs,
auxs,
evaluate_score,
evaluate_grad_x,
run_on_gpu,
double_precision,
):
# CPU or GPU?
run_on_gpu = run_on_gpu and torch.cuda.is_available()
device = torch.device("cuda" if run_on_gpu else "cpu")
dtype = torch.double if double_precision else torch.float
# Prepare data
self.model = self.model.to(device, dtype)
theta0s = torch.from_numpy(theta0s).to(device, dtype)
xs = torch.from_numpy(xs).to(device, dtype)
if auxs is not None:
auxs = torch.from_numpy(auxs).to(device, dtype)
# Evaluate ratio estimator with score or x gradients:
if evaluate_score or evaluate_grad_x:
self.model.eval()
if evaluate_score:
theta0s.requires_grad = True
if evaluate_grad_x:
xs.requires_grad = True
s, log_r, t, x_grad = self.model(
theta0s,
xs,
aux=auxs,
track_score=evaluate_score,
return_grad_x=evaluate_grad_x,
create_gradient_graph=False,
)
# Copy back tensors to CPU
if run_on_gpu:
s = s.cpu()
log_r = log_r.cpu()
if t is not None:
t = t.cpu()
if x_grad is not None:
x_grad = x_grad.cpu()
# Get data and return
s = s.detach().numpy().flatten()
log_r = log_r.detach().numpy().flatten()
if t is not None:
t = t.detach().numpy()
if x_grad is not None:
x_grad = x_grad.detach().numpy()
# Evaluate ratio estimator without score:
else:
with torch.no_grad():
self.model.eval()
s, log_r, _, _ = self.model(
theta0s,
xs,
aux=auxs,
track_score=False,
return_grad_x=False,
create_gradient_graph=False,
)
# Copy back tensors to CPU
if run_on_gpu:
s = s.cpu()
log_r = log_r.cpu()
# Get data and return
s = s.detach().numpy().flatten()
log_r = log_r.detach().numpy().flatten()
t = None
x_grad = None
return s, log_r, t, x_grad
def save(self, filename, save_model=False):
if self.model is None:
raise ValueError("No model -- train or load model before saving!")
# Check paths
create_missing_folders([os.path.dirname(filename)])
# Save settings
logger.debug("Saving settings to %s_settings.json", filename)
settings = self._wrap_settings()
with open(filename + "_settings.json", "w") as f:
json.dump(settings, f)
# Save state dict
logger.debug("Saving state dictionary to %s_state_dict.pt", filename)
torch.save(self.model.state_dict(), filename + "_state_dict.pt")
# Save model
if save_model:
logger.debug("Saving model to %s_model.pt", filename)
torch.save(self.model, filename + "_model.pt")
def load(self, filename):
# Load settings and create model
logger.debug("Loading settings from %s_settings.json", filename)
with open(filename + "_settings.json", "r") as f:
settings = json.load(f)
self._unwrap_settings(settings)
self._create_model()
# Load state dict
logger.debug("Loading state dictionary from %s_state_dict.pt", filename)
self.model.load_state_dict(
torch.load(filename + "_state_dict.pt", map_location="cpu")
)
def _create_model(self, zero_bias=False):
logger.info("Creating model")
logger.info(" Architecture: %s", self.architecture)
logger.info(" Log input: %s", self.log_input)
logger.info(
" Rescale input: %s",
self.x_scaling_std is not None and self.x_scaling_mean is not None,
)
logger.info(
" Weight initialization: %s", "zero bias" if zero_bias else "default"
)
if self.architecture in ["resnet", "resnet18"]:
self.model = ResNetRatioEstimator(
n_parameters=self.n_parameters,
n_aux=self.n_aux,
n_hidden=512,
log_input=self.log_input,
input_mean=self.x_scaling_mean,
input_std=self.x_scaling_std,
zero_bias=zero_bias,
)
elif self.architecture == "resnet50":
self.model = ResNetRatioEstimator(
n_parameters=self.n_parameters,
n_aux=self.n_aux,
cfg=50,
n_hidden=1024,
log_input=self.log_input,
input_mean=self.x_scaling_mean,
input_std=self.x_scaling_std,
zero_bias=zero_bias,
)
elif self.architecture == "vgg":
self.model = VGGRatioEstimator(
n_parameters=self.n_parameters,
log_input=self.log_input,
input_mean=self.x_scaling_mean,
input_std=self.x_scaling_std,
)
else:
raise RuntimeError("Unknown architecture {}".format(self.architecture))
logger.info("Model has %s trainable parameters", self._count_model_parameters())
def _count_model_parameters(self):
return sum(p.numel() for p in self.model.parameters() if p.requires_grad)
def _initialize_input_transform(self, x, aux=None, n_eval=1000):
if self.rescale_inputs and self.log_input:
self.x_scaling_mean = np.mean(np.log(1. + x[:n_eval]))
self.x_scaling_std = np.maximum(np.std(np.log(1. + x[:n_eval])), 1.0e-6)
elif self.rescale_inputs and (not self.log_input):
self.x_scaling_mean = np.mean(x)
self.x_scaling_std = np.maximum(np.std(x), 1.0e-6)
else:
self.x_scaling_mean = None
self.x_scaling_std = None
if self.rescale_inputs and aux is not None:
self.aux_scaling_mean = np.mean(aux, axis=0)
self.aux_scaling_std = np.maximum(np.std(aux, axis=0), 1.0e-6)
else:
self.aux_scaling_mean = None
self.aux_scaling_std = None
self.model.input_mean = self.x_scaling_mean
self.model.input_std = self.x_scaling_std
def _transform_aux(self, aux):
if (
aux is not None
and self.aux_scaling_mean is not None
and self.aux_scaling_std is not None
):
aux = aux - self.aux_scaling_mean[np.newaxis, :]
aux = aux / self.aux_scaling_std[np.newaxis, :]
return aux
def _transform_theta(self, theta):
if self.rescale_theta:
theta = theta - self.theta_mean[np.newaxis, :]
theta = theta / self.theta_std[np.newaxis, :]
return theta
def _transform_t_xz(self, t_xz):
if self.rescale_theta:
t_xz = t_xz * self.theta_std[np.newaxis, :]
return t_xz
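# With theta' = (theta - theta_mean) / theta_std, the chain rule gives
# t' = d log p / d theta' = theta_std * (d log p / d theta), hence the multiplication above.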
def _wrap_settings(self):
settings = {
"resolution": self.resolution,
"n_parameters": self.n_parameters,
"n_aux": self.n_aux,
"architecture": self.architecture,
"log_input": self.log_input,
"rescale_inputs": self.rescale_inputs,
"x_scaling_mean": self.x_scaling_mean,
"x_scaling_std": self.x_scaling_std,
"rescale_theta": self.rescale_theta,
"aux_scaling_mean": []
if self.aux_scaling_mean is None
else list(self.aux_scaling_mean),
"aux_scaling_std": []
if self.aux_scaling_std is None
else list(self.aux_scaling_std),
}
return settings
def _unwrap_settings(self, settings):
self.resolution = int(settings["resolution"])
self.n_parameters = int(settings["n_parameters"])
self.n_aux = int(settings["n_aux"])
self.architecture = str(settings["architecture"])
self.log_input = bool(settings["log_input"])
self.rescale_inputs = bool(settings["rescale_inputs"])  # stored as a JSON bool; str() would always be truthy
self.x_scaling_mean = float(settings["x_scaling_mean"])
self.x_scaling_std = float(settings["x_scaling_std"])
self.rescale_theta = bool(settings["rescale_theta"])
self.aux_scaling_mean = list(settings["aux_scaling_mean"])
if len(self.aux_scaling_mean) == 0:
self.aux_scaling_mean = None
else:
self.aux_scaling_mean = np.array(self.aux_scaling_mean)
self.aux_scaling_std = list(settings["aux_scaling_std"])
if len(self.aux_scaling_std) == 0:
self.aux_scaling_std = None
else:
self.aux_scaling_std = np.array(self.aux_scaling_std)
@staticmethod
def _check_required_data(method, r_xz, r_xz_alt, t_xz, t_xz_alt):
if method in ["cascal", "alices", "rascal"] and (t_xz is None or t_xz_alt is None):
raise RuntimeError(
"Method {} requires joint score information".format(method)
)
if method in ["rolr", "alices", "rascal"] and (r_xz is None or r_xz_alt is None):
raise RuntimeError(
"Method {} requires joint likelihood ratio information".format(method)
)
@staticmethod
def _package_training_data(method, x, theta, theta_alt, log_r_xz, log_r_xz_alt, t_xz, t_xz_alt, aux=None):
data = OrderedDict()
data["x"] = x
data["theta"] = theta
data["theta_alt"] = theta_alt
if method in ["rolr", "alice", "alices", "rascal"]:
data["log_r_xz"] = log_r_xz
data["log_r_xz_alt"] = log_r_xz_alt
if method in ["cascal", "alices", "rascal"]:
data["t_xz"] = t_xz
data["t_xz_alt"] = t_xz_alt
if aux is not None:
data["aux"] = aux
return data
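# ----------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original training scripts).
# It relies only on the signatures defined above; the .npy paths below are
# hypothetical placeholders for sample files produced by the simulator.
if __name__ == "__main__":
    estimator = ParameterizedRatioEstimator(
        resolution=64, n_parameters=2, architecture="resnet", log_input=False
    )
    # "alices" needs joint likelihood ratios and joint scores, see _check_required_data
    estimator.train(
        method="alices",
        x="samples/x_train.npy",  # placeholder path
        theta="samples/theta_train.npy",  # placeholder path
        theta_alt="samples/theta_alt_train.npy",  # placeholder path
        log_r_xz="samples/log_r_xz_train.npy",  # placeholder path
        log_r_xz_alt="samples/log_r_xz_alt_train.npy",  # placeholder path
        t_xz="samples/t_xz_train.npy",  # placeholder path
        t_xz_alt="samples/t_xz_alt_train.npy",  # placeholder path
        n_epochs=5,
        batch_size=128,
    )
    estimator.save("models/alices", save_model=False)
    # Reload and evaluate log r(x | theta) for every (x, theta) combination
    estimator = ParameterizedRatioEstimator()
    estimator.load("models/alices")
    log_r_hat, t_hat, _ = estimator.log_likelihood_ratio(
        x="samples/x_test.npy",  # placeholder path
        theta="samples/theta_grid.npy",  # placeholder path
        test_all_combinations=True,
        evaluate_score=False,
    )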
|
smsharmaREPO_NAMEmining-for-substructure-lensPATH_START.@mining-for-substructure-lens_extracted@mining-for-substructure-lens-master@[email protected]@.PATH_END.py
|
{
"filename": "plot_window.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/visualization/plot_window.py",
"type": "Python"
}
|
import abc
import sys
from collections import defaultdict
from numbers import Number
from typing import TYPE_CHECKING, Union
import matplotlib
import numpy as np
from more_itertools import always_iterable
from unyt.exceptions import UnitConversionError
from yt._maintenance.deprecation import issue_deprecation_warning
from yt._typing import AlphaT
from yt.data_objects.image_array import ImageArray
from yt.frontends.sph.data_structures import ParticleDataset
from yt.frontends.stream.data_structures import StreamParticlesDataset
from yt.frontends.ytdata.data_structures import YTSpatialPlotDataset
from yt.funcs import (
fix_axis,
fix_unitary,
is_sequence,
iter_fields,
mylog,
obj_length,
parse_center_array,
validate_moment,
)
from yt.geometry.api import Geometry
from yt.geometry.oct_geometry_handler import OctreeIndex
from yt.units.unit_object import Unit # type: ignore
from yt.units.unit_registry import UnitParseError # type: ignore
from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.exceptions import (
YTCannotParseUnitDisplayName,
YTDataTypeUnsupported,
YTInvalidFieldType,
YTPlotCallbackError,
YTUnitNotRecognized,
)
from yt.utilities.math_utils import ortho_find
from yt.utilities.orientation import Orientation
from yt.visualization._handlers import ColorbarHandler, NormHandler
from yt.visualization.base_plot_types import CallbackWrapper, ImagePlotMPL
from ._commons import (
_get_units_label,
_swap_axes_extents,
get_default_from_config,
)
from .fixed_resolution import (
FixedResolutionBuffer,
OffAxisProjectionFixedResolutionBuffer,
)
from .geo_plot_utils import get_mpl_transform
from .plot_container import (
ImagePlotContainer,
invalidate_data,
invalidate_figure,
invalidate_plot,
)
if TYPE_CHECKING:
from yt.visualization.plot_modifications import PlotCallback
if sys.version_info >= (3, 11):
from typing import assert_never
else:
from typing_extensions import assert_never
def get_window_parameters(axis, center, width, ds):
width = ds.coordinates.sanitize_width(axis, width, None)
center, display_center = ds.coordinates.sanitize_center(center, axis)
xax = ds.coordinates.x_axis[axis]
yax = ds.coordinates.y_axis[axis]
bounds = (
display_center[xax] - width[0] / 2,
display_center[xax] + width[0] / 2,
display_center[yax] - width[1] / 2,
display_center[yax] + width[1] / 2,
)
return (bounds, center, display_center)
def get_oblique_window_parameters(
normal, center, width, ds, depth=None, get3bounds=False
):
center, display_center = ds.coordinates.sanitize_center(center, axis=None)
width = ds.coordinates.sanitize_width(normal, width, depth)
if len(width) == 2:
# Transforming to the cutting plane coordinate system
# the original dimensionless center messes up off-axis
# SPH projections though -> don't use this center there
center = (
(center - ds.domain_left_edge) / ds.domain_width - 0.5
) * ds.domain_width
(normal, perp1, perp2) = ortho_find(normal)
mat = np.transpose(np.column_stack((perp1, perp2, normal)))
center = np.dot(mat, center)
w = tuple(el.in_units("code_length") for el in width)
bounds = tuple(((2 * (i % 2)) - 1) * w[i // 2] / 2 for i in range(len(w) * 2))
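# i.e. (-w[0]/2, +w[0]/2, -w[1]/2, +w[1]/2[, -w[2]/2, +w[2]/2]): extents centered on the plane origin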
if get3bounds and depth is None:
# off-axis projection, depth not specified
# -> set 'large enough' depth using half the box diagonal + margin
d2 = ds.domain_width[0].in_units("code_length") ** 2
d2 += ds.domain_width[1].in_units("code_length") ** 2
d2 += ds.domain_width[2].in_units("code_length") ** 2
diag = np.sqrt(d2)
bounds = bounds + (-0.51 * diag, 0.51 * diag)
return (bounds, center)
def get_axes_unit(width, ds):
r"""
Infers the axes unit names from the input width specification
"""
if ds.no_cgs_equiv_length:
return ("code_length",) * 2
if is_sequence(width):
if isinstance(width[1], str):
axes_unit = (width[1], width[1])
elif is_sequence(width[1]):
axes_unit = (width[0][1], width[1][1])
elif isinstance(width[0], YTArray):
axes_unit = (str(width[0].units), str(width[1].units))
else:
axes_unit = None
else:
if isinstance(width, YTArray):
axes_unit = (str(width.units), str(width.units))
else:
axes_unit = None
return axes_unit
def validate_mesh_fields(data_source, fields):
# this check doesn't make sense for ytdata plot datasets, which
# load mesh data as a particle field but nonetheless can still
# make plots with it
if isinstance(data_source.ds, YTSpatialPlotDataset):
return
canonical_fields = data_source._determine_fields(fields)
invalid_fields = []
for field in canonical_fields:
finfo = data_source.ds.field_info[field]
if finfo.sampling_type == "particle":
if not hasattr(data_source.ds, "_sph_ptypes"):
pass
elif finfo.is_sph_field:
continue
invalid_fields.append(field)
if len(invalid_fields) > 0:
raise YTInvalidFieldType(invalid_fields)
class PlotWindow(ImagePlotContainer, abc.ABC):
r"""
A plotting mechanism based around the concept of a window into a
data source. It can have arbitrary fields, each of which will be
centered on the same viewpoint, but will have individual zlimits.
The data and plot are updated separately, and each can be
invalidated as the object is modified.
Data is handled by a FixedResolutionBuffer object.
Parameters
----------
data_source :
:class:`yt.data_objects.selection_objects.base_objects.YTSelectionContainer2D`
This is the source to be pixelized, which can be a projection,
slice, or a cutting plane.
bounds : sequence of floats
Bounds are the min and max in the image plane that we want our
image to cover. It's in the order of (xmin, xmax, ymin, ymax),
where the coordinates are all in the appropriate code units.
buff_size : sequence of ints
The size of the image to generate.
antialias : boolean
This can be true or false. It determines whether or not sub-pixel
rendering is used during data deposition.
window_size : float
The size of the window on the longest axis (in units of inches),
including the margins but not the colorbar.
"""
def __init__(
self,
data_source,
bounds,
buff_size=(800, 800),
antialias=True,
periodic=True,
origin="center-window",
oblique=False,
window_size=8.0,
fields=None,
fontsize=18,
aspect=None,
setup=False,
*,
geometry: Geometry = Geometry.CARTESIAN,
) -> None:
# axis manipulation operations are callback-only:
self._swap_axes_input = False
self._flip_vertical = False
self._flip_horizontal = False
self.center = None
self._periodic = periodic
self.oblique = oblique
self._equivalencies = defaultdict(lambda: (None, {})) # type: ignore [var-annotated]
self.buff_size = buff_size
self.antialias = antialias
self._axes_unit_names = None
self._transform = None
self._projection = None
self.aspect = aspect
skip = list(FixedResolutionBuffer._exclude_fields) + data_source._key_fields
fields = list(iter_fields(fields))
self.override_fields = list(set(fields).intersection(set(skip)))
self.fields = [f for f in fields if f not in skip]
self._frb: FixedResolutionBuffer | None = None
super().__init__(data_source, window_size, fontsize)
self._set_window(bounds) # this automatically updates the data and plot
if origin != "native":
match geometry:
case Geometry.CARTESIAN | Geometry.SPECTRAL_CUBE:
pass
case (
Geometry.CYLINDRICAL
| Geometry.POLAR
| Geometry.SPHERICAL
| Geometry.GEOGRAPHIC
| Geometry.INTERNAL_GEOGRAPHIC
):
mylog.info("Setting origin='native' for %s geometry.", geometry)
origin = "native"
case _:
assert_never(geometry)
self.origin = origin
if self.data_source.center is not None and not oblique:
ax = self.data_source.axis
xax = self.ds.coordinates.x_axis[ax]
yax = self.ds.coordinates.y_axis[ax]
center, display_center = self.ds.coordinates.sanitize_center(
self.data_source.center, ax
)
center = [display_center[xax], display_center[yax]]
self.set_center(center)
axname = self.ds.coordinates.axis_name[ax]
transform = self.ds.coordinates.data_transform[axname]
projection = self.ds.coordinates.data_projection[axname]
self._projection = get_mpl_transform(projection)
self._transform = get_mpl_transform(transform)
self._setup_plots()
for field in self.data_source._determine_fields(self.fields):
finfo = self.data_source.ds._get_field_info(field)
pnh = self.plots[field].norm_handler
# take_log can be `None` so we explicitly compare against a boolean
pnh.prefer_log = finfo.take_log is not False
# override from user configuration if any
log, linthresh = get_default_from_config(
self.data_source,
field=field,
keys=["log", "linthresh"],
defaults=[None, None],
)
if linthresh is not None:
self.set_log(field, linthresh=linthresh)
elif log is not None:
self.set_log(field, log)
def __iter__(self):
for ds in self.ts:
mylog.warning("Switching to %s", ds)
self._switch_ds(ds)
yield self
def piter(self, *args, **kwargs):
for ds in self.ts.piter(*args, **kwargs):
self._switch_ds(ds)
yield self
@property
def frb(self):
# Force the regeneration of the fixed resolution buffer
# * if there's none
# * if the data has been invalidated
# * if the frb has been invalidated
if not self._data_valid:
self._recreate_frb()
return self._frb
@frb.setter
def frb(self, value):
self._frb = value
self._data_valid = True
@frb.deleter
def frb(self):
del self._frb
self._frb = None
def _recreate_frb(self):
old_fields = None
old_filters = []
# If we are regenerating an frb, we want to know what fields we had before
if self._frb is not None:
old_fields = list(self._frb.data.keys())
old_units = [_.units for _ in self._frb.data.values()]
old_filters = self._frb._filters
# Set the bounds
if hasattr(self, "zlim"):
# Support OffAxisProjectionPlot and OffAxisSlicePlot
bounds = self.xlim + self.ylim + self.zlim
else:
bounds = self.xlim + self.ylim
# Generate the FRB
self.frb = self._frb_generator(
self.data_source,
bounds,
self.buff_size,
self.antialias,
periodic=self._periodic,
filters=old_filters,
)
# At this point the frb has the valid bounds, size, aliasing, etc.
if old_fields is not None:
# Restore the old fields
for key, units in zip(old_fields, old_units, strict=False):
self._frb.render(key)
equiv = self._equivalencies[key]
if equiv[0] is None:
self._frb[key].convert_to_units(units)
else:
self.frb.set_unit(key, units, equiv[0], equiv[1])
# Restore the override fields
for key in self.override_fields:
self._frb.render(key)
@property
def _has_swapped_axes(self):
# note: we always run the validations here in case the states of
# the conflicting attributes have changed.
return self._validate_swap_axes(self._swap_axes_input)
@invalidate_data
def swap_axes(self):
# toggles the swap_axes behavior
new_swap_value = not self._swap_axes_input
# note: we also validate here to catch invalid states immediately, even
# though we validate on accessing the attribute in `_has_swapped_axes`.
self._swap_axes_input = self._validate_swap_axes(new_swap_value)
return self
def _validate_swap_axes(self, swap_value: bool) -> bool:
if swap_value and (self._transform or self._projection):
mylog.warning("Cannot swap axes due to transform or projection")
return False
return swap_value
@property
def width(self):
Wx = self.xlim[1] - self.xlim[0]
Wy = self.ylim[1] - self.ylim[0]
return (Wx, Wy)
@property
def bounds(self):
return self.xlim + self.ylim
@invalidate_data
def zoom(self, factor):
r"""This zooms the window by *factor* > 0.
- zoom out with *factor* < 1
- zoom in with *factor* > 1
Parameters
----------
factor : float
multiplier for the current width
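Examples
--------
A minimal sketch, re-using the sample dataset referenced elsewhere in this module:
>>> import yt
>>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
>>> p = yt.SlicePlot(ds, "z", ("gas", "density"))
>>> p.zoom(2)  # halve the displayed width, i.e. zoom in by a factor of 2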
"""
if factor <= 0:
raise ValueError("Only positive zooming factors are meaningful.")
Wx, Wy = self.width
centerx = self.xlim[0] + Wx * 0.5
centery = self.ylim[0] + Wy * 0.5
nWx, nWy = Wx / factor, Wy / factor
self.xlim = (centerx - nWx * 0.5, centerx + nWx * 0.5)
self.ylim = (centery - nWy * 0.5, centery + nWy * 0.5)
return self
@invalidate_data
def pan(self, deltas):
r"""Pan the image by specifying absolute code unit coordinate deltas.
Parameters
----------
deltas : Two-element sequence of floats, quantities, or (float, unit)
tuples.
(delta_x, delta_y). If a unit is not supplied the unit is assumed
to be code_length.
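Examples
--------
A brief sketch of the accepted formats, continuing from a plot ``p`` as above:
>>> p.pan((0.05, 0.05))  # plain floats are interpreted as code_length
>>> p.pan(((100.0, "kpc"), (0.0, "kpc")))  # (float, unit) tuples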
"""
if len(deltas) != 2:
raise TypeError(
f"The pan function accepts a two-element sequence.\nReceived {deltas}."
)
if isinstance(deltas[0], Number) and isinstance(deltas[1], Number):
deltas = (
self.ds.quan(deltas[0], "code_length"),
self.ds.quan(deltas[1], "code_length"),
)
elif isinstance(deltas[0], tuple) and isinstance(deltas[1], tuple):
deltas = (
self.ds.quan(deltas[0][0], deltas[0][1]),
self.ds.quan(deltas[1][0], deltas[1][1]),
)
elif isinstance(deltas[0], YTQuantity) and isinstance(deltas[1], YTQuantity):
pass
else:
raise TypeError(
"The arguments of the pan function must be a sequence of floats,\n"
f"quantities, or (float, unit) tuples. Received {deltas}"
)
self.xlim = (self.xlim[0] + deltas[0], self.xlim[1] + deltas[0])
self.ylim = (self.ylim[0] + deltas[1], self.ylim[1] + deltas[1])
return self
@invalidate_data
def pan_rel(self, deltas):
r"""Pan the image by specifying relative deltas, to the FOV.
Parameters
----------
deltas : sequence of floats
(delta_x, delta_y) in *relative* code unit coordinates
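Examples
--------
For instance (sketch, continuing from a plot ``p`` as above):
>>> p.pan_rel((0.1, -0.1))  # shift by +10% of the window width in x and -10% of its height in y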
"""
Wx, Wy = self.width
self.xlim = (self.xlim[0] + Wx * deltas[0], self.xlim[1] + Wx * deltas[0])
self.ylim = (self.ylim[0] + Wy * deltas[1], self.ylim[1] + Wy * deltas[1])
return self
@invalidate_plot
def set_unit(self, field, new_unit, equivalency=None, equivalency_kwargs=None):
"""Sets a new unit for the requested field
parameters
----------
field : string or field tuple
The name of the field that is to be changed.
new_unit : string or Unit object
equivalency : string, optional
If set, the equivalency to use to convert the current units to
the new requested unit. If None, the unit conversion will be done
without an equivalency
equivalency_kwargs : string, optional
Keyword arguments to be passed to the equivalency. Only used if
``equivalency`` is set.
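Examples
--------
A short sketch, assuming ``p`` plots a gas density field:
>>> p.set_unit(("gas", "density"), "Msun/pc**3")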
"""
for f, u in zip(iter_fields(field), always_iterable(new_unit), strict=True):
self.frb.set_unit(f, u, equivalency, equivalency_kwargs)
self._equivalencies[f] = (equivalency, equivalency_kwargs)
pnh = self.plots[f].norm_handler
pnh.display_units = u
return self
@invalidate_plot
def set_origin(self, origin):
"""Set the plot origin.
Parameters
----------
origin : string or length 1, 2, or 3 sequence.
The location of the origin of the plot coordinate system. This
is typically represented by a '-' separated string or a tuple of
strings. In the first index the y-location is given by 'lower',
'upper', or 'center'. The second index is the x-location, given as
'left', 'right', or 'center'. Finally, whether the origin is
applied in 'domain' space, plot 'window' space or 'native'
simulation coordinate system is given. For example, both
'upper-right-domain' and ['upper', 'right', 'domain'] place the
origin in the upper right hand corner of domain space. If x or y
are not given, a value is inferred. For instance, 'left-domain'
corresponds to the lower-left hand corner of the simulation domain,
'center-domain' corresponds to the center of the simulation domain,
or 'center-window' for the center of the plot window. In the event
that none of these options place the origin in a desired location,
a sequence of tuples and a string specifying the
coordinate space can be given. If plain numeric types are input,
units of `code_length` are assumed. Further examples:
=============================================== ===============================
format example
=============================================== ===============================
'{space}' 'domain'
'{xloc}-{space}' 'left-window'
'{yloc}-{space}' 'upper-domain'
'{yloc}-{xloc}-{space}' 'lower-right-window'
('{space}',) ('window',)
('{xloc}', '{space}') ('right', 'domain')
('{yloc}', '{space}') ('lower', 'window')
('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window')
((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window')
(xloc, yloc, '{space}') (0.23, 0.5, 'domain')
=============================================== ===============================
"""
self.origin = origin
return self
@invalidate_plot
@invalidate_figure
def set_mpl_projection(self, mpl_proj):
r"""
Set the matplotlib projection type with a cartopy transform function
Given a string or a tuple argument, this will project the data onto
the plot axes with the chosen transform function.
Assumes that the underlying data has a PlateCarree transform type.
To annotate the plot with coastlines or other annotations,
`render()` will need to be called after this function
to make the axes available for annotation.
Parameters
----------
mpl_proj : string or tuple
if passed as a string, mpl_proj is the specified projection type,
if passed as a tuple, then tuple will take the form of
``("ProjectionType", (args))`` or ``("ProjectionType", (args), {kwargs})``
Valid projection type options include:
'PlateCarree', 'LambertConformal', 'LambertCylindrical',
'Mercator', 'Miller', 'Mollweide', 'Orthographic',
'Robinson', 'Stereographic', 'TransverseMercator',
'InterruptedGoodeHomolosine', 'RotatedPole', 'OSGB',
'EuroPP', 'Geostationary', 'Gnomonic', 'NorthPolarStereo',
'OSNI', 'SouthPolarStereo', 'AlbersEqualArea',
'AzimuthalEquidistant', 'Sinusoidal', 'UTM',
'NearsidePerspective', 'LambertAzimuthalEqualArea'
Examples
--------
This will create a Mollweide projection using Mollweide default values
and annotate it with coastlines
>>> import yt
>>> ds = yt.load("")
>>> p = yt.SlicePlot(ds, "altitude", "AIRDENS")
>>> p.set_mpl_projection("AIRDENS", "Mollweide")
>>> p.render()
>>> p.plots["AIRDENS"].axes.coastlines()
>>> p.show()
This will move the PlateCarree central longitude to 90 degrees and
annotate with coastlines.
>>> import yt
>>> ds = yt.load("")
>>> p = yt.SlicePlot(ds, "altitude", "AIRDENS")
>>> p.set_mpl_projection(
... "AIRDENS", ("PlateCarree", (), {"central_longitude": 90, "globe": None})
... )
>>> p.render()
>>> p.plots["AIRDENS"].axes.set_global()
>>> p.plots["AIRDENS"].axes.coastlines()
>>> p.show()
This will create a RotatedPole projection with the unrotated pole
position at 37.5 degrees latitude and 177.5 degrees longitude by
passing them in as args.
>>> import yt
>>> ds = yt.load("")
>>> p = yt.SlicePlot(ds, "altitude", "AIRDENS")
>>> p.set_mpl_projection("RotatedPole", (177.5, 37.5))
>>> p.render()
>>> p.plots["AIRDENS"].axes.set_global()
>>> p.plots["AIRDENS"].axes.coastlines()
>>> p.show()
This will create a RotatedPole projection with the unrotated pole
position at 37.5 degrees latitude and 177.5 degrees longitude by
passing them in as kwargs.
>>> import yt
>>> ds = yt.load("")
>>> p = yt.SlicePlot(ds, "altitude", "AIRDENS")
>>> p.set_mpl_projection(
... ("RotatedPole", (), {"pole_latitude": 37.5, "pole_longitude": 177.5})
... )
>>> p.render()
>>> p.plots["AIRDENS"].axes.set_global()
>>> p.plots["AIRDENS"].axes.coastlines()
>>> p.show()
"""
self._projection = get_mpl_transform(mpl_proj)
axname = self.ds.coordinates.axis_name[self.data_source.axis]
transform = self.ds.coordinates.data_transform[axname]
self._transform = get_mpl_transform(transform)
return self
@invalidate_data
def _set_window(self, bounds):
"""Set the bounds of the plot window.
This is normally only called internally, see set_width.
Parameters
----------
bounds : a four element sequence of floats
The x and y bounds, in the format (x0, x1, y0, y1)
"""
if self.center is not None:
dx = bounds[1] - bounds[0]
dy = bounds[3] - bounds[2]
self.xlim = (self.center[0] - dx / 2.0, self.center[0] + dx / 2.0)
self.ylim = (self.center[1] - dy / 2.0, self.center[1] + dy / 2.0)
else:
self.xlim = tuple(bounds[0:2])
self.ylim = tuple(bounds[2:4])
if len(bounds) == 6:
# Support OffAxisProjectionPlot and OffAxisSlicePlot
self.zlim = tuple(bounds[4:6])
mylog.info("xlim = %f %f", self.xlim[0], self.xlim[1])
mylog.info("ylim = %f %f", self.ylim[0], self.ylim[1])
if hasattr(self, "zlim"):
mylog.info("zlim = %f %f", self.zlim[0], self.zlim[1])
@invalidate_data
def set_width(self, width, unit=None):
"""set the width of the plot window
parameters
----------
width : float, array of floats, (float, unit) tuple, or tuple of
(float, unit) tuples.
Width can have four different formats to support windows with
variable x and y widths. They are:
================================== =======================
format example
================================== =======================
(float, string) (10,'kpc')
((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
float 0.2
(float, float) (0.2, 0.3)
================================== =======================
For example, (10, 'kpc') requests a plot window that is 10
kiloparsecs wide in the x and y directions,
((10,'kpc'),(15,'kpc')) requests a window that is 10 kiloparsecs
wide along the x axis and 15 kiloparsecs wide along the y axis.
In the other two examples, code units are assumed, for example
(0.2, 0.3) requests a plot that has an x width of 0.2 and a y
width of 0.3 in code units. If units are provided the resulting
plot axis labels will use the supplied units.
unit : str
the unit the width has been specified in. If width is a tuple, this
argument is ignored. Defaults to code units.
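Examples
--------
For instance, following the format table above (sketch):
>>> p.set_width((20, "kpc"))  # square window, 20 kpc on a side
>>> p.set_width(((20, "kpc"), (10, "kpc")))  # 20 kpc along x, 10 kpc along y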
"""
if isinstance(width, Number):
if unit is None:
width = (width, "code_length")
else:
width = (width, fix_unitary(unit))
axes_unit = get_axes_unit(width, self.ds)
width = self.ds.coordinates.sanitize_width(self.frb.axis, width, None)
centerx = (self.xlim[1] + self.xlim[0]) / 2.0
centery = (self.ylim[1] + self.ylim[0]) / 2.0
self.xlim = (centerx - width[0] / 2, centerx + width[0] / 2)
self.ylim = (centery - width[1] / 2, centery + width[1] / 2)
if hasattr(self, "zlim"):
centerz = (self.zlim[1] + self.zlim[0]) / 2.0
mw = self.ds.arr(width).max()
self.zlim = (centerz - mw / 2.0, centerz + mw / 2.0)
self.set_axes_unit(axes_unit)
return self
@invalidate_data
def set_center(self, new_center, unit="code_length"):
"""Sets a new center for the plot window
parameters
----------
new_center : two element sequence of floats
The coordinates of the new center of the image in the
coordinate system defined by the plot axes. If the unit
keyword is not specified, the coordinates are assumed to
be in code units.
unit : string
The name of the unit new_center is given in. If new_center is a
YTArray or tuple of YTQuantities, this keyword is ignored.
"""
error = RuntimeError(
"\n"
"new_center must be a two-element list or tuple of floats \n"
"corresponding to a coordinate in the plot relative to \n"
"the plot coordinate system.\n"
)
if new_center is None:
self.center = None
elif is_sequence(new_center):
if len(new_center) != 2:
raise error
for el in new_center:
if not isinstance(el, Number) and not isinstance(el, YTQuantity):
raise error
if isinstance(new_center[0], Number):
new_center = [self.ds.quan(c, unit) for c in new_center]
self.center = new_center
else:
raise error
self._set_window(self.bounds)
return self
@invalidate_data
def set_antialias(self, aa):
"""Turn antialiasing on or off.
parameters
----------
aa : boolean
"""
self.antialias = aa
@invalidate_data
def set_buff_size(self, size):
"""Sets a new buffer size for the fixed resolution buffer
parameters
----------
size : int or two element sequence of ints
The number of data elements in the buffer on the x and y axes.
If a scalar is provided, then the buffer is assumed to be square.
"""
if is_sequence(size):
self.buff_size = size
else:
self.buff_size = (size, size)
return self
@invalidate_plot
def set_axes_unit(self, unit_name):
r"""Set the unit for display on the x and y axes of the image.
Parameters
----------
unit_name : string or two element tuple of strings
A unit, available for conversion in the dataset, that the
image extents will be displayed in. If set to None, any previous
units will be reset. If the unit is None, the default is chosen.
If unit_name is '1', 'u', or 'unitary', it will not display the
units, and only show the axes name. If unit_name is a tuple, the
first element is assumed to be the unit for the x axis and the
second element the unit for the y axis.
Raises
------
YTUnitNotRecognized
If the unit is not known, this will be raised.
Examples
--------
>>> from yt import load
>>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
>>> p = ProjectionPlot(ds, "y", "Density")
>>> p.set_axes_unit("kpc")
"""
# blind except because it could be in conversion_factors or units
if unit_name is not None:
if isinstance(unit_name, str):
unit_name = (unit_name, unit_name)
for un in unit_name:
try:
self.ds.length_unit.in_units(un)
except (UnitConversionError, UnitParseError) as e:
raise YTUnitNotRecognized(un) from e
self._axes_unit_names = unit_name
return self
@invalidate_plot
def flip_horizontal(self):
"""
inverts the horizontal axis (the image's abscissa)
"""
self._flip_horizontal = not self._flip_horizontal
return self
@invalidate_plot
def flip_vertical(self):
"""
inverts the vertical axis (the image's ordinate)
"""
self._flip_vertical = not self._flip_vertical
return self
def to_fits_data(self, fields=None, other_keys=None, length_unit=None, **kwargs):
r"""Export the fields in this PlotWindow instance
to a FITSImageData instance.
This will export a set of FITS images of either the fields specified
or all the fields already in the object.
Parameters
----------
fields : list of strings
These fields will be pixelized and output. If "None", the keys of
the FRB will be used.
other_keys : dictionary, optional
A set of header keys and values to write into the FITS header.
length_unit : string, optional
the length units that the coordinates are written in. The default
is to use the default length unit of the dataset.
"""
return self.frb.to_fits_data(
fields=fields, other_keys=other_keys, length_unit=length_unit, **kwargs
)
class PWViewerMPL(PlotWindow):
"""Viewer using matplotlib as a backend via the WindowPlotMPL."""
_current_field = None
_frb_generator: type[FixedResolutionBuffer] | None = None
_plot_type: str | None = None
def __init__(self, *args, **kwargs) -> None:
if self._frb_generator is None:
self._frb_generator = kwargs.pop("frb_generator")
if self._plot_type is None:
self._plot_type = kwargs.pop("plot_type")
self._splat_color = kwargs.pop("splat_color", None)
PlotWindow.__init__(self, *args, **kwargs)
# import type here to avoid import cycles
# note that this import statement is actually crucial at runtime:
# the filter methods for the present class are defined only when
# fixed_resolution_filters is imported, so we need to guarantee
# that it happens no later than instantiation
self._callbacks: list[PlotCallback] = []
@property
def _data_valid(self) -> bool:
return self._frb is not None and self._frb._data_valid
@_data_valid.setter
def _data_valid(self, value):
if self._frb is None:
# we delegate the (in)validation responsibility to the FRB
# if we don't have one yet, we can exit without doing anything
return
else:
self._frb._data_valid = value
def _setup_origin(self):
origin = self.origin
axis_index = self.data_source.axis
xc = None
yc = None
if isinstance(origin, str):
origin = tuple(origin.split("-"))
if len(origin) > 3:
raise ValueError(
"Invalid origin argument with too many elements; "
f"expected 1, 2 or 3 elements, got {self.origin!r}, counting {len(origin)} elements. "
"Use '-' as a separator for string arguments."
)
if len(origin) == 1:
coord_system = origin[0]
if coord_system not in ("window", "domain", "native"):
raise ValueError(
"Invalid origin argument. "
"Single element specification must be 'window', 'domain', or 'native'. "
f"Got {self.origin!r}"
)
origin = ("lower", "left", coord_system)
elif len(origin) == 2:
err_msg = "Invalid origin argument. Using 2 elements:\n"
if origin[0] in ("left", "right", "center"):
o0map = {"left": "lower", "right": "upper", "center": "center"}
origin = (o0map[origin[0]],) + origin
elif origin[0] in ("lower", "upper"):
origin = (origin[0], "center", origin[-1])
else:
err_msg += " - the first one must be 'left', 'right', 'lower', 'upper' or 'center'\n"
if origin[-1] not in ("window", "domain", "native"):
err_msg += " - the second one must be 'window', 'domain', or 'native'\n"
if len(err_msg.split("\n")) > 2:
err_msg += f"Got {self.origin!r}"
raise ValueError(err_msg)
elif len(origin) == 3:
err_msg = "Invalid origin argument. Using 3 elements:\n"
if isinstance(origin[0], (int, float)):
xc = self.ds.quan(origin[0], "code_length")
elif isinstance(origin[0], tuple):
xc = self.ds.quan(*origin[0])
elif origin[0] not in ("lower", "upper", "center"):
err_msg += " - the first one must be 'lower', 'upper' or 'center' or a distance\n"
if isinstance(origin[1], (int, float)):
yc = self.ds.quan(origin[1], "code_length")
elif isinstance(origin[1], tuple):
yc = self.ds.quan(*origin[1])
elif origin[1] not in ("left", "right", "center"):
err_msg += " - the second one must be 'left', 'right', 'center' or a distance\n"
if origin[-1] not in ("window", "domain", "native"):
err_msg += " - the third one must be 'window', 'domain', or 'native'\n"
if len(err_msg.split("\n")) > 2:
err_msg += f"Got {self.origin!r}"
raise ValueError(err_msg)
assert not isinstance(origin, str)
assert len(origin) == 3
assert origin[2] in ("window", "domain", "native")
if origin[2] == "window":
xllim, xrlim = self.xlim
yllim, yrlim = self.ylim
elif origin[2] == "domain":
xax = self.ds.coordinates.x_axis[axis_index]
yax = self.ds.coordinates.y_axis[axis_index]
xllim = self.ds.domain_left_edge[xax]
xrlim = self.ds.domain_right_edge[xax]
yllim = self.ds.domain_left_edge[yax]
yrlim = self.ds.domain_right_edge[yax]
elif origin[2] == "native":
return (self.ds.quan(0.0, "code_length"), self.ds.quan(0.0, "code_length"))
if xc is None and yc is None:
assert origin[0] in ("lower", "upper", "center")
assert origin[1] in ("left", "right", "center")
if origin[0] == "lower":
yc = yllim
elif origin[0] == "upper":
yc = yrlim
elif origin[0] == "center":
yc = (yllim + yrlim) / 2.0
if origin[1] == "left":
xc = xllim
elif origin[1] == "right":
xc = xrlim
elif origin[1] == "center":
xc = (xllim + xrlim) / 2.0
x_in_bounds = xc >= xllim and xc <= xrlim
y_in_bounds = yc >= yllim and yc <= yrlim
if not x_in_bounds and not y_in_bounds:
raise ValueError(
"origin inputs not in bounds of specified coordinate system domain; "
f"got {self.origin!r} Bounds are {xllim, xrlim} and {yllim, yrlim} respectively"
)
return xc, yc
def _setup_plots(self):
from matplotlib.mathtext import MathTextParser
if self._plot_valid:
return
if not self._data_valid:
self._recreate_frb()
self._colorbar_valid = True
field_list = list(set(self.data_source._determine_fields(self.fields)))
for f in field_list:
axis_index = self.data_source.axis
xc, yc = self._setup_origin()
if self.ds._uses_code_length_unit:
# this should happen only if the dataset was initialized with
# argument unit_system="code" or if it's set to have no CGS
# equivalent. This only needs to happen here in the specific
# case that we're doing a computationally intense operation
# like using cartopy, but it prevents crashes in that case.
(unit_x, unit_y) = ("code_length", "code_length")
elif self._axes_unit_names is None:
unit = self.ds.get_smallest_appropriate_unit(
self.xlim[1] - self.xlim[0]
)
unit_x = unit_y = unit
coords = self.ds.coordinates
if hasattr(coords, "image_units"):
# check for special cases defined in
# non cartesian CoordinateHandler subclasses
image_units = coords.image_units[coords.axis_id[axis_index]]
if image_units[0] in ("deg", "rad"):
unit_x = "code_length"
elif image_units[0] == 1:
unit_x = "dimensionless"
if image_units[1] in ("deg", "rad"):
unit_y = "code_length"
elif image_units[1] == 1:
unit_y = "dimensionless"
else:
(unit_x, unit_y) = self._axes_unit_names
# For some plots we may set aspect by hand, such as for spectral cube data.
# This will likely be replaced at some point by the coordinate handler
# setting plot aspect.
if self.aspect is None:
self.aspect = float(
(self.ds.quan(1.0, unit_y) / self.ds.quan(1.0, unit_x)).in_cgs()
)
extentx = (self.xlim - xc)[:2]
extenty = (self.ylim - yc)[:2]
# extentx/y arrays inherit units from xlim and ylim attributes
# and these attributes are always length even for angular and
# dimensionless axes so we need to strip out units for consistency
if unit_x == "dimensionless":
extentx = extentx / extentx.units
else:
extentx.convert_to_units(unit_x)
if unit_y == "dimensionless":
extenty = extenty / extenty.units
else:
extenty.convert_to_units(unit_y)
extent = [*extentx, *extenty]
image = self.frb.get_image(f)
mask = self.frb.get_mask(f)
assert mask is None or mask.dtype == bool
font_size = self._font_properties.get_size()
if f in self.plots.keys():
pnh = self.plots[f].norm_handler
cbh = self.plots[f].colorbar_handler
else:
pnh, cbh = self._get_default_handlers(
field=f, default_display_units=image.units
)
if pnh.display_units != image.units:
equivalency, equivalency_kwargs = self._equivalencies[f]
image.convert_to_units(
pnh.display_units, equivalency, **equivalency_kwargs
)
fig = None
axes = None
cax = None
draw_axes = True
draw_frame = None
if f in self.plots:
draw_axes = self.plots[f]._draw_axes
draw_frame = self.plots[f]._draw_frame
if self.plots[f].figure is not None:
fig = self.plots[f].figure
axes = self.plots[f].axes
cax = self.plots[f].cax
# This is for splatting particle positions with a single
# color instead of a colormap
if self._splat_color is not None:
# make image a rgba array, using the splat color
greyscale_image = self.frb[f]
ia = np.zeros((greyscale_image.shape[0], greyscale_image.shape[1], 4))
ia[:, :, 3] = 0.0 # set alpha to 0.0
locs = greyscale_image > 0.0
to_rgba = matplotlib.colors.colorConverter.to_rgba
color_tuple = to_rgba(self._splat_color)
ia[locs] = color_tuple
ia = ImageArray(ia)
else:
ia = image
swap_axes = self._has_swapped_axes
aspect = self.aspect
if swap_axes:
extent = _swap_axes_extents(extent)
ia = ia.transpose()
aspect = 1.0 / aspect # aspect ends up passed to imshow(aspect=aspect)
self.plots[f] = WindowPlotMPL(
ia,
extent,
self.figure_size,
font_size,
aspect,
fig,
axes,
cax,
self._projection,
self._transform,
norm_handler=pnh,
colorbar_handler=cbh,
alpha=mask.astype("float64") if mask is not None else None,
)
axes_unit_labels = self._get_axes_unit_labels(unit_x, unit_y)
if self.oblique:
labels = [
r"$\rm{Image\ x" + axes_unit_labels[0] + "}$",
r"$\rm{Image\ y" + axes_unit_labels[1] + "}$",
]
else:
coordinates = self.ds.coordinates
axis_names = coordinates.image_axis_name[axis_index]
xax = coordinates.x_axis[axis_index]
yax = coordinates.y_axis[axis_index]
if hasattr(coordinates, "axis_default_unit_name"):
axes_unit_labels = [
coordinates.axis_default_unit_name[xax],
coordinates.axis_default_unit_name[yax],
]
labels = [
r"$\rm{" + axis_names[0] + axes_unit_labels[0] + r"}$",
r"$\rm{" + axis_names[1] + axes_unit_labels[1] + r"}$",
]
if hasattr(coordinates, "axis_field"):
if xax in coordinates.axis_field:
xmin, xmax = coordinates.axis_field[xax](
0, self.xlim, self.ylim
)
else:
xmin, xmax = (float(x) for x in extentx)
if yax in coordinates.axis_field:
ymin, ymax = coordinates.axis_field[yax](
1, self.xlim, self.ylim
)
else:
ymin, ymax = (float(y) for y in extenty)
new_extent = (xmin, xmax, ymin, ymax)
if swap_axes:
new_extent = _swap_axes_extents(new_extent)
self.plots[f].image.set_extent(new_extent)
self.plots[f].axes.set_aspect("auto")
x_label, y_label, colorbar_label = self._get_axes_labels(f)
if x_label is not None:
labels[0] = x_label
if y_label is not None:
labels[1] = y_label
if swap_axes:
labels.reverse()
self.plots[f].axes.set_xlabel(labels[0])
self.plots[f].axes.set_ylabel(labels[1])
# Determine the units of the data
units = Unit(image.units, registry=self.ds.unit_registry)
units = units.latex_representation()
if colorbar_label is None:
colorbar_label = image.info["label"]
if getattr(self, "moment", 1) == 2:
colorbar_label = f"{colorbar_label} \\rm{{Standard Deviation}}"
if hasattr(self, "projected"):
colorbar_label = f"$\\rm{{Projected }}$ {colorbar_label}"
if units is not None and units != "":
colorbar_label += _get_units_label(units)
parser = MathTextParser("Agg")
try:
parser.parse(colorbar_label)
except Exception as err:
# unspecified exceptions might be raised from matplotlib via its own dependencies
raise YTCannotParseUnitDisplayName(f, colorbar_label, str(err)) from err
self.plots[f].cb.set_label(colorbar_label)
# x-y axes minorticks
if f not in self._minorticks:
self._minorticks[f] = True
if self._minorticks[f]:
self.plots[f].axes.minorticks_on()
else:
self.plots[f].axes.minorticks_off()
if not draw_axes:
self.plots[f]._toggle_axes(draw_axes, draw_frame)
self._set_font_properties()
self.run_callbacks()
if self._flip_horizontal or self._flip_vertical:
# some callbacks (e.g., streamlines) fail when applied to a
# flipped axis, so flip only at the end.
for f in field_list:
if self._flip_horizontal:
ax = self.plots[f].axes
ax.invert_xaxis()
if self._flip_vertical:
ax = self.plots[f].axes
ax.invert_yaxis()
self._plot_valid = True
def setup_callbacks(self):
issue_deprecation_warning(
"The PWViewer.setup_callbacks method is a no-op.",
since="4.1",
stacklevel=3,
)
@invalidate_plot
def clear_annotations(self, index: int | None = None):
"""
Clear callbacks from the plot. If index is not set, clear all
callbacks. If index is set, clear that index (i.e. 0 is the first one
created, 1 is the second one created, -1 is the last one created, etc.)
"""
if index is None:
self._callbacks.clear()
else:
self._callbacks.pop(index)
return self
def list_annotations(self):
"""
List the current callbacks for the plot, along with their index. This
index can be used with `clear_annotations` to remove a callback from the
current plot.
"""
for i, cb in enumerate(self._callbacks):
print(i, cb)
def run_callbacks(self):
for f in self.fields:
keys = self.frb.keys()
for callback in self._callbacks:
# need to pass _swap_axes and adjust all the callbacks
cbw = CallbackWrapper(
self,
self.plots[f],
self.frb,
f,
self._font_properties,
self._font_color,
)
try:
callback(cbw)
except (NotImplementedError, YTDataTypeUnsupported):
raise
except Exception as e:
raise YTPlotCallbackError(callback._type_name) from e
for key in self.frb.keys():
if key not in keys:
del self.frb[key]
def export_to_mpl_figure(
self,
nrows_ncols,
axes_pad=1.0,
label_mode="L",
cbar_location="right",
cbar_size="5%",
cbar_mode="each",
cbar_pad="0%",
):
r"""
Creates a matplotlib figure object with the specified axes arrangement,
nrows_ncols, and maps the underlying figures to the matplotlib axes.
Note that all of these parameters are fed directly to the matplotlib ImageGrid
class to create the new figure layout.
Parameters
----------
nrows_ncols : tuple
the number of rows and columns of the axis grid (e.g., nrows_ncols=(2,2,))
axes_pad : float
padding between axes in inches
label_mode : one of "L", "1", "all"
arrangement of axes that are labeled
cbar_location : one of "left", "right", "bottom", "top"
where to place the colorbar
cbar_size : string (percentage)
scaling of the colorbar (e.g., "5%")
cbar_mode : one of "each", "single", "edge", None
how to represent the colorbar
cbar_pad : string (percentage)
padding between the axis and colorbar (e.g. "5%")
Returns
-------
The return is a matplotlib figure object.
Examples
--------
>>> import yt
>>> ds = yt.load_sample("IsolatedGalaxy")
>>> fields = ["density", "velocity_x", "velocity_y", "velocity_magnitude"]
>>> p = yt.SlicePlot(ds, "z", fields)
>>> p.set_log("velocity_x", False)
>>> p.set_log("velocity_y", False)
>>> fig = p.export_to_mpl_figure((2, 2))
>>> fig.tight_layout()
>>> fig.savefig("test.png")
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
fig = plt.figure()
grid = ImageGrid(
fig,
111,
nrows_ncols=nrows_ncols,
axes_pad=axes_pad,
label_mode=label_mode,
cbar_location=cbar_location,
cbar_size=cbar_size,
cbar_mode=cbar_mode,
cbar_pad=cbar_pad,
)
fields = self.fields
if len(fields) > len(grid):
raise IndexError("not enough axes for the number of fields")
for i, f in enumerate(self.fields):
plot = self.plots[f]
plot.figure = fig
plot.axes = grid[i].axes
plot.cax = grid.cbar_axes[i]
self._setup_plots()
return fig
class NormalPlot:
"""This is the abstraction for SlicePlot and ProjectionPlot, where
we define the common sanitizing mechanism for user input (normal direction).
It is implemented as a mixin class.
"""
@staticmethod
def sanitize_normal_vector(ds, normal) -> str | np.ndarray:
"""Return the name of a cartesian axis whener possible,
or a 3-element 1D ndarray of float64 in any other valid case.
Fail with a descriptive error message otherwise.
"""
axis_names = ds.coordinates.axis_order
if isinstance(normal, str):
if normal not in axis_names:
names_str = ", ".join(f"'{name}'" for name in axis_names)
raise ValueError(
f"'{normal}' is not a valid axis name. Expected one of {names_str}."
)
return normal
if isinstance(normal, (int, np.integer)):
if normal not in (0, 1, 2):
raise ValueError(
f"{normal} is not a valid axis identifier. Expected either 0, 1, or 2."
)
return axis_names[normal]
if not is_sequence(normal):
raise TypeError(
f"{normal} is not a valid normal vector identifier. "
"Expected a string, integer or sequence of 3 floats."
)
if len(normal) != 3:
raise ValueError(
f"{normal} with length {len(normal)} is not a valid normal vector. "
"Expected a 3-element sequence."
)
try:
retv = np.array(normal, dtype="float64")
if retv.shape != (3,):
raise ValueError(f"{normal} is incorrectly shaped.")
except ValueError as exc:
raise TypeError(f"{normal} is not a valid normal vector.") from exc
nonzero_idx = np.nonzero(retv)[0]
if len(nonzero_idx) == 0:
raise ValueError(f"A null vector {normal} isn't a valid normal vector.")
if len(nonzero_idx) == 1:
return axis_names[nonzero_idx[0]]
return retv
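# Informal examples of the mapping implemented above (assuming cartesian axis
# order "x", "y", "z"):
#   sanitize_normal_vector(ds, "x")       -> "x"
#   sanitize_normal_vector(ds, 2)         -> "z"
#   sanitize_normal_vector(ds, (0, 0, 1)) -> "z"                    (single nonzero component)
#   sanitize_normal_vector(ds, (1, 2, 0)) -> array([1., 2., 0.])    (genuinely off-axis)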
class SlicePlot(NormalPlot):
r"""
A dispatch class for :class:`yt.visualization.plot_window.AxisAlignedSlicePlot`
and :class:`yt.visualization.plot_window.OffAxisSlicePlot` objects. This
essentially allows for a single entry point to both types of slice plots,
the distinction being determined by the specified normal vector to the
projection.
The returned plot object can be updated using one of the many helper
functions defined in PlotWindow.
Parameters
----------
ds : :class:`yt.data_objects.static_output.Dataset`
This is the dataset object corresponding to the
simulation output to be plotted.
normal : int, str, or 3-element sequence of floats
This specifies the normal vector to the slice.
Valid int values are 0, 1 and 2. Corresponding str values depend on the
geometry of the dataset and are generally given by `ds.coordinates.axis_order`.
E.g. in cartesian they are 'x', 'y' and 'z'.
An arbitrary normal vector may be specified as a 3-element sequence of floats.
This returns a :class:`OffAxisSlicePlot` object or a
:class:`AxisAlignedSlicePlot` object, depending on whether the requested
normal direction corresponds to a natural axis of the dataset's geometry.
fields : a (or a list of) 2-tuple of strings (ftype, fname)
The name of the field(s) to be plotted.
The following are nominally keyword arguments passed onto the respective
slice plot objects generated by this function.
Keyword Arguments
-----------------
center : 'center', 'c', 'left', 'l', 'right', 'r', id of a global extremum, or array-like
The coordinate of the selection's center.
Defaults to the 'center', i.e. center of the domain.
Centering on the min or max of a field is supported by passing a tuple
such as ('min', ('gas', 'density')) or ('max', ('gas', 'temperature')). A
single string may also be used (e.g. "min_density" or
"max_temperature"), though it's not as flexible and does not allow
selecting an exact field/particle type. With this syntax, the first field
matching the provided name is selected.
'max' or 'm' can be used as a shortcut for ('max', ('gas', 'density'))
'min' can be used as a shortcut for ('min', ('gas', 'density'))
One can also select an exact point as a 3 element coordinate sequence,
e.g. [0.5, 0.5, 0]
Units can be specified by passing in *center* as a tuple containing a
3-element coordinate sequence and string unit name, e.g. ([0, 0.5, 0.5], "cm"),
or by passing in a YTArray. Code units are assumed if unspecified.
The domain edges along the selected *axis* can be selected with
'left'/'l' and 'right'/'r' respectively.
width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
================================== =======================
format example
================================== =======================
(float, string) (10,'kpc')
((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
float 0.2
(float, float) (0.2, 0.3)
================================== =======================
For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
window that is 10 kiloparsecs wide along the x axis and 15
kiloparsecs wide along the y axis. In the other two examples, code
units are assumed, for example (0.2, 0.3) requests a plot that has an
x width of 0.2 and a y width of 0.3 in code units. If units are
provided the resulting plot axis labels will use the supplied units.
axes_unit : string
The name of the unit for the tick labels on the x and y axes.
Defaults to None, which automatically picks an appropriate unit.
If axes_unit is '1', 'u', or 'unitary', it will not display the
units, and only show the axes name.
origin : string or length 1, 2, or 3 sequence.
The location of the origin of the plot coordinate system for
`AxisAlignedSlicePlot` objects; for `OffAxisSlicePlot` objects this
parameter is discarded. This is typically represented by a '-'
separated string or a tuple of strings. In the first index the
y-location is given by 'lower', 'upper', or 'center'. The second index
is the x-location, given as 'left', 'right', or 'center'. Finally, whether
the origin is applied in 'domain' space, plot 'window' space or
'native' simulation coordinate system is given. For example, both
'upper-right-domain' and ['upper', 'right', 'domain'] place the
origin in the upper right hand corner of domain space. If x or y
are not given, a value is inferred. For instance, 'left-domain'
corresponds to the lower-left hand corner of the simulation domain,
'center-domain' corresponds to the center of the simulation domain,
or 'center-window' for the center of the plot window. In the event
that none of these options place the origin in a desired location,
a sequence of tuples and a string specifying the
coordinate space can be given. If plain numeric types are input,
units of `code_length` are assumed. Further examples:
=============================================== ===============================
format example
=============================================== ===============================
'{space}' 'domain'
'{xloc}-{space}' 'left-window'
'{yloc}-{space}' 'upper-domain'
'{yloc}-{xloc}-{space}' 'lower-right-window'
('{space}',) ('window',)
('{xloc}', '{space}') ('right', 'domain')
('{yloc}', '{space}') ('lower', 'window')
('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window')
((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window')
(xloc, yloc, '{space}') (0.23, 0.5, 'domain')
=============================================== ===============================
north_vector : a sequence of floats
A vector defining the 'up' direction in the `OffAxisSlicePlot`; not
used in `AxisAlignedSlicePlot`. This option sets the orientation of the
slicing plane. If not set, an arbitrary grid-aligned north-vector is
chosen.
fontsize : integer
The size of the fonts for the axis, colorbar, and tick labels.
field_parameters : dictionary
A dictionary of field parameters that can be accessed by derived
fields.
data_source : YTSelectionContainer Object
Object to be used for data selection. Defaults to a region covering
the entire simulation.
swap_axes : bool
Whether to plot the image with its x and y axes swapped.
Raises
------
ValueError or TypeError
If `normal` cannot be interpreted as a valid normal direction.
Examples
--------
>>> from yt import load
>>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
>>> slc = SlicePlot(ds, "x", ("gas", "density"), center=[0.2, 0.3, 0.4])
>>> slc = SlicePlot(
... ds, [0.4, 0.2, -0.1], ("gas", "pressure"), north_vector=[0.2, -0.3, 0.1]
... )
"""
# ignoring type check here, because mypy doesn't allow __new__ methods to
# return instances of subclasses. The design we use here is however based
# on the pathlib.Path class from the standard library
# https://github.com/python/mypy/issues/1020
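# In practice the dispatch works as follows (illustrative, not exhaustive):
#   SlicePlot(ds, "z", fields)              -> AxisAlignedSlicePlot
#   SlicePlot(ds, 1, fields)                -> AxisAlignedSlicePlot (axis 'y')
#   SlicePlot(ds, [0.4, 0.2, -0.1], fields) -> OffAxisSlicePlot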
def __new__( # type: ignore
cls, ds, normal, fields, *args, **kwargs
) -> Union["AxisAlignedSlicePlot", "OffAxisSlicePlot"]:
if cls is SlicePlot:
normal = cls.sanitize_normal_vector(ds, normal)
if isinstance(normal, str):
cls = AxisAlignedSlicePlot
else:
cls = OffAxisSlicePlot
self = object.__new__(cls)
return self # type: ignore [return-value]
class ProjectionPlot(NormalPlot):
r"""
A dispatch class for :class:`yt.visualization.plot_window.AxisAlignedProjectionPlot`
and :class:`yt.visualization.plot_window.OffAxisProjectionPlot` objects. This
essentially allows for a single entry point to both types of projection plots,
the distinction being determined by the specified normal vector to the
slice.
The returned plot object can be updated using one of the many helper
functions defined in PlotWindow.
Parameters
----------
ds : :class:`yt.data_objects.static_output.Dataset`
This is the dataset object corresponding to the
simulation output to be plotted.
normal : int, str, or 3-element sequence of floats
This specifies the normal vector to the projection.
Valid int values are 0, 1 and 2. Corresponding str values depend on the
geometry of the dataset and are generally given by `ds.coordinates.axis_order`.
E.g. in cartesian they are 'x', 'y' and 'z'.
An arbitrary normal vector may be specified as a 3-element sequence of floats.
This function will return a :class:`OffAxisProjectionPlot` object or a
:class:`AxisAlignedProjectionPlot` object, depending on whether the requested
normal direction corresponds to a natural axis of the dataset's geometry.
fields : a (or a list of) 2-tuple of strings (ftype, fname)
The name of the field(s) to be plotted.
Any additional positional and keyword arguments are passed down to the appropriate
return class. See :class:`yt.visualization.plot_window.AxisAlignedProjectionPlot`
and :class:`yt.visualization.plot_window.OffAxisProjectionPlot`.
Raises
------
ValueError or TypeError
If `normal` cannot be interpreted as a valid normal direction.
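Examples
--------
A minimal usage sketch (the dataset path is illustrative):
>>> from yt import load
>>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
>>> prj = ProjectionPlot(ds, "z", ("gas", "density"))
>>> prj = ProjectionPlot(ds, [0.4, 0.2, -0.1], ("gas", "density"))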
"""
# ignoring type check here, because mypy doesn't allow __new__ methods to
# return instances of subclasses. The design we use here is however based
# on the pathlib.Path class from the standard library
# https://github.com/python/mypy/issues/1020
def __new__( # type: ignore
cls, ds, normal, fields, *args, **kwargs
) -> Union["AxisAlignedProjectionPlot", "OffAxisProjectionPlot"]:
if cls is ProjectionPlot:
normal = cls.sanitize_normal_vector(ds, normal)
if isinstance(normal, str):
cls = AxisAlignedProjectionPlot
else:
cls = OffAxisProjectionPlot
self = object.__new__(cls)
return self # type: ignore [return-value]
class AxisAlignedSlicePlot(SlicePlot, PWViewerMPL):
r"""Creates a slice plot from a dataset
Given a ds object, an axis to slice along, and a field name
string, this will return a PWViewerMPL object containing
the plot.
The plot can be updated using one of the many helper functions
defined in PlotWindow.
Parameters
----------
ds : `Dataset`
This is the dataset object corresponding to the
simulation output to be plotted.
normal : int or one of 'x', 'y', 'z'
An int corresponding to the axis to slice along (0=x, 1=y, 2=z)
or the axis name itself
fields : string
The name of the field(s) to be plotted.
center : 'center', 'c', 'left', 'l', 'right', 'r', id of a global extremum, or array-like
The coordinate of the selection's center.
Defaults to the 'center', i.e. center of the domain.
Centering on the min or max of a field is supported by passing a tuple
such as ('min', ('gas', 'density')) or ('max', ('gas', 'temperature')). A
single string may also be used (e.g. "min_density" or
"max_temperature"), though it's not as flexible and does not allow
selecting an exact field/particle type. With this syntax, the first field
matching the provided name is selected.
'max' or 'm' can be used as a shortcut for ('max', ('gas', 'density'))
'min' can be used as a shortcut for ('min', ('gas', 'density'))
One can also select an exact point as a 3 element coordinate sequence,
e.g. [0.5, 0.5, 0]
Units can be specified by passing in *center* as a tuple containing a
3-element coordinate sequence and string unit name, e.g. ([0, 0.5, 0.5], "cm"),
or by passing in a YTArray. Code units are assumed if unspecified.
The domain edges along the selected *axis* can be selected with
'left'/'l' and 'right'/'r' respectively.
width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
================================== =======================
format example
================================== =======================
(float, string) (10,'kpc')
((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
float 0.2
(float, float) (0.2, 0.3)
================================== =======================
For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
window that is 10 kiloparsecs wide along the x axis and 15
kiloparsecs wide along the y axis. In the other two examples, code
units are assumed, for example (0.2, 0.3) requests a plot that has an
x width of 0.2 and a y width of 0.3 in code units. If units are
provided the resulting plot axis labels will use the supplied units.
origin : string or length 1, 2, or 3 sequence.
The location of the origin of the plot coordinate system. This
is typically represented by a '-' separated string or a tuple of
strings. In the first index the y-location is given by 'lower',
'upper', or 'center'. The second index is the x-location, given as
'left', 'right', or 'center'. Finally, whether the origin is
applied in 'domain' space, plot 'window' space or 'native'
simulation coordinate system is given. For example, both
'upper-right-domain' and ['upper', 'right', 'domain'] place the
origin in the upper right hand corner of domain space. If x or y
are not given, a value is inferred. For instance, 'left-domain'
corresponds to the lower-left hand corner of the simulation domain,
'center-domain' corresponds to the center of the simulation domain,
or 'center-window' for the center of the plot window. In the event
that none of these options place the origin in a desired location,
a sequence of tuples and a string specifying the
coordinate space can be given. If plain numeric types are input,
units of `code_length` are assumed. Further examples:
=============================================== ===============================
format example
=============================================== ===============================
'{space}' 'domain'
'{xloc}-{space}' 'left-window'
'{yloc}-{space}' 'upper-domain'
'{yloc}-{xloc}-{space}' 'lower-right-window'
('{space}',) ('window',)
('{xloc}', '{space}') ('right', 'domain')
('{yloc}', '{space}') ('lower', 'window')
('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window')
((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window')
(xloc, yloc, '{space}') (0.23, 0.5, 'domain')
=============================================== ===============================
axes_unit : string
The name of the unit for the tick labels on the x and y axes.
Defaults to None, which automatically picks an appropriate unit.
If axes_unit is '1', 'u', or 'unitary', it will not display the
units, and only show the axes name.
fontsize : integer
The size of the fonts for the axis, colorbar, and tick labels.
field_parameters : dictionary
A dictionary of field parameters that can be accessed by derived
fields.
data_source: YTSelectionContainer object
Object to be used for data selection. Defaults to ds.all_data(), a
region covering the full domain
buff_size: length 2 sequence
Size of the buffer to use for the image, i.e. the number of resolution elements
used. Effectively sets a resolution limit to the image if buff_size is
smaller than the finest gridding.
Examples
--------
This will save an image in the file 'sliceplot_Density.png'
>>> from yt import load
>>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
>>> p = SlicePlot(ds, 2, "density", "c", (20, "kpc"))
>>> p.save("sliceplot")
"""
_plot_type = "Slice"
_frb_generator = FixedResolutionBuffer
def __init__(
self,
ds,
normal,
fields,
center="center",
width=None,
axes_unit=None,
origin="center-window",
fontsize=18,
field_parameters=None,
window_size=8.0,
aspect=None,
data_source=None,
buff_size=(800, 800),
*,
north_vector=None,
):
if north_vector is not None:
# this kwarg exists only for symmetry reasons with OffAxisSlicePlot
mylog.warning(
"Ignoring 'north_vector' keyword as it is ill-defined for "
"an AxisAlignedSlicePlot object."
)
del north_vector
normal = self.sanitize_normal_vector(ds, normal)
# this will handle time series data and controllers
axis = fix_axis(normal, ds)
# print('center at SlicePlot init: ', center)
# print('current domain left edge: ', ds.domain_left_edge)
(bounds, center, display_center) = get_window_parameters(
axis, center, width, ds
)
# print('center after get_window_parameters: ', center)
if field_parameters is None:
field_parameters = {}
if isinstance(ds, YTSpatialPlotDataset):
slc = ds.all_data()
slc.axis = axis
if slc.axis != ds.parameters["axis"]:
raise RuntimeError(f"Original slice axis is {ds.parameters['axis']}.")
else:
slc = ds.slice(
axis,
center[axis],
field_parameters=field_parameters,
center=center,
data_source=data_source,
)
slc.get_data(fields)
validate_mesh_fields(slc, fields)
PWViewerMPL.__init__(
self,
slc,
bounds,
origin=origin,
fontsize=fontsize,
fields=fields,
window_size=window_size,
aspect=aspect,
buff_size=buff_size,
geometry=ds.geometry,
)
if axes_unit is None:
axes_unit = get_axes_unit(width, ds)
self.set_axes_unit(axes_unit)
class AxisAlignedProjectionPlot(ProjectionPlot, PWViewerMPL):
r"""Creates a projection plot from a dataset
Given a ds object, an axis to project along, and a field name
string, this will return a PWViewerMPL object containing
the plot.
The plot can be updated using one of the many helper functions
defined in PlotWindow.
Parameters
----------
ds : `Dataset`
This is the dataset object corresponding to the
simulation output to be plotted.
normal : int or one of 'x', 'y', 'z'
An int corresponding to the axis to project along (0=x, 1=y, 2=z)
or the axis name itself
fields : string
The name of the field(s) to be plotted.
center : 'center', 'c', 'left', 'l', 'right', 'r', id of a global extremum, or array-like
The coordinate of the selection's center.
Defaults to the 'center', i.e. center of the domain.
Centering on the min or max of a field is supported by passing a tuple
such as ('min', ('gas', 'density')) or ('max', ('gas', 'temperature')). A
single string may also be used (e.g. "min_density" or
"max_temperature"), though it's not as flexible and does not allow
selecting an exact field/particle type. With this syntax, the first field
matching the provided name is selected.
'max' or 'm' can be used as a shortcut for ('max', ('gas', 'density'))
'min' can be used as a shortcut for ('min', ('gas', 'density'))
One can also select an exact point as a 3 element coordinate sequence,
e.g. [0.5, 0.5, 0]
Units can be specified by passing in *center* as a tuple containing a
3-element coordinate sequence and string unit name, e.g. ([0, 0.5, 0.5], "cm"),
or by passing in a YTArray. Code units are assumed if unspecified.
The domain edges along the selected *axis* can be selected with
'left'/'l' and 'right'/'r' respectively.
width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
================================== =======================
format example
================================== =======================
(float, string) (10,'kpc')
((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
float 0.2
(float, float) (0.2, 0.3)
================================== =======================
For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
window that is 10 kiloparsecs wide along the x axis and 15
kiloparsecs wide along the y axis. In the other two examples, code
units are assumed, for example (0.2, 0.3) requests a plot that has an
x width of 0.2 and a y width of 0.3 in code units. If units are
provided the resulting plot axis labels will use the supplied units.
axes_unit : string
The name of the unit for the tick labels on the x and y axes.
Defaults to None, which automatically picks an appropriate unit.
If axes_unit is '1', 'u', or 'unitary', it will not display the
units, and only show the axes name.
origin : string or length 1, 2, or 3 sequence.
The location of the origin of the plot coordinate system. This
is typically represented by a '-' separated string or a tuple of
strings. In the first index the y-location is given by 'lower',
'upper', or 'center'. The second index is the x-location, given as
'left', 'right', or 'center'. Finally, whether the origin is
applied in 'domain' space, plot 'window' space or 'native'
simulation coordinate system is given. For example, both
'upper-right-domain' and ['upper', 'right', 'domain'] place the
origin in the upper right hand corner of domain space. If x or y
are not given, a value is inferred. For instance, 'left-domain'
corresponds to the lower-left hand corner of the simulation domain,
'center-domain' corresponds to the center of the simulation domain,
or 'center-window' for the center of the plot window. In the event
that none of these options place the origin in a desired location,
a sequence of tuples and a string specifying the
coordinate space can be given. If plain numeric types are input,
units of `code_length` are assumed. Further examples:
=============================================== ===============================
format example
=============================================== ===============================
'{space}' 'domain'
'{xloc}-{space}' 'left-window'
'{yloc}-{space}' 'upper-domain'
'{yloc}-{xloc}-{space}' 'lower-right-window'
('{space}',) ('window',)
('{xloc}', '{space}') ('right', 'domain')
('{yloc}', '{space}') ('lower', 'window')
('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window')
((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window')
(xloc, yloc, '{space}') (0.23, 0.5, 'domain')
=============================================== ===============================
data_source : YTSelectionContainer Object
Object to be used for data selection. Defaults to a region covering
the entire simulation.
weight_field : string
The name of the weighting field. Set to None for no weight.
max_level: int
The maximum level to project to.
fontsize : integer
The size of the fonts for the axis, colorbar, and tick labels.
method : string
The method of projection. Valid methods are:
"integrate" with no weight_field specified : integrate the requested
field along the line of sight.
"integrate" with a weight_field specified : weight the requested
field by the weighting field and integrate along the line of sight.
"max" : pick out the maximum value of the field in the line of sight.
"min" : pick out the minimum value of the field in the line of sight.
"sum" : This method is the same as integrate, except that it does not
multiply by a path length when performing the integration, and is
just a straight summation of the field along the given axis. WARNING:
This should only be used for uniform resolution grid datasets, as other
datasets may result in unphysical images.
window_size : float
The size of the window in inches. Set to 8 by default.
aspect : float
The aspect ratio of the plot. Set to None for 1.
field_parameters : dictionary
A dictionary of field parameters that can be accessed by derived
fields.
buff_size: length 2 sequence
Size of the buffer to use for the image, i.e. the number of resolution elements
used. Effectively sets a resolution limit to the image if buff_size is
smaller than the finest gridding.
moment : integer, optional
for a weighted projection, moment = 1 (the default) corresponds to a
weighted average. moment = 2 corresponds to a weighted standard
deviation.
Examples
--------
Create a projection plot with a width of 20 kiloparsecs centered on the
center of the simulation box:
>>> from yt import load
>>> ds = load("IsolateGalaxygalaxy0030/galaxy0030")
>>> p = AxisAlignedProjectionPlot(ds, "z", ("gas", "density"), width=(20, "kpc"))
"""
_plot_type = "Projection"
_frb_generator = FixedResolutionBuffer
def __init__(
self,
ds,
normal,
fields,
center="center",
width=None,
axes_unit=None,
weight_field=None,
max_level=None,
origin="center-window",
fontsize=18,
field_parameters=None,
data_source=None,
method="integrate",
window_size=8.0,
buff_size=(800, 800),
aspect=None,
*,
moment=1,
):
if method == "mip":
issue_deprecation_warning(
"'mip' method is a deprecated alias for 'max'. "
"Please use method='max' directly.",
since="4.1",
stacklevel=3,
)
method = "max"
normal = self.sanitize_normal_vector(ds, normal)
axis = fix_axis(normal, ds)
# If a non-weighted integral projection, assure field-label reflects that
if weight_field is None and method == "integrate":
self.projected = True
(bounds, center, display_center) = get_window_parameters(
axis, center, width, ds
)
if field_parameters is None:
field_parameters = {}
# We don't use the plot's data source for validation like in the other
# plotting classes to avoid an exception
test_data_source = ds.all_data()
validate_mesh_fields(test_data_source, fields)
if isinstance(ds, YTSpatialPlotDataset):
proj = ds.all_data()
proj.axis = axis
if proj.axis != ds.parameters["axis"]:
raise RuntimeError(
f"Original projection axis is {ds.parameters['axis']}."
)
if weight_field is not None:
proj.weight_field = proj._determine_fields(weight_field)[0]
else:
proj.weight_field = weight_field
proj.center = center
else:
proj = ds.proj(
fields,
axis,
weight_field=weight_field,
center=center,
data_source=data_source,
field_parameters=field_parameters,
method=method,
max_level=max_level,
moment=moment,
)
self.moment = moment
PWViewerMPL.__init__(
self,
proj,
bounds,
fields=fields,
origin=origin,
fontsize=fontsize,
window_size=window_size,
aspect=aspect,
buff_size=buff_size,
geometry=ds.geometry,
)
if axes_unit is None:
axes_unit = get_axes_unit(width, ds)
self.set_axes_unit(axes_unit)
class OffAxisSlicePlot(SlicePlot, PWViewerMPL):
r"""Creates an off axis slice plot from a dataset
Given a ds object, a normal vector defining a slicing plane, and
a field name string, this will return a PWViewerMPL object
containing the plot.
The plot can be updated using one of the many helper functions
defined in PlotWindow.
Parameters
----------
ds : :class:`yt.data_objects.static_output.Dataset`
This is the dataset object corresponding to the
simulation output to be plotted.
normal : a sequence of floats
The vector normal to the slicing plane.
fields : string
The name of the field(s) to be plotted.
center : 'center', 'c', id of a global extremum, or array-like
The coordinate of the selection's center.
Defaults to the 'center', i.e. center of the domain.
Centering on the min or max of a field is supported by passing a tuple
such as ('min', ('gas', 'density')) or ('max', ('gas', 'temperature')). A
single string may also be used (e.g. "min_density" or
"max_temperature"), though it's not as flexible and does not allow
selecting an exact field/particle type. With this syntax, the first field
matching the provided name is selected.
'max' or 'm' can be used as a shortcut for ('max', ('gas', 'density'))
'min' can be used as a shortcut for ('min', ('gas', 'density'))
One can also select an exact point as a 3 element coordinate sequence,
e.g. [0.5, 0.5, 0]
Units can be specified by passing in *center* as a tuple containing a
3-element coordinate sequence and string unit name, e.g. ([0, 0.5, 0.5], "cm"),
or by passing in a YTArray. Code units are assumed if unspecified.
width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
================================== =======================
format example
================================== =======================
(float, string) (10,'kpc')
((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
float 0.2
(float, float) (0.2, 0.3)
================================== =======================
For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
window that is 10 kiloparsecs wide along the x axis and 15
kiloparsecs wide along the y axis. In the other two examples, code
units are assumed, for example (0.2, 0.3) requests a plot that has an
x width of 0.2 and a y width of 0.3 in code units. If units are
provided the resulting plot axis labels will use the supplied units.
axes_unit : string
The name of the unit for the tick labels on the x and y axes.
Defaults to None, which automatically picks an appropriate unit.
If axes_unit is '1', 'u', or 'unitary', it will not display the
units, and only show the axes name.
north_vector : a sequence of floats
A vector defining the 'up' direction in the plot. This
option sets the orientation of the slicing plane. If not
set, an arbitrary grid-aligned north-vector is chosen.
fontsize : integer
The size of the fonts for the axis, colorbar, and tick labels.
field_parameters : dictionary
A dictionary of field parameters that can be accessed by derived
fields.
data_source : YTSelectionContainer Object
Object to be used for data selection. Defaults to ds.all_data(), a
region covering the full domain.
buff_size: length 2 sequence
Size of the buffer to use for the image, i.e. the number of resolution elements
used. Effectively sets a resolution limit to the image if buff_size is
smaller than the finest gridding.
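Examples
--------
A minimal usage sketch (the dataset path is illustrative):
>>> from yt import load
>>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
>>> slc = OffAxisSlicePlot(
...     ds, [0.4, 0.2, -0.1], ("gas", "density"), north_vector=[0.2, -0.3, 0.1]
... )
>>> slc.save("offaxis_slice")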
"""
_plot_type = "OffAxisSlice"
_frb_generator = FixedResolutionBuffer
_supported_geometries = ("cartesian", "spectral_cube")
def __init__(
self,
ds,
normal,
fields,
center="center",
width=None,
axes_unit=None,
north_vector=None,
fontsize=18,
field_parameters=None,
data_source=None,
buff_size=(800, 800),
*,
origin=None,
):
if origin is not None:
# this kwarg exists only for symmetry reasons with AxisAlignedSlicePlot
# in OffAxisSlicePlot, the origin is hardcoded
mylog.warning(
"Ignoring 'origin' keyword as it is ill-defined for "
"an OffAxisSlicePlot object."
)
del origin
if ds.geometry not in self._supported_geometries:
raise NotImplementedError(
f"off-axis slices are not supported for {ds.geometry!r} geometry\n"
f"currently supported geometries: {self._supported_geometries!r}"
)
# bounds are in cutting plane coordinates, centered on 0:
# [xmin, xmax, ymin, ymax]. Can derive width/height back
# from these. unit is code_length
(bounds, center_rot) = get_oblique_window_parameters(normal, center, width, ds)
if field_parameters is None:
field_parameters = {}
if isinstance(ds, YTSpatialPlotDataset):
cutting = ds.all_data()
cutting.axis = None
cutting._inv_mat = ds.parameters["_inv_mat"]
else:
cutting = ds.cutting(
normal,
center,
north_vector=north_vector,
field_parameters=field_parameters,
data_source=data_source,
)
cutting.get_data(fields)
validate_mesh_fields(cutting, fields)
# Hard-coding the origin keyword since the other two options
# aren't well-defined for off-axis data objects
PWViewerMPL.__init__(
self,
cutting,
bounds,
fields=fields,
origin="center-window",
periodic=False,
oblique=True,
fontsize=fontsize,
buff_size=buff_size,
)
if axes_unit is None:
axes_unit = get_axes_unit(width, ds)
self.set_axes_unit(axes_unit)
class OffAxisProjectionDummyDataSource:
_type_name = "proj"
_key_fields: list[str] = []
def __init__(
self,
center,
ds,
normal_vector,
width,
fields,
interpolated,
weight=None,
volume=None,
no_ghost=False,
le=None,
re=None,
north_vector=None,
depth=None,
method="integrate",
data_source=None,
*,
moment=1,
):
validate_moment(moment, weight)
self.center = center
self.ds = ds
self.axis = None # always true for oblique data objects
self.normal_vector = normal_vector
self.width = width
self.depth = depth
if data_source is None:
self.dd = ds.all_data()
else:
self.dd = data_source
fields = self.dd._determine_fields(fields)
self.fields = fields
self.interpolated = interpolated
if weight is not None:
weight = self.dd._determine_fields(weight)[0]
self.weight_field = weight
self.volume = volume
self.no_ghost = no_ghost
self.le = le
self.re = re
self.north_vector = north_vector
self.method = method
self.orienter = Orientation(normal_vector, north_vector=north_vector)
self.moment = moment
def _determine_fields(self, *args):
return self.dd._determine_fields(*args)
class OffAxisProjectionPlot(ProjectionPlot, PWViewerMPL):
r"""Creates an off axis projection plot from a dataset
Given a ds object, a normal vector to project along, and
a field name string, this will return a PWViewerMPL object
containing the plot.
The plot can be updated using one of the many helper functions
defined in PlotWindow.
Parameters
----------
ds : :class:`yt.data_objects.static_output.Dataset`
This is the dataset object corresponding to the
simulation output to be plotted.
normal : a sequence of floats
The vector normal to the slicing plane.
fields : string
The name of the field(s) to be plotted.
center : 'center', 'c', id of a global extremum, or array-like
The coordinate of the selection's center.
Defaults to the 'center', i.e. center of the domain.
Centering on the min or max of a field is supported by passing a tuple
such as ('min', ('gas', 'density')) or ('max', ('gas', 'temperature')). A
single string may also be used (e.g. "min_density" or
"max_temperature"), though it's not as flexible and does not allow
selecting an exact field/particle type. With this syntax, the first field
matching the provided name is selected.
'max' or 'm' can be used as a shortcut for ('max', ('gas', 'density'))
'min' can be used as a shortcut for ('min', ('gas', 'density'))
One can also select an exact point as a 3 element coordinate sequence,
e.g. [0.5, 0.5, 0]
Units can be specified by passing in *center* as a tuple containing a
3-element coordinate sequence and string unit name, e.g. ([0, 0.5, 0.5], "cm"),
or by passing in a YTArray. Code units are assumed if unspecified.
width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
================================== =======================
format example
================================== =======================
(float, string) (10,'kpc')
((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
float 0.2
(float, float) (0.2, 0.3)
================================== =======================
For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
window that is 10 kiloparsecs wide along the x axis and 15
kiloparsecs wide along the y axis. In the other two examples, code
units are assumed, for example (0.2, 0.3) requests a plot that has an
x width of 0.2 and a y width of 0.3 in code units. If units are
provided the resulting plot axis labels will use the supplied units.
depth : A tuple or a float
A tuple containing the depth to project through and the string
key of the unit: (depth, 'unit'). If set to a float, code units
are assumed.
weight_field : string
The name of the weighting field. Set to None for no weight.
max_level: int
The maximum level to project to.
axes_unit : string
The name of the unit for the tick labels on the x and y axes.
Defaults to None, which automatically picks an appropriate unit.
If axes_unit is '1', 'u', or 'unitary', it will not display the
units, and only show the axes name.
north_vector : a sequence of floats
A vector defining the 'up' direction in the plot. This
option sets the orientation of the slicing plane. If not
set, an arbitrary grid-aligned north-vector is chosen.
fontsize : integer
The size of the fonts for the axis, colorbar, and tick labels.
method : string
The method of projection. Valid methods are:
"integrate" with no weight_field specified : integrate the requested
field along the line of sight.
"integrate" with a weight_field specified : weight the requested
field by the weighting field and integrate along the line of sight.
"sum" : This method is the same as integrate, except that it does not
multiply by a path length when performing the integration, and is
just a straight summation of the field along the given axis. WARNING:
This should only be used for uniform resolution grid datasets, as other
datasets may result in unphysical images.
moment : integer, optional
for a weighted projection, moment = 1 (the default) corresponds to a
weighted average. moment = 2 corresponds to a weighted standard
deviation.
data_source: YTSelectionContainer object
Object to be used for data selection. Defaults to ds.all_data(), a
region covering the full domain
buff_size: length 2 sequence
Size of the buffer to use for the image, i.e. the number of resolution elements
used. Effectively sets a resolution limit to the image if buff_size is
smaller than the finest gridding.
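Examples
--------
A minimal usage sketch (the dataset path is illustrative):
>>> from yt import load
>>> ds = load("IsolatedGalaxy/galaxy0030/galaxy0030")
>>> prj = OffAxisProjectionPlot(
...     ds, [0.4, 0.2, -0.1], ("gas", "density"), width=(20, "kpc")
... )
>>> prj.save("offaxis_projection")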
"""
_plot_type = "OffAxisProjection"
_frb_generator = OffAxisProjectionFixedResolutionBuffer
_supported_geometries = ("cartesian", "spectral_cube")
def __init__(
self,
ds,
normal,
fields,
center="center",
width=None,
depth=None,
axes_unit=None,
weight_field=None,
max_level=None,
north_vector=None,
volume=None,
no_ghost=False,
le=None,
re=None,
interpolated=False,
fontsize=18,
method="integrate",
moment=1,
data_source=None,
buff_size=(800, 800),
):
if ds.geometry not in self._supported_geometries:
raise NotImplementedError(
"off-axis slices are not supported"
f" for {ds.geometry!r} geometry\n"
"currently supported geometries:"
f" {self._supported_geometries!r}"
)
# center_rot normalizes the center to (0,0),
# units match bounds
# for SPH data, we want to input the original center
# the cython backend handles centering to this point and
# rotation.
# get3bounds gets a depth 0.5 * diagonal + margin in the
# depth=None case.
(bounds, center_rot) = get_oblique_window_parameters(
normal,
center,
width,
ds,
depth=depth,
get3bounds=True,
)
# will probably fail if you try to project an SPH and non-SPH
# field in a single call
# checks for SPH fields copied from the
# _ortho_pixelize method in cartesian_coordinates.py
## data_source might be None here
## (OffAxisProjectionDummyDataSource gets used later)
if data_source is None:
data_source = ds.all_data()
field = data_source._determine_fields(fields)[0]
finfo = data_source.ds.field_info[field]
is_sph_field = finfo.is_sph_field
particle_datasets = (ParticleDataset, StreamParticlesDataset)
dom_width = data_source.ds.domain_width
cubic_domain = dom_width.max() == dom_width.min()
if (isinstance(data_source.ds, particle_datasets) and is_sph_field) or (
isinstance(data_source.ds.index, OctreeIndex) and cubic_domain
):
center_use = parse_center_array(center, ds=data_source.ds, axis=None)
else:
center_use = center_rot
fields = list(iter_fields(fields))[:]
# oap_width = ds.arr(
# (bounds[1] - bounds[0],
# bounds[3] - bounds[2])
# )
OffAxisProj = OffAxisProjectionDummyDataSource(
center_use,
ds,
normal,
width,
fields,
interpolated,
weight=weight_field,
volume=volume,
no_ghost=no_ghost,
le=le,
re=re,
north_vector=north_vector,
depth=depth,
method=method,
data_source=data_source,
moment=moment,
)
validate_mesh_fields(OffAxisProj, fields)
if max_level is not None:
OffAxisProj.dd.max_level = max_level
# If a non-weighted, integral projection, assure field label
# reflects that
if weight_field is None and OffAxisProj.method == "integrate":
self.projected = True
self.moment = moment
# Hard-coding the origin keyword since the other two options
# aren't well-defined for off-axis data objects
PWViewerMPL.__init__(
self,
OffAxisProj,
bounds,
fields=fields,
origin="center-window",
periodic=False,
oblique=True,
fontsize=fontsize,
buff_size=buff_size,
)
if axes_unit is None:
axes_unit = get_axes_unit(width, ds)
self.set_axes_unit(axes_unit)
class WindowPlotMPL(ImagePlotMPL):
"""A container for a single PlotWindow matplotlib figure and axes"""
def __init__(
self,
data,
extent,
figure_size,
fontsize,
aspect,
figure,
axes,
cax,
mpl_proj,
mpl_transform,
*,
norm_handler: NormHandler,
colorbar_handler: ColorbarHandler,
alpha: AlphaT = None,
):
self._projection = mpl_proj
self._transform = mpl_transform
self._setup_layout_constraints(figure_size, fontsize)
self._draw_frame = True
self._aspect = ((extent[1] - extent[0]) / (extent[3] - extent[2])).in_cgs()
self._unit_aspect = aspect
# Compute layout
self._figure_size = figure_size
self._draw_axes = True
fontscale = float(fontsize) / self.__class__._default_font_size
if fontscale < 1.0:
fontscale = np.sqrt(fontscale)
if is_sequence(figure_size):
self._cb_size = 0.0375 * figure_size[0]
else:
self._cb_size = 0.0375 * figure_size
self._ax_text_size = [1.2 * fontscale, 0.9 * fontscale]
self._top_buff_size = 0.30 * fontscale
super().__init__(
figure=figure,
axes=axes,
cax=cax,
norm_handler=norm_handler,
colorbar_handler=colorbar_handler,
)
self._init_image(data, extent, aspect, alpha=alpha)
def _create_axes(self, axrect):
self.axes = self.figure.add_axes(axrect, projection=self._projection)
def plot_2d(
ds,
fields,
center="center",
width=None,
axes_unit=None,
origin="center-window",
fontsize=18,
field_parameters=None,
window_size=8.0,
aspect=None,
data_source=None,
) -> AxisAlignedSlicePlot:
r"""Creates a plot of a 2D dataset
Given a ds object and a field name string, this will return a
PWViewerMPL object containing the plot.
The plot can be updated using one of the many helper functions
defined in PlotWindow.
Parameters
----------
ds : `Dataset`
This is the dataset object corresponding to the
simulation output to be plotted.
fields : string
The name of the field(s) to be plotted.
center : 'center', 'c', id of a global extremum, or array-like
The coordinate of the selection's center.
Defaults to the 'center', i.e. center of the domain.
Centering on the min or max of a field is supported by passing a tuple
such as ('min', ('gas', 'density')) or ('max', ('gas', 'temperature')). A
single string may also be used (e.g. "min_density" or
"max_temperature"), though it's not as flexible and does not allow
selecting an exact field/particle type. With this syntax, the first field
matching the provided name is selected.
'max' or 'm' can be used as a shortcut for ('max', ('gas', 'density'))
'min' can be used as a shortcut for ('min', ('gas', 'density'))
One can also select an exact point as a 3 element coordinate sequence,
e.g. [0.5, 0.5, 0]
Units can be specified by passing in *center* as a tuple containing a
3-element coordinate sequence and string unit name, e.g. ([0, 0.5, 0.5], "cm"),
or by passing in a YTArray. Code units are assumed if unspecified.
plot_2d also accepts a coordinate in two dimensions.
width : tuple or a float.
Width can have four different formats to support windows with variable
x and y widths. They are:
================================== =======================
format example
================================== =======================
(float, string) (10,'kpc')
((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
float 0.2
(float, float) (0.2, 0.3)
================================== =======================
For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
window that is 10 kiloparsecs wide along the x axis and 15
kiloparsecs wide along the y axis. In the other two examples, code
units are assumed, for example (0.2, 0.3) requests a plot that has an
x width of 0.2 and a y width of 0.3 in code units. If units are
provided the resulting plot axis labels will use the supplied units.
origin : string or length 1, 2, or 3 sequence.
The location of the origin of the plot coordinate system. This
is typically represented by a '-' separated string or a tuple of
strings. In the first index the y-location is given by 'lower',
'upper', or 'center'. The second index is the x-location, given as
'left', 'right', or 'center'. Finally, whether the origin is
applied in 'domain' space, plot 'window' space or 'native'
simulation coordinate system is given. For example, both
'upper-right-domain' and ['upper', 'right', 'domain'] place the
origin in the upper right hand corner of domain space. If x or y
are not given, a value is inferred. For instance, 'left-domain'
corresponds to the lower-left hand corner of the simulation domain,
'center-domain' corresponds to the center of the simulation domain,
or 'center-window' for the center of the plot window. In the event
that none of these options place the origin in a desired location,
a sequence of tuples and a string specifying the
coordinate space can be given. If plain numeric types are input,
units of `code_length` are assumed. Further examples:
=============================================== ===============================
format example
=============================================== ===============================
'{space}' 'domain'
'{xloc}-{space}' 'left-window'
'{yloc}-{space}' 'upper-domain'
'{yloc}-{xloc}-{space}' 'lower-right-window'
('{space}',) ('window',)
('{xloc}', '{space}') ('right', 'domain')
('{yloc}', '{space}') ('lower', 'window')
('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window')
((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window')
(xloc, yloc, '{space}') (0.23, 0.5, 'domain')
=============================================== ===============================
axes_unit : string
The name of the unit for the tick labels on the x and y axes.
Defaults to None, which automatically picks an appropriate unit.
If axes_unit is '1', 'u', or 'unitary', it will not display the
units, and only show the axes name.
fontsize : integer
The size of the fonts for the axis, colorbar, and tick labels.
field_parameters : dictionary
A dictionary of field parameters that can be accessed by derived
fields.
data_source: YTSelectionContainer object
Object to be used for data selection. Defaults to ds.all_data(), a
region covering the full domain
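Examples
--------
A minimal usage sketch; the dataset path is illustrative and assumed to
point at a 2D dataset:
>>> from yt import load
>>> ds = load("my_2d_dataset/output_0010")
>>> p = plot_2d(ds, ("gas", "density"), width=(20, "kpc"))
>>> p.save()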
"""
if ds.dimensionality != 2:
raise RuntimeError("plot_2d only plots 2D datasets!")
match ds.geometry:
case Geometry.CARTESIAN | Geometry.POLAR | Geometry.SPECTRAL_CUBE:
axis = "z"
case Geometry.CYLINDRICAL:
axis = "theta"
case Geometry.SPHERICAL:
axis = "phi"
case Geometry.GEOGRAPHIC | Geometry.INTERNAL_GEOGRAPHIC:
raise NotImplementedError(
f"plot_2d does not yet support datasets with {ds.geometry} geometries"
)
case _:
assert_never(ds.geometry)
# Part of the convenience of plot_2d is to eliminate the use of the
# superfluous coordinate, so we do that also with the center argument
if not isinstance(center, str) and obj_length(center) == 2:
c0_string = isinstance(center[0], str)
c1_string = isinstance(center[1], str)
if not c0_string and not c1_string:
if obj_length(center[0]) == 2 and c1_string:
# turning off type checking locally because center arg is hard to type correctly
center = ds.arr(center[0], center[1]) # type: ignore [unreachable]
elif not isinstance(center, YTArray):
center = ds.arr(center, "code_length")
center.convert_to_units("code_length")
center = ds.arr([center[0], center[1], ds.domain_center[2]])
return AxisAlignedSlicePlot(
ds,
axis,
fields,
center=center,
width=width,
axes_unit=axes_unit,
origin=origin,
fontsize=fontsize,
field_parameters=field_parameters,
window_size=window_size,
aspect=aspect,
data_source=data_source,
)
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@visualization@[email protected]_END.py
|
{
"filename": "_idssrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/pie/_idssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class IdssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="idssrc", parent_name="pie", **kwargs):
super(IdssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@pie@[email protected]_END.py
|
{
"filename": "train.py",
"repo_name": "ML4GW/aframe",
"repo_path": "aframe_extracted/aframe-main/aframe/tasks/train/train.py",
"type": "Python"
}
|
import base64
import json
import shlex
import sys
from pathlib import Path
from typing import Dict
import law
import luigi
import yaml
from kr8s.objects import Secret
from luigi.contrib.kubernetes import KubernetesJobTask
from luigi.util import inherits
from aframe.base import AframeSingularityTask, AframeWrapperTask, logger
from aframe.config import s3, wandb
from aframe.helm import authenticate
from aframe.targets import Bytes, LawS3Target
from aframe.tasks.train.base import RemoteTrainBase, TrainBase
from aframe.tasks.train.utils import stream_command
class TrainLocal(TrainBase, AframeSingularityTask):
@property
def default_image(self):
return "train.sif"
def sandbox_env(self, _) -> Dict[str, str]:
env = super().sandbox_env(_)
for key in ["name", "entity", "project", "group", "tags", "api_key"]:
value = getattr(wandb(), key)
if value:
env[f"WANDB_{key.upper()}"] = value
return env
def run(self):
"""
Run local training in subprocess so that lightning
can properly handle multi-gpu distribution.
"""
args = self.get_args()
if len(self.gpus.split(",")) > 1:
args.append("--trainer.strategy=ddp")
cmd = [sys.executable, "-m", "train", "fit"] + args
cmd_str = shlex.join(cmd)
logger.info(f"Executing command {cmd_str}")
stream_command(cmd)
def output(self):
dir = law.LocalDirectoryTarget(str(self.run_dir))
return dir.child("model.pt", type="f")
class TrainRemote(KubernetesJobTask, RemoteTrainBase):
dev = luigi.BoolParameter(default=False)
use_init_container = luigi.BoolParameter(
default=False,
description="Whether to use the git-sync init-container to sync "
"a remote aframe git repository into the pod. Defaults to False, "
"in which case the code added to the container image at build "
"time will be used",
)
git_url = luigi.Parameter(
default="[email protected]:ML4GW/aframev2.git",
description="The git repository to clone into the pod"
"Only relevant if `use_init_container` is True",
)
git_ref = luigi.Parameter(
default="main",
description="The git branch or commit to checkout"
"Only relevant if `use_init_container` is True",
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not str(self.run_dir).startswith("s3://"):
raise ValueError(
"run_dir must be an s3 path for remote training tasks"
)
if not str(self.data_dir).startswith("s3://"):
raise ValueError(
"data_dir must be an s3 path for remote training tasks"
)
@property
def default_image(self):
return None
@property
def name(self):
return self.__class__.__name__.lower()
@property
def kubernetes_namespace(self):
return "bbhnet"
@property
def pod_creation_wait_interal(self):
return 7200
def get_config(self):
# read in training config into a json string
# to pass to the remote training job via
# the jsonargparse command line
with open(self.config, "r") as f:
doc = yaml.safe_load(f)
json_string = json.dumps(doc)
return json_string
def get_args(self):
# get args from local train base class, removing the first two,
# which reference the path to a local config file.
# For remote training, we'll need to set the config
# as an environment variable of raw yaml content
args = super().get_args()
args = args[2:]
args = ["fit", "--config", self.get_config()] + args
return args
def output(self):
return LawS3Target(str(self.run_dir / "model.pt"), format=Bytes)
@property
def gpu_constraints(self):
spec = {}
spec["affinity"] = {}
spec["affinity"]["nodeAffinity"] = {}
spec["affinity"]["nodeAffinity"][
"requiredDuringSchedulingIgnoredDuringExecution"
] = {
"nodeSelectorTerms": [
{
"matchExpressions": [
{
"key": "nvidia.com/gpu.memory",
"operator": "Gt",
"values": [f"{self.min_gpu_memory}"],
}
]
}
]
}
return spec
@property
def backoff_limit(self):
return 1
@property
def s3_secret(self):
# kubernetes config for creating
# secret containing credentials for s3 access
spec = {
"apiVersion": "v1",
"kind": "Secret",
"metadata": {"name": "s3-credentials", "type": "Opaque"},
}
spec["stringData"] = s3().get_s3_credentials()
return Secret(resource=spec)
def git_secret(self):
# kubernetes config for creating
# secret containing users ssh
# key for git access if using git-sync init containers
spec = {
"apiVersion": "v1",
"kind": "Secret",
"metadata": {"name": "git-creds", "type": "Opaque"},
}
ssh_key = Path.home() / ".ssh" / "id_rsa"
with open(ssh_key, "r") as f:
key = f.read()
ssh_key = base64.b64encode(key.encode("ascii")).decode("ascii")
spec["data"] = {"ssh": ssh_key}
return Secret(resource=spec)
@property
def init_containers(self):
# kubernetes config for creating
# init container to sync a remote git
# repository into the pod at run time
config = [
{
"name": "git-sync",
"image": "registry.k8s.io/git-sync/git-sync:v4.2.1",
"env": [
{"name": "GITSYNC_REPO", "value": self.git_url},
{"name": "GITSYNC_REF", "value": self.git_ref},
{"name": "GITSYNC_ROOT", "value": "/opt"},
{"name": "GITSYNC_LINK", "value": "aframe"},
{"name": "GITSYNC_ONE_TIME", "value": "true"},
{"name": "GITSYNC_SSH_KNOWN_HOSTS", "value": "false"},
{"name": "GITSYNC_SUBMODULES", "value": "recursive"},
{"name": "GITSYNC_ADD_USER", "value": "true"},
{"name": "GITSYNC_SYNC_TIMEOUT", "value": "360s"},
],
"volumeMounts": [
{"name": self.name, "mountPath": "/opt"},
{
"name": f"{self.name}-git-secret",
"mountPath": "/etc/git-secret",
"readOnly": True,
},
],
"securityContext": {"runAsUser": 65533},
}
]
return config
@property
def spec_schema(self):
spec = self.gpu_constraints
spec["containers"] = [
{
"name": "train",
"image": self.remote_image,
"volumeMounts": [
{"mountPath": "/dev/shm", "name": "dshm"},
],
"imagePullPolicy": "Always",
"command": ["python", "-m", "train"],
"args": self.get_args(),
"resources": {
"limits": {
"memory": f"{self.cpu_memory}",
"cpu": f"{self.num_cpus}",
"nvidia.com/gpu": f"{self.request_gpus}",
},
"requests": {
"memory": f"{self.cpu_memory}",
"cpu": f"{self.num_cpus}",
"nvidia.com/gpu": f"{self.request_gpus}",
},
},
"envFrom": [{"secretRef": {"name": "s3-credentials"}}],
"env": [
{
"name": "AWS_ENDPOINT_URL",
"value": s3().get_internal_s3_url(),
},
{
"name": "WANDB_API_KEY",
"value": wandb().api_key,
},
],
}
]
if self.use_init_container:
spec["initContainers"] = self.init_containers
spec["containers"][0]["volumeMounts"].append(
{"mountPath": "/opt", "name": self.name}
)
spec["volumes"] = [
{
"name": "dshm",
"emptyDir": {"sizeLimit": "256Gi", "medium": "Memory"},
},
]
if self.use_init_container:
spec["volumes"] += [
{"name": self.name, "emptyDir": {}},
{
"name": f"{self.name}-git-secret",
"secret": {"secretName": "git-creds"},
},
]
return spec
def run(self):
authenticate()
if not self.s3_secret.exists():
self.s3_secret.create()
if self.use_init_container and not self.git_secret().exists():
self.git_secret().create()
super().run()
def on_failure(self, exc):
authenticate()
self.s3_secret.delete()
if self.use_init_container:
self.git_secret().delete()
super().on_failure(exc)
def on_success(self):
authenticate()
self.s3_secret.delete()
if self.use_init_container:
self.git_secret().delete()
super().on_success()
@inherits(TrainLocal, TrainRemote)
class Train(AframeWrapperTask):
"""
Class that dynamically chooses between
remote training on nautilus or local training on LDG.
Useful for incorporating into pipelines where
you don't care where the training is run.
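A rough sketch of the dispatch rule (paths are illustrative): a run_dir
such as s3://my-bucket/my-run selects TrainRemote, while a local path
such as /home/user/my-run selects TrainLocal. In the remote case,
data_dir must also be an s3 path.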
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.train_remote = self.validate_dirs()
def validate_dirs(self) -> bool:
# train remotely if run_dir starts with s3://
# Note: one can specify a remote data_dir, but
# train locally
train_remote = str(self.run_dir).startswith("s3://")
if train_remote and not str(self.data_dir).startswith("s3://"):
raise ValueError(
"If run_dir is an s3 path, data_dir must also be an s3 path"
"Got data_dir: {self.data_dir} and run_dir: {self.run_dir}"
)
return train_remote
def requires(self):
if self.train_remote:
return TrainRemote.req(self)
else:
return TrainLocal.req(self)
|
ML4GWREPO_NAMEaframePATH_START.@aframe_extracted@aframe-main@aframe@tasks@[email protected]@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "li-yangyang/lotus",
"repo_path": "lotus_extracted/lotus-main/setup.py",
"type": "Python"
}
|
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
import os, sys, re
import codecs
NAME = "lotus_nlte"
PACKAGES = find_packages(where='src')
META_PATH = os.path.join("src", NAME, "__init__.py")
EXTRA_REQUIRE = {
"advanced-interp": ["rbf", "torch", "gpytorch"],
"doc": [
"sphinx-book-theme",
],
}
CLASSIFIERS = [
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
]
HERE = os.path.dirname(os.path.realpath(__file__))
def readme():
with open("README.md") as f:
return f.read()
with open('requirements.txt') as infd:
INSTALL_REQUIRES = [x.strip('\n') for x in infd.readlines()]
print(INSTALL_REQUIRES)
def read(*parts: str) -> str:
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
def find_meta(meta: str, meta_file: str = read(META_PATH)) -> str:
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta), meta_file, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
#class Install(_install):
# def run(self):
# _install.do_egg_install(self)
# from lotus_nlte.config import *
setup(
name=NAME,
use_scm_version={
"write_to": os.path.join(
"src", NAME, "{0}_version.py".format(NAME)
),
"write_to_template": '__version__ = "{version}"\n',
},
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
url=find_meta("url"),
license=find_meta("license"),
description=find_meta("description"),
long_description=readme(),
long_description_content_type="text/markdown",
packages=PACKAGES,
package_dir={"": "src"},
package_data={'lotus_nlte': ['package_data/linelist/*', 'test/data/*']},
include_package_data=True,
#cmdclass={'install': Install},
#setup_requires=['lotus-nlte'],
python_requires=">= 3.7, < 3.8",
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRA_REQUIRE,
zip_safe=False
)
|
li-yangyangREPO_NAMElotusPATH_START.@lotus_extracted@[email protected]@.PATH_END.py
|
{
"filename": "setup_baroclinic.py",
"repo_name": "fdebras/ECLIPS3D",
"repo_path": "ECLIPS3D_extracted/ECLIPS3D-master/ECLIPS3D/2D_axi/python/setup_baroclinic.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 12 16:38:10 2017
@author: florian
"""
from netCDF4 import Dataset
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from scipy.misc import derivative
import scipy.optimize as Opt
three=False
#three = True
Nlong = 35
Nlat = 25
Nz = 12
#parameters from Ulrich et al. 2014
a = 6.371229e06
b = 2.
d0 = a/2.
g = 9.80616
k = 3.
p0 = 1.0E5
R = 287.0
T0_E = 310
T0_P = 240
V_p = 1.0
z_t = 1.5e4
Gamma = 0.005
lambda_c = np.pi/9.
phi_c = 2.*np.pi/9.
omega = 7.29212e-5
Height = 30.0E3
cp = 1005.0
A = 1./Gamma
T0 = 0.5 * (T0_E+T0_P)
B = (T0-T0_P)/(T0*T0_P)
C = (k+2.)/2. * (T0_E-T0_P)/(T0_E*T0_P)
H = R*T0/g
dlong = 2*np.pi/Nlong
dlat = 0.5*np.pi/Nlat
dz = Height/Nz
XU = np.linspace(0,Nlong-1,Nlong)*dlong
XV = XU+0.5*dlong
YU = np.linspace(0,Nlat-1,Nlat)*dlat + 0.5*dlat
YV = np.linspace(0,Nlat,Nlat+1)*dlat
ZU = np.linspace(0,Nz-1,Nz)*dz + 0.5*dz
ZW = np.linspace(0,Nz,Nz+1)*dz
#=============================================
# Intermediate quantities
#=============================================
t1 = lambda z : A*Gamma/T0*np.exp(Gamma/T0*(z-a)) + \
B*(1. - 2.*((z-a)/(b*H))**2)*np.exp(-((z-a)/(b*H))**2)
t2 = lambda z : C*(1. - 2.*((z-a)/(b*H))**2)*np.exp(-((z-a)/(b*H))**2)
t1_int = lambda z : A * (np.exp(Gamma/T0*(z-a)) - 1) + \
B*(z-a) * np.exp(-((z-a)/(b*H))**2)
t2_int = lambda z : C*(z-a) * np.exp(-((z-a)/(b*H))**2)
dt1_dz = lambda z : A*(Gamma/T0)**2*np.exp(Gamma/T0*(z-a)) + \
B*(- 6.*(z-a)/(b*H)**2 + 4.*(z-a)**3/(b*H)**4)*np.exp(-((z-a)/(b*H))**2)
dt2_dz = lambda z :C*(- 6.*(z-a)/(b*H)**2 + 4.*(z-a)**3/(b*H)**4)*np.exp(-((z-a)/(b*H))**2)
#=============================================
# Thermodynamic values
#=============================================
T = lambda y,z : (a/z)**2 * (t1(z) - t2(z)* ( (z/a * np.cos(y))**k - k/(k+2.)*(z/a * np.cos(y))**(k+2.)) ) ** (-1.)
dT_dphi = lambda y,z : (z/a)**2 * t2(z) * k*np.tan(y)* \
( -(z/a*np.cos(y))**k + (z/a*np.cos(y))**(k+2) ) * T(y,z)**2
dT_dr = lambda y,z : T(y,z) * (-2/z - (dt1_dz(z)-dt2_dz(z)* ( (z/a * np.cos(y))**k - k/(k+2.)*(z/a * np.cos(y))**(k+2.)) - \
t2(z)*k*(z**(k-1)*(np.cos(y)/a)**k - z**(k+1)*(np.cos(y)/a)**(k+2))) / \
(t1(z) - t2(z)* ( (z/a * np.cos(y))**k - k/(k+2.)*(z/a * np.cos(y))**(k+2.)) ) )
p = lambda y,z : p0 * np.exp(-g/R * t1_int(z) + g/R * t2_int(z) * \
( (z/a*np.cos(y))**k - k/(k+2.)*(z*np.cos(y)/a)**(k+2.)) )
dp_dphi = lambda y,z : k*g/R*np.tan(y)*t2_int(z)* \
( -(z/a*np.cos(y))**k + (z/a*np.cos(y))**(k+2) ) * p(y,z)
dp_dr = lambda y,z : g/R*p(y,z) * (-t1(z)+t2(z)*((z/a*np.cos(y))**k - k/(k+2.)*(z*np.cos(y)/a)**(k+2.)) \
+t2_int(z)*k*(z**(k-1)*(np.cos(y)/a)**k - z**(k+1)*(np.cos(y)/a)**(k+2)) )
theta = lambda y,z : (p0/p(y,z))**(R/cp)*T(y,z)
dtheta_dphi = lambda y,z : -(R/cp)*p0**(R/cp)/p(y,z)**(R/cp+1)*dp_dphi(y,z)*T(y,z)+ \
dT_dphi(y,z)*(p0/p(y,z))**(R/cp)
dtheta_dr = lambda y,z : -(R/cp)*p0**(R/cp)/p(y,z)**(R/cp+1)*dp_dr(y,z)*T(y,z)+ \
dT_dr(y,z)*(p0/p(y,z))**(R/cp)
rho = lambda y,z : p(y,z)/(R*T(y,z))
drho_dphi = lambda y,z : 1./(R*T(y,z))*dp_dphi(y,z) - p(y,z)/(R*T(y,z)**2)*dT_dphi(y,z)
drho_dr = lambda y,z : 1./(R*T(y,z))*dp_dr(y,z) - p(y,z)/(R*T(y,z)**2)*dT_dr(y,z)
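# Illustrative sanity check (not part of the original script): at the surface
# (z = a) the analytic temperature profile defined above should recover the
# prescribed equatorial and polar temperatures T0_E and T0_P.
print('surface temperature check: T_eq = %.1f K (expect %.1f), T_pole = %.1f K (expect %.1f)'
      % (T(0., a), T0_E, T(0.5*np.pi, a), T0_P))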
#=============================================
# Speed
#=============================================
U = lambda y,z : g/a * k * T(y,z) *t2_int(z) * (\
(z/a*np.cos(y))**(k-1.) - (z/a*np.cos(y))**(k+1.) )
dU_dphi = lambda y,z : dT_dphi(y,z)*U(y,z)/T(y,z)+ g/a*k*T(y,z)*t2_int(z)*\
np.sin(y)*((k+1)*(z/a)**(k+1)*np.cos(y)**(k) - (k-1)*(z/a)**(k-1)*np.cos(y)**(k-2))
dU_dr = lambda y,z : dT_dr(y,z)*U(y,z)/T(y,z) + t2(z)*U(y,z)/t2_int(z) + \
g/a*k*T(y,z)*t2_int(z)*((k-1)*z**(k-2)*(np.cos(y)/a)**(k-1) - (k+1)*z**k*(np.cos(y)/a)**(k+1))
u = lambda y,z : -omega*z*np.cos(y) + np.sqrt((omega*z*np.cos(y))**2 + \
z*np.cos(y)*U(y,z) )
du_dphi = lambda y,z : omega*z*np.sin(y) + 1./(2*np.sqrt((omega*z*np.cos(y))**2 + \
z*np.cos(y)*U(y,z) )) * (-2*z**2*omega**2*np.sin(y)*np.cos(y) - z*np.sin(y)*U(y,z) + \
z*np.cos(y)*dU_dphi(y,z))
du_dr = lambda y,z : -omega*np.cos(y) + 1./(2*np.sqrt((omega*z*np.cos(y))**2 + \
z*np.cos(y)*U(y,z) )) * (2*z*(omega*np.cos(y))**2 + np.cos(y)*U(y,z) + \
z*np.cos(y)*dU_dr(y,z) )
#=============================================
# Perturbed speed
#=============================================
dzeta = lambda z : 1.-3.*(z/z_t)**2 + 2.*(z/z_t)**3
dd = lambda x,y : a*np.arccos(np.sin(phi_c)*np.sin(y) + np.cos(phi_c)*np.cos(y)*np.cos(x-lambda_c) )
u1 = lambda x,y,z : -16.*V_p/(3.*np.sqrt(3.))*dzeta(z)*np.cos(np.pi*max(0,min(d0,dd(x,y))/(2*d0)))**3* \
np.sin(np.pi*max(0,min(d0,dd(x,y))/(2*d0))) * \
(-np.sin(phi_c)*np.cos(y) + np.cos(phi_c)*np.sin(y)*np.cos(x-lambda_c))/(np.sin(dd(x,y)/a))
#u1 = lambda x,y,z : -16.*V_p/(3.*np.sqrt(3.))*dzeta(z)*np.cos(np.pi*dd(x,y)/(2*d0))**3* \
#np.sin(np.pi*dd(x,y)/(2*d0)) * \
#(-np.sin(phi_c)*np.cos(y) + np.cos(phi_c)*np.sin(y)*np.cos(x-lambda_c))/(np.sin(dd(x,y)/a))
v1 = lambda x,y,z : -16.*V_p/(3.*np.sqrt(3.))*dzeta(z)*np.cos(np.pi*max(0,min(d0,dd(x,y))/(2*d0)))**3*\
np.sin(np.pi*max(0,min(d0,dd(x,y))/(2*d0))) * \
(np.cos(phi_c)*np.sin(x-lambda_c))/(np.sin(dd(x,y)/a))
u_u = np.zeros((Nlong,Nlat,Nz))
u_v = np.zeros((Nlong,Nlat+1,Nz))
u_w = np.zeros((Nlong,Nlat,Nz+1))
u_p = np.zeros((Nlong,Nlat,Nz))
v_u = np.zeros((Nlong,Nlat,Nz))
v_v = np.zeros((Nlong,Nlat+1,Nz))
v_w = np.zeros((Nlong,Nlat,Nz+1))
v_p = np.zeros((Nlong,Nlat,Nz))
w_u = np.zeros((Nlong,Nlat,Nz))
w_v = np.zeros((Nlong,Nlat+1,Nz))
w_w = np.zeros((Nlong,Nlat,Nz+1))
w_p = np.zeros((Nlong,Nlat,Nz))
p_u = np.zeros((Nlong,Nlat,Nz))
p_v = np.zeros((Nlong,Nlat+1,Nz))
p_w = np.zeros((Nlong,Nlat,Nz+1))
p_p = np.zeros((Nlong,Nlat,Nz))
theta_u = np.zeros((Nlong,Nlat,Nz))
theta_v = np.zeros((Nlong,Nlat+1,Nz))
theta_w = np.zeros((Nlong,Nlat,Nz+1))
theta_p = np.zeros((Nlong,Nlat,Nz))
rho_u = np.zeros((Nlong,Nlat,Nz))
rho_v = np.zeros((Nlong,Nlat+1,Nz))
rho_w = np.zeros((Nlong,Nlat,Nz+1))
rho_p = np.zeros((Nlong,Nlat,Nz))
#--------------------------------------------------------
#Derivatives on u
#--------------------------------------------------------
du_dlong_u = np.zeros((Nlong,Nlat,Nz))
du_dphi_u = np.zeros((Nlong,Nlat,Nz))
du_dz_u = np.zeros((Nlong,Nlat,Nz))
dp_dlong_u = np.zeros((Nlong,Nlat,Nz))
drho_dlong_u = np.zeros((Nlong,Nlat,Nz))
drho_dphi_u = np.zeros((Nlong,Nlat,Nz))
drho_dz_u = np.zeros((Nlong,Nlat,Nz))
#--------------------------------------------------------
#Derivatives on v
#--------------------------------------------------------
dv_dlong_v = np.zeros((Nlong,Nlat+1,Nz))
dv_dphi_v = np.zeros((Nlong,Nlat+1,Nz))
dv_dz_v = np.zeros((Nlong,Nlat+1,Nz))
dp_dphi_v = np.zeros((Nlong,Nlat+1,Nz))
drho_dlong_v = np.zeros((Nlong,Nlat+1,Nz))
drho_dphi_v = np.zeros((Nlong,Nlat+1,Nz))
drho_dz_v = np.zeros((Nlong,Nlat+1,Nz))
#--------------------------------------------------------
#Derivatives on p
#--------------------------------------------------------
du_dlong_p = np.zeros((Nlong,Nlat,Nz))
dv_dphi_p = np.zeros((Nlong,Nlat,Nz))
dp_dlong_p = np.zeros((Nlong,Nlat,Nz))
dp_dphi_p = np.zeros((Nlong,Nlat,Nz))
dw_dz_p = np.zeros((Nlong,Nlat,Nz))
dtheta_dz_p =np.zeros((Nlong,Nlat,Nz))
drho_dlong_p = np.zeros((Nlong,Nlat,Nz))
drho_dphi_p = np.zeros((Nlong,Nlat,Nz))
#--------------------------------------------------------
#Derivatives on w
#--------------------------------------------------------
dp_dz_w = np.zeros((Nlong,Nlat,Nz+1))
dw_dlong_w = np.zeros((Nlong,Nlat,Nz+1))
dw_dphi_w = np.zeros((Nlong,Nlat,Nz+1))
dw_dz_w = np.zeros((Nlong,Nlat,Nz+1))
dtheta_dlong_w = np.zeros((Nlong,Nlat,Nz+1))
dtheta_dphi_w = np.zeros((Nlong,Nlat,Nz+1))
dtheta_dz_w = np.zeros((Nlong,Nlat,Nz+1))
drho_dlong_w = np.zeros((Nlong,Nlat,Nz+1))
drho_dphi_w = np.zeros((Nlong,Nlat,Nz+1))
drho_dz_w = np.zeros((Nlong,Nlat,Nz+1))
T_u=np.zeros((Nlong,Nlat,Nz))
#=============================================
# Perturbed speed
#=============================================
for i in range(Nlat):
for j in range(Nz) :
u_u[:,i,j]=np.zeros(Nlong)+np.array(u(YU[i],a+ZU[j]))
u_v[:,i,j]=np.zeros(Nlong)+np.array(u(YV[i],a+ZU[j]))
u_w[:,i,j]=np.zeros(Nlong)+np.array(u(YU[i],a+ZW[j]))
u_p[:,i,j]=np.zeros(Nlong)+np.array(u(YU[i],a+ZU[j]))
T_u[:,i,j]=np.zeros(Nlong)+np.array(T(YU[i],a+ZU[j]))
p_u[:,i,j]=np.zeros(Nlong)+np.array(p(YU[i],a+ZU[j]))
p_v[:,i,j]=np.zeros(Nlong)+np.array(p(YV[i],a+ZU[j]))
p_w[:,i,j]=np.zeros(Nlong)+np.array(p(YU[i],a+ZW[j]))
p_p[:,i,j]=np.zeros(Nlong)+np.array(p(YU[i],a+ZU[j]))
theta_u[:,i,j]=np.zeros(Nlong)+np.array(theta(YU[i],a+ZU[j]))
theta_v[:,i,j]=np.zeros(Nlong)+np.array(theta(YV[i],a+ZU[j]))
theta_w[:,i,j]=np.zeros(Nlong)+np.array(theta(YU[i],a+ZW[j]))
theta_p[:,i,j]=np.zeros(Nlong)+np.array(theta(YU[i],a+ZU[j]))
rho_u[:,i,j]=np.zeros(Nlong)+np.array(rho(YU[i],a+ZU[j]))
rho_v[:,i,j]=np.zeros(Nlong)+np.array(rho(YV[i],a+ZU[j]))
rho_w[:,i,j]=np.zeros(Nlong)+np.array(rho(YU[i],a+ZW[j]))
rho_p[:,i,j]=np.zeros(Nlong)+np.array(rho(YU[i],a+ZU[j]))
du_dphi_u[:,i,j]=np.zeros(Nlong)+np.array(du_dphi(YU[i],a+ZU[j]))
du_dz_u[:,i,j] = np.zeros(Nlong)+np.array(du_dr(YU[i],a+ZU[j]))
drho_dphi_u[:,i,j] = np.zeros(Nlong)+np.array(drho_dphi(YU[i],a+ZU[j]))
drho_dz_u[:,i,j] = np.zeros(Nlong)+np.array(drho_dr(YU[i],a+ZU[j]))
dp_dphi_v[:,i,j] = np.zeros(Nlong)+np.array(dp_dphi(YV[i],a+ZU[j]))
drho_dphi_v[:,i,j] = np.zeros(Nlong)+np.array(drho_dphi(YV[i],a+ZU[j]))
drho_dz_v[:,i,j] = np.zeros(Nlong)+np.array(drho_dr(YV[i],a+ZU[j]))
dp_dphi_p[:,i,j] = np.zeros(Nlong)+np.array(dp_dphi(YU[i],a+ZU[j]))
dtheta_dz_p[:,i,j] = np.zeros(Nlong)+np.array(dtheta_dr(YU[i],a+ZU[j]))
drho_dphi_p[:,i,j] = np.zeros(Nlong)+np.array(drho_dphi(YU[i],a+ZU[j]))
dp_dz_w[:,i,j] = np.zeros(Nlong)+np.array(dp_dr(YU[i],a+ZW[j]))
dtheta_dphi_w[:,i,j] = np.zeros(Nlong)+np.array(dtheta_dphi(YU[i],a+ZW[j]))
dtheta_dz_w[:,i,j] = np.zeros(Nlong)+np.array(dtheta_dr(YU[i],a+ZW[j]))
drho_dphi_w[:,i,j] = np.zeros(Nlong)+np.array(drho_dphi(YU[i],a+ZW[j]))
drho_dz_w[:,i,j] = np.zeros(Nlong)+np.array(drho_dr(YU[i],a+ZW[j]))
u_w[:,i,Nz]=np.zeros(Nlong)+np.array(u(YU[i],a+ZW[Nz]))
p_w[:,i,Nz]=np.zeros(Nlong)+np.array(p(YU[i],a+ZW[Nz]))
theta_w[:,i,Nz]=np.zeros(Nlong)+np.array(theta(YU[i],a+ZW[Nz]))
rho_w[:,i,Nz]=np.zeros(Nlong)+np.array(rho(YU[i],a+ZW[Nz]))
dp_dz_w[:,i,Nz] = np.zeros(Nlong)+np.array(dp_dr(YU[i],a+ZW[Nz]))
dtheta_dphi_w[:,i,Nz] = np.zeros(Nlong)+np.array(dtheta_dphi(YU[i],a+ZW[Nz]))
dtheta_dz_w[:,i,Nz] = np.zeros(Nlong)+np.array(dtheta_dr(YU[i],a+ZW[Nz]))
drho_dphi_w[:,i,Nz] = np.zeros(Nlong)+np.array(drho_dphi(YU[i],a+ZW[Nz]))
drho_dz_w[:,i,Nz] = np.zeros(Nlong)+np.array(drho_dr(YU[i],a+ZW[Nz]))
for j in range(Nz) :
u_v[:,Nlat,j]=np.zeros(Nlong)+np.array(u(YV[i],a+ZU[j]))
p_v[:,Nlat,j]=np.zeros(Nlong)+np.array(p(YV[i],a+ZU[j]))
theta_v[:,Nlat,j]=np.zeros(Nlong)+np.array(theta(YV[i],a+ZU[j]))
rho_v[:,Nlat,j]=np.zeros(Nlong)+np.array(rho(YV[i],a+ZU[j]))
dp_dphi_v[:,Nlat,j] = np.zeros(Nlong)+np.array(dp_dphi(YV[Nlat],a+ZU[j]))
drho_dphi_v[:,Nlat,j] = np.zeros(Nlong)+np.array(drho_dphi(YV[Nlat],a+ZU[j]))
drho_dz_v[:,Nlat,j] = np.zeros(Nlong)+np.array(drho_dr(YV[Nlat],a+ZU[j]))
print('perturbation ...')
#
#
for i in range(Nlong) :
for j in range(Nlat) :
for kk in range(Nz) :
u_u[i,j,kk] = u_u[i,j,kk]+ u1(XU[i],YU[j],min(z_t,ZU[kk]))
u_v[i,j,kk] += u1(XV[i],YV[j],min(z_t,ZU[kk]))
u_w[i,j,kk] += u1(XV[i],YU[j],min(z_t,ZW[kk]))
u_p[i,j,kk] += u1(XV[i],YU[j],min(z_t,ZU[kk]))
v_u[i,j,kk] += v1(XU[i],YU[j],min(z_t,ZU[kk]))
v_v[i,j,kk] += v1(XV[i],YV[j],min(z_t,ZU[kk]))
v_w[i,j,kk] += v1(XV[i],YU[j],min(z_t,ZW[kk]))
v_p[i,j,kk] += v1(XV[i],YU[j],min(z_t,ZU[kk]))
du_dlong_u[i,j,kk] += derivative(lambda x : u1(x,YU[j],min(z_t,ZU[kk])),XU[i])
du_dphi_u[i,j,kk] += derivative(lambda y : u1(XU[i],y,min(z_t,ZU[kk])),YU[j])
du_dz_u[i,j,kk] += derivative(lambda z : u1(XU[i],YU[j],z),min(z_t,ZU[kk]))
dv_dlong_v[i,j,kk] += derivative(lambda x : v1(x,YV[j],min(z_t,ZU[kk])),XV[i])
dv_dphi_v[i,j,kk] += derivative(lambda y : v1(XV[i],y,min(z_t,ZU[kk])),YV[j])
dv_dz_v[i,j,kk] += derivative(lambda z : v1(XV[i],YV[j],z),min(z_t,ZU[kk]))
du_dlong_p[i,j,kk] += derivative(lambda x : u1(x,YU[j],min(z_t,ZU[kk])),XV[i])
dv_dphi_p[i,j,kk] += derivative(lambda y : v1(XV[i],y,min(z_t,ZU[kk])),YU[j])
u_w[i,j,Nz] += u1(XV[i],YU[j],min(z_t,ZW[Nz]))
v_w[i,j,Nz] += v1(XV[i],YU[j],min(z_t,ZW[Nz]))
for kk in range(Nz) :
u_v[i,Nlat,kk] += u1(XV[i],YV[Nlat],min(z_t,ZU[kk]))
v_v[i,Nlat,kk] += v1(XV[i],YV[Nlat],min(z_t,ZU[kk]))
dv_dlong_v[i,Nlat,kk] += derivative(lambda x : v1(x,YV[Nlat],min(z_t,ZU[kk])),XV[i])
dv_dphi_v[i,Nlat,kk] += derivative(lambda y : v1(XV[i],y,min(z_t,ZU[kk])),YV[Nlat])
dv_dz_v[i,Nlat,kk] += derivative(lambda z : v1(XV[i],YV[Nlat],z),min(z_t,ZU[kk]))
#
print ('over')
if three :
res=np.append(u_u,u_v)
res=np.append(res,u_p)
res=np.append(res,u_w)
res=np.append(res,v_u)
res=np.append(res,v_v)
res=np.append(res,v_p)
res=np.append(res,v_w)
res=np.append(res,p_u)
res=np.append(res,p_v)
res=np.append(res,p_p)
res=np.append(res,p_w)
res=np.append(res,w_u)
res=np.append(res,w_v)
res=np.append(res,w_p)
res=np.append(res,w_w)
res=np.append(res,theta_u)
res=np.append(res,theta_v)
res=np.append(res,theta_p)
res=np.append(res,theta_w)
res=np.append(res,rho_u)
res=np.append(res,rho_v)
res=np.append(res,rho_p)
res=np.append(res,rho_w)
dures=np.append(du_dlong_u,du_dphi_u)
dures=np.append(dures,du_dz_u)
dures=np.append(dures,dp_dlong_u)
dures=np.append(dures,drho_dlong_u)
dures=np.append(dures,drho_dphi_u)
dures=np.append(dures,drho_dz_u)
dvres=np.append(dv_dlong_v,dv_dphi_v)
dvres=np.append(dvres,dv_dz_v)
dvres=np.append(dvres,dp_dphi_v)
dvres=np.append(dvres,drho_dlong_v)
dvres=np.append(dvres,drho_dphi_v)
dvres=np.append(dvres,drho_dz_v)
dpres=np.append(du_dlong_p,dv_dphi_p)
dpres=np.append(dpres,dp_dlong_p)
dpres=np.append(dpres,dp_dphi_p)
dpres=np.append(dpres,dw_dz_p)
dpres=np.append(dpres,dtheta_dz_p)
dpres=np.append(dpres,drho_dlong_p)
dpres=np.append(dpres,drho_dphi_p)
dwres=np.append(dp_dz_w,dw_dlong_w)
dwres=np.append(dwres,dw_dphi_w)
dwres=np.append(dwres,dw_dz_w)
dwres=np.append(dwres,dtheta_dlong_w)
dwres=np.append(dwres,dtheta_dphi_w)
dwres=np.append(dwres,dtheta_dz_w)
dwres=np.append(dwres,drho_dlong_w)
dwres=np.append(dwres,drho_dphi_w)
dwres=np.append(dwres,drho_dz_w)
file=open('/Users/florian/Desktop/MyWork/3D_sca/data/data.dat','w')
dufile=open('/Users/florian/Desktop/MyWork/3D_sca/data/dudata.dat','w')
dvfile=open('/Users/florian/Desktop/MyWork/3D_sca/data/dvdata.dat','w')
dpfile=open('/Users/florian/Desktop/MyWork/3D_sca/data/dpdata.dat','w')
dwfile=open('/Users/florian/Desktop/MyWork/3D_sca/data/dwdata.dat','w')
np.savetxt('/Users/florian/Desktop/MyWork/3D_sca/data/data.dat',res, fmt='%.8e')
np.savetxt('/Users/florian/Desktop/MyWork/3D_sca/data/dudata.dat',dures, fmt='%.8e')
np.savetxt('/Users/florian/Desktop/MyWork/3D_sca/data/dvdata.dat',dvres, fmt='%.8e')
np.savetxt('/Users/florian/Desktop/MyWork/3D_sca/data/dpdata.dat',dpres, fmt='%.8e')
np.savetxt('/Users/florian/Desktop/MyWork/3D_sca/data/dwdata.dat',dwres, fmt='%.8e')
file.close()
dufile.close()
dvfile.close()
dpfile.close()
dwfile.close()
else :
u_f=u_u[0]
u_hf=u_w[0]
u_lf=u_v[0]
v_f=v_u[0]
v_hf=v_w[0]
v_lf=v_v[0]
w_f=w_u[0]
w_hf=w_w[0]
w_lf=w_v[0]
p_f=p_u[0]
p_hf=p_w[0]
p_lf=p_v[0]
theta_f=theta_u[0]
theta_hf=theta_w[0]
theta_lf=theta_v[0]
rho_f=rho_u[0]
rho_hf=rho_w[0]
rho_lf=rho_v[0]
du_dphi_f = du_dphi_u[0]
du_dz_f = du_dz_u[0]
dv_dphi_f = dv_dphi_p[0]
dv_dphi_lf = dv_dphi_v[0]
dv_dz_lf = dv_dz_v[0]
dw_dphi_hf = dw_dphi_w[0]
dw_dz_hf = dw_dz_w[0]
dw_dz_f = dw_dz_p[0]
dp_dphi_f = dp_dphi_p[0]
dp_dphi_lf = dp_dphi_v[0]
dp_dz_hf = dp_dz_w[0]
dtheta_dz_f = dtheta_dz_p[0]
dtheta_dphi_hf = dtheta_dphi_w[0]
dtheta_dz_hf = dtheta_dz_w[0]
drho_dphi_f = drho_dphi_u[0]
drho_dphi_hf = drho_dphi_w[0]
drho_dphi_lf = drho_dphi_v[0]
drho_dz_f = drho_dz_u[0]
drho_dz_hf = drho_dz_w[0]
drho_dz_lf = drho_dz_v[0]
print(u_f.shape)
res=np.append(u_f,u_hf)
res=np.append(res,u_lf)
res=np.append(res,v_f)
res=np.append(res,v_hf)
res=np.append(res,v_lf)
res=np.append(res,p_f)
res=np.append(res,p_hf)
res=np.append(res,p_lf)
res=np.append(res,theta_f)
res=np.append(res,theta_hf)
res=np.append(res,theta_lf)
res=np.append(res,rho_f)
res=np.append(res,rho_hf)
res=np.append(res,rho_lf)
res=np.append(res,w_f)
res=np.append(res,w_hf)
res=np.append(res,w_lf)
dres=np.append(du_dphi_f,du_dz_f)
dres=np.append(dres,dv_dphi_f)
dres=np.append(dres,dv_dphi_lf)
dres=np.append(dres,dv_dz_lf)
dres=np.append(dres,dp_dphi_f)
dres=np.append(dres,dp_dphi_lf)
dres=np.append(dres,dp_dz_hf)
dres=np.append(dres,dtheta_dz_f)
dres=np.append(dres,dtheta_dphi_hf)
dres=np.append(dres,dtheta_dz_hf)
dres=np.append(dres,drho_dphi_f)
dres=np.append(dres,drho_dphi_hf)
dres=np.append(dres,drho_dphi_lf)
dres=np.append(dres,drho_dz_f)
dres=np.append(dres,drho_dz_hf)
dres=np.append(dres,drho_dz_lf)
dres=np.append(dres,dw_dphi_hf)
dres=np.append(dres,dw_dz_f)
dres=np.append(dres,dw_dz_hf)
file=open('/Users/florian/Desktop/MyWork/2D/data_grid/steady_state/data.dat','w')
dfile=open('/Users/florian/Desktop/MyWork/2D/data_grid/steady_state/ddata.dat','w')
#fin=np.append(fin,theta_f.T)
#file.write(w_f.tofile())
np.savetxt('/Users/florian/Desktop/MyWork/2D/data_grid/steady_state/data.dat',res, fmt='%.8e')
np.savetxt('/Users/florian/Desktop/MyWork/2D/data_grid/steady_state/ddata.dat',dres, fmt='%.8e')
file.close()
dfile.close()
|
fdebrasREPO_NAMEECLIPS3DPATH_START.@ECLIPS3D_extracted@ECLIPS3D-master@ECLIPS3D@2D_axi@python@[email protected]_END.py
|
{
"filename": "fit.py",
"repo_name": "timothydmorton/isochrones",
"repo_path": "isochrones_extracted/isochrones-master/isochrones/fit.py",
"type": "Python"
}
|
import os, sys
import pandas as pd
import numpy as np
import emcee3
from emcee3.backends import Backend, HDFBackend
class Emcee3Model(emcee3.Model):
def __init__(self, mod, *args, **kwargs):
self.mod = mod
super().__init__(*args, **kwargs)
def compute_log_prior(self, state):
state.log_prior = self.mod.lnprior(state.coords)
return state
def compute_log_likelihood(self, state):
state.log_likelihood = self.mod.lnlike(state.coords)
return state
class Emcee3PriorModel(emcee3.Model):
def __init__(self, mod, *args, **kwargs):
self.mod = mod
super().__init__(*args, **kwargs)
def compute_log_prior(self, state):
state.log_prior = self.mod.lnprior(state.coords)
return state
def compute_log_likelihood(self, state):
state.log_likelihood = 0
return state
def write_samples(mod, df, resultsdir="results"):
"""df is dataframe of samples, mod is model
"""
if not os.path.exists(resultsdir):
os.makedirs(resultsdir)
samplefile = os.path.join(resultsdir, "{}.h5".format(mod.name))
df.to_hdf(samplefile, "samples")
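# Usage sketch (illustrative, not part of the original module): samples written by
# write_samples can be read back with pandas, e.g.
#   df = pd.read_hdf(os.path.join("results", "mymodel.h5"), "samples")
# where "mymodel" stands in for mod.name and is a hypothetical file name.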
def fit_emcee3(
mod,
nwalkers=500,
verbose=False,
nsamples=5000,
targetn=4,
iter_chunksize=200,
pool=None,
overwrite=False,
maxiter=10,
sample_directory="mcmc_chains",
nburn=2,
mixedmoves=True,
resultsdir="mcmc_results",
prior_only=False,
**kwargs
):
"""fit model using Emcee3
modeled after https://github.com/dfm/gaia-kepler/blob/master/fit.py
nburn is number of autocorr times to discard as burnin.
"""
# Initialize
if prior_only:
walker = Emcee3PriorModel(mod)
else:
walker = Emcee3Model(mod)
ndim = mod.n_params
if sample_directory is not None:
sample_file = os.path.join(sample_directory, "{}.h5".format(mod.name))
if not os.path.exists(sample_directory):
os.makedirs(sample_directory)
backend = HDFBackend(sample_file)
try:
coords_init = backend.current_coords
except (AttributeError, KeyError):
coords_init = mod.sample_from_prior(nwalkers, require_valid=True, values=True)
else:
backend = Backend()
coords_init = mod.sample_from_prior(nwalkers, require_valid=True, values=True)
if mixedmoves:
moves = [
(emcee3.moves.KDEMove(), 0.4),
(emcee3.moves.DEMove(1.0), 0.4),
(emcee3.moves.DESnookerMove(), 0.2),
]
else:
moves = emcee3.moves.KDEMove()
sampler = emcee3.Sampler(moves, backend=backend)
if overwrite:
sampler.reset()
coords_init = mod.sample_from_prior(nwalkers, require_valid=True, values=True)
if pool is None:
from emcee3.pools import DefaultPool
pool = DefaultPool()
try:
ensemble = emcee3.Ensemble(walker, coords_init, pool=pool)
except ValueError:
import pdb
pdb.set_trace()
def calc_stats(s):
"""returns tau_max, neff
"""
tau = s.get_integrated_autocorr_time(c=1)
tau_max = tau.max()
neff = s.backend.niter / tau_max - nburn
if verbose:
print("Maximum autocorrelation time: {0}".format(tau_max))
print("N_eff: {0} ({1})\n".format(neff * nwalkers, neff - nburn))
return tau_max, neff
done = False
if not overwrite:
try:
if verbose:
print("Status from previous run:")
tau_max, neff = calc_stats(sampler)
if neff > targetn:
done = True
except (emcee3.autocorr.AutocorrError, KeyError):
pass
chunksize = iter_chunksize
for iteration in range(maxiter):
if done:
break
if verbose:
print("Iteration {0}...".format(iteration + 1))
sampler.run(ensemble, chunksize, progress=verbose)
try:
tau_max, neff = calc_stats(sampler)
except emcee3.autocorr.AutocorrError:
tau_max = 0
continue
if neff > targetn:
done = True
burnin = int(nburn * tau_max)
ntot = nsamples
samples = sampler.get_coords(flat=True, discard=burnin)
total_samples = len(samples)
if ntot > total_samples:
ntot = total_samples
if verbose:
print("Discarding {0} samples for burn-in".format(burnin))
print("Randomly choosing {0} samples".format(ntot))
inds = np.random.choice(total_samples, size=ntot, replace=False)
samples = samples[inds]
df = pd.DataFrame(samples, columns=mod.param_names)
write_samples(mod, df, resultsdir=resultsdir)
return df
# return sampler
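# Usage sketch (illustrative, not part of the original module). `mod` below is a
# hypothetical model object; anything exposing lnprior/lnlike, n_params,
# param_names, sample_from_prior and a `name` attribute should work:
#
#   samples = fit_emcee3(mod, nwalkers=200, nsamples=2000, verbose=True)
#   print(samples.describe())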
|
timothydmortonREPO_NAMEisochronesPATH_START.@isochrones_extracted@isochrones-master@[email protected]@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "mr-superonion/FPFS",
"repo_path": "FPFS_extracted/FPFS-master/setup.py",
"type": "Python"
}
|
import os
from setuptools import setup, find_packages
this_dir = os.path.dirname(os.path.realpath(__file__))
__version__ = ""
fname = os.path.join(this_dir, "fpfs", "__version__.py")
with open(fname, "r") as ff:
exec(ff.read())
long_description = open(os.path.join(this_dir, "README.md")).read()
scripts = [
"bin/fpfs_config",
"bin/fpfs_sim.py",
"bin/fpfs_process_sim.py",
"bin/fpfs_summary_sim.py",
"bin/fpfs_process_descsim.py",
"bin/fpfs_summary_descsim.py",
]
setup(
name="fpfs",
version=__version__,
description="FPFS shear estimator",
author="Xiangchong Li",
author_email="[email protected]",
python_requires=">=3.8",
install_requires=[
"numpy",
"schwimmbad",
"jax>=0.4.9",
"jaxlib>=0.4.9",
"galsim",
"astropy",
"matplotlib",
],
packages=find_packages(),
scripts=scripts,
include_package_data=True,
zip_safe=False,
url="https://github.com/mr-superonion/FPFS/",
long_description=long_description,
long_description_content_type="text/markdown",
)
|
mr-superonionREPO_NAMEFPFSPATH_START.@FPFS_extracted@[email protected]@.PATH_END.py
|
{
"filename": "delete_mercfiles.py",
"repo_name": "ahermosillo/stochastic-randommigration",
"repo_path": "stochastic-randommigration_extracted/stochastic-randommigration-main/mercury6/delete_mercfiles.py",
"type": "Python"
}
|
import time
import os
import shutil
directory = "./"
files_in_directory = os.listdir(directory)
filtered_files = [file for file in files_in_directory if file.endswith(".dmp") \
or file.endswith(".tmp") or file.endswith(".out") or file.endswith(".aei")\
or file.endswith(".dat")]
# filtered_files = [file if file.endswith(".dmp") else file.endswith(".tmp") for file in files_in_directory]
# filtered_files.append([file for file in files_in_directory if file.endswith(".tmp")])
print("these are the files we are going to delete. Are you sure you want to proceed?\n\
type y or n for yes or no")
print(filtered_files)
yn = input()
if yn == 'y':
for file in filtered_files:
path_to_file = os.path.join(directory, file)
os.remove(path_to_file)
print("deleted ", file)
else:
print("files were not deleted")
|
ahermosilloREPO_NAMEstochastic-randommigrationPATH_START.@stochastic-randommigration_extracted@stochastic-randommigration-main@mercury6@[email protected]_END.py
|
{
"filename": "_separatethousands.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/scene/xaxis/_separatethousands.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SeparatethousandsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name="separatethousands",
parent_name="layout.scene.xaxis",
**kwargs,
):
super(SeparatethousandsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@layout@scene@xaxis@[email protected]_END.py
|
{
"filename": "launch.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/setuptools/py2/setuptools/launch.py",
"type": "Python"
}
|
"""
Launch the Python script on the command line after
setuptools is bootstrapped via import.
"""
# Note that setuptools gets imported implicitly by the
# invocation of this script using python -m setuptools.launch
import tokenize
import sys
def run():
"""
Run the script in sys.argv[1] as if it had
been invoked naturally.
"""
__builtins__
script_name = sys.argv[1]
namespace = dict(
__file__=script_name,
__name__='__main__',
__doc__=None,
)
sys.argv[:] = sys.argv[1:]
open_ = getattr(tokenize, 'open', open)
script = open_(script_name).read()
norm_script = script.replace('\\r\\n', '\\n')
code = compile(norm_script, script_name, 'exec')
exec(code, namespace)
if __name__ == '__main__':
run()
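# Usage sketch (illustrative, not part of the original module): this entry point is
# typically invoked as
#   python -m setuptools.launch path/to/setup.py install
# where the setup.py path and the "install" argument are hypothetical examples.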
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@setuptools@py2@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/scatter3d/hoverlabel/__init__.py",
"type": "Python"
}
|
import sys
if sys.version_info < (3, 7):
from ._font import Font
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(__name__, [], ["._font.Font"])
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@scatter3d@hoverlabel@[email protected]_END.py
|
{
"filename": "_linepositionsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/volume/hoverlabel/font/_linepositionsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="linepositionsrc",
parent_name="volume.hoverlabel.font",
**kwargs,
):
super(LinepositionsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@volume@hoverlabel@font@[email protected]_END.py
|
{
"filename": "kafkaAlert.py",
"repo_name": "jlenain/flaapluc",
"repo_path": "flaapluc_extracted/flaapluc-master/flaapluc/kafkaAlert.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Time-stamp: "2022-11-17 12:13:36 jlenain"
"""
Alert producer for FLaapLUC using the Kafka protocol and the Avro alert format
"""
import yaml
import os
from confluent_kafka import Producer
from flaapluc import avroUtils
import flaapluc
class AlertProducer:
def __init__(self, conf_path=None):
try:
with open(conf_path) as f:
self.conf = yaml.load(f, Loader=yaml.FullLoader)
except FileNotFoundError as e:
print(e)
raise
self.topic = self.conf['topic']
self.prod_conf = {'bootstrap.servers': self.conf['server'],
'group.id': self.conf['group']}
self.schema = f"{os.path.dirname(flaapluc.__file__)}/../schemas/{self.conf['schema']}"
def sendAlert(self, alert=None):
p = Producer(self.prod_conf)
avro_alert = avroUtils.encode_into_avro(alert, self.schema)
p.produce(self.topic, avro_alert)
p.flush()
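# Usage sketch (illustrative, not part of the original module). The YAML path and
# alert payload below are hypothetical; the config file must provide the keys read
# in __init__ ('topic', 'server', 'group', 'schema') and the alert must match the
# configured Avro schema:
#
#   producer = AlertProducer(conf_path="alert_conf.yaml")
#   producer.sendAlert(alert={"objectId": "PKS 2155-304", "flux": 1.2e-6})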
|
jlenainREPO_NAMEflaaplucPATH_START.@flaapluc_extracted@flaapluc-master@[email protected]@.PATH_END.py
|
{
"filename": "volterra_tikhonov_cg.py",
"repo_name": "hmuellergoe/mrbeam",
"repo_path": "mrbeam_extracted/mrbeam-main/mr_beam/itreg/examples/volterra_tikhonov_cg.py",
"type": "Python"
}
|
from regpy.operators.volterra import Volterra
from regpy.hilbert import L2
from regpy.discrs import UniformGrid
from regpy.solvers import HilbertSpaceSetting
from regpy.solvers.tikhonov import TikhonovCG
import regpy.stoprules as rules
import numpy as np
import logging
import matplotlib.pyplot as plt
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(levelname)s %(name)-20s :: %(message)s')
grid = UniformGrid((0, 2*np.pi, 200))
op = Volterra(grid)
exact_solution = np.sin(grid.coords[0])
exact_data = op(exact_solution)
noise = 0.03 * op.domain.randn()
data = exact_data + noise
init = op.domain.ones()
setting = HilbertSpaceSetting(op=op, Hdomain=L2, Hcodomain=L2)
solver = TikhonovCG(setting, data, regpar=0.01)
stoprule = (
rules.CountIterations(1000) +
rules.Discrepancy(
setting.Hcodomain.norm, data,
noiselevel=setting.Hcodomain.norm(noise),
tau=1.1
)
)
reco, reco_data = solver.run(stoprule)
plt.plot(grid.coords[0], exact_solution.T, label='exact solution')
plt.plot(grid.coords[0], reco, label='reco')
plt.plot(grid.coords[0], exact_data, label='exact data')
plt.plot(grid.coords[0], data, label='data')
plt.plot(grid.coords[0], reco_data, label='reco data')
plt.legend()
plt.show()
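# Illustrative addition (not part of the original example): a simple quantitative
# check of the reconstruction against the known exact solution.
rel_err = np.linalg.norm(reco - exact_solution) / np.linalg.norm(exact_solution)
print('relative reconstruction error: {:.3e}'.format(rel_err))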
|
hmuellergoeREPO_NAMEmrbeamPATH_START.@mrbeam_extracted@mrbeam-main@mr_beam@itreg@examples@[email protected]_END.py
|
{
"filename": "TemperatureDensity.py",
"repo_name": "mmicromegas/ransX",
"repo_path": "ransX_extracted/ransX-master/EQUATIONS/TemperatureDensity.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class TemperatureDensity(Calculus, SetAxisLimit, Tools, Errors, object):
def __init__(self, filename, ig, intc, data_prefix):
super(TemperatureDensity, self).__init__(ig)
# load data to structured array
eht = self.customLoad(filename)
# load grid
xzn0 = self.getRAdata(eht, 'xzn0')
# pick specific Reynolds-averaged mean fields according to:
# https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
dd = self.getRAdata(eht, 'dd')[intc]
tt = self.getRAdata(eht, 'tt')[intc]
# assign global data to be shared across whole class
self.data_prefix = data_prefix
self.xzn0 = xzn0
self.dd = dd
self.tt = tt
self.ig = ig
def plot_ttdd(self, LAXIS, bconv, tconv, xbl, xbr, ybu, ybd, ilg):
"""Plot temperature and density stratification in the model"""
# check supported geometries
if self.ig != 1 and self.ig != 2:
print("ERROR(TemperatureDensity.py):" + self.errorGeometry(self.ig))
sys.exit()
# load x GRID
grd1 = self.xzn0
to_plt1 = np.log10(self.tt)
to_plt2 = np.log10(self.dd)
if self.ig == 1:
xlabel_1 = r'x (cm)'
elif self.ig == 2:
xlabel_1 = r'r (cm)'
ylabel_1 = r'log $\overline{T}$ (K)'
ylabel_2 = r'log $\overline{\rho}$ (g cm$^{-3}$)'
plabel_1 = r'$\overline{T}$'
plabel_2 = r'$\overline{\rho}$'
# calculate indices of grid boundaries
xzn0 = np.asarray(self.xzn0)
xlm = np.abs(xzn0 - xbl)
xrm = np.abs(xzn0 - xbr)
idxl = int(np.where(xlm == xlm.min())[0][0])
idxr = int(np.where(xrm == xrm.min())[0][0])
# create FIGURE
fig, ax1 = plt.subplots(figsize=(7, 6))
ax1.axis([xbl, xbr, np.min(to_plt1[idxl:idxr]), np.max(to_plt1[idxl:idxr])])
ax1.plot(xzn0, to_plt1, color='r', label=plabel_1)
ax1.set_xlabel(xlabel_1)
ax1.set_ylabel(ylabel_1)
ax1.legend(loc=7, prop={'size': 18})
ax2 = ax1.twinx()
ax2.axis([xbl, xbr, np.min(to_plt2[idxl:idxr]), np.max(to_plt2[idxl:idxr])])
ax2.plot(xzn0, to_plt2, color='b', label=plabel_2)
ax2.set_ylabel(ylabel_2)
ax2.tick_params('y')
ax2.legend(loc=1, prop={'size': 18})
# convective boundary markers
#plt.axvline(bconv, linestyle='--', linewidth=0.7, color='k')
#plt.axvline(tconv, linestyle='--', linewidth=0.7, color='k')
plt.show(block=False)
# save PLOT
plt.savefig('RESULTS/' + self.data_prefix + 'mean_ttdd.png')
plt.savefig('RESULTS/' + self.data_prefix + 'mean_ttdd.eps')
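# Usage sketch (illustrative, not part of the original module). The file name,
# geometry flag and plot limits below are hypothetical:
#
#   td = TemperatureDensity('DATA/ob.00123.ransdat', ig=2, intc=5, data_prefix='ob_')
#   td.plot_ttdd(LAXIS=1, bconv=4.0e8, tconv=9.0e8,
#                xbl=3.5e8, xbr=9.8e8, ybu=0., ybd=0., ilg=0)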
|
mmicromegasREPO_NAMEransXPATH_START.@ransX_extracted@ransX-master@[email protected]@.PATH_END.py
|
{
"filename": "sample.py",
"repo_name": "astropy/photutils",
"repo_path": "photutils_extracted/photutils-main/photutils/isophote/sample.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides a class to sample data along an elliptical path.
"""
import copy
import numpy as np
from photutils.isophote.geometry import EllipseGeometry
from photutils.isophote.integrator import INTEGRATORS
__all__ = ['EllipseSample']
class EllipseSample:
"""
Class to sample image data along an elliptical path.
The image intensities along the elliptical path can be extracted
using a selection of integration algorithms.
The ``geometry`` attribute describes the geometry of the elliptical
path.
Parameters
----------
image : 2D `~numpy.ndarray`
The input image.
sma : float
The semimajor axis length in pixels.
x0, y0 : float, optional
The (x, y) coordinate of the ellipse center.
astep : float, optional
The step value for growing/shrinking the semimajor axis. It can
be expressed either in pixels (when ``linear_growth=True``) or
as a relative value (when ``linear_growth=False``). The default
is 0.1.
eps : float, optional
The ellipticity of the ellipse. The default is 0.2.
position_angle : float, optional
The position angle of ellipse in relation to the positive x axis
of the image array (rotating towards the positive y axis). The
default is 0.
sclip : float, optional
The sigma-clip sigma value. The default is 3.0.
nclip : int, optional
The number of sigma-clip iterations. Set to zero to skip
sigma-clipping. The default is 0.
linear_growth : bool, optional
The semimajor axis growing/shrinking mode. The default is
`False`.
integrmode : {'bilinear', 'nearest_neighbor', 'mean', 'median'}, optional
The area integration mode. The default is 'bilinear'.
geometry : `~photutils.isophote.EllipseGeometry` instance or `None`
The geometry that describes the ellipse. This can be
used in lieu of the explicit specification of parameters
``sma``, ``x0``, ``y0``, ``eps``, etc. In any case, the
`~photutils.isophote.EllipseGeometry` instance becomes an
attribute of the `~photutils.isophote.EllipseSample` object. The
default is `None`.
Attributes
----------
values : 2D `~numpy.ndarray`
The sampled values as a 2D array, where the rows contain the
angles, radii, and extracted intensity values, respectively.
mean : float
The mean intensity along the elliptical path.
geometry : `~photutils.isophote.EllipseGeometry` instance
The geometry of the elliptical path.
gradient : float
The local radial intensity gradient.
gradient_error : float
The error associated with the local radial intensity gradient.
gradient_relative_error : float
The relative error associated with the local radial intensity
gradient.
sector_area : float
The average area of the sectors along the elliptical path from
which the sample values were integrated.
total_points : int
The total number of sample values that would cover the entire
elliptical path.
actual_points : int
The actual number of sample values that were taken from the
image. It can be smaller than ``total_points`` when the ellipse
encompasses regions outside the image, or when sigma-clipping
removed some of the points.
"""
def __init__(self, image, sma, x0=None, y0=None, astep=0.1, eps=0.2,
position_angle=0.0, sclip=3.0, nclip=0, linear_growth=False,
integrmode='bilinear', geometry=None):
self.image = image
self.integrmode = integrmode
if geometry:
# when the geometry is inherited from somewhere else,
# its sma attribute must be replaced by the value
# explicitly passed to the constructor.
self.geometry = copy.deepcopy(geometry)
self.geometry.sma = sma
else:
# if no center was specified, assume it's roughly
# coincident with the image center
_x0 = x0
_y0 = y0
if not _x0 or not _y0:
_x0 = image.shape[1] / 2
_y0 = image.shape[0] / 2
self.geometry = EllipseGeometry(_x0, _y0, sma, eps,
position_angle, astep,
linear_growth)
# sigma-clip parameters
self.sclip = sclip
self.nclip = nclip
# extracted values associated with this sample.
self.values = None
self.mean = None
self.gradient = None
self.gradient_error = None
self.gradient_relative_error = None
self.sector_area = None
# total_points reports the total number of pairs angle-radius that
# were attempted. actual_points reports the actual number of sampled
# pairs angle-radius that resulted in valid values.
self.total_points = 0
self.actual_points = 0
def extract(self):
"""
Extract sample data by scanning an elliptical path over the
image array.
Returns
-------
result : 2D `~numpy.ndarray`
The rows of the array contain the angles, radii, and
extracted intensity values, respectively.
"""
# the sample values themselves are kept cached to prevent
# multiple calls to the integrator code.
if self.values is not None:
return self.values
s = self._extract()
self.values = s
return s
def _extract(self, phi_min=0.05):
# Here the actual sampling takes place. This is called only once
# during the life of an EllipseSample instance, because it's an
# expensive calculation. This method should not be called from
# external code.
# To force it to rerun, set "sample.values = None" before
# calling sample.extract().
# individual extracted sample points will be stored in here
angles = []
radii = []
intensities = []
sector_areas = []
# reset counters
self.total_points = 0
self.actual_points = 0
# build integrator
integrator = INTEGRATORS[self.integrmode](self.image, self.geometry,
angles, radii, intensities)
# initialize walk along elliptical path
radius = self.geometry.initial_polar_radius
phi = self.geometry.initial_polar_angle
# In case of an area integrator, ask the integrator to deliver a
# hint of how much area the sectors will have. In case of too
# small areas, tests showed that the area integrators (mean,
# median) won't perform properly. In that case, we override the
# caller's selection and use the bilinear integrator regardless.
if integrator.is_area():
integrator.integrate(radius, phi)
area = integrator.get_sector_area()
# this integration that just took place messes up with the
# storage arrays and the constructors. We have to build a new
# integrator instance from scratch, even if it is the same
# kind as originally selected by the caller.
angles = []
radii = []
intensities = []
if area < 1.0:
integrator = INTEGRATORS['bilinear'](
self.image, self.geometry, angles, radii, intensities)
else:
integrator = INTEGRATORS[self.integrmode](self.image,
self.geometry,
angles, radii,
intensities)
# walk along elliptical path, integrating at specified
# places defined by polar vector. Need to go a bit beyond
# full circle to ensure full coverage.
while phi <= np.pi * 2.0 + phi_min:
# do the integration at phi-radius position, and append
# results to the angles, radii, and intensities lists.
integrator.integrate(radius, phi)
# store sector area locally
sector_areas.append(integrator.get_sector_area())
# update total number of points
self.total_points += 1
# update angle and radius to be used to define
# next polar vector along the elliptical path
phistep_ = integrator.get_polar_angle_step()
phi += min(phistep_, 0.5)
radius = self.geometry.radius(phi)
# average sector area is calculated after the integrator had
# the opportunity to step over the entire elliptical path.
self.sector_area = np.mean(np.array(sector_areas))
# apply sigma-clipping.
angles, radii, intensities = self._sigma_clip(angles, radii,
intensities)
# actual number of sampled points, after sigma-clip removed outliers.
self.actual_points = len(angles)
# pack results in 2-d array
return np.array([np.array(angles), np.array(radii),
np.array(intensities)])
def _sigma_clip(self, angles, radii, intensities):
if self.nclip > 0:
for _ in range(self.nclip):
# do not use list.copy()! must be python2-compliant.
angles, radii, intensities = self._iter_sigma_clip(
angles[:], radii[:], intensities[:])
return np.array(angles), np.array(radii), np.array(intensities)
def _iter_sigma_clip(self, angles, radii, intensities):
# Can't use scipy or astropy tools because they use masked arrays.
# Also, they operate on a single array, and we need to operate on
# three arrays simultaneously. We need something that physically
# removes the clipped points from the arrays, since that is what
# the remaining of the `ellipse` code expects.
r_angles = []
r_radii = []
r_intensities = []
values = np.array(intensities)
mean = np.mean(values)
sig = np.std(values)
lower = mean - self.sclip * sig
upper = mean + self.sclip * sig
count = 0
for k in range(len(intensities)):
if intensities[k] >= lower and intensities[k] < upper:
r_angles.append(angles[k])
r_radii.append(radii[k])
r_intensities.append(intensities[k])
count += 1
return r_angles, r_radii, r_intensities
def update(self, fixed_parameters=None):
"""
Update this `~photutils.isophote.EllipseSample` instance.
This method calls the
:meth:`~photutils.isophote.EllipseSample.extract` method to get
the values that match the current ``geometry`` attribute, and
then computes the mean intensity, local gradient, and other
associated quantities.
Parameters
----------
fixed_parameters : `None` or array_like, optional
An array of the fixed parameters. Must have 4 elements,
corresponding to x center, y center, PA, and EPS.
"""
if fixed_parameters is None:
fixed_parameters = np.array([False, False, False, False])
self.geometry.fix = fixed_parameters
step = self.geometry.astep
# Update the mean value first, using extraction from main sample.
s = self.extract()
self.mean = np.mean(s[2])
# Get sample with same geometry but at a different distance from
# center. Estimate gradient from there.
gradient, gradient_error = self._get_gradient(step)
# Check for meaningful gradient. If no meaningful gradient, try
# another sample, this time using larger radius. Meaningful
# gradient means something shallower, but still close to within
# a factor 3 from previous gradient estimate. If no previous
# estimate is available, guess it by adding the error to the
# current gradient.
previous_gradient = self.gradient
if not previous_gradient:
previous_gradient = gradient + gradient_error
if gradient >= (previous_gradient / 3.0): # gradient is negative!
gradient, gradient_error = self._get_gradient(2 * step)
# If still no meaningful gradient can be measured, try with
# previous one, slightly shallower. A factor 0.8 is not too far
# from what is expected from geometrical sampling steps of 10-20%
# and a deVaucouleurs law or an exponential disk (at least at its
# inner parts, r <~ 5 req). Gradient error is meaningless in this
# case.
if gradient >= (previous_gradient / 3.0):
gradient = previous_gradient * 0.8
gradient_error = None
self.gradient = gradient
self.gradient_error = gradient_error
if gradient_error and gradient < 0.0:
self.gradient_relative_error = gradient_error / np.abs(gradient)
else:
self.gradient_relative_error = None
def _get_gradient(self, step):
gradient_sma = (1.0 + step) * self.geometry.sma
gradient_sample = EllipseSample(
self.image, gradient_sma, x0=self.geometry.x0,
y0=self.geometry.y0, astep=self.geometry.astep, sclip=self.sclip,
nclip=self.nclip, eps=self.geometry.eps,
position_angle=self.geometry.pa,
linear_growth=self.geometry.linear_growth,
integrmode=self.integrmode)
sg = gradient_sample.extract()
mean_g = np.mean(sg[2])
gradient = (mean_g - self.mean) / self.geometry.sma / step
s = self.extract()
sigma = np.std(s[2])
sigma_g = np.std(sg[2])
gradient_error = (np.sqrt(sigma**2 / len(s[2])
+ sigma_g**2 / len(sg[2]))
/ self.geometry.sma / step)
return gradient, gradient_error
def coordinates(self):
"""
Return the (x, y) coordinates associated with each sampled
point.
Returns
-------
x, y : 1D `~numpy.ndarray`
The x and y coordinate arrays.
"""
angles = self.values[0]
radii = self.values[1]
x = np.zeros(len(angles))
y = np.zeros(len(angles))
for i in range(len(x)):
x[i] = (radii[i] * np.cos(angles[i] + self.geometry.pa)
+ self.geometry.x0)
y[i] = (radii[i] * np.sin(angles[i] + self.geometry.pa)
+ self.geometry.y0)
return x, y
class CentralEllipseSample(EllipseSample):
"""
An `~photutils.isophote.EllipseSample` subclass designed to handle
the special case of the central pixel in the galaxy image.
"""
def update(self, fixed_parameters=None):
"""
Update this `~photutils.isophote.EllipseSample` instance with
the intensity integrated at the (x0, y0) center position using
bilinear integration. The local gradient is set to `None`.
Parameters
----------
fixed_parameters : `None` or array_like, optional
An array of the fixed parameters. Must have 4 elements,
corresponding to x center, y center, PA, and EPS. This
keyword is ignored in this subclass.
"""
s = self.extract()
self.mean = s[2][0]
self.gradient = None
self.gradient_error = None
self.gradient_relative_error = None
def _extract(self):
angles = []
radii = []
intensities = []
integrator = INTEGRATORS['bilinear'](self.image, self.geometry,
angles, radii, intensities)
integrator.integrate(0.0, 0.0)
self.total_points = 1
self.actual_points = 1
return np.array([np.array(angles), np.array(radii),
np.array(intensities)])
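# Usage sketch (illustrative, not part of the original module). The image below is
# synthetic and the geometry values are arbitrary:
#
#   import numpy as np
#   yy, xx = np.mgrid[0:101, 0:101]
#   image = 100.0 * np.exp(-0.05 * np.hypot(xx - 50, yy - 50))
#   sample = EllipseSample(image, sma=15.0, x0=50.0, y0=50.0, eps=0.2)
#   sample.update()
#   print(sample.mean, sample.gradient)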
|
astropyREPO_NAMEphotutilsPATH_START.@photutils_extracted@photutils-main@photutils@[email protected]@.PATH_END.py
|
{
"filename": "test_sample.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/groupby/methods/test_sample.py",
"type": "Python"
}
|
import pytest
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize("n, frac", [(2, None), (None, 0.2)])
def test_groupby_sample_balanced_groups_shape(n, frac):
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=n, frac=frac)
values = [1] * 2 + [2] * 2
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=n, frac=frac)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_unbalanced_groups_shape():
values = [1] * 10 + [2] * 20
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=5)
values = [1] * 5 + [2] * 5
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=5)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_index_value_spans_groups():
values = [1] * 3 + [2] * 3
df = DataFrame({"a": values, "b": values}, index=[1, 2, 2, 2, 2, 2])
result = df.groupby("a").sample(n=2)
values = [1] * 2 + [2] * 2
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=2)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_n_and_frac_raises():
df = DataFrame({"a": [1, 2], "b": [1, 2]})
msg = "Please enter a value for `frac` OR `n`, not both"
with pytest.raises(ValueError, match=msg):
df.groupby("a").sample(n=1, frac=1.0)
with pytest.raises(ValueError, match=msg):
df.groupby("a")["b"].sample(n=1, frac=1.0)
def test_groupby_sample_frac_gt_one_without_replacement_raises():
df = DataFrame({"a": [1, 2], "b": [1, 2]})
msg = "Replace has to be set to `True` when upsampling the population `frac` > 1."
with pytest.raises(ValueError, match=msg):
df.groupby("a").sample(frac=1.5, replace=False)
with pytest.raises(ValueError, match=msg):
df.groupby("a")["b"].sample(frac=1.5, replace=False)
@pytest.mark.parametrize("n", [-1, 1.5])
def test_groupby_sample_invalid_n_raises(n):
df = DataFrame({"a": [1, 2], "b": [1, 2]})
if n < 0:
msg = "A negative number of rows requested. Please provide `n` >= 0."
else:
msg = "Only integers accepted as `n` values"
with pytest.raises(ValueError, match=msg):
df.groupby("a").sample(n=n)
with pytest.raises(ValueError, match=msg):
df.groupby("a")["b"].sample(n=n)
def test_groupby_sample_oversample():
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(frac=2.0, replace=True)
values = [1] * 20 + [2] * 20
expected = DataFrame({"a": values, "b": values}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(frac=2.0, replace=True)
expected = Series(values, name="b", index=result.index)
tm.assert_series_equal(result, expected)
def test_groupby_sample_without_n_or_frac():
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values})
result = df.groupby("a").sample(n=None, frac=None)
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=result.index)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=None, frac=None)
expected = Series([1, 2], name="b", index=result.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index, expected_index",
[(["w", "x", "y", "z"], ["w", "w", "y", "y"]), ([3, 4, 5, 6], [3, 3, 5, 5])],
)
def test_groupby_sample_with_weights(index, expected_index):
# GH 39927 - tests for integer index needed
values = [1] * 2 + [2] * 2
df = DataFrame({"a": values, "b": values}, index=Index(index))
result = df.groupby("a").sample(n=2, replace=True, weights=[1, 0, 1, 0])
expected = DataFrame({"a": values, "b": values}, index=Index(expected_index))
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].sample(n=2, replace=True, weights=[1, 0, 1, 0])
expected = Series(values, name="b", index=Index(expected_index))
tm.assert_series_equal(result, expected)
def test_groupby_sample_with_selections():
# GH 39928
values = [1] * 10 + [2] * 10
df = DataFrame({"a": values, "b": values, "c": values})
result = df.groupby("a")[["b", "c"]].sample(n=None, frac=None)
expected = DataFrame({"b": [1, 2], "c": [1, 2]}, index=result.index)
tm.assert_frame_equal(result, expected)
def test_groupby_sample_with_empty_inputs():
# GH48459
df = DataFrame({"a": [], "b": []})
groupby_df = df.groupby("a")
result = groupby_df.sample()
expected = df
tm.assert_frame_equal(result, expected)
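# Illustrative note (not part of the original test module): the behaviour exercised
# above corresponds to plain calls such as
#   df.groupby("a").sample(n=2, random_state=0)
# which draws two rows per group; `random_state` is shown only to make the draw
# reproducible.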
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@groupby@methods@[email protected]_END.py
|
{
"filename": "objects.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/client/schemas/objects.py",
"type": "Python"
}
|
import datetime
import warnings
from collections.abc import Callable, Mapping
from functools import partial
from typing import (
TYPE_CHECKING,
Annotated,
Any,
ClassVar,
Generic,
Optional,
Union,
cast,
overload,
)
from uuid import UUID, uuid4
import orjson
import pendulum
from pydantic import (
ConfigDict,
Discriminator,
Field,
HttpUrl,
IPvAnyNetwork,
SerializationInfo,
SerializerFunctionWrapHandler,
Tag,
field_validator,
model_serializer,
model_validator,
)
from typing_extensions import Literal, Self, TypeVar
from prefect._internal.compatibility import deprecated
from prefect._internal.compatibility.migration import getattr_migration
from prefect._internal.schemas.bases import ObjectBaseModel, PrefectBaseModel
from prefect._internal.schemas.fields import CreatedBy, UpdatedBy
from prefect._internal.schemas.validators import (
get_or_create_run_name,
list_length_50_or_less,
raise_on_name_alphanumeric_dashes_only,
set_run_policy_deprecated_fields,
validate_block_document_name,
validate_default_queue_id_not_none,
validate_max_metadata_length,
validate_message_template_variables,
validate_name_present_on_nonanonymous_blocks,
validate_not_negative,
validate_parent_and_ref_diff,
)
from prefect.client.schemas.schedules import SCHEDULE_TYPES
from prefect.settings import PREFECT_CLOUD_API_URL, PREFECT_CLOUD_UI_URL
from prefect.types import (
MAX_VARIABLE_NAME_LENGTH,
KeyValueLabelsField,
Name,
NonNegativeInteger,
PositiveInteger,
StrictVariableValue,
)
from prefect.utilities.collections import AutoEnum, listrepr, visit_collection
from prefect.utilities.names import generate_slug
from prefect.utilities.pydantic import handle_secret_render
if TYPE_CHECKING:
from prefect.client.schemas.actions import StateCreate
from prefect.results import BaseResult, ResultRecordMetadata
DateTime = pendulum.DateTime
else:
from pydantic_extra_types.pendulum_dt import DateTime
R = TypeVar("R", default=Any)
DEFAULT_BLOCK_SCHEMA_VERSION = "non-versioned"
DEFAULT_AGENT_WORK_POOL_NAME = "default-agent-pool"
FLOW_RUN_NOTIFICATION_TEMPLATE_KWARGS = [
"flow_run_notification_policy_id",
"flow_id",
"flow_name",
"flow_run_url",
"flow_run_id",
"flow_run_name",
"flow_run_parameters",
"flow_run_state_type",
"flow_run_state_name",
"flow_run_state_timestamp",
"flow_run_state_message",
]
class StateType(AutoEnum):
"""Enumeration of state types."""
SCHEDULED = AutoEnum.auto()
PENDING = AutoEnum.auto()
RUNNING = AutoEnum.auto()
COMPLETED = AutoEnum.auto()
FAILED = AutoEnum.auto()
CANCELLED = AutoEnum.auto()
CRASHED = AutoEnum.auto()
PAUSED = AutoEnum.auto()
CANCELLING = AutoEnum.auto()
TERMINAL_STATES = {
StateType.COMPLETED,
StateType.CANCELLED,
StateType.FAILED,
StateType.CRASHED,
}
class WorkPoolStatus(AutoEnum):
"""Enumeration of work pool statuses."""
READY = AutoEnum.auto()
NOT_READY = AutoEnum.auto()
PAUSED = AutoEnum.auto()
@property
def display_name(self):
return self.name.replace("_", " ").capitalize()
class WorkerStatus(AutoEnum):
"""Enumeration of worker statuses."""
ONLINE = AutoEnum.auto()
OFFLINE = AutoEnum.auto()
class DeploymentStatus(AutoEnum):
"""Enumeration of deployment statuses."""
READY = AutoEnum.auto()
NOT_READY = AutoEnum.auto()
class WorkQueueStatus(AutoEnum):
"""Enumeration of work queue statuses."""
READY = AutoEnum.auto()
NOT_READY = AutoEnum.auto()
PAUSED = AutoEnum.auto()
class ConcurrencyLimitStrategy(AutoEnum):
"""Enumeration of concurrency limit strategies."""
ENQUEUE = AutoEnum.auto()
CANCEL_NEW = AutoEnum.auto()
class ConcurrencyOptions(PrefectBaseModel):
"""
    Class for storing the concurrency config in the database.
"""
collision_strategy: ConcurrencyLimitStrategy
class ConcurrencyLimitConfig(PrefectBaseModel):
"""
    Class for storing the concurrency limit config in the database.
"""
limit: int
collision_strategy: ConcurrencyLimitStrategy = ConcurrencyLimitStrategy.ENQUEUE
class StateDetails(PrefectBaseModel):
flow_run_id: Optional[UUID] = None
task_run_id: Optional[UUID] = None
# for task runs that represent subflows, the subflow's run ID
child_flow_run_id: Optional[UUID] = None
scheduled_time: Optional[DateTime] = None
cache_key: Optional[str] = None
cache_expiration: Optional[DateTime] = None
deferred: Optional[bool] = None
untrackable_result: bool = False
pause_timeout: Optional[DateTime] = None
pause_reschedule: bool = False
pause_key: Optional[str] = None
run_input_keyset: Optional[dict[str, str]] = None
refresh_cache: Optional[bool] = None
retriable: Optional[bool] = None
transition_id: Optional[UUID] = None
task_parameters_id: Optional[UUID] = None
def data_discriminator(x: Any) -> str:
if isinstance(x, dict) and "type" in x and x["type"] != "unpersisted":
return "BaseResult"
elif isinstance(x, dict) and "storage_key" in x:
return "ResultRecordMetadata"
return "Any"
class State(ObjectBaseModel, Generic[R]):
"""
The state of a run.
"""
type: StateType
name: Optional[str] = Field(default=None)
timestamp: DateTime = Field(default_factory=lambda: pendulum.now("UTC"))
message: Optional[str] = Field(default=None, examples=["Run started"])
state_details: StateDetails = Field(default_factory=StateDetails)
data: Annotated[
Union[
Annotated["BaseResult[R]", Tag("BaseResult")],
Annotated["ResultRecordMetadata", Tag("ResultRecordMetadata")],
Annotated[Any, Tag("Any")],
],
Discriminator(data_discriminator),
] = Field(default=None)
@overload
def result(
self: "State[R]",
raise_on_failure: Literal[True] = ...,
fetch: bool = ...,
retry_result_failure: bool = ...,
) -> R:
...
@overload
def result(
self: "State[R]",
raise_on_failure: Literal[False] = False,
fetch: bool = ...,
retry_result_failure: bool = ...,
) -> Union[R, Exception]:
...
@deprecated.deprecated_parameter(
"fetch",
when=lambda fetch: fetch is not True,
start_date="Oct 2024",
end_date="Jan 2025",
help="Please ensure you are awaiting the call to `result()` when calling in an async context.",
)
def result(
self,
raise_on_failure: bool = True,
fetch: bool = True,
retry_result_failure: bool = True,
) -> Union[R, Exception]:
"""
Retrieve the result attached to this state.
Args:
raise_on_failure: a boolean specifying whether to raise an exception
if the state is of type `FAILED` and the underlying data is an exception. When flow
was run in a different memory space (using `run_deployment`), this will only raise
if `fetch` is `True`.
fetch: a boolean specifying whether to resolve references to persisted
results into data. For synchronous users, this defaults to `True`.
For asynchronous users, this defaults to `False` for backwards
compatibility.
retry_result_failure: a boolean specifying whether to retry on failures to
load the result from result storage
Raises:
TypeError: If the state is failed but the result is not an exception.
Returns:
The result of the run
Examples:
Get the result from a flow state
>>> @flow
>>> def my_flow():
>>> return "hello"
>>> my_flow(return_state=True).result()
hello
Get the result from a failed state
>>> @flow
>>> def my_flow():
>>> raise ValueError("oh no!")
>>> state = my_flow(return_state=True) # Error is wrapped in FAILED state
>>> state.result() # Raises `ValueError`
Get the result from a failed state without erroring
>>> @flow
>>> def my_flow():
>>> raise ValueError("oh no!")
>>> state = my_flow(return_state=True)
>>> result = state.result(raise_on_failure=False)
>>> print(result)
ValueError("oh no!")
Get the result from a flow state in an async context
>>> @flow
>>> async def my_flow():
>>> return "hello"
>>> state = await my_flow(return_state=True)
>>> await state.result()
hello
Get the result with `raise_on_failure` from a flow run in a different memory space
>>> @flow
>>> async def my_flow():
>>> raise ValueError("oh no!")
>>> my_flow.deploy("my_deployment/my_flow")
>>> flow_run = run_deployment("my_deployment/my_flow")
>>> await flow_run.state.result(raise_on_failure=True) # Raises `ValueError("oh no!")`
"""
from prefect.states import get_state_result
return get_state_result(
self,
raise_on_failure=raise_on_failure,
fetch=fetch,
retry_result_failure=retry_result_failure,
)
def to_state_create(self) -> "StateCreate":
"""
Convert this state to a `StateCreate` type which can be used to set the state of
a run in the API.
This method will drop this state's `data` if it is not a result type. Only
results should be sent to the API. Other data is only available locally.
"""
from prefect.client.schemas.actions import StateCreate
from prefect.results import (
BaseResult,
ResultRecord,
should_persist_result,
)
if isinstance(self.data, BaseResult):
data = cast(BaseResult[R], self.data)
elif isinstance(self.data, ResultRecord) and should_persist_result():
data = self.data.metadata
else:
data = None
return StateCreate(
type=self.type,
name=self.name,
message=self.message,
data=data,
state_details=self.state_details,
)
@model_validator(mode="after")
def default_name_from_type(self) -> Self:
"""If a name is not provided, use the type"""
# if `type` is not in `values` it means the `type` didn't pass its own
# validation check and an error will be raised after this function is called
name = self.name
if name is None and self.type:
self.name = " ".join([v.capitalize() for v in self.type.split("_")])
return self
@model_validator(mode="after")
def default_scheduled_start_time(self) -> Self:
if self.type == StateType.SCHEDULED:
if not self.state_details.scheduled_time:
self.state_details.scheduled_time = pendulum.DateTime.now("utc")
return self
@model_validator(mode="after")
def set_unpersisted_results_to_none(self) -> Self:
if isinstance(self.data, dict) and self.data.get("type") == "unpersisted":
self.data = None
return self
def is_scheduled(self) -> bool:
return self.type == StateType.SCHEDULED
def is_pending(self) -> bool:
return self.type == StateType.PENDING
def is_running(self) -> bool:
return self.type == StateType.RUNNING
def is_completed(self) -> bool:
return self.type == StateType.COMPLETED
def is_failed(self) -> bool:
return self.type == StateType.FAILED
def is_crashed(self) -> bool:
return self.type == StateType.CRASHED
def is_cancelled(self) -> bool:
return self.type == StateType.CANCELLED
def is_cancelling(self) -> bool:
return self.type == StateType.CANCELLING
def is_final(self) -> bool:
return self.type in TERMINAL_STATES
def is_paused(self) -> bool:
return self.type == StateType.PAUSED
def model_copy(
self, *, update: Optional[Mapping[str, Any]] = None, deep: bool = False
) -> Self:
"""
Copying API models should return an object that could be inserted into the
database again. The 'timestamp' is reset using the default factory.
"""
update = {
"timestamp": self.model_fields["timestamp"].get_default(),
**(update or {}),
}
return super().model_copy(update=update, deep=deep)
def fresh_copy(self, **kwargs: Any) -> Self:
"""
Return a fresh copy of the state with a new ID.
"""
return self.model_copy(
update={
"id": uuid4(),
"created": pendulum.now("utc"),
"updated": pendulum.now("utc"),
"timestamp": pendulum.now("utc"),
},
**kwargs,
)
def __repr__(self) -> str:
"""
Generates a complete state representation appropriate for introspection
and debugging, including the result:
`MyCompletedState(message="my message", type=COMPLETED, result=...)`
"""
result = self.data
display = dict(
message=repr(self.message),
type=str(self.type.value),
result=repr(result),
)
return f"{self.name}({', '.join(f'{k}={v}' for k, v in display.items())})"
def __str__(self) -> str:
"""
Generates a simple state representation appropriate for logging:
`MyCompletedState("my message", type=COMPLETED)`
"""
display: list[str] = []
if self.message:
display.append(repr(self.message))
if TYPE_CHECKING:
assert self.name is not None
if self.type.lower() != self.name.lower():
display.append(f"type={self.type.value}")
return f"{self.name}({', '.join(display)})"
def __hash__(self) -> int:
return hash(
(
getattr(self.state_details, "flow_run_id", None),
getattr(self.state_details, "task_run_id", None),
self.timestamp,
self.type,
)
)
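# Illustrative usage sketch (added for exposition; not part of the original
# module). Shows how callers typically inspect a `State` and retrieve its
# result; `my_flow` is a hypothetical `@flow`-decorated function defined
# elsewhere, so this is left as a commented sketch:
#
#     state = my_flow(return_state=True)
#     if state.is_completed():
#         value = state.result()
#     elif state.is_failed():
#         error = state.result(raise_on_failure=False)  # returns the exception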
class FlowRunPolicy(PrefectBaseModel):
"""Defines of how a flow run should be orchestrated."""
max_retries: int = Field(
default=0,
description=(
"The maximum number of retries. Field is not used. Please use `retries`"
" instead."
),
deprecated=True,
)
retry_delay_seconds: float = Field(
default=0,
description=(
"The delay between retries. Field is not used. Please use `retry_delay`"
" instead."
),
deprecated=True,
)
retries: Optional[int] = Field(default=None, description="The number of retries.")
retry_delay: Optional[int] = Field(
default=None, description="The delay time between retries, in seconds."
)
pause_keys: Optional[set[str]] = Field(
default_factory=set, description="Tracks pauses this run has observed."
)
resuming: Optional[bool] = Field(
default=False, description="Indicates if this run is resuming from a pause."
)
retry_type: Optional[Literal["in_process", "reschedule"]] = Field(
default=None, description="The type of retry this run is undergoing."
)
@model_validator(mode="before")
@classmethod
def populate_deprecated_fields(cls, values: Any) -> Any:
if isinstance(values, dict):
return set_run_policy_deprecated_fields(values)
return values
class FlowRun(ObjectBaseModel):
name: str = Field(
default_factory=lambda: generate_slug(2),
description=(
"The name of the flow run. Defaults to a random slug if not specified."
),
examples=["my-flow-run"],
)
flow_id: UUID = Field(default=..., description="The id of the flow being run.")
state_id: Optional[UUID] = Field(
default=None, description="The id of the flow run's current state."
)
deployment_id: Optional[UUID] = Field(
default=None,
description=(
"The id of the deployment associated with this flow run, if available."
),
)
deployment_version: Optional[str] = Field(
default=None,
description="The version of the deployment associated with this flow run.",
examples=["1.0"],
)
work_queue_name: Optional[str] = Field(
default=None, description="The work queue that handled this flow run."
)
flow_version: Optional[str] = Field(
default=None,
description="The version of the flow executed in this flow run.",
examples=["1.0"],
)
parameters: dict[str, Any] = Field(
default_factory=dict, description="Parameters for the flow run."
)
idempotency_key: Optional[str] = Field(
default=None,
description=(
"An optional idempotency key for the flow run. Used to ensure the same flow"
" run is not created multiple times."
),
)
context: dict[str, Any] = Field(
default_factory=dict,
description="Additional context for the flow run.",
examples=[{"my_var": "my_val"}],
)
empirical_policy: FlowRunPolicy = Field(
default_factory=FlowRunPolicy,
)
tags: list[str] = Field(
default_factory=list,
description="A list of tags on the flow run",
examples=[["tag-1", "tag-2"]],
)
labels: KeyValueLabelsField = Field(default_factory=dict)
parent_task_run_id: Optional[UUID] = Field(
default=None,
description=(
"If the flow run is a subflow, the id of the 'dummy' task in the parent"
" flow used to track subflow state."
),
)
run_count: int = Field(
default=0, description="The number of times the flow run was executed."
)
expected_start_time: Optional[DateTime] = Field(
default=None,
description="The flow run's expected start time.",
)
next_scheduled_start_time: Optional[DateTime] = Field(
default=None,
description="The next time the flow run is scheduled to start.",
)
start_time: Optional[DateTime] = Field(
default=None, description="The actual start time."
)
end_time: Optional[DateTime] = Field(
default=None, description="The actual end time."
)
total_run_time: datetime.timedelta = Field(
default=datetime.timedelta(0),
description=(
"Total run time. If the flow run was executed multiple times, the time of"
" each run will be summed."
),
)
estimated_run_time: datetime.timedelta = Field(
default=datetime.timedelta(0),
description="A real-time estimate of the total run time.",
)
estimated_start_time_delta: datetime.timedelta = Field(
default=datetime.timedelta(0),
description="The difference between actual and expected start time.",
)
auto_scheduled: bool = Field(
default=False,
description="Whether or not the flow run was automatically scheduled.",
)
infrastructure_document_id: Optional[UUID] = Field(
default=None,
description="The block document defining infrastructure to use this flow run.",
)
infrastructure_pid: Optional[str] = Field(
default=None,
description="The id of the flow run as returned by an infrastructure block.",
)
created_by: Optional[CreatedBy] = Field(
default=None,
description="Optional information about the creator of this flow run.",
)
work_queue_id: Optional[UUID] = Field(
default=None, description="The id of the run's work pool queue."
)
work_pool_id: Optional[UUID] = Field(
default=None, description="The work pool with which the queue is associated."
)
work_pool_name: Optional[str] = Field(
default=None,
description="The name of the flow run's work pool.",
examples=["my-work-pool"],
)
state: Optional[State] = Field(
default=None,
description="The state of the flow run.",
examples=["State(type=StateType.COMPLETED)"],
)
job_variables: Optional[dict[str, Any]] = Field(
default=None,
description="Job variables for the flow run.",
)
# These are server-side optimizations and should not be present on client models
# TODO: Deprecate these fields
state_type: Optional[StateType] = Field(
default=None, description="The type of the current flow run state."
)
state_name: Optional[str] = Field(
default=None, description="The name of the current flow run state."
)
def __eq__(self, other: Any) -> bool:
"""
Check for "equality" to another flow run schema
        Estimated times are rolling and will always change with repeated queries for
        a flow run, so we ignore them during equality checks.
"""
if isinstance(other, FlowRun):
exclude_fields = {"estimated_run_time", "estimated_start_time_delta"}
return self.model_dump(exclude=exclude_fields) == other.model_dump(
exclude=exclude_fields
)
return super().__eq__(other)
@field_validator("name", mode="before")
@classmethod
def set_default_name(cls, name: Optional[str]) -> str:
return get_or_create_run_name(name)
class TaskRunPolicy(PrefectBaseModel):
"""Defines of how a task run should retry."""
max_retries: int = Field(
default=0,
description=(
"The maximum number of retries. Field is not used. Please use `retries`"
" instead."
),
deprecated=True,
)
retry_delay_seconds: float = Field(
default=0,
description=(
"The delay between retries. Field is not used. Please use `retry_delay`"
" instead."
),
deprecated=True,
)
retries: Optional[int] = Field(default=None, description="The number of retries.")
retry_delay: Union[None, int, list[int]] = Field(
default=None,
description="A delay time or list of delay times between retries, in seconds.",
)
retry_jitter_factor: Optional[float] = Field(
default=None, description="Determines the amount a retry should jitter"
)
@model_validator(mode="after")
def populate_deprecated_fields(self):
"""
If deprecated fields are provided, populate the corresponding new fields
to preserve orchestration behavior.
"""
# We have marked these fields as deprecated, so we need to filter out the
# deprecation warnings _we're_ generating here
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
if not self.retries and self.max_retries != 0:
self.retries = self.max_retries
if not self.retry_delay and self.retry_delay_seconds != 0:
self.retry_delay = int(self.retry_delay_seconds)
return self
@field_validator("retry_delay")
@classmethod
def validate_configured_retry_delays(
cls, v: Optional[list[float]]
) -> Optional[list[float]]:
return list_length_50_or_less(v)
@field_validator("retry_jitter_factor")
@classmethod
def validate_jitter_factor(cls, v: Optional[float]) -> Optional[float]:
return validate_not_negative(v)
class TaskRunInput(PrefectBaseModel):
"""
Base class for classes that represent inputs to task runs, which
    could include constants, parameters, or other task runs.
"""
model_config: ClassVar[ConfigDict] = ConfigDict(frozen=True)
if not TYPE_CHECKING:
# subclasses provide the concrete type for this field
input_type: str
class TaskRunResult(TaskRunInput):
"""Represents a task run result input to another task run."""
input_type: Literal["task_run"] = "task_run"
id: UUID
class Parameter(TaskRunInput):
"""Represents a parameter input to a task run."""
input_type: Literal["parameter"] = "parameter"
name: str
class Constant(TaskRunInput):
"""Represents constant input value to a task run."""
input_type: Literal["constant"] = "constant"
type: str
class TaskRun(ObjectBaseModel):
name: str = Field(
default_factory=lambda: generate_slug(2), examples=["my-task-run"]
)
flow_run_id: Optional[UUID] = Field(
default=None, description="The flow run id of the task run."
)
task_key: str = Field(
default=..., description="A unique identifier for the task being run."
)
dynamic_key: str = Field(
default=...,
description=(
"A dynamic key used to differentiate between multiple runs of the same task"
" within the same flow run."
),
)
cache_key: Optional[str] = Field(
default=None,
description=(
"An optional cache key. If a COMPLETED state associated with this cache key"
" is found, the cached COMPLETED state will be used instead of executing"
" the task run."
),
)
cache_expiration: Optional[DateTime] = Field(
default=None, description="Specifies when the cached state should expire."
)
task_version: Optional[str] = Field(
default=None, description="The version of the task being run."
)
empirical_policy: TaskRunPolicy = Field(
default_factory=TaskRunPolicy,
)
tags: list[str] = Field(
default_factory=list,
description="A list of tags for the task run.",
examples=[["tag-1", "tag-2"]],
)
labels: KeyValueLabelsField = Field(default_factory=dict)
state_id: Optional[UUID] = Field(
default=None, description="The id of the current task run state."
)
task_inputs: dict[str, list[Union[TaskRunResult, Parameter, Constant]]] = Field(
default_factory=dict,
description=(
"Tracks the source of inputs to a task run. Used for internal bookkeeping. "
"Note the special __parents__ key, used to indicate a parent/child "
"relationship that may or may not include an input or wait_for semantic."
),
)
state_type: Optional[StateType] = Field(
default=None, description="The type of the current task run state."
)
state_name: Optional[str] = Field(
default=None, description="The name of the current task run state."
)
run_count: int = Field(
default=0, description="The number of times the task run has been executed."
)
flow_run_run_count: int = Field(
default=0,
description=(
"If the parent flow has retried, this indicates the flow retry this run is"
" associated with."
),
)
expected_start_time: Optional[DateTime] = Field(
default=None,
description="The task run's expected start time.",
)
# the next scheduled start time will be populated
# whenever the run is in a scheduled state
next_scheduled_start_time: Optional[DateTime] = Field(
default=None,
description="The next time the task run is scheduled to start.",
)
start_time: Optional[DateTime] = Field(
default=None, description="The actual start time."
)
end_time: Optional[DateTime] = Field(
default=None, description="The actual end time."
)
total_run_time: datetime.timedelta = Field(
default=datetime.timedelta(0),
description=(
"Total run time. If the task run was executed multiple times, the time of"
" each run will be summed."
),
)
estimated_run_time: datetime.timedelta = Field(
default=datetime.timedelta(0),
description="A real-time estimate of total run time.",
)
estimated_start_time_delta: datetime.timedelta = Field(
default=datetime.timedelta(0),
description="The difference between actual and expected start time.",
)
state: Optional[State] = Field(
default=None,
description="The state of the task run.",
examples=["State(type=StateType.COMPLETED)"],
)
@field_validator("name", mode="before")
@classmethod
def set_default_name(cls, name: Optional[str]) -> Name:
return get_or_create_run_name(name)
class Workspace(PrefectBaseModel):
"""
A Prefect Cloud workspace.
Expected payload for each workspace returned by the `me/workspaces` route.
"""
account_id: UUID = Field(..., description="The account id of the workspace.")
account_name: str = Field(..., description="The account name.")
account_handle: str = Field(..., description="The account's unique handle.")
workspace_id: UUID = Field(..., description="The workspace id.")
workspace_name: str = Field(..., description="The workspace name.")
workspace_description: str = Field(..., description="Description of the workspace.")
workspace_handle: str = Field(..., description="The workspace's unique handle.")
model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore")
@property
def handle(self) -> str:
"""
The full handle of the workspace as `account_handle` / `workspace_handle`
"""
return self.account_handle + "/" + self.workspace_handle
def api_url(self) -> str:
"""
Generate the API URL for accessing this workspace
"""
return (
f"{PREFECT_CLOUD_API_URL.value()}"
f"/accounts/{self.account_id}"
f"/workspaces/{self.workspace_id}"
)
def ui_url(self) -> str:
"""
Generate the UI URL for accessing this workspace
"""
return (
f"{PREFECT_CLOUD_UI_URL.value()}"
f"/account/{self.account_id}"
f"/workspace/{self.workspace_id}"
)
def __hash__(self) -> int:
return hash(self.handle)
class IPAllowlistEntry(PrefectBaseModel):
ip_network: IPvAnyNetwork
enabled: bool
description: Optional[str] = Field(
default=None, description="A description of the IP entry."
)
last_seen: Optional[str] = Field(
default=None,
description="The last time this IP was seen accessing Prefect Cloud.",
)
class IPAllowlist(PrefectBaseModel):
"""
A Prefect Cloud IP allowlist.
Expected payload for an IP allowlist from the Prefect Cloud API.
"""
entries: list[IPAllowlistEntry]
class IPAllowlistMyAccessResponse(PrefectBaseModel):
"""Expected payload for an IP allowlist access response from the Prefect Cloud API."""
allowed: bool
detail: str
class BlockType(ObjectBaseModel):
"""An ORM representation of a block type"""
name: Name = Field(default=..., description="A block type's name")
slug: str = Field(default=..., description="A block type's slug")
logo_url: Optional[HttpUrl] = Field(
default=None, description="Web URL for the block type's logo"
)
documentation_url: Optional[HttpUrl] = Field(
default=None, description="Web URL for the block type's documentation"
)
description: Optional[str] = Field(
default=None,
description="A short blurb about the corresponding block's intended use",
)
code_example: Optional[str] = Field(
default=None,
description="A code snippet demonstrating use of the corresponding block",
)
is_protected: bool = Field(
default=False, description="Protected block types cannot be modified via API."
)
class BlockSchema(ObjectBaseModel):
"""A representation of a block schema."""
checksum: str = Field(default=..., description="The block schema's unique checksum")
fields: dict[str, Any] = Field(
default_factory=dict, description="The block schema's field schema"
)
block_type_id: Optional[UUID] = Field(default=..., description="A block type ID")
block_type: Optional[BlockType] = Field(
default=None, description="The associated block type"
)
capabilities: list[str] = Field(
default_factory=list,
description="A list of Block capabilities",
)
version: str = Field(
default=DEFAULT_BLOCK_SCHEMA_VERSION,
description="Human readable identifier for the block schema",
)
class BlockDocument(ObjectBaseModel):
"""An ORM representation of a block document."""
name: Optional[Name] = Field(
default=None,
description=(
"The block document's name. Not required for anonymous block documents."
),
)
data: dict[str, Any] = Field(
default_factory=dict, description="The block document's data"
)
block_schema_id: UUID = Field(default=..., description="A block schema ID")
block_schema: Optional[BlockSchema] = Field(
default=None, description="The associated block schema"
)
block_type_id: UUID = Field(default=..., description="A block type ID")
block_type_name: Optional[str] = Field(None, description="A block type name")
block_type: Optional[BlockType] = Field(
default=None, description="The associated block type"
)
block_document_references: dict[str, dict[str, Any]] = Field(
default_factory=dict, description="Record of the block document's references"
)
is_anonymous: bool = Field(
default=False,
description=(
"Whether the block is anonymous (anonymous blocks are usually created by"
" Prefect automatically)"
),
)
_validate_name_format = field_validator("name")(validate_block_document_name)
@model_validator(mode="before")
@classmethod
def validate_name_is_present_if_not_anonymous(
cls, values: dict[str, Any]
) -> dict[str, Any]:
return validate_name_present_on_nonanonymous_blocks(values)
@model_serializer(mode="wrap")
def serialize_data(
self, handler: SerializerFunctionWrapHandler, info: SerializationInfo
) -> Any:
self.data = visit_collection(
self.data,
visit_fn=partial(handle_secret_render, context=info.context or {}),
return_data=True,
)
return handler(self)
class Flow(ObjectBaseModel):
"""An ORM representation of flow data."""
name: Name = Field(
default=..., description="The name of the flow", examples=["my-flow"]
)
tags: list[str] = Field(
default_factory=list,
description="A list of flow tags",
examples=[["tag-1", "tag-2"]],
)
labels: KeyValueLabelsField
class DeploymentSchedule(ObjectBaseModel):
deployment_id: Optional[UUID] = Field(
default=None,
description="The deployment id associated with this schedule.",
)
schedule: SCHEDULE_TYPES = Field(
default=..., description="The schedule for the deployment."
)
active: bool = Field(
default=True, description="Whether or not the schedule is active."
)
max_scheduled_runs: Optional[PositiveInteger] = Field(
default=None,
description="The maximum number of scheduled runs for the schedule.",
)
class Deployment(ObjectBaseModel):
"""An ORM representation of deployment data."""
name: Name = Field(default=..., description="The name of the deployment.")
version: Optional[str] = Field(
default=None, description="An optional version for the deployment."
)
description: Optional[str] = Field(
default=None, description="A description for the deployment."
)
flow_id: UUID = Field(
default=..., description="The flow id associated with the deployment."
)
paused: bool = Field(
default=False, description="Whether or not the deployment is paused."
)
concurrency_limit: Optional[int] = Field(
default=None, description="The concurrency limit for the deployment."
)
schedules: list[DeploymentSchedule] = Field(
default_factory=list, description="A list of schedules for the deployment."
)
job_variables: dict[str, Any] = Field(
default_factory=dict,
description="Overrides to apply to flow run infrastructure at runtime.",
)
parameters: dict[str, Any] = Field(
default_factory=dict,
description="Parameters for flow runs scheduled by the deployment.",
)
pull_steps: Optional[list[dict[str, Any]]] = Field(
default=None,
description="Pull steps for cloning and running this deployment.",
)
tags: list[str] = Field(
default_factory=list,
description="A list of tags for the deployment",
examples=[["tag-1", "tag-2"]],
)
labels: KeyValueLabelsField
work_queue_name: Optional[str] = Field(
default=None,
description=(
"The work queue for the deployment. If no work queue is set, work will not"
" be scheduled."
),
)
last_polled: Optional[DateTime] = Field(
default=None,
description="The last time the deployment was polled for status updates.",
)
parameter_openapi_schema: Optional[dict[str, Any]] = Field(
default=None,
description="The parameter schema of the flow, including defaults.",
)
path: Optional[str] = Field(
default=None,
description=(
"The path to the working directory for the workflow, relative to remote"
" storage or an absolute path."
),
)
entrypoint: Optional[str] = Field(
default=None,
description=(
"The path to the entrypoint for the workflow, relative to the `path`."
),
)
storage_document_id: Optional[UUID] = Field(
default=None,
description="The block document defining storage used for this flow.",
)
infrastructure_document_id: Optional[UUID] = Field(
default=None,
description="The block document defining infrastructure to use for flow runs.",
)
created_by: Optional[CreatedBy] = Field(
default=None,
description="Optional information about the creator of this deployment.",
)
updated_by: Optional[UpdatedBy] = Field(
default=None,
description="Optional information about the updater of this deployment.",
)
work_queue_id: Optional[UUID] = Field(
default=None,
description=(
"The id of the work pool queue to which this deployment is assigned."
),
)
enforce_parameter_schema: bool = Field(
default=True,
description=(
"Whether or not the deployment should enforce the parameter schema."
),
)
class ConcurrencyLimit(ObjectBaseModel):
"""An ORM representation of a concurrency limit."""
tag: str = Field(
default=..., description="A tag the concurrency limit is applied to."
)
concurrency_limit: int = Field(default=..., description="The concurrency limit.")
active_slots: list[UUID] = Field(
default_factory=list,
description="A list of active run ids using a concurrency slot",
)
class BlockSchemaReference(ObjectBaseModel):
"""An ORM representation of a block schema reference."""
parent_block_schema_id: UUID = Field(
default=..., description="ID of block schema the reference is nested within"
)
parent_block_schema: Optional[BlockSchema] = Field(
default=None, description="The block schema the reference is nested within"
)
reference_block_schema_id: UUID = Field(
default=..., description="ID of the nested block schema"
)
reference_block_schema: Optional[BlockSchema] = Field(
default=None, description="The nested block schema"
)
name: str = Field(
default=..., description="The name that the reference is nested under"
)
class BlockDocumentReference(ObjectBaseModel):
"""An ORM representation of a block document reference."""
parent_block_document_id: UUID = Field(
default=..., description="ID of block document the reference is nested within"
)
parent_block_document: Optional[BlockDocument] = Field(
default=None, description="The block document the reference is nested within"
)
reference_block_document_id: UUID = Field(
default=..., description="ID of the nested block document"
)
reference_block_document: Optional[BlockDocument] = Field(
default=None, description="The nested block document"
)
name: str = Field(
default=..., description="The name that the reference is nested under"
)
@model_validator(mode="before")
@classmethod
def validate_parent_and_ref_are_different(cls, values: Any) -> Any:
if isinstance(values, dict):
return validate_parent_and_ref_diff(values)
return values
class Configuration(ObjectBaseModel):
"""An ORM representation of account info."""
key: str = Field(default=..., description="Account info key")
value: dict[str, Any] = Field(default=..., description="Account info")
class SavedSearchFilter(PrefectBaseModel):
"""A filter for a saved search model. Intended for use by the Prefect UI."""
object: str = Field(default=..., description="The object over which to filter.")
property: str = Field(
default=..., description="The property of the object on which to filter."
)
type: str = Field(default=..., description="The type of the property.")
operation: str = Field(
default=...,
description="The operator to apply to the object. For example, `equals`.",
)
value: Any = Field(
default=..., description="A JSON-compatible value for the filter."
)
class SavedSearch(ObjectBaseModel):
"""An ORM representation of saved search data. Represents a set of filter criteria."""
name: str = Field(default=..., description="The name of the saved search.")
filters: list[SavedSearchFilter] = Field(
default_factory=list, description="The filter set for the saved search."
)
class Log(ObjectBaseModel):
"""An ORM representation of log data."""
name: str = Field(default=..., description="The logger name.")
level: int = Field(default=..., description="The log level.")
message: str = Field(default=..., description="The log message.")
timestamp: DateTime = Field(default=..., description="The log timestamp.")
flow_run_id: Optional[UUID] = Field(
default=None, description="The flow run ID associated with the log."
)
task_run_id: Optional[UUID] = Field(
default=None, description="The task run ID associated with the log."
)
class QueueFilter(PrefectBaseModel):
"""Filter criteria definition for a work queue."""
tags: Optional[list[str]] = Field(
default=None,
description="Only include flow runs with these tags in the work queue.",
)
deployment_ids: Optional[list[UUID]] = Field(
default=None,
description="Only include flow runs from these deployments in the work queue.",
)
class WorkQueue(ObjectBaseModel):
"""An ORM representation of a work queue"""
name: Name = Field(default=..., description="The name of the work queue.")
description: Optional[str] = Field(
default="", description="An optional description for the work queue."
)
is_paused: bool = Field(
default=False, description="Whether or not the work queue is paused."
)
concurrency_limit: Optional[NonNegativeInteger] = Field(
default=None, description="An optional concurrency limit for the work queue."
)
priority: PositiveInteger = Field(
default=1,
description=(
"The queue's priority. Lower values are higher priority (1 is the highest)."
),
)
work_pool_name: Optional[str] = Field(default=None)
# Will be required after a future migration
work_pool_id: Optional[UUID] = Field(
description="The work pool with which the queue is associated."
)
filter: Optional[QueueFilter] = Field(
default=None,
description="DEPRECATED: Filter criteria for the work queue.",
deprecated=True,
)
last_polled: Optional[DateTime] = Field(
default=None, description="The last time an agent polled this queue for work."
)
status: Optional[WorkQueueStatus] = Field(
default=None, description="The queue status."
)
class WorkQueueHealthPolicy(PrefectBaseModel):
maximum_late_runs: Optional[int] = Field(
default=0,
description=(
"The maximum number of late runs in the work queue before it is deemed"
" unhealthy. Defaults to `0`."
),
)
maximum_seconds_since_last_polled: Optional[int] = Field(
default=60,
description=(
"The maximum number of time in seconds elapsed since work queue has been"
" polled before it is deemed unhealthy. Defaults to `60`."
),
)
def evaluate_health_status(
self, late_runs_count: int, last_polled: Optional[pendulum.DateTime] = None
) -> bool:
"""
Given empirical information about the state of the work queue, evaluate its health status.
Args:
            late_runs_count: the count of late runs for the work queue.
last_polled: the last time the work queue was polled, if available.
Returns:
bool: whether or not the work queue is healthy.
"""
healthy = True
if (
self.maximum_late_runs is not None
and late_runs_count > self.maximum_late_runs
):
healthy = False
if self.maximum_seconds_since_last_polled is not None:
if (
last_polled is None
or pendulum.now("UTC").diff(last_polled).in_seconds()
> self.maximum_seconds_since_last_polled
):
healthy = False
return healthy
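# Illustrative usage sketch (added for exposition; not part of the original
# module): a work queue is deemed healthy only when both the late-run count
# and the time since the last poll are within the configured bounds.
def _example_work_queue_health_check():
    policy = WorkQueueHealthPolicy(
        maximum_late_runs=0, maximum_seconds_since_last_polled=60
    )
    healthy = policy.evaluate_health_status(
        late_runs_count=0, last_polled=pendulum.now("UTC")
    )  # -> True
    unhealthy = policy.evaluate_health_status(late_runs_count=3)  # -> False
    return healthy, unhealthy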
class WorkQueueStatusDetail(PrefectBaseModel):
healthy: bool = Field(..., description="Whether or not the work queue is healthy.")
late_runs_count: int = Field(
default=0, description="The number of late flow runs in the work queue."
)
last_polled: Optional[DateTime] = Field(
default=None, description="The last time an agent polled this queue for work."
)
health_check_policy: WorkQueueHealthPolicy = Field(
...,
description=(
"The policy used to determine whether or not the work queue is healthy."
),
)
class FlowRunNotificationPolicy(ObjectBaseModel):
"""An ORM representation of a flow run notification."""
is_active: bool = Field(
default=True, description="Whether the policy is currently active"
)
state_names: list[str] = Field(
default=..., description="The flow run states that trigger notifications"
)
tags: list[str] = Field(
default=...,
description="The flow run tags that trigger notifications (set [] to disable)",
)
block_document_id: UUID = Field(
default=..., description="The block document ID used for sending notifications"
)
message_template: Optional[str] = Field(
default=None,
description=(
"A templatable notification message. Use {braces} to add variables."
" Valid variables include:"
f" {listrepr(sorted(FLOW_RUN_NOTIFICATION_TEMPLATE_KWARGS), sep=', ')}"
),
examples=[
"Flow run {flow_run_name} with id {flow_run_id} entered state"
" {flow_run_state_name}."
],
)
@field_validator("message_template")
@classmethod
def validate_message_template_variables(cls, v: Optional[str]) -> Optional[str]:
return validate_message_template_variables(v)
class Agent(ObjectBaseModel):
"""An ORM representation of an agent"""
name: str = Field(
default_factory=lambda: generate_slug(2),
description=(
"The name of the agent. If a name is not provided, it will be"
" auto-generated."
),
)
work_queue_id: UUID = Field(
default=..., description="The work queue with which the agent is associated."
)
last_activity_time: Optional[DateTime] = Field(
default=None, description="The last time this agent polled for work."
)
class WorkPool(ObjectBaseModel):
"""An ORM representation of a work pool"""
name: Name = Field(
description="The name of the work pool.",
)
description: Optional[str] = Field(
default=None, description="A description of the work pool."
)
type: str = Field(description="The work pool type.")
base_job_template: dict[str, Any] = Field(
default_factory=dict, description="The work pool's base job template."
)
is_paused: bool = Field(
default=False,
description="Pausing the work pool stops the delivery of all work.",
)
concurrency_limit: Optional[NonNegativeInteger] = Field(
default=None, description="A concurrency limit for the work pool."
)
status: Optional[WorkPoolStatus] = Field(
default=None, description="The current status of the work pool."
)
# this required field has a default of None so that the custom validator
# below will be called and produce a more helpful error message. Because
# the field metadata is attached via an annotation, the default is hidden
# from type checkers.
default_queue_id: Annotated[
UUID, Field(default=None, description="The id of the pool's default queue.")
]
@property
def is_push_pool(self) -> bool:
return self.type.endswith(":push")
@property
def is_managed_pool(self) -> bool:
return self.type.endswith(":managed")
@field_validator("default_queue_id")
@classmethod
def helpful_error_for_missing_default_queue_id(cls, v: Optional[UUID]) -> UUID:
return validate_default_queue_id_not_none(v)
class Worker(ObjectBaseModel):
"""An ORM representation of a worker"""
name: str = Field(description="The name of the worker.")
work_pool_id: UUID = Field(
description="The work pool with which the queue is associated."
)
last_heartbeat_time: Optional[datetime.datetime] = Field(
default=None, description="The last time the worker process sent a heartbeat."
)
heartbeat_interval_seconds: Optional[int] = Field(
default=None,
description=(
"The number of seconds to expect between heartbeats sent by the worker."
),
)
status: WorkerStatus = Field(
WorkerStatus.OFFLINE,
description="Current status of the worker.",
)
Flow.model_rebuild()
# FlowRun.model_rebuild()
class Artifact(ObjectBaseModel):
key: Optional[str] = Field(
default=None, description="An optional unique reference key for this artifact."
)
type: Optional[str] = Field(
default=None,
description=(
"An identifier that describes the shape of the data field. e.g. 'result',"
" 'table', 'markdown'"
),
)
description: Optional[str] = Field(
default=None, description="A markdown-enabled description of the artifact."
)
# data will eventually be typed as `Optional[Union[Result, Any]]`
data: Optional[Union[dict[str, Any], Any]] = Field(
default=None,
description=(
"Data associated with the artifact, e.g. a result.; structure depends on"
" the artifact type."
),
)
metadata_: Optional[dict[str, str]] = Field(
default=None,
description=(
"User-defined artifact metadata. Content must be string key and value"
" pairs."
),
)
flow_run_id: Optional[UUID] = Field(
default=None, description="The flow run associated with the artifact."
)
task_run_id: Optional[UUID] = Field(
default=None, description="The task run associated with the artifact."
)
@field_validator("metadata_")
@classmethod
def validate_metadata_length(
cls, v: Optional[dict[str, str]]
) -> Optional[dict[str, str]]:
return validate_max_metadata_length(v)
class ArtifactCollection(ObjectBaseModel):
key: str = Field(description="An optional unique reference key for this artifact.")
latest_id: UUID = Field(
description="The latest artifact ID associated with the key."
)
type: Optional[str] = Field(
default=None,
description=(
"An identifier that describes the shape of the data field. e.g. 'result',"
" 'table', 'markdown'"
),
)
description: Optional[str] = Field(
default=None, description="A markdown-enabled description of the artifact."
)
data: Optional[Union[dict[str, Any], Any]] = Field(
default=None,
description=(
"Data associated with the artifact, e.g. a result.; structure depends on"
" the artifact type."
),
)
metadata_: Optional[dict[str, str]] = Field(
default=None,
description=(
"User-defined artifact metadata. Content must be string key and value"
" pairs."
),
)
flow_run_id: Optional[UUID] = Field(
default=None, description="The flow run associated with the artifact."
)
task_run_id: Optional[UUID] = Field(
default=None, description="The task run associated with the artifact."
)
class Variable(ObjectBaseModel):
name: str = Field(
default=...,
description="The name of the variable",
examples=["my_variable"],
max_length=MAX_VARIABLE_NAME_LENGTH,
)
value: StrictVariableValue = Field(
default=...,
description="The value of the variable",
examples=["my_value"],
)
tags: list[str] = Field(
default_factory=list,
description="A list of variable tags",
examples=[["tag-1", "tag-2"]],
)
class FlowRunInput(ObjectBaseModel):
flow_run_id: UUID = Field(description="The flow run ID associated with the input.")
key: str = Field(description="The key of the input.")
value: str = Field(description="The value of the input.")
sender: Optional[str] = Field(default=None, description="The sender of the input.")
@property
def decoded_value(self) -> Any:
"""
Decode the value of the input.
Returns:
Any: the decoded value
"""
return orjson.loads(self.value)
@field_validator("key", check_fields=False)
@classmethod
def validate_name_characters(cls, v: str) -> str:
raise_on_name_alphanumeric_dashes_only(v)
return v
class GlobalConcurrencyLimit(ObjectBaseModel):
"""An ORM representation of a global concurrency limit"""
name: str = Field(description="The name of the global concurrency limit.")
limit: int = Field(
description=(
"The maximum number of slots that can be occupied on this concurrency"
" limit."
)
)
active: Optional[bool] = Field(
default=True,
description="Whether or not the concurrency limit is in an active state.",
)
active_slots: Optional[int] = Field(
default=0,
description="Number of tasks currently using a concurrency slot.",
)
slot_decay_per_second: Optional[float] = Field(
default=0.0,
description=(
"Controls the rate at which slots are released when the concurrency limit"
" is used as a rate limit."
),
)
class CsrfToken(ObjectBaseModel):
token: str = Field(
default=...,
description="The CSRF token",
)
client: str = Field(
default=..., description="The client id associated with the CSRF token"
)
expiration: datetime.datetime = Field(
default=..., description="The expiration time of the CSRF token"
)
__getattr__: Callable[[str], Any] = getattr_migration(__name__)
class Integration(PrefectBaseModel):
"""A representation of an installed Prefect integration."""
name: str = Field(description="The name of the Prefect integration.")
version: str = Field(description="The version of the Prefect integration.")
class WorkerMetadata(PrefectBaseModel):
"""
Worker metadata.
We depend on the structure of `integrations`, but otherwise, worker classes
should support flexible metadata.
"""
integrations: list[Integration] = Field(
default=..., description="Prefect integrations installed in the worker."
)
model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow")
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@client@[email protected]@.PATH_END.py
|
{
"filename": "_util.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/_lib/_util.py",
"type": "Python"
}
|
from __future__ import division, print_function, absolute_import
import functools
import operator
import sys
import warnings
import numbers
from collections import namedtuple
from multiprocessing import Pool
import inspect
import numpy as np
def _broadcast_arrays(a, b):
"""
Same as np.broadcast_arrays(a, b) but old writeability rules.
Numpy >= 1.17.0 transitions broadcast_arrays to return
read-only arrays. Set writeability explicitly to avoid warnings.
Retain the old writeability rules, as our Cython code assumes
the old behavior.
"""
# backport based on gh-10379
x, y = np.broadcast_arrays(a, b)
x.flags.writeable = a.flags.writeable
y.flags.writeable = b.flags.writeable
return x, y
def _valarray(shape, value=np.nan, typecode=None):
"""Return an array of all value.
"""
out = np.ones(shape, dtype=bool) * value
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, np.ndarray):
out = np.asarray(out)
return out
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
"""
np.where(cond, x, fillvalue) always evaluates x even where cond is False.
This one only evaluates f(arr1[cond], arr2[cond], ...).
For example,
>>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
>>> def f(a, b):
    ...     return a*b
>>> _lazywhere(a > 2, (a, b), f, np.nan)
array([ nan, nan, 21., 32.])
Notice it assumes that all `arrays` are of the same shape, or can be
broadcasted together.
"""
if fillvalue is None:
if f2 is None:
raise ValueError("One of (fillvalue, f2) must be given.")
else:
fillvalue = np.nan
else:
if f2 is not None:
raise ValueError("Only one of (fillvalue, f2) can be given.")
arrays = np.broadcast_arrays(*arrays)
temp = tuple(np.extract(cond, arr) for arr in arrays)
tcode = np.mintypecode([a.dtype.char for a in arrays])
out = _valarray(np.shape(arrays[0]), value=fillvalue, typecode=tcode)
np.place(out, cond, f(*temp))
if f2 is not None:
temp = tuple(np.extract(~cond, arr) for arr in arrays)
np.place(out, ~cond, f2(*temp))
return out
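# Illustrative usage sketch (added for exposition; not part of the original
# scipy source). Mirrors the docstring above: `f` is evaluated only where the
# condition holds and the remaining positions are filled with `fillvalue`.
def _example_lazywhere_usage():
    a = np.array([1, 2, 3, 4])
    b = np.array([5, 6, 7, 8])
    # -> array([nan, nan, 21., 32.])
    return _lazywhere(a > 2, (a, b), lambda x, y: x * y, np.nan)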
def _lazyselect(condlist, choicelist, arrays, default=0):
"""
Mimic `np.select(condlist, choicelist)`.
Notice it assumes that all `arrays` are of the same shape, or can be
broadcasted together.
All functions in `choicelist` must accept array arguments in the order
given in `arrays` and must return an array of the same shape as broadcasted
`arrays`.
Examples
--------
>>> x = np.arange(6)
    >>> np.select([x < 3, x > 3], [x**2, x**3], default=0)
array([ 0, 1, 4, 0, 64, 125])
>>> _lazyselect([x < 3, x > 3], [lambda x: x**2, lambda x: x**3], (x,))
array([ 0., 1., 4., 0., 64., 125.])
>>> a = -np.ones_like(x)
>>> _lazyselect([x < 3, x > 3],
... [lambda x, a: x**2, lambda x, a: a * x**3],
... (x, a), default=np.nan)
array([ 0., 1., 4., nan, -64., -125.])
"""
arrays = np.broadcast_arrays(*arrays)
tcode = np.mintypecode([a.dtype.char for a in arrays])
out = _valarray(np.shape(arrays[0]), value=default, typecode=tcode)
for index in range(len(condlist)):
func, cond = choicelist[index], condlist[index]
        # NOTE: skip choices whose condition never holds; `cond is False` is an
        # identity comparison and never reflects the array contents.
        if not np.any(cond):
continue
cond, _ = np.broadcast_arrays(cond, arrays[0])
temp = tuple(np.extract(cond, arr) for arr in arrays)
np.place(out, cond, func(*temp))
return out
def _aligned_zeros(shape, dtype=float, order="C", align=None):
"""Allocate a new ndarray with aligned memory.
Primary use case for this currently is working around a f2py issue
in Numpy 1.9.1, where dtype.alignment is such that np.zeros() does
not necessarily create arrays aligned up to it.
"""
dtype = np.dtype(dtype)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + align + 1, np.uint8)
offset = buf.__array_interface__['data'][0] % align
if offset != 0:
offset = align - offset
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
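# Illustrative usage sketch (added for exposition; not part of the original
# scipy source): request a 64-byte aligned buffer and verify the alignment
# through the array interface.
def _example_aligned_zeros_usage():
    buf = _aligned_zeros((4, 4), dtype=np.float64, align=64)
    assert buf.__array_interface__['data'][0] % 64 == 0
    return buf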
def _prune_array(array):
"""Return an array equivalent to the input array. If the input
array is a view of a much larger array, copy its contents to a
newly allocated array. Otherwise, return the input unchanged.
"""
if array.base is not None and array.size < array.base.size // 2:
return array.copy()
return array
class DeprecatedImport(object):
"""
Deprecated import, with redirection + warning.
Examples
--------
Suppose you previously had in some module::
from foo import spam
If this has to be deprecated, do::
spam = DeprecatedImport("foo.spam", "baz")
to redirect users to use "baz" module instead.
"""
def __init__(self, old_module_name, new_module_name):
self._old_name = old_module_name
self._new_name = new_module_name
__import__(self._new_name)
self._mod = sys.modules[self._new_name]
def __dir__(self):
return dir(self._mod)
def __getattr__(self, name):
warnings.warn("Module %s is deprecated, use %s instead"
% (self._old_name, self._new_name),
DeprecationWarning)
return getattr(self._mod, name)
# copy-pasted from scikit-learn utils/validation.py
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None (or np.random), return the RandomState singleton used
by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
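# Illustrative usage sketch (added for exposition; not part of the original
# scipy source): the helper normalises the three accepted seed forms.
def _example_check_random_state_usage():
    rng_global = check_random_state(None)                            # global singleton
    rng_seeded = check_random_state(42)                              # new RandomState(42)
    rng_passthrough = check_random_state(np.random.RandomState(0))   # returned unchanged
    return rng_global, rng_seeded, rng_passthrough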
def _asarray_validated(a, check_finite=True,
sparse_ok=False, objects_ok=False, mask_ok=False,
as_inexact=False):
"""
Helper function for scipy argument validation.
Many scipy linear algebra functions do support arbitrary array-like
input arguments. Examples of commonly unsupported inputs include
matrices containing inf/nan, sparse matrix representations, and
matrices with complicated elements.
Parameters
----------
a : array_like
The array-like input.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
sparse_ok : bool, optional
True if scipy sparse matrices are allowed.
objects_ok : bool, optional
        True if arrays with dtype('O') are allowed.
mask_ok : bool, optional
True if masked arrays are allowed.
as_inexact : bool, optional
True to convert the input array to a np.inexact dtype.
Returns
-------
ret : ndarray
The converted validated array.
"""
if not sparse_ok:
import scipy.sparse
if scipy.sparse.issparse(a):
msg = ('Sparse matrices are not supported by this function. '
'Perhaps one of the scipy.sparse.linalg functions '
'would work instead.')
raise ValueError(msg)
if not mask_ok:
if np.ma.isMaskedArray(a):
raise ValueError('masked arrays are not supported')
toarray = np.asarray_chkfinite if check_finite else np.asarray
a = toarray(a)
if not objects_ok:
if a.dtype is np.dtype('O'):
raise ValueError('object arrays are not supported')
if as_inexact:
if not np.issubdtype(a.dtype, np.inexact):
a = toarray(a, dtype=np.float_)
return a
# Add a replacement for inspect.getargspec() which is deprecated in python 3.5
# The version below is borrowed from Django,
# https://github.com/django/django/pull/4846
# Note an inconsistency between inspect.getargspec(func) and
# inspect.signature(func). If `func` is a bound method, the latter does *not*
# list `self` as a first argument, while the former *does*.
# Hence cook up a common ground replacement: `getargspec_no_self` which
# mimics `inspect.getargspec` but does not list `self`.
#
# This way, the caller code does not need to know whether it uses a legacy
# .getargspec or bright and shiny .signature.
try:
# is it python 3.3 or higher?
inspect.signature
# Apparently, yes. Wrap inspect.signature
ArgSpec = namedtuple('ArgSpec', ['args', 'varargs', 'keywords', 'defaults'])
def getargspec_no_self(func):
"""inspect.getargspec replacement using inspect.signature.
inspect.getargspec is deprecated in python 3. This is a replacement
based on the (new in python 3.3) `inspect.signature`.
Parameters
----------
func : callable
A callable to inspect
Returns
-------
argspec : ArgSpec(args, varargs, varkw, defaults)
This is similar to the result of inspect.getargspec(func) under
python 2.x.
NOTE: if the first argument of `func` is self, it is *not*, I repeat
*not* included in argspec.args.
This is done for consistency between inspect.getargspec() under
python 2.x, and inspect.signature() under python 3.x.
"""
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = [
p.default for p in sig.parameters.values()
if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
p.default is not p.empty)
] or None
return ArgSpec(args, varargs, varkw, defaults)
except AttributeError:
# python 2.x
def getargspec_no_self(func):
"""inspect.getargspec replacement for compatibility with python 3.x.
inspect.getargspec is deprecated in python 3. This wraps it, and
*removes* `self` from the argument list of `func`, if present.
This is done for forward compatibility with python 3.
Parameters
----------
func : callable
A callable to inspect
Returns
-------
argspec : ArgSpec(args, varargs, varkw, defaults)
This is similar to the result of inspect.getargspec(func) under
python 2.x.
NOTE: if the first argument of `func` is self, it is *not*, I repeat
*not* included in argspec.args.
This is done for consistency between inspect.getargspec() under
python 2.x, and inspect.signature() under python 3.x.
"""
argspec = inspect.getargspec(func)
if argspec.args[0] == 'self':
argspec.args.pop(0)
return argspec
class MapWrapper(object):
"""
Parallelisation wrapper for working with map-like callables, such as
`multiprocessing.Pool.map`.
Parameters
----------
pool : int or map-like callable
If `pool` is an integer, then it specifies the number of threads to
use for parallelization. If ``int(pool) == 1``, then no parallel
processing is used and the map builtin is used.
If ``pool == -1``, then the pool will utilise all available CPUs.
If `pool` is a map-like callable that follows the same
calling sequence as the built-in map function, then this callable is
used for parallelisation.
"""
def __init__(self, pool=1):
self.pool = None
self._mapfunc = map
self._own_pool = False
if callable(pool):
self.pool = pool
self._mapfunc = self.pool
else:
# user supplies a number
if int(pool) == -1:
# use as many processors as possible
self.pool = Pool()
self._mapfunc = self.pool.map
self._own_pool = True
elif int(pool) == 1:
pass
elif int(pool) > 1:
# use the number of processors requested
self.pool = Pool(processes=int(pool))
self._mapfunc = self.pool.map
self._own_pool = True
else:
raise RuntimeError("Number of workers specified must be -1,"
" an int >= 1, or an object with a 'map' method")
def __enter__(self):
return self
def __del__(self):
self.close()
self.terminate()
def terminate(self):
if self._own_pool:
self.pool.terminate()
def join(self):
if self._own_pool:
self.pool.join()
def close(self):
if self._own_pool:
self.pool.close()
def __exit__(self, exc_type, exc_value, traceback):
if self._own_pool:
self.pool.close()
self.pool.terminate()
def __call__(self, func, iterable):
# only accept one iterable because that's all Pool.map accepts
try:
return self._mapfunc(func, iterable)
except TypeError:
# wrong number of arguments
raise TypeError("The map-like callable must be of the"
" form f(func, iterable)")
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@_lib@[email protected]_END.py
|
{
"filename": "sel_utils.py",
"repo_name": "arviz-devs/arviz",
"repo_path": "arviz_extracted/arviz-main/arviz/sel_utils.py",
"type": "Python"
}
|
"""Utilities for selecting and iterating on xarray objects."""
from itertools import product, tee
import numpy as np
import xarray as xr
from .labels import BaseLabeller
__all__ = ["xarray_sel_iter", "xarray_var_iter", "xarray_to_ndarray"]
def selection_to_string(selection):
"""Convert dictionary of coordinates to a string for labels.
Parameters
----------
selection : dict[Any] -> Any
Returns
-------
str
key1: value1, key2: value2, ...
"""
return ", ".join([f"{v}" for _, v in selection.items()])
def make_label(var_name, selection, position="below"):
"""Consistent labelling for plots.
Parameters
----------
var_name : str
Name of the variable
selection : dict[Any] -> Any
Coordinates of the variable
position : str
Whether to position the coordinates' label "below" (default) or "beside"
the name of the variable
Returns
-------
label
A text representation of the label
"""
if selection:
sel = selection_to_string(selection)
if position == "below":
base = "{}\n{}"
elif position == "beside":
base = "{}[{}]"
else:
sel = ""
base = "{}{}"
return base.format(var_name, sel)
def _dims(data, var_name, skip_dims):
return [dim for dim in data[var_name].dims if dim not in skip_dims]
def _zip_dims(new_dims, vals):
return [dict(zip(new_dims, prod)) for prod in product(*vals)]
def xarray_sel_iter(data, var_names=None, combined=False, skip_dims=None, reverse_selections=False):
"""Convert xarray data to an iterator over variable names and selections.
Iterates over each var_name and all of its coordinates, returning the variable
names and selections that allow properly obtain the data from ``data`` as desired.
Parameters
----------
data : xarray.Dataset
Posterior data in an xarray
var_names : iterator of strings (optional)
Should be a subset of data.data_vars. Defaults to all of them.
combined : bool
Whether to combine chains or leave them separate
skip_dims : set
dimensions to not iterate over
reverse_selections : bool
Whether to reverse selections before iterating.
Returns
-------
Iterator of (var_name: str, selection: dict(str, any))
        The string is the variable name, and the dictionary maps coordinate names to values.
To get the values of the variable at these coordinates, do
``data[var_name].sel(**selection)``.
"""
if skip_dims is None:
skip_dims = set()
if combined:
skip_dims = skip_dims.union({"chain", "draw"})
else:
skip_dims.add("draw")
if var_names is None:
if isinstance(data, xr.Dataset):
var_names = list(data.data_vars)
elif isinstance(data, xr.DataArray):
var_names = [data.name]
data = {data.name: data}
for var_name in var_names:
if var_name in data:
new_dims = _dims(data, var_name, skip_dims)
vals = [list(dict.fromkeys(data[var_name][dim].values)) for dim in new_dims]
dims = _zip_dims(new_dims, vals)
idims = _zip_dims(new_dims, [range(len(v)) for v in vals])
if reverse_selections:
dims = reversed(dims)
idims = reversed(idims)
for selection, iselection in zip(dims, idims):
yield var_name, selection, iselection
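# Illustrative usage sketch (added for exposition; not part of the original
# module). `posterior` is assumed to be an xarray.Dataset with "chain" and
# "draw" dimensions (e.g. ``idata.posterior`` from an InferenceData object).
def _example_xarray_sel_iter_usage(posterior):
    # With combined=True each yielded selection pins every non-chain/draw
    # coordinate, so the selected values vary only across chains and draws.
    return [
        (var_name, posterior[var_name].sel(**selection))
        for var_name, selection, _ in xarray_sel_iter(posterior, combined=True)
    ]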
def xarray_var_iter(
data, var_names=None, combined=False, skip_dims=None, reverse_selections=False, dim_order=None
):
"""Convert xarray data to an iterator over vectors.
Iterates over each var_name and all of its coordinates, returning the 1d
data.
Parameters
----------
data : xarray.Dataset
Posterior data in an xarray
var_names : iterator of strings (optional)
Should be a subset of data.data_vars. Defaults to all of them.
combined : bool
Whether to combine chains or leave them separate
skip_dims : set
dimensions to not iterate over
reverse_selections : bool
Whether to reverse selections before iterating.
dim_order: list
Order for the first dimensions. Skips dimensions not found in the variable.
Returns
-------
Iterator of (str, dict(str, any), np.array)
        The string is the variable name, the dictionary maps coordinate names to values,
        and the array contains the values of the variable at those coordinates.
"""
data_to_sel = data
if var_names is None and isinstance(data, xr.DataArray):
data_to_sel = {data.name: data}
if isinstance(dim_order, str):
dim_order = [dim_order]
for var_name, selection, iselection in xarray_sel_iter(
data,
var_names=var_names,
combined=combined,
skip_dims=skip_dims,
reverse_selections=reverse_selections,
):
selected_data = data_to_sel[var_name].sel(**selection)
if dim_order is not None:
dim_order_selected = [dim for dim in dim_order if dim in selected_data.dims]
if dim_order_selected:
selected_data = selected_data.transpose(*dim_order_selected, ...)
yield var_name, selection, iselection, selected_data.values
def xarray_to_ndarray(data, *, var_names=None, combined=True, label_fun=None):
"""Take xarray data and unpacks into variables and data into list and numpy array respectively.
Assumes that chain and draw are in coordinates
Parameters
----------
    data: xarray.Dataset
Data in an xarray from an InferenceData object. Examples include posterior or sample_stats
var_names: iter
Should be a subset of data.data_vars not including chain and draws. Defaults to all of them
combined: bool
Whether to combine chain into one array
Returns
-------
var_names: list
List of variable names
data: np.array
Data values
"""
if label_fun is None:
label_fun = BaseLabeller().make_label_vert
data_to_sel = data
if var_names is None and isinstance(data, xr.DataArray):
data_to_sel = {data.name: data}
iterator1, iterator2 = tee(xarray_sel_iter(data, var_names=var_names, combined=combined))
vars_and_sel = list(iterator1)
unpacked_var_names = [
label_fun(var_name, selection, isel) for var_name, selection, isel in vars_and_sel
]
# Merge chains and variables, check dtype to be compatible with divergences data
data0 = data_to_sel[vars_and_sel[0][0]].sel(**vars_and_sel[0][1])
unpacked_data = np.empty((len(unpacked_var_names), data0.size), dtype=data0.dtype)
for idx, (var_name, selection, _) in enumerate(iterator2):
unpacked_data[idx] = data_to_sel[var_name].sel(**selection).values.flatten()
return unpacked_var_names, unpacked_data
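# Hypothetical usage sketch (not part of the original module), guarded so it only
# runs when this file is executed directly. It builds a tiny fake posterior (the
# "theta"/"school" names are illustrative assumptions) and shows how
# xarray_sel_iter and xarray_to_ndarray walk over it.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    _demo = xr.Dataset(
        {"theta": (("chain", "draw", "school"), np.zeros((2, 4, 3)))},
        coords={"chain": [0, 1], "draw": list(range(4)), "school": ["a", "b", "c"]},
    )
    for _var, _sel, _isel in xarray_sel_iter(_demo, combined=True):
        print(_var, _sel, _isel)
    _names, _values = xarray_to_ndarray(_demo, combined=True)
    print(_names, _values.shape)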
|
arviz-devsREPO_NAMEarvizPATH_START.@arviz_extracted@arviz-main@arviz@[email protected]_END.py
|
{
"filename": "test_gp_opt.py",
"repo_name": "scikit-optimize/scikit-optimize",
"repo_path": "scikit-optimize_extracted/scikit-optimize-master/skopt/tests/test_gp_opt.py",
"type": "Python"
}
|
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from skopt import gp_minimize
from skopt.benchmarks import bench1
from skopt.benchmarks import bench2
from skopt.benchmarks import bench3
from skopt.benchmarks import bench4
from skopt.benchmarks import branin
from skopt.space.space import Real, Categorical, Space
from skopt.utils import cook_estimator
def check_minimize(func, y_opt, bounds, acq_optimizer, acq_func,
margin, n_calls, n_initial_points=10, init_gen="random"):
r = gp_minimize(func, bounds, acq_optimizer=acq_optimizer,
acq_func=acq_func, n_initial_points=n_initial_points,
n_calls=n_calls, random_state=1,
initial_point_generator=init_gen,
noise=1e-10)
assert r.fun < y_opt + margin
SEARCH = ["sampling", "lbfgs"]
ACQUISITION = ["LCB", "EI"]
INITGEN = ["random", "lhs", "halton", "hammersly", "sobol"]
@pytest.mark.slow_test
@pytest.mark.parametrize("search", SEARCH)
@pytest.mark.parametrize("acq", ACQUISITION)
def test_gp_minimize_bench1(search, acq):
check_minimize(bench1, 0.,
[(-2.0, 2.0)], search, acq, 0.05, 20)
@pytest.mark.slow_test
@pytest.mark.parametrize("search", ["sampling"])
@pytest.mark.parametrize("acq", ["LCB"])
@pytest.mark.parametrize("initgen", INITGEN)
def test_gp_minimize_bench1_initgen(search, acq, initgen):
check_minimize(bench1, 0.,
[(-2.0, 2.0)], search, acq, 0.05, 20, init_gen=initgen)
@pytest.mark.slow_test
@pytest.mark.parametrize("search", SEARCH)
@pytest.mark.parametrize("acq", ACQUISITION)
def test_gp_minimize_bench2(search, acq):
check_minimize(bench2, -5,
[(-6.0, 6.0)], search, acq, 0.05, 20)
@pytest.mark.slow_test
@pytest.mark.parametrize("search", SEARCH)
@pytest.mark.parametrize("acq", ACQUISITION)
def test_gp_minimize_bench3(search, acq):
check_minimize(bench3, -0.9,
[(-2.0, 2.0)], search, acq, 0.05, 20)
@pytest.mark.fast_test
@pytest.mark.parametrize("search", ["sampling"])
@pytest.mark.parametrize("acq", ACQUISITION)
def test_gp_minimize_bench4(search, acq):
# this particular random_state picks "2" twice so we can make an extra
# call to the objective without repeating options
check_minimize(bench4, 0,
[("-2", "-1", "0", "1", "2")], search, acq, 1.05, 20)
@pytest.mark.fast_test
def test_n_jobs():
r_single = gp_minimize(bench3, [(-2.0, 2.0)], acq_optimizer="lbfgs",
acq_func="EI", n_calls=4, n_initial_points=2,
random_state=1, noise=1e-10)
r_double = gp_minimize(bench3, [(-2.0, 2.0)], acq_optimizer="lbfgs",
acq_func="EI", n_calls=4, n_initial_points=2,
random_state=1, noise=1e-10, n_jobs=2)
assert_array_equal(r_single.x_iters, r_double.x_iters)
@pytest.mark.fast_test
def test_gpr_default():
"""Smoke test that gp_minimize does not fail for default values."""
gp_minimize(branin, ((-5.0, 10.0), (0.0, 15.0)), n_initial_points=2,
n_calls=2)
@pytest.mark.fast_test
def test_use_given_estimator():
""" Test that gp_minimize does not use default estimator if one is passed
in explicitly. """
domain = [(1.0, 2.0), (3.0, 4.0)]
noise_correct = 1e+5
noise_fake = 1e-10
estimator = cook_estimator("GP", domain, noise=noise_correct)
res = gp_minimize(branin, domain, n_calls=4, n_initial_points=2,
base_estimator=estimator, noise=noise_fake)
assert res['models'][-1].noise == noise_correct
@pytest.mark.fast_test
def test_use_given_estimator_with_max_model_size():
""" Test that gp_minimize does not use default estimator if one is passed
in explicitly. """
domain = [(1.0, 2.0), (3.0, 4.0)]
noise_correct = 1e+5
noise_fake = 1e-10
estimator = cook_estimator("GP", domain, noise=noise_correct)
res = gp_minimize(branin, domain, n_calls=4, n_initial_points=2,
base_estimator=estimator, noise=noise_fake,
model_queue_size=1)
assert len(res['models']) == 1
assert res['models'][-1].noise == noise_correct
@pytest.mark.fast_test
def test_categorical_integer():
def f(params):
return np.random.uniform()
dims = [[1]]
res = gp_minimize(f, dims, n_calls=2, n_initial_points=2,
random_state=1)
assert res.x_iters[0][0] == dims[0][0]
@pytest.mark.parametrize("initgen", INITGEN)
def test_mixed_categoricals(initgen):
space = Space([
Categorical(name="x", categories=["1", "2", "3"]),
Categorical(name="y", categories=[4, 5, 6]),
Real(name="z", low=1.0, high=5.0)
])
def objective(param_list):
x = param_list[0]
y = param_list[1]
z = param_list[2]
loss = int(x) + y * z
return loss
res = gp_minimize(objective, space, n_calls=20, random_state=1,
initial_point_generator=initgen)
assert res["x"] in [['1', 4, 1.0], ['2', 4, 1.0]]
@pytest.mark.parametrize("initgen", INITGEN)
def test_mixed_categoricals2(initgen):
space = Space([
Categorical(name="x", categories=["1", "2", "3"]),
Categorical(name="y", categories=[4, 5, 6])
])
def objective(param_list):
x = param_list[0]
y = param_list[1]
loss = int(x) + y
return loss
res = gp_minimize(objective, space, n_calls=12, random_state=1,
initial_point_generator=initgen)
assert res["x"] == ['1', 4]
|
scikit-optimizeREPO_NAMEscikit-optimizePATH_START.@scikit-optimize_extracted@scikit-optimize-master@skopt@tests@[email protected]_END.py
|
{
"filename": "getting_started.md",
"repo_name": "rbuehler/vasca",
"repo_path": "vasca_extracted/vasca-main/docs/getting_started.md",
"type": "Markdown"
}
|
# Getting started
## Installation
VASCA is tested to work with Python 3.10 and 3.11. Typically, you want to create a new
Python environment, e.g., with [pyenv-virtualenv](https://github.com/pyenv/pyenv-virtualenv)
or [mamba](https://github.com/mamba-org/mamba):
```bash
mamba create -n vasca python=3.11
mamba activate vasca
```
For standard usage, install VASCA from [PyPi](https://pypi.org/project/vasca/):
```bash
pip install vasca
```
Standard usage covers all pipeline functions for data from instruments that are already
implemented in VASCA. For more information consult the [list](user_guide/instruments.md#supported-instruments)
of supported instruments.
To extend VASCA to incorporate another instrument's data, install it directly
from the GitHub repository.
```bash
pip install -e git+https://github.com/rbuehler/vasca
```
This will ensure that all resources for testing and the jupyter examples are included in
the installation.
## Resource management
Management of the input observational data is handled in VASCA via the
[](#ResourceManager) class. Before first use, users need to edit environment variables
that specify the data storage locations in an `.env` file in VASCA's root directory.
:::{Tip}
The easiest way to set up the resource manager is to duplicate the `.env_template` and
rename it. Then edit your paths to the location of cloud-synced or local directories.
:::
The system is very flexible and can be tailored to your needs. New environment variables
specifying storage locations can be added in the [`resource_envs.yml`](https://github.com/rbuehler/vasca/blob/main/vasca/resource_metadata/resource_envs.yml)
file. New data items that might be required to support additional instruments can be
added in the [`resource_catalog.yml`](https://github.com/rbuehler/vasca/blob/main/vasca/resource_metadata/resource_catalog.yml)
file.
## Running the pipeline and post-processing
To start the pipeline processing go into the `vasca` directory and start the command line
script:
```shell
vasca-pipe <path-to-config.yml>
```
See the [user guide](user_guide/configuration.md#configuration) for more info on how to
configure the pipeline in the yaml file.
We use [Jupyter Lab](https://github.com/jupyterlab/jupyterlab) for post-processing, with
functional examples provided in `vasca/examples`.
## Coding guidelines
We use the [PEP 8](https://realpython.com/python-pep8/) coding conventions. Before
contributing, please consider the use of automatic code formatting tools. We recommend [`ruff`](https://docs.astral.sh/ruff/).
It is a one-stop-shop that combines all style rules from tools like [`isort`](https://github.com/pycqa/isort),
[`flake8`](https://github.com/PyCQA/flake8), and [`black`](https://black.readthedocs.io/en/stable/#).
All ruff configurations can be found in the `pyproject.toml` file. We set [88 characters](https://black.readthedocs.io/en/stable/the_black_code_style/current_style.html?highlight=88%20#line-length)
as the default line width. The recommended Python version to use is 3.11. For docstrings,
we use the [numpy format](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html).
## Build the documentation
For documentation, we use [Sphinx](https://www.sphinx-doc.org/en/master/). To build it
locally, run the following command from VASCA's `root` directory:
```shell
sphinx-build docs docs/_build
```
To create Unified Modeling Language diagrams, install [pyreverse](https://pylint.pycqa.org/en/latest/pyreverse.html)
and [graphviz](https://graphviz.org/), then run:
```shell
pyreverse vasca -o png -d ./docs/
```
|
rbuehlerREPO_NAMEvascaPATH_START.@vasca_extracted@vasca-main@docs@[email protected]_END.py
|
{
"filename": "plot_diff.py",
"repo_name": "AFD-Illinois/iharm3d",
"repo_path": "iharm3d_extracted/iharm3d-master/script/analysis/plot_diff.py",
"type": "Python"
}
|
################################################################################
# #
# PLOT DIFFERENCES IN TWO FILES #
# #
################################################################################
from __future__ import print_function, division
import plot as bplt
import util
import hdf5_to_dict as io
import os,sys
import numpy as np
import matplotlib.pyplot as plt
USEARRSPACE=True
NLINES = 20
SIZE = 600
FIGX = 20
FIGY = 16
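# Usage sketch (added comment): run from the command line as, for example,
#   python plot_diff.py dump_0001.h5 dump_0002.h5 diff
# where the first two arguments are the dump files to compare and the third is
# the basename for the output images (here diff_xy.png and diff_xz.png).
# The example dump file names are illustrative.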
dump1file = sys.argv[1]
dump2file = sys.argv[2]
imname = sys.argv[3]
hdr, geom, dump1 = io.load_all(dump1file, derived_vars=False)
#Hopefully this fails for dumps that shouldn't be compared
dump2 = io.load_dump(dump2file, hdr, geom, derived_vars=False)
N1 = hdr['n1']; N2 = hdr['n2']; N3 = hdr['n3']
log_floor = -60
# TODO properly option log, rel, lim
def plot_diff_xy(ax, var, rel=False, lim=None):
if rel:
if lim is not None:
bplt.plot_xy(ax, geom, np.abs((dump1[var] - dump2[var])/dump1[var]), vmin=0, vmax=lim, label=var, cbar=False, arrayspace=USEARRSPACE)
else:
bplt.plot_xy(ax, geom, np.abs((dump1[var] - dump2[var])/dump1[var]), label=var, cbar=False, arrayspace=USEARRSPACE)
else:
if lim is not None:
bplt.plot_xy(ax, geom, np.log10(np.abs(dump1[var] - dump2[var])), vmin=log_floor, vmax=lim, label=var, cbar=False, arrayspace=USEARRSPACE)
else:
bplt.plot_xy(ax, geom, np.log10(np.abs(dump1[var] - dump2[var])), vmin=log_floor, vmax=0, label=var, cbar=False, arrayspace=USEARRSPACE)
def plot_diff_xz(ax, var, rel=False, lim=None):
if rel:
if lim is not None:
bplt.plot_xz(ax, geom, np.abs((dump1[var] - dump2[var])/dump1[var]), vmin=0, vmax=lim, label=var, cbar=False, arrayspace=USEARRSPACE)
else:
bplt.plot_xz(ax, geom, np.abs((dump1[var] - dump2[var])/dump1[var]), label=var, cbar=False, arrayspace=USEARRSPACE)
else:
if lim is not None:
bplt.plot_xz(ax, geom, np.log10(np.abs(dump1[var] - dump2[var])), vmin=log_floor, vmax=lim, label=var, cbar=False, arrayspace=USEARRSPACE)
else:
bplt.plot_xz(ax, geom, np.log10(np.abs(dump1[var] - dump2[var])), vmin=log_floor, vmax=0, label=var, cbar=False, arrayspace=USEARRSPACE)
# Plot the difference
nxplot = 4
nyplot = 3
vars = list(hdr['prim_names'])+['fail','divB']
fig = plt.figure(figsize=(FIGX, FIGY))
for i,name in enumerate(vars):
ax = plt.subplot(nyplot, nxplot, i+1)
plot_diff_xy(ax, name)
ax.set_xlabel('')
ax.set_ylabel('')
plt.tight_layout()
plt.savefig(imname+"_xy.png", dpi=100)
plt.close(fig)
fig = plt.figure(figsize=(FIGX, FIGY))
for i,name in enumerate(vars):
ax = plt.subplot(nyplot, nxplot, i+1)
plot_diff_xz(ax, name)
ax.set_xlabel('')
ax.set_ylabel('')
plt.tight_layout()
plt.savefig(imname+"_xz.png", dpi=100)
plt.close(fig)
|
AFD-IllinoisREPO_NAMEiharm3dPATH_START.@iharm3d_extracted@iharm3d-master@script@analysis@[email protected]_END.py
|
{
"filename": "SettledDisk.py",
"repo_name": "psheehan/pdspy",
"repo_path": "pdspy_extracted/pdspy-master/pdspy/modeling/SettledDisk.py",
"type": "Python"
}
|
import numpy
import h5py
from scipy.integrate import trapz
from ..constants.physics import G, m_p
from ..constants.astronomy import AU, M_sun
from ..constants.math import pi
from ..dust import Dust
from ..gas import Gas
class SettledDisk:
def __init__(self, mass=1.0e-3, rmin=0.1, rmax=300, plrho=2.37, h0=0.1, \
plh=58./45., t0=None, plt=None, dust=None, gap_rin=[], gap_rout=[],\
gap_delta=[], tmid0=None, tatm0=None, zq0=None, pltgas=None, \
delta=None, aturb=None, gaussian_gaps=False, amin=0.05, amax=1000.,\
pla=3.5, alpha_settle=1.0e-3, gamma_taper=None):
self.mass = mass
self.rmin = rmin
self.rmax = rmax
self.plrho = plrho
self.h0 = h0
self.plh = plh
self.gamma_taper = gamma_taper
self.t0 = t0
self.plt = plt
self.gap_rin = gap_rin
self.gap_rout = gap_rout
self.gap_delta = gap_delta
self.gaussian_gaps = gaussian_gaps
# Dust parameters.
self.amin = amin
self.amax = amax
self.pla = pla
self.alpha_settle = alpha_settle
if (dust != None):
self.dust = dust
# The gas parameter lists.
self.gas = []
self.abundance = []
self.tmid0 = tmid0
self.tatm0 = tatm0
self.zq0 = zq0
self.pltgas = pltgas
self.delta = delta
self.aturb = aturb
def add_gas(self, gas, abundance):
self.gas.append(gas)
self.abundance.append(abundance)
def density(self, r, theta, phi, na=100, normalize=True):
##### Set up the coordinates
rt, tt, pp = numpy.meshgrid(r*AU, theta, phi,indexing='ij')
rr = rt*numpy.sin(tt)
zz = rt*numpy.cos(tt)
##### Calculate the fraction of the mass in each grain size bin.
aa = numpy.logspace(numpy.log10(self.amin), numpy.log10(self.amax),na+1)
f = aa**-self.pla
da = aa[1:] - aa[0:-1]
fa3 = f * aa**3
mass_frac = (fa3[1:] + fa3[0:-1]) / 2. * da / numpy.trapz(fa3, x=aa)
##### Get a list of the appropriate dust grain sizes.
a = (aa[1:] + aa[0:-1]) / 2.
##### Make the gas density model for a protoplanetary disk.
Sigma = self.surface_density(rr/AU, normalize=normalize)
h_g = self.scale_height(rr/AU)
        # Allocate one density slice per grain size bin (na bins, not a fixed 100).
        rho = numpy.zeros(Sigma.shape + (na,))
for i in range(na):
gamma0 = 2.
rho_mid = 100 * Sigma / (numpy.sqrt(2*numpy.pi)*h_g)
b = (1 + gamma0)**-0.5 * self.alpha_settle * rho_mid * h_g / \
(self.dust.rho * a[i] * 1.0e-4)
y = numpy.sqrt(b / (1. + b))
h = y * h_g
rho[:,:,:,i] = mass_frac[i] * Sigma / (numpy.sqrt(2*numpy.pi)*h) * \
numpy.exp(-0.5*(zz / h)**2)
rho[numpy.isnan(rho)] = 0.
return a, rho
def number_density(self, r, theta, phi, gas=0):
##### Set up the coordinates
rt, tt, pp = numpy.meshgrid(r*AU, theta, phi,indexing='ij')
rr = rt*numpy.sin(tt)
zz = rt*numpy.cos(tt)
# Get the surface density and scale height.
Sigma = self.surface_density(rr/AU)
h_g = self.scale_height(rr/AU)
# Now calculate the density.
rho = Sigma / (numpy.sqrt(2*numpy.pi)*h_g) * numpy.exp(-0.5*(zz/h_g)**2)
rho_gas = rho * 100
rho_gas_critical = (100. / 0.8) * 2.37*m_p
rho_gas[rho_gas < rho_gas_critical] = 1.0e-50
n_H2 = rho_gas * 0.8 / (2.37*m_p)
n = n_H2 * self.abundance[gas]
return n
def surface_density(self, r, normalize=True):
# Get the disk parameters.
rin = self.rmin * AU
rout = self.rmax * AU
mass = self.mass * M_sun
gamma = self.plrho - self.plh
# Set up the surface density.
Sigma0 = (2-gamma)*mass/(2*pi*(1*AU)**(gamma)) / \
(rout**(-gamma+2) - rin**(-gamma+2))
Sigma = Sigma0 * r**(-gamma)
Sigma[(r >= rout/AU) ^ (r <= rin/AU)] = 0e0
# In case of r == 0 (a singularity), get the value from slightly off 0.
dr = r[r > 0].min()
Sigma[r == 0] = Sigma0 * (0.7*dr)**(-gamma)
# Add gaps to the disk.
for i in range(len(self.gap_rin)):
if self.gaussian_gaps:
gap_r = (self.gap_rin[i] + self.gap_rout[i])/2
gap_w = self.gap_rout[i] - self.gap_rin[i]
Sigma /= 1 + 1./self.gap_delta[i] * numpy.exp(-4*numpy.log(2.)*\
(r - gap_r)**2 / gap_w**2)
else:
Sigma[(r >= self.gap_rin[i]) & \
(r <= self.gap_rout[i])] *= self.gap_delta[i]
##### Normalize the surface density correctly.
if normalize:
r_high = numpy.logspace(numpy.log10(self.rmin), \
numpy.log10(self.rmax), 1000)
Sigma_high = self.surface_density(r_high, normalize=False)
scale = mass / (2*numpy.pi*trapz(r_high*AU*Sigma_high, r_high*AU))
Sigma *= scale
return Sigma
def scale_height(self, r):
return self.h0 * AU * r**self.plh
def temperature(self, r, theta, phi):
##### Disk Parameters
rin = self.rmin * AU
rout = self.rmax * AU
t0 = self.t0
plt = self.plt
##### Set up the coordinates
rt, tt, pp = numpy.meshgrid(r*AU, theta, phi,indexing='ij')
rr = rt*numpy.sin(tt)
zz = rt*numpy.cos(tt)
##### Make the dust density model for a protoplanetary disk.
t = t0 * (rr / (1*AU))**(-plt)
t[(rr >= rout) ^ (rr <= rin)] = 0e0
t[t > 10000.] = 10000.
return t
def temperature_1d(self, r):
rin = self.rmin * AU
rout = self.rmax * AU
t0 = self.t0
plt = self.plt
T = t0 * r**(-plt)
T[(r >= rout/AU) ^ (r <= rin/AU)] = 0.0
dr = r[r > 0].min()
T[r == 0] = t0 * (0.7*dr)**(-plt)
return T
def gas_temperature(self, r, theta, phi):
##### Disk Parameters
rin = self.rmin * AU
rout = self.rmax * AU
pltgas = self.pltgas
tmid0 = self.tmid0
tatm0 = self.tatm0
zq0 = self.zq0
delta = self.delta
##### Set up the coordinates
rt, tt, pp = numpy.meshgrid(r*AU, theta, phi,indexing='ij')
rr = rt*numpy.sin(tt)
zz = rt*numpy.cos(tt)
##### Make the dust density model for a protoplanetary disk.
zq = zq0 * (rt / rin)**1.3
tmid = tmid0 * (rr / rin)**(-pltgas)
tatm = tatm0 * (rr / rin)**(-pltgas)
t = numpy.zeros(tatm.shape)
t[zz >= zq] = tatm[zz >= zq]
t[zz < zq] = tatm[zz < zq] + (tmid[zz < zq] - tatm[zz < zq]) * \
(numpy.cos(numpy.pi * zz[zz < zq] / (2*zq[zz < zq])))**2*delta
return t
def microturbulence(self, r, theta, phi):
##### Disk Parameters
rin = self.rmin * AU
rout = self.rmax * AU
t0 = self.t0
plt = self.plt
##### Set up the coordinates
rt, tt, pp = numpy.meshgrid(r*AU, theta, phi,indexing='ij')
rr = rt*numpy.sin(tt)
zz = rt*numpy.cos(tt)
##### Make the dust density model for a protoplanetary disk.
aturb = numpy.ones(rr.shape)*self.aturb*1.0e5
return aturb
def velocity(self, r, theta, phi, mstar=0.5):
mstar *= M_sun
rt, tt, pp = numpy.meshgrid(r*AU, theta, phi,indexing='ij')
rr = rt*numpy.sin(tt)
zz = rt*numpy.cos(tt)
v_r = numpy.zeros(rr.shape)
v_theta = numpy.zeros(rr.shape)
v_phi = numpy.sqrt(G*mstar*rr**2/rt**3)
return numpy.array((v_r, v_theta, v_phi))
def read(self, filename=None, usefile=None):
if (usefile == None):
f = h5py.File(filename, "r")
else:
f = usefile
self.mass = f['mass'][()]
self.rmin = f['rmin'][()]
self.rmax = f['rmax'][()]
self.plrho = f['plrho'][()]
self.h0 = f['h0'][()]
self.plh = f['plh'][()]
if 'gamma_taper' in f:
            self.gamma_taper = f['gamma_taper'][()]
self.amin = f['amin'][()]
self.amax = f['amax'][()]
self.pla = f['pla'][()]
self.alpha_settle = f['alpha_settle'][()]
if 't0' in f:
self.t0 = f['t0'][()]
self.plt = f['plt'][()]
if 'tmid0' in f:
self.tmid0 = f['tmid0'][()]
self.tatm0 = f['tatm0'][()]
self.zq0 = f['zq0'][()]
self.pltgas = f['pltgas'][()]
self.delta = f['delta'][()]
if 'aturb' in f:
self.aturb = f['aturb'][()]
if ('Dust' in f):
self.dust = Dust()
self.dust.set_properties_from_file(usefile=f['Dust'])
if ('Gas' in f):
for name in f['Gas']:
self.gas.append(Gas())
self.abundance.append(f['Gas'][name]['Abundance'][()])
self.gas[-1].set_properties_from_file(usefile=f['Gas'][name])
if (usefile == None):
f.close()
def write(self, filename=None, usefile=None):
if (usefile == None):
f = h5py.File(filename, "w")
else:
f = usefile
f['mass'] = self.mass
f['rmin'] = self.rmin
f['rmax'] = self.rmax
f['plrho'] = self.plrho
f['h0'] = self.h0
f['plh'] = self.plh
if self.gamma_taper != None:
f['gamma_taper'] = self.gamma_taper
if self.t0 != None:
f['t0'] = self.t0
f['plt'] = self.plt
if self.tmid0 != None:
f['tmid0'] = self.tmid0
f['tatm0'] = self.tatm0
f['zq0'] = self.zq0
f['pltgas'] = self.pltgas
f['delta'] = self.delta
if self.aturb != None:
f['aturb'] = self.aturb
f['amin'] = self.amin
f['amax'] = self.amax
f['pla'] = self.pla
f['alpha_settle'] = self.alpha_settle
if hasattr(self, 'dust'):
dust = f.create_group("Dust")
self.dust.write(usefile=dust)
gases = []
if hasattr(self, 'gas'):
gas = f.create_group("Gas")
for i in range(len(self.gas)):
gases.append(gas.create_group("Gas{0:d}".format(i)))
gases[i]["Abundance"] = self.abundance[i]
self.gas[i].write(usefile=gases[i])
if (usefile == None):
f.close()
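# Hypothetical usage sketch (not part of the original module), guarded so it only
# runs when this file is executed directly. The parameter values are illustrative
# assumptions; it evaluates the surface density and scale height on a small radial
# grid, which does not require a Dust object.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    _disk = SettledDisk(mass=1.0e-3, rmin=0.1, rmax=300., h0=0.1)
    _r = numpy.logspace(-1., 2.4, 10)  # radii in AU, inside rmin..rmax
    print(_disk.surface_density(_r))
    print(_disk.scale_height(_r) / AU)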
|
psheehanREPO_NAMEpdspyPATH_START.@pdspy_extracted@pdspy-master@pdspy@[email protected]@.PATH_END.py
|
{
"filename": "ascii.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/encodings/ascii.py",
"type": "Python"
}
|
""" Python 'ascii' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.ascii_encode
decode = codecs.ascii_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.ascii_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.ascii_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
class StreamConverter(StreamWriter,StreamReader):
encode = codecs.ascii_decode
decode = codecs.ascii_encode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='ascii',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
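# Hypothetical usage sketch (not part of the original module): the codec is
# normally reached through the standard codecs machinery, but the registry
# entry can also be exercised directly.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    _info = getregentry()
    print(_info.name)                      # ascii
    print(_info.encode("plain text")[0])   # b'plain text'
    print(_info.decode(b"plain text")[0])  # plain text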
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@[email protected]@.PATH_END.py
|
{
"filename": "pep425tags.py",
"repo_name": "davidharvey1986/pyRRG",
"repo_path": "pyRRG_extracted/pyRRG-master/unittests/bugFixPyRRG/lib/python3.7/site-packages/wheel/pep425tags.py",
"type": "Python"
}
|
"""Generate and work with PEP 425 Compatibility Tags."""
import distutils.util
import platform
import sys
import os
import sysconfig
import warnings
from .macosx_libfile import extract_macosx_min_system_version
try:
from importlib.machinery import all_suffixes as get_all_suffixes
except ImportError:
from imp import get_suffixes
def get_all_suffixes():
return [suffix[0] for suffix in get_suffixes()]
def get_config_var(var):
try:
return sysconfig.get_config_var(var)
except IOError as e: # pip Issue #1074
warnings.warn("{0}".format(e), RuntimeWarning)
return None
def get_abbr_impl():
"""Return abbreviated implementation name."""
impl = platform.python_implementation()
if impl == 'PyPy':
return 'pp'
elif impl == 'Jython':
return 'jy'
elif impl == 'IronPython':
return 'ip'
elif impl == 'CPython':
return 'cp'
raise LookupError('Unknown Python implementation: ' + impl)
def get_impl_ver():
"""Return implementation version."""
impl_ver = get_config_var("py_version_nodot")
if not impl_ver:
impl_ver = ''.join(map(str, get_impl_version_info()))
return impl_ver
def get_impl_version_info():
"""Return sys.version_info-like tuple for use in decrementing the minor
version."""
return sys.version_info[0], sys.version_info[1]
def get_flag(var, fallback, expected=True, warn=True):
"""Use a fallback method for determining SOABI flags if the needed config
var is unset or unavailable."""
val = get_config_var(var)
if val is None:
if warn:
warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
"be incorrect".format(var), RuntimeWarning, 2)
return fallback()
return val == expected
def get_abi_tag():
"""Return the ABI tag based on SOABI (if available) or emulate SOABI
(CPython 2, PyPy)."""
soabi = get_config_var('SOABI')
impl = get_abbr_impl()
if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
d = ''
m = ''
u = ''
if get_flag('Py_DEBUG',
lambda: hasattr(sys, 'gettotalrefcount'),
warn=(impl == 'cp')):
d = 'd'
if get_flag('WITH_PYMALLOC',
lambda: impl == 'cp',
warn=(impl == 'cp' and
sys.version_info < (3, 8))) \
and sys.version_info < (3, 8):
m = 'm'
if get_flag('Py_UNICODE_SIZE',
lambda: sys.maxunicode == 0x10ffff,
expected=4,
warn=(impl == 'cp' and
sys.version_info < (3, 3))) \
and sys.version_info < (3, 3):
u = 'u'
abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
elif soabi and soabi.startswith('cpython-'):
abi = 'cp' + soabi.split('-')[1]
elif soabi:
abi = soabi.replace('.', '_').replace('-', '_')
else:
abi = None
return abi
def calculate_macosx_platform_tag(archive_root, platform_tag):
"""
Calculate proper macosx platform tag basing on files which are included to wheel
Example platform tag `macosx-10.14-x86_64`
"""
prefix, base_version, suffix = platform_tag.split('-')
base_version = tuple([int(x) for x in base_version.split(".")])
if len(base_version) >= 2:
base_version = base_version[0:2]
assert len(base_version) == 2
if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
deploy_target = tuple([int(x) for x in os.environ[
"MACOSX_DEPLOYMENT_TARGET"].split(".")])
if len(deploy_target) >= 2:
deploy_target = deploy_target[0:2]
if deploy_target < base_version:
sys.stderr.write(
"[WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value ({}) than the "
"version on which the Python interpreter was compiled ({}), and will be "
"ignored.\n".format('.'.join(str(x) for x in deploy_target),
'.'.join(str(x) for x in base_version))
)
else:
base_version = deploy_target
assert len(base_version) == 2
start_version = base_version
versions_dict = {}
for (dirpath, dirnames, filenames) in os.walk(archive_root):
for filename in filenames:
if filename.endswith('.dylib') or filename.endswith('.so'):
lib_path = os.path.join(dirpath, filename)
min_ver = extract_macosx_min_system_version(lib_path)
if min_ver is not None:
versions_dict[lib_path] = min_ver[0:2]
if len(versions_dict) > 0:
base_version = max(base_version, max(versions_dict.values()))
# macosx platform tag do not support minor bugfix release
fin_base_version = "_".join([str(x) for x in base_version])
if start_version < base_version:
        problematic_files = [k for k, v in versions_dict.items() if v > start_version]
        # Decide singular/plural wording before joining the list into a string,
        # so the count reflects the number of files rather than characters.
        if len(problematic_files) == 1:
            files_form = "this file"
        else:
            files_form = "these files"
        problematic_files = "\n".join(problematic_files)
error_message = \
"[WARNING] This wheel needs a higher macOS version than {} " \
"To silence this warning, set MACOSX_DEPLOYMENT_TARGET to at least " +\
fin_base_version + " or recreate " + files_form + " with lower " \
"MACOSX_DEPLOYMENT_TARGET: \n" + problematic_files
if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
error_message = error_message.format("is set in MACOSX_DEPLOYMENT_TARGET variable.")
else:
error_message = error_message.format(
"the version your Python interpreter is compiled against.")
sys.stderr.write(error_message)
platform_tag = prefix + "_" + fin_base_version + "_" + suffix
return platform_tag
def get_platform(archive_root):
"""Return our platform name 'win32', 'linux_x86_64'"""
# XXX remove distutils dependency
result = distutils.util.get_platform()
if result.startswith("macosx") and archive_root is not None:
result = calculate_macosx_platform_tag(archive_root, result)
result = result.replace('.', '_').replace('-', '_')
if result == "linux_x86_64" and sys.maxsize == 2147483647:
# pip pull request #3497
result = "linux_i686"
return result
def get_supported(archive_root, versions=None, supplied_platform=None):
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
versions = []
version_info = get_impl_version_info()
major = version_info[:-1]
# Support all previous minor Python versions.
for minor in range(version_info[-1], -1, -1):
versions.append(''.join(map(str, major + (minor,))))
impl = get_abbr_impl()
abis = []
abi = get_abi_tag()
if abi:
abis[0:0] = [abi]
abi3s = set()
for suffix in get_all_suffixes():
if suffix.startswith('.abi'):
abi3s.add(suffix.split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
platforms = []
if supplied_platform:
platforms.append(supplied_platform)
platforms.append(get_platform(archive_root))
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in platforms:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# abi3 modules compatible with older version of Python
for version in versions[1:]:
# abi3 was introduced in Python 3.2
if version in ('31', '30'):
break
for abi in abi3s: # empty set if not Python 3
for arch in platforms:
supported.append(("%s%s" % (impl, version), abi, arch))
# No abi / arch, but requires our implementation:
for i, version in enumerate(versions):
supported.append(('%s%s' % (impl, version), 'none', 'any'))
if i == 0:
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# Major Python version + platform; e.g. binaries not using the Python API
for arch in platforms:
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported
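# Hypothetical usage sketch (not part of the original module): print the
# compatibility tags for the running interpreter. Passing None as archive_root
# skips the macOS minimum-version scan of compiled extension files.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    for _impl, _abi, _plat in get_supported(None):
        print("-".join((_impl, _abi, _plat)))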
|
davidharvey1986REPO_NAMEpyRRGPATH_START.@pyRRG_extracted@pyRRG-master@unittests@bugFixPyRRG@[email protected]@site-packages@[email protected]@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "teuben/QAC",
"repo_path": "QAC_extracted/QAC-master/casa/README.md",
"type": "Markdown"
}
|
Here we discuss CASA and derived products to do array combinations.
## CASA
CASA: (Common Astronomy Software Applications) is used for all ALMA
and VLA calibration and imaging, and the natural starting point
for any array combination procedures. Please have CASA 5.1
installed on your laptop, and follow the guidelines in
https://casa.nrao.edu/casa_obtaining.shtml
Some people have experience with MIRIAD, and we will make a comparison
with MIRIAD procedures and capabilities at some points during the
workshop.
### Testing your SD2018 environment
If you have installed all the components below, and when you start up
casa, from the CASA prompt the following commands should work and do
something sensible:
qac_version()
tp2vis_version()
SD2vis?
aU?
### CASA Guides
We will be using some examples from the CASA guides, most notably
* https://casaguides.nrao.edu/index.php/M100_Band3_SingleDish_5.1 [online had 2 errors - we fixed those here]
* https://casaguides.nrao.edu/index.php/M100_Band3_Combine_5.1 [online not working yet]
* https://casaguides.nrao.edu/index.php/M100_Band3_Combine_4.3 [working older version of CASA, with clean]
You can use CASA's extractCASAscript.py (we have a copy in SD2018) to
extract a working version:
./extractCASAscript.py https://casaguides.nrao.edu/index.php/M100_Band3_Combine_5.1
and now you could cheat and run the demo even from the Unix command line
casa -c M100Band3Combine5.1.py
Until further notice, don't do this,
it's not working yet. NRAO working on improving this CASA guide, we will be using our own version during SD2018
based on QAC's workflow6a.py
## QAC
QAC: (Quick Array Combinations) is a set of functions to help you
call CASA routines to exersize array combination techniques. It will
use a number of other methods (see tp2vis, sd2vis, ssc etc.)
See https://github.com/teuben/QAC how to install but here is the basic
rundown assuming you have CASA installed:
# install
cd ~/mycasastuff # where-ever you want this to be
git clone https://github.com/teuben/QAC
mkdir ~/.casa # in case it didn't exist
ln -s `pwd`/QAC ~/.casa/QAC
cd QAC
make tp2vis
cat casa.init.py >> ~/.casa/init.py
# run a benchmark, should be around 3 mins
cd test
curl http://admit.astro.umd.edu/~teuben/QAC/qac_bench.tar.gz | tar zxf -
casa-config --version
time casa --nogui -c bench.py
MacOS people may need to replace the casa command by (something like)
time /Applications/CASA.app/Contents/MacOS/casa --nogui -c bench.py
For version 5.1.2 the final reported flux should be around 383.6 and you should see an "OK" at the
end of the last REGRESSION line
## TP2VIS
TP2VIS: a new routine to help with a Joint Deconvolution style of
combining single dish (TP) with interferometric data (VIS).
Installation can be done via QAC. See above or https://github.com/tp2vis/distribute
## SD2VIS
SD2VIS: similar to TP2VIS but only works on single pointings, no mosaic.
Installation can be done via QAC via our install_sd2vis script.
See also https://www.oso.nordic-alma.se/software-tools.php
## AU
The Analysis Utilities (AU) are a set of CASA functions, and can be installed very much like
QAC by patching up your ~/.casa/init.py file. Details are on
https://casaguides.nrao.edu/index.php/Analysis_Utilities but the following cut and pastable
commands should work from your Unix terminal:
cd ~/.casa
and grab the tar file and install it in your .casa tree
wget ftp://ftp.cv.nrao.edu/pub/casaguides/analysis_scripts.tar
tar xf analysis_scripts.tar
or
curl ftp://ftp.cv.nrao.edu/pub/casaguides/analysis_scripts.tar | tar xf -
and now add the following four lines to your ~/.casa/init.py file:
sys.path.append(os.environ['HOME'] + '/.casa/analysis_scripts')
import analysisUtils as au
import analysisUtils as aU
print "Added au"
(both variants "au" and "aU" have been seen in the wild, but 'aU' is supposed to be the official one)
|
teubenREPO_NAMEQACPATH_START.@QAC_extracted@QAC-master@[email protected]@.PATH_END.py
|
{
"filename": "_textfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/histogram2dcontour/_textfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram2dcontour"
_path_str = "histogram2dcontour.textfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
"Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
"Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# lineposition
# ------------
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
# shadow
# ------
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# style
# -----
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
# textcase
# --------
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
# variant
# -------
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
# weight
# ------
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Textfont object
For this trace it only has an effect if `coloring` is set to
"heatmap". Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram2dcontour.Textfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram2dcontour.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2dcontour.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("lineposition", None)
_v = lineposition if lineposition is not None else _v
if _v is not None:
self["lineposition"] = _v
_v = arg.pop("shadow", None)
_v = shadow if shadow is not None else _v
if _v is not None:
self["shadow"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("style", None)
_v = style if style is not None else _v
if _v is not None:
self["style"] = _v
_v = arg.pop("textcase", None)
_v = textcase if textcase is not None else _v
if _v is not None:
self["textcase"] = _v
_v = arg.pop("variant", None)
_v = variant if variant is not None else _v
if _v is not None:
self["variant"] = _v
_v = arg.pop("weight", None)
_v = weight if weight is not None else _v
if _v is not None:
self["weight"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
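# Hypothetical usage sketch (not part of the original module): attach a text font
# to a histogram2dcontour trace. Per the class docstring it only takes effect when
# contour coloring is "heatmap"; the data, the texttemplate support, and the font
# values below are illustrative assumptions about the installed plotly version.
if __name__ == "__main__":  # pragma: no cover - illustrative only
    import plotly.graph_objects as go
    fig = go.Figure(
        go.Histogram2dContour(
            x=[1, 2, 2, 3, 3, 3],
            y=[1, 1, 2, 2, 3, 3],
            contours={"coloring": "heatmap"},
            texttemplate="%{z}",
            textfont=Textfont(size=12, family="Arial"),
        )
    )
    fig.show()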
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@histogram2dcontour@[email protected]_END.py
|
{
"filename": "deployments.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/server/models/deployments.py",
"type": "Python"
}
|
"""
Functions for interacting with deployment ORM objects.
Intended for internal use by the Prefect REST API.
"""
import datetime
from typing import Dict, Iterable, List, Optional, Sequence, TypeVar, cast
from uuid import UUID, uuid4
import pendulum
import sqlalchemy as sa
from sqlalchemy import delete, or_, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.sql import Select
from prefect.server import models, schemas
from prefect.server.database import orm_models
from prefect.server.database.dependencies import db_injector
from prefect.server.database.interface import PrefectDBInterface
from prefect.server.events.clients import PrefectServerEventsClient
from prefect.server.exceptions import ObjectNotFoundError
from prefect.server.models.events import deployment_status_event
from prefect.server.schemas.statuses import DeploymentStatus
from prefect.server.utilities.database import json_contains
from prefect.settings import (
PREFECT_API_SERVICES_SCHEDULER_MAX_RUNS,
PREFECT_API_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME,
PREFECT_API_SERVICES_SCHEDULER_MIN_RUNS,
PREFECT_API_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME,
)
T = TypeVar("T", bound=tuple)
async def _delete_scheduled_runs(
session: AsyncSession,
deployment_id: UUID,
auto_scheduled_only: bool = False,
) -> None:
"""
This utility function deletes all of a deployment's runs that are in a Scheduled state
and haven't run yet. It should be run any time a deployment is created or
modified in order to ensure that future runs comply with the deployment's latest values.
Args:
deployment_id: the deployment for which we should delete runs.
auto_scheduled_only: if True, only delete auto scheduled runs. Defaults to `False`.
"""
delete_query = sa.delete(orm_models.FlowRun).where(
orm_models.FlowRun.deployment_id == deployment_id,
orm_models.FlowRun.state_type == schemas.states.StateType.SCHEDULED.value,
orm_models.FlowRun.run_count == 0,
)
if auto_scheduled_only:
delete_query = delete_query.where(
orm_models.FlowRun.auto_scheduled.is_(True),
)
await session.execute(delete_query)
@db_injector
async def create_deployment(
db: PrefectDBInterface,
session: AsyncSession,
deployment: schemas.core.Deployment,
) -> Optional[orm_models.Deployment]:
"""Upserts a deployment.
Args:
session: a database session
deployment: a deployment model
Returns:
orm_models.Deployment: the newly-created or updated deployment
"""
# set `updated` manually
# known limitation of `on_conflict_do_update`, will not use `Column.onupdate`
# https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#the-set-clause
deployment.updated = pendulum.now("UTC") # type: ignore[assignment]
deployment.labels = await with_system_labels_for_deployment(session, deployment)
schedules = deployment.schedules
insert_values = deployment.model_dump_for_orm(
exclude_unset=True, exclude={"schedules"}
)
requested_concurrency_limit = insert_values.pop("concurrency_limit", "unset")
# The job_variables field in client and server schemas is named
# infra_overrides in the database.
job_variables = insert_values.pop("job_variables", None)
if job_variables:
insert_values["infra_overrides"] = job_variables
conflict_update_fields = deployment.model_dump_for_orm(
exclude_unset=True,
exclude={
"id",
"created",
"created_by",
"schedules",
"job_variables",
"concurrency_limit",
},
)
if job_variables:
conflict_update_fields["infra_overrides"] = job_variables
insert_stmt = (
db.insert(orm_models.Deployment)
.values(**insert_values)
.on_conflict_do_update(
index_elements=db.deployment_unique_upsert_columns,
set_={**conflict_update_fields},
)
)
await session.execute(insert_stmt)
# Get the id of the deployment we just created or updated
result = await session.execute(
sa.select(orm_models.Deployment.id).where(
sa.and_(
orm_models.Deployment.flow_id == deployment.flow_id,
orm_models.Deployment.name == deployment.name,
)
)
)
deployment_id = result.scalar_one_or_none()
if not deployment_id:
return None
# Because this was possibly an upsert, we need to delete any existing
# schedules and any runs from the old deployment.
await _delete_scheduled_runs(
session=session, deployment_id=deployment_id, auto_scheduled_only=True
)
await delete_schedules_for_deployment(session=session, deployment_id=deployment_id)
if schedules:
await create_deployment_schedules(
session=session,
deployment_id=deployment_id,
schedules=[
schemas.actions.DeploymentScheduleCreate(
schedule=schedule.schedule,
active=schedule.active, # type: ignore[call-arg]
)
for schedule in schedules
],
)
if requested_concurrency_limit != "unset":
await _create_or_update_deployment_concurrency_limit(
session, deployment_id, deployment.concurrency_limit
)
query = (
sa.select(orm_models.Deployment)
.where(
sa.and_(
orm_models.Deployment.flow_id == deployment.flow_id,
orm_models.Deployment.name == deployment.name,
)
)
.execution_options(populate_existing=True)
)
refreshed_result = await session.execute(query)
return refreshed_result.scalar()
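# Hypothetical usage sketch (not part of the original module): inside an active
# AsyncSession, create_deployment upserts by (flow_id, name) and returns the
# refreshed ORM row. The deployment name and flow_id below are illustrative
# assumptions.
#
#     deployment = schemas.core.Deployment(name="daily-etl", flow_id=flow_id)
#     created = await create_deployment(session=session, deployment=deployment)
#     await session.commit()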
async def update_deployment(
session: AsyncSession,
deployment_id: UUID,
deployment: schemas.actions.DeploymentUpdate,
) -> bool:
"""Updates a deployment.
Args:
session: a database session
deployment_id: the ID of the deployment to modify
deployment: changes to a deployment model
Returns:
bool: whether the deployment was updated
"""
from prefect.server.api.workers import WorkerLookups
schedules = deployment.schedules
# exclude_unset=True allows us to only update values provided by
# the user, ignoring any defaults on the model
update_data = deployment.model_dump_for_orm(
exclude_unset=True,
exclude={"work_pool_name"},
)
requested_concurrency_limit_update = update_data.pop("concurrency_limit", "unset")
# The job_variables field in client and server schemas is named
# infra_overrides in the database.
job_variables = update_data.pop("job_variables", None)
if job_variables:
update_data["infra_overrides"] = job_variables
should_update_schedules = update_data.pop("schedules", None) is not None
if deployment.work_pool_name and deployment.work_queue_name:
# If a specific pool name/queue name combination was provided, get the
# ID for that work pool queue.
update_data[
"work_queue_id"
] = await WorkerLookups()._get_work_queue_id_from_name(
session=session,
work_pool_name=deployment.work_pool_name,
work_queue_name=deployment.work_queue_name,
create_queue_if_not_found=True,
)
elif deployment.work_pool_name:
# If just a pool name was provided, get the ID for its default
# work pool queue.
update_data[
"work_queue_id"
] = await WorkerLookups()._get_default_work_queue_id_from_work_pool_name(
session=session,
work_pool_name=deployment.work_pool_name,
)
elif deployment.work_queue_name:
# If just a queue name was provided, ensure the queue exists and
# get its ID.
work_queue = await models.work_queues.ensure_work_queue_exists(
session=session, name=update_data["work_queue_name"]
)
update_data["work_queue_id"] = work_queue.id
update_stmt = (
sa.update(orm_models.Deployment)
.where(orm_models.Deployment.id == deployment_id)
.values(**update_data)
)
result = await session.execute(update_stmt)
# delete any auto scheduled runs that would have reflected the old deployment config
await _delete_scheduled_runs(
session=session, deployment_id=deployment_id, auto_scheduled_only=True
)
if should_update_schedules:
# If schedules were provided, remove the existing schedules and
# replace them with the new ones.
await delete_schedules_for_deployment(
session=session, deployment_id=deployment_id
)
await create_deployment_schedules(
session=session,
deployment_id=deployment_id,
schedules=[
schemas.actions.DeploymentScheduleCreate(
schedule=schedule.schedule,
active=schedule.active, # type: ignore[call-arg]
)
for schedule in schedules
],
)
if requested_concurrency_limit_update != "unset":
await _create_or_update_deployment_concurrency_limit(
session, deployment_id, deployment.concurrency_limit
)
return result.rowcount > 0
async def _create_or_update_deployment_concurrency_limit(
session: AsyncSession, deployment_id: UUID, limit: Optional[int]
):
deployment = await session.get(orm_models.Deployment, deployment_id)
assert deployment is not None
if (
deployment.global_concurrency_limit
and deployment.global_concurrency_limit.limit == limit
) or (deployment.global_concurrency_limit is None and limit is None):
return
deployment._concurrency_limit = limit
if limit is None:
await _delete_related_concurrency_limit(
session=session, deployment_id=deployment_id
)
await session.refresh(deployment)
elif deployment.global_concurrency_limit:
deployment.global_concurrency_limit.limit = limit
else:
limit_name = f"deployment:{deployment_id}"
new_limit = orm_models.ConcurrencyLimitV2(name=limit_name, limit=limit)
deployment.global_concurrency_limit = new_limit
session.add(deployment)
async def read_deployment(
session: AsyncSession, deployment_id: UUID
) -> Optional[orm_models.Deployment]:
"""Reads a deployment by id.
Args:
session: A database session
deployment_id: a deployment id
Returns:
orm_models.Deployment: the deployment
"""
return await session.get(orm_models.Deployment, deployment_id)
async def read_deployment_by_name(
session: AsyncSession, name: str, flow_name: str
) -> Optional[orm_models.Deployment]:
"""Reads a deployment by name.
Args:
session: A database session
name: a deployment name
flow_name: the name of the flow the deployment belongs to
Returns:
orm_models.Deployment: the deployment
"""
result = await session.execute(
select(orm_models.Deployment)
.join(orm_models.Flow, orm_models.Deployment.flow_id == orm_models.Flow.id)
.where(
sa.and_(
orm_models.Flow.name == flow_name,
orm_models.Deployment.name == name,
)
)
.limit(1)
)
return result.scalar()
async def _apply_deployment_filters(
query: Select[T],
flow_filter: Optional[schemas.filters.FlowFilter] = None,
flow_run_filter: Optional[schemas.filters.FlowRunFilter] = None,
task_run_filter: Optional[schemas.filters.TaskRunFilter] = None,
deployment_filter: Optional[schemas.filters.DeploymentFilter] = None,
work_pool_filter: Optional[schemas.filters.WorkPoolFilter] = None,
work_queue_filter: Optional[schemas.filters.WorkQueueFilter] = None,
) -> Select[T]:
"""
Applies filters to a deployment query as a combination of EXISTS subqueries.
"""
if deployment_filter:
query = query.where(deployment_filter.as_sql_filter())
if flow_filter:
flow_exists_clause = select(orm_models.Deployment.id).where(
orm_models.Deployment.flow_id == orm_models.Flow.id,
flow_filter.as_sql_filter(),
)
query = query.where(flow_exists_clause.exists())
if flow_run_filter or task_run_filter:
flow_run_exists_clause = select(orm_models.FlowRun).where(
orm_models.Deployment.id == orm_models.FlowRun.deployment_id
)
if flow_run_filter:
flow_run_exists_clause = flow_run_exists_clause.where(
flow_run_filter.as_sql_filter()
)
if task_run_filter:
flow_run_exists_clause = flow_run_exists_clause.join(
orm_models.TaskRun,
orm_models.TaskRun.flow_run_id == orm_models.FlowRun.id,
).where(task_run_filter.as_sql_filter())
query = query.where(flow_run_exists_clause.exists())
if work_pool_filter or work_queue_filter:
work_pool_exists_clause = select(orm_models.WorkQueue).where(
orm_models.Deployment.work_queue_id == orm_models.WorkQueue.id
)
if work_queue_filter:
work_pool_exists_clause = work_pool_exists_clause.where(
work_queue_filter.as_sql_filter()
)
if work_pool_filter:
work_pool_exists_clause = work_pool_exists_clause.join(
orm_models.WorkPool,
orm_models.WorkPool.id == orm_models.WorkQueue.work_pool_id,
).where(work_pool_filter.as_sql_filter())
query = query.where(work_pool_exists_clause.exists())
return query
async def read_deployments(
session: AsyncSession,
offset: Optional[int] = None,
limit: Optional[int] = None,
flow_filter: Optional[schemas.filters.FlowFilter] = None,
flow_run_filter: Optional[schemas.filters.FlowRunFilter] = None,
task_run_filter: Optional[schemas.filters.TaskRunFilter] = None,
deployment_filter: Optional[schemas.filters.DeploymentFilter] = None,
work_pool_filter: Optional[schemas.filters.WorkPoolFilter] = None,
work_queue_filter: Optional[schemas.filters.WorkQueueFilter] = None,
sort: schemas.sorting.DeploymentSort = schemas.sorting.DeploymentSort.NAME_ASC,
) -> Sequence[orm_models.Deployment]:
"""
Read deployments.
Args:
session: A database session
offset: Query offset
limit: Query limit
flow_filter: only select deployments whose flows match these criteria
flow_run_filter: only select deployments whose flow runs match these criteria
task_run_filter: only select deployments whose task runs match these criteria
deployment_filter: only select deployment that match these filters
work_pool_filter: only select deployments whose work pools match these criteria
work_queue_filter: only select deployments whose work pool queues match these criteria
sort: the sort criteria for selected deployments. Defaults to `name` ASC.
Returns:
List[orm_models.Deployment]: deployments
"""
query = select(orm_models.Deployment).order_by(sort.as_sql_sort())
query = await _apply_deployment_filters(
query=query,
flow_filter=flow_filter,
flow_run_filter=flow_run_filter,
task_run_filter=task_run_filter,
deployment_filter=deployment_filter,
work_pool_filter=work_pool_filter,
work_queue_filter=work_queue_filter,
)
if offset is not None:
query = query.offset(offset)
if limit is not None:
query = query.limit(limit)
result = await session.execute(query)
return result.scalars().unique().all()
async def count_deployments(
session: AsyncSession,
flow_filter: Optional[schemas.filters.FlowFilter] = None,
flow_run_filter: Optional[schemas.filters.FlowRunFilter] = None,
task_run_filter: Optional[schemas.filters.TaskRunFilter] = None,
deployment_filter: Optional[schemas.filters.DeploymentFilter] = None,
work_pool_filter: Optional[schemas.filters.WorkPoolFilter] = None,
work_queue_filter: Optional[schemas.filters.WorkQueueFilter] = None,
) -> int:
"""
Count deployments.
Args:
session: A database session
flow_filter: only count deployments whose flows match these criteria
flow_run_filter: only count deployments whose flow runs match these criteria
task_run_filter: only count deployments whose task runs match these criteria
deployment_filter: only count deployment that match these filters
work_pool_filter: only count deployments that match these work pool filters
work_queue_filter: only count deployments that match these work pool queue filters
Returns:
int: the number of deployments matching filters
"""
query = select(sa.func.count(sa.text("*"))).select_from(orm_models.Deployment)
query = await _apply_deployment_filters(
query=query,
flow_filter=flow_filter,
flow_run_filter=flow_run_filter,
task_run_filter=task_run_filter,
deployment_filter=deployment_filter,
work_pool_filter=work_pool_filter,
work_queue_filter=work_queue_filter,
)
result = await session.execute(query)
return result.scalar_one()
async def delete_deployment(session: AsyncSession, deployment_id: UUID) -> bool:
"""
Delete a deployment by id.
Args:
session: A database session
deployment_id: a deployment id
Returns:
bool: whether or not the deployment was deleted
"""
    # delete scheduled runs, both auto- and user-created.
await _delete_scheduled_runs(
session=session, deployment_id=deployment_id, auto_scheduled_only=False
)
await _delete_related_concurrency_limit(
session=session, deployment_id=deployment_id
)
result = await session.execute(
delete(orm_models.Deployment).where(orm_models.Deployment.id == deployment_id)
)
return result.rowcount > 0
async def _delete_related_concurrency_limit(session: AsyncSession, deployment_id: UUID):
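    # Delete the ConcurrencyLimitV2 row referenced by the deployment's
    # concurrency_limit_id, resolved via a scalar subquery.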
return await session.execute(
delete(orm_models.ConcurrencyLimitV2).where(
orm_models.ConcurrencyLimitV2.id
== sa.select(orm_models.Deployment.concurrency_limit_id)
.where(orm_models.Deployment.id == deployment_id)
.scalar_subquery()
)
)
async def schedule_runs(
session: AsyncSession,
deployment_id: UUID,
start_time: Optional[datetime.datetime] = None,
end_time: Optional[datetime.datetime] = None,
min_time: Optional[datetime.timedelta] = None,
min_runs: Optional[int] = None,
max_runs: Optional[int] = None,
auto_scheduled: bool = True,
) -> Sequence[UUID]:
"""
Schedule flow runs for a deployment
Args:
session: a database session
deployment_id: the id of the deployment to schedule
start_time: the time from which to start scheduling runs
end_time: runs will be scheduled until at most this time
min_time: runs will be scheduled until at least this far in the future
        min_runs: a minimum number of runs to schedule
        max_runs: a maximum number of runs to schedule
This function will generate the minimum number of runs that satisfy the min
and max times, and the min and max counts. Specifically, the following order
will be respected.
- Runs will be generated starting on or after the `start_time`
- No more than `max_runs` runs will be generated
- No runs will be generated after `end_time` is reached
- At least `min_runs` runs will be generated
- Runs will be generated until at least `start_time` + `min_time` is reached
Returns:
a list of flow run ids scheduled for the deployment
"""
if min_runs is None:
min_runs = PREFECT_API_SERVICES_SCHEDULER_MIN_RUNS.value()
if max_runs is None:
max_runs = PREFECT_API_SERVICES_SCHEDULER_MAX_RUNS.value()
if start_time is None:
start_time = pendulum.now("UTC")
if end_time is None:
end_time = start_time + (
PREFECT_API_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME.value()
)
if min_time is None:
min_time = PREFECT_API_SERVICES_SCHEDULER_MIN_SCHEDULED_TIME.value()
actual_start_time = pendulum.instance(start_time)
actual_end_time = pendulum.instance(end_time)
runs = await _generate_scheduled_flow_runs(
session=session,
deployment_id=deployment_id,
start_time=actual_start_time,
end_time=actual_end_time,
min_time=min_time,
min_runs=min_runs,
max_runs=max_runs,
auto_scheduled=auto_scheduled,
)
return await _insert_scheduled_flow_runs(session=session, runs=runs)
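# Illustrative usage (not part of the original module): `schedule_runs` is
# normally driven by the scheduler service, but it can also be called directly
# inside a database session, e.g. (assuming an existing `deployment_id` and a
# `db` interface exposing `session_context()` as used elsewhere in this module):
#
#     async with db.session_context(begin_transaction=True) as session:
#         run_ids = await schedule_runs(
#             session=session, deployment_id=deployment_id, max_runs=10
#         )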
async def _generate_scheduled_flow_runs(
session: AsyncSession,
deployment_id: UUID,
start_time: datetime.datetime,
end_time: datetime.datetime,
min_time: datetime.timedelta,
min_runs: int,
max_runs: int,
auto_scheduled: bool = True,
) -> List[Dict]:
"""
Given a `deployment_id` and schedule, generates a list of flow run objects and
associated scheduled states that represent scheduled flow runs. This method
does NOT insert generated runs into the database, in order to facilitate
batch operations. Call `_insert_scheduled_flow_runs()` to insert these runs.
Runs include an idempotency key which prevents duplicate runs from being inserted
if the output from this function is used more than once.
Args:
session: a database session
deployment_id: the id of the deployment to schedule
start_time: the time from which to start scheduling runs
end_time: runs will be scheduled until at most this time
min_time: runs will be scheduled until at least this far in the future
        min_runs: a minimum number of runs to schedule
        max_runs: a maximum number of runs to schedule
This function will generate the minimum number of runs that satisfy the min
and max times, and the min and max counts. Specifically, the following order
will be respected.
- Runs will be generated starting on or after the `start_time`
- No more than `max_runs` runs will be generated
- No runs will be generated after `end_time` is reached
- At least `min_runs` runs will be generated
- Runs will be generated until at least `start_time + min_time` is reached
Returns:
a list of dictionary representations of the `FlowRun` objects to schedule
"""
runs = []
deployment = await session.get(orm_models.Deployment, deployment_id)
if not deployment:
return []
active_deployment_schedules = await read_deployment_schedules(
session=session,
deployment_id=deployment.id,
deployment_schedule_filter=schemas.filters.DeploymentScheduleFilter(
active=schemas.filters.DeploymentScheduleFilterActive(eq_=True)
),
)
for deployment_schedule in active_deployment_schedules:
dates = []
# generate up to `n` dates satisfying the min of `max_runs` and `end_time`
for dt in deployment_schedule.schedule._get_dates_generator(
n=max_runs, start=start_time, end=end_time
):
dates.append(dt)
# at any point, if we satisfy both of the minimums, we can stop
if len(dates) >= min_runs and dt >= (start_time + min_time):
break
tags = deployment.tags
if auto_scheduled:
tags = ["auto-scheduled"] + tags
for date in dates:
runs.append(
{
"id": uuid4(),
"flow_id": deployment.flow_id,
"deployment_id": deployment_id,
"deployment_version": deployment.version,
"work_queue_name": deployment.work_queue_name,
"work_queue_id": deployment.work_queue_id,
"parameters": deployment.parameters,
"infrastructure_document_id": deployment.infrastructure_document_id,
"idempotency_key": f"scheduled {deployment.id} {date}",
"tags": tags,
"auto_scheduled": auto_scheduled,
"state": schemas.states.Scheduled(
scheduled_time=date,
message="Flow run scheduled",
).model_dump(),
"state_type": schemas.states.StateType.SCHEDULED,
"state_name": "Scheduled",
"next_scheduled_start_time": date,
"expected_start_time": date,
}
)
return runs
@db_injector
async def _insert_scheduled_flow_runs(
db: PrefectDBInterface, session: AsyncSession, runs: List[Dict]
) -> Sequence[UUID]:
"""
Given a list of flow runs to schedule, as generated by `_generate_scheduled_flow_runs`,
inserts them into the database. Note this is a separate method to facilitate batch
operations on many scheduled runs.
Args:
session: a database session
runs: a list of dicts representing flow runs to insert
Returns:
a list of flow run ids that were created
"""
if not runs:
return []
# gracefully insert the flow runs against the idempotency key
# this syntax (insert statement, values to insert) is most efficient
# because it uses a single bind parameter
await session.execute(
db.insert(orm_models.FlowRun).on_conflict_do_nothing(
index_elements=db.flow_run_unique_upsert_columns
),
runs,
)
# query for the rows that were newly inserted (by checking for any flow runs with
# no corresponding flow run states)
inserted_rows = sa.select(db.FlowRun.id).where(
db.FlowRun.id.in_([r["id"] for r in runs]),
~select(db.FlowRunState.id)
.where(db.FlowRunState.flow_run_id == db.FlowRun.id)
.exists(),
)
inserted_flow_run_ids = (await session.execute(inserted_rows)).scalars().all()
    # insert flow run states that correspond to the newly inserted rows
insert_flow_run_states = [
{"id": uuid4(), "flow_run_id": r["id"], **r["state"]}
for r in runs
if r["id"] in inserted_flow_run_ids
]
if insert_flow_run_states:
# this syntax (insert statement, values to insert) is most efficient
# because it uses a single bind parameter
await session.execute(
orm_models.FlowRunState.__table__.insert(), # type: ignore[attr-defined]
insert_flow_run_states,
)
# set the `state_id` on the newly inserted runs
stmt = db.set_state_id_on_inserted_flow_runs_statement(
inserted_flow_run_ids=inserted_flow_run_ids,
insert_flow_run_states=insert_flow_run_states,
)
await session.execute(stmt)
return inserted_flow_run_ids
async def check_work_queues_for_deployment(
session: AsyncSession, deployment_id: UUID
) -> Sequence[orm_models.WorkQueue]:
"""
Get work queues that can pick up the specified deployment.
Work queues will pick up a deployment when all of the following are met.
- The deployment has ALL tags that the work queue has (i.e. the work
queue's tags must be a subset of the deployment's tags).
- The work queue's specified deployment IDs match the deployment's ID,
or the work queue does NOT have specified deployment IDs.
- The work queue's specified flow runners match the deployment's flow
runner or the work queue does NOT have a specified flow runner.
Notes on the query:
    - Our database currently allows either "null" or empty lists as
      null values in filters, so we need to catch both cases with "or".
- `json_contains(A, B)` should be interpreted as "True if A
contains B".
Returns:
List[orm_models.WorkQueue]: WorkQueues
"""
deployment = await session.get(orm_models.Deployment, deployment_id)
if not deployment:
raise ObjectNotFoundError(f"Deployment with id {deployment_id} not found")
query = (
select(orm_models.WorkQueue)
# work queue tags are a subset of deployment tags
.filter(
or_(
json_contains(deployment.tags, orm_models.WorkQueue.filter["tags"]),
json_contains([], orm_models.WorkQueue.filter["tags"]),
json_contains(None, orm_models.WorkQueue.filter["tags"]),
)
)
# deployment_ids is null or contains the deployment's ID
.filter(
or_(
json_contains(
orm_models.WorkQueue.filter["deployment_ids"],
str(deployment.id),
),
json_contains(None, orm_models.WorkQueue.filter["deployment_ids"]),
json_contains([], orm_models.WorkQueue.filter["deployment_ids"]),
)
)
)
result = await session.execute(query)
return result.scalars().unique().all()
async def create_deployment_schedules(
session: AsyncSession,
deployment_id: UUID,
schedules: List[schemas.actions.DeploymentScheduleCreate],
) -> List[schemas.core.DeploymentSchedule]:
"""
Creates a deployment's schedules.
Args:
session: A database session
deployment_id: a deployment id
schedules: a list of deployment schedule create actions
"""
schedules_with_deployment_id = []
for schedule in schedules:
data = schedule.model_dump()
data["deployment_id"] = deployment_id
schedules_with_deployment_id.append(data)
models = [
orm_models.DeploymentSchedule(**schedule)
for schedule in schedules_with_deployment_id
]
session.add_all(models)
await session.flush()
return [
schemas.core.DeploymentSchedule.model_validate(m, from_attributes=True)
for m in models
]
async def read_deployment_schedules(
session: AsyncSession,
deployment_id: UUID,
deployment_schedule_filter: Optional[
schemas.filters.DeploymentScheduleFilter
] = None,
) -> List[schemas.core.DeploymentSchedule]:
"""
Reads a deployment's schedules.
Args:
session: A database session
deployment_id: a deployment id
Returns:
list[schemas.core.DeploymentSchedule]: the deployment's schedules
"""
query = (
sa.select(orm_models.DeploymentSchedule)
.where(orm_models.DeploymentSchedule.deployment_id == deployment_id)
.order_by(orm_models.DeploymentSchedule.updated.desc())
)
if deployment_schedule_filter:
query = query.where(deployment_schedule_filter.as_sql_filter())
result = await session.execute(query)
return [
schemas.core.DeploymentSchedule.model_validate(s, from_attributes=True)
for s in result.scalars().all()
]
async def update_deployment_schedule(
session: AsyncSession,
deployment_id: UUID,
deployment_schedule_id: UUID,
schedule: schemas.actions.DeploymentScheduleUpdate,
) -> bool:
"""
    Updates a deployment schedule.
Args:
session: A database session
deployment_schedule_id: a deployment schedule id
schedule: a deployment schedule update action
"""
result = await session.execute(
sa.update(orm_models.DeploymentSchedule)
.where(
sa.and_(
orm_models.DeploymentSchedule.id == deployment_schedule_id,
orm_models.DeploymentSchedule.deployment_id == deployment_id,
)
)
.values(**schedule.model_dump(exclude_none=True))
)
return result.rowcount > 0
async def delete_schedules_for_deployment(
session: AsyncSession, deployment_id: UUID
) -> bool:
"""
    Deletes all schedules for a deployment.
Args:
session: A database session
deployment_id: a deployment id
"""
result = await session.execute(
sa.delete(orm_models.DeploymentSchedule).where(
orm_models.DeploymentSchedule.deployment_id == deployment_id
)
)
return result.rowcount > 0
async def delete_deployment_schedule(
session: AsyncSession,
deployment_id: UUID,
deployment_schedule_id: UUID,
) -> bool:
"""
Deletes a deployment schedule.
Args:
session: A database session
deployment_schedule_id: a deployment schedule id
"""
result = await session.execute(
sa.delete(orm_models.DeploymentSchedule).where(
sa.and_(
orm_models.DeploymentSchedule.id == deployment_schedule_id,
orm_models.DeploymentSchedule.deployment_id == deployment_id,
)
)
)
return result.rowcount > 0
@db_injector
async def mark_deployments_ready(
db: PrefectDBInterface,
deployment_ids: Optional[Iterable[UUID]] = None,
work_queue_ids: Optional[Iterable[UUID]] = None,
) -> None:
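    # Mark the matching deployments READY, record their last_polled time, and
    # emit a status event for each deployment that was previously NOT_READY.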
deployment_ids = deployment_ids or []
work_queue_ids = work_queue_ids or []
if not deployment_ids and not work_queue_ids:
return
async with db.session_context(
begin_transaction=True,
) as session:
result = await session.execute(
select(orm_models.Deployment.id).where(
sa.or_(
orm_models.Deployment.id.in_(deployment_ids),
orm_models.Deployment.work_queue_id.in_(work_queue_ids),
),
orm_models.Deployment.status == DeploymentStatus.NOT_READY,
)
)
unready_deployments = list(result.scalars().unique().all())
last_polled = pendulum.now("UTC")
await session.execute(
sa.update(orm_models.Deployment)
.where(
sa.or_(
orm_models.Deployment.id.in_(deployment_ids),
orm_models.Deployment.work_queue_id.in_(work_queue_ids),
)
)
.values(status=DeploymentStatus.READY, last_polled=last_polled)
)
if not unready_deployments:
return
async with PrefectServerEventsClient() as events:
for deployment_id in unready_deployments:
await events.emit(
await deployment_status_event(
session=session,
deployment_id=deployment_id,
status=DeploymentStatus.READY,
occurred=last_polled,
)
)
@db_injector
async def mark_deployments_not_ready(
db: PrefectDBInterface,
deployment_ids: Optional[Iterable[UUID]] = None,
work_queue_ids: Optional[Iterable[UUID]] = None,
) -> None:
deployment_ids = deployment_ids or []
work_queue_ids = work_queue_ids or []
if not deployment_ids and not work_queue_ids:
return
async with db.session_context(
begin_transaction=True,
) as session:
result = await session.execute(
select(orm_models.Deployment.id).where(
sa.or_(
orm_models.Deployment.id.in_(deployment_ids),
orm_models.Deployment.work_queue_id.in_(work_queue_ids),
),
orm_models.Deployment.status == DeploymentStatus.READY,
)
)
ready_deployments = list(result.scalars().unique().all())
await session.execute(
sa.update(orm_models.Deployment)
.where(
sa.or_(
orm_models.Deployment.id.in_(deployment_ids),
orm_models.Deployment.work_queue_id.in_(work_queue_ids),
)
)
.values(status=DeploymentStatus.NOT_READY)
)
if not ready_deployments:
return
async with PrefectServerEventsClient() as events:
for deployment_id in ready_deployments:
await events.emit(
await deployment_status_event(
session=session,
deployment_id=deployment_id,
status=DeploymentStatus.NOT_READY,
occurred=pendulum.now("UTC"),
)
)
async def with_system_labels_for_deployment(
session: AsyncSession,
deployment: schemas.core.Deployment,
) -> schemas.core.KeyValueLabels:
"""Augment user supplied labels with system default labels for a deployment."""
default_labels = cast(
schemas.core.KeyValueLabels,
{
"prefect.flow.id": str(deployment.flow_id),
},
)
user_supplied_labels = deployment.labels or {}
parent_labels = (
await models.flows.read_flow_labels(session, deployment.flow_id)
) or {}
return parent_labels | default_labels | user_supplied_labels
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@server@[email protected]@.PATH_END.py
|
{
"filename": "_symmetric.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergl/error_x/_symmetric.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SymmetricValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="symmetric", parent_name="scattergl.error_x", **kwargs
):
super(SymmetricValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergl@error_x@[email protected]_END.py
|
{
"filename": "cifar10.md",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/docs/en/datasets/classify/cifar10.md",
"type": "Markdown"
}
|
---
comments: true
description: Explore the CIFAR-10 dataset, featuring 60,000 color images in 10 classes. Learn about its structure, applications, and how to train models using YOLO.
keywords: CIFAR-10, dataset, machine learning, computer vision, image classification, YOLO, deep learning, neural networks
---
# CIFAR-10 Dataset
The [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) (Canadian Institute For Advanced Research) dataset is a collection of images used widely for [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and computer vision algorithms. It was developed by researchers at the CIFAR institute and consists of 60,000 32x32 color images in 10 different classes.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/fLBbyhPbWzY"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> How to Train an <a href="https://www.ultralytics.com/glossary/image-classification">Image Classification</a> Model with CIFAR-10 Dataset using Ultralytics YOLO11
</p>
## Key Features
- The CIFAR-10 dataset consists of 60,000 images, divided into 10 classes.
- Each class contains 6,000 images, split into 5,000 for training and 1,000 for testing.
- The images are colored and of size 32x32 pixels.
- The 10 different classes represent airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks.
- CIFAR-10 is commonly used for training and testing in the field of machine learning and computer vision.
## Dataset Structure
The CIFAR-10 dataset is split into two subsets:
1. **Training Set**: This subset contains 50,000 images used for training machine learning models.
2. **Testing Set**: This subset consists of 10,000 images used for testing and benchmarking the trained models.
## Applications
The CIFAR-10 dataset is widely used for training and evaluating [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) models in image classification tasks, such as [Convolutional Neural Networks](https://www.ultralytics.com/glossary/convolutional-neural-network-cnn) (CNNs), Support Vector Machines (SVMs), and various other machine learning algorithms. The diversity of the dataset in terms of classes and the presence of color images make it a well-rounded dataset for research and development in the field of machine learning and computer vision.
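Outside of the Ultralytics tooling, the dataset is equally easy to consume from other frameworks. The snippet below is a minimal, illustrative sketch (assuming PyTorch and torchvision are installed) that downloads the official splits and batches the training images:
```python
import torch
from torchvision import datasets, transforms
# Download the official CIFAR-10 splits and convert the 32x32 RGB images to tensors
transform = transforms.ToTensor()
train_set = datasets.CIFAR10(root="./data", train=True, download=True, transform=transform)
test_set = datasets.CIFAR10(root="./data", train=False, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
images, labels = next(iter(train_loader))
print(images.shape)  # torch.Size([64, 3, 32, 32])
```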
## Usage
To train a YOLO model on the CIFAR-10 dataset for 100 epochs with an image size of 32x32, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Train Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="cifar10", epochs=100, imgsz=32)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=cifar10 model=yolo11n-cls.pt epochs=100 imgsz=32
```
## Sample Images and Annotations
The CIFAR-10 dataset contains color images of various objects, providing a well-structured dataset for image classification tasks. Here are some examples of images from the dataset:

The example showcases the variety and complexity of the objects in the CIFAR-10 dataset, highlighting the importance of a diverse dataset for training robust image classification models.
## Citations and Acknowledgments
If you use the CIFAR-10 dataset in your research or development work, please cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@TECHREPORT{Krizhevsky09learningmultiple,
author={Alex Krizhevsky},
title={Learning multiple layers of features from tiny images},
institution={},
year={2009}
}
```
We would like to acknowledge Alex Krizhevsky for creating and maintaining the CIFAR-10 dataset as a valuable resource for the machine learning and [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) research community. For more information about the CIFAR-10 dataset and its creator, visit the [CIFAR-10 dataset website](https://www.cs.toronto.edu/~kriz/cifar.html).
## FAQ
### How can I train a YOLO model on the CIFAR-10 dataset?
To train a YOLO model on the CIFAR-10 dataset using Ultralytics, you can follow the examples provided for both Python and CLI. Here is a basic example to train your model for 100 [epochs](https://www.ultralytics.com/glossary/epoch) with an image size of 32x32 pixels:
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="cifar10", epochs=100, imgsz=32)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=cifar10 model=yolo11n-cls.pt epochs=100 imgsz=32
```
For more details, refer to the model [Training](../../modes/train.md) page.
### What are the key features of the CIFAR-10 dataset?
The CIFAR-10 dataset consists of 60,000 color images divided into 10 classes. Each class contains 6,000 images, with 5,000 for training and 1,000 for testing. The images are 32x32 pixels in size and vary across the following categories:
- Airplanes
- Cars
- Birds
- Cats
- Deer
- Dogs
- Frogs
- Horses
- Ships
- Trucks
This diverse dataset is essential for training image classification models in fields such as machine learning and computer vision. For more information, visit the CIFAR-10 sections on [dataset structure](#dataset-structure) and [applications](#applications).
### Why use the CIFAR-10 dataset for image classification tasks?
The CIFAR-10 dataset is an excellent benchmark for image classification due to its diversity and structure. It contains a balanced mix of 60,000 labeled images across 10 different categories, which helps in training robust and generalized models. It is widely used for evaluating deep learning models, including Convolutional [Neural Networks](https://www.ultralytics.com/glossary/neural-network-nn) (CNNs) and other machine learning algorithms. The dataset is relatively small, making it suitable for quick experimentation and algorithm development. Explore its numerous applications in the [applications](#applications) section.
### How is the CIFAR-10 dataset structured?
The CIFAR-10 dataset is structured into two main subsets:
1. **Training Set**: Contains 50,000 images used for training machine learning models.
2. **Testing Set**: Consists of 10,000 images for testing and benchmarking the trained models.
Each subset comprises images categorized into 10 classes, with their annotations readily available for model training and evaluation. For more detailed information, refer to the [dataset structure](#dataset-structure) section.
### How can I cite the CIFAR-10 dataset in my research?
If you use the CIFAR-10 dataset in your research or development projects, make sure to cite the following paper:
!!! quote ""
=== "BibTeX"
```bibtex
@TECHREPORT{Krizhevsky09learningmultiple,
author={Alex Krizhevsky},
title={Learning multiple layers of features from tiny images},
institution={},
year={2009}
}
```
Acknowledging the dataset's creators helps support continued research and development in the field. For more details, see the [citations and acknowledgments](#citations-and-acknowledgments) section.
### What are some practical examples of using the CIFAR-10 dataset?
The CIFAR-10 dataset is often used for training image classification models, such as Convolutional Neural Networks (CNNs) and [Support Vector Machines](https://www.ultralytics.com/glossary/support-vector-machine-svm) (SVMs). These models can be employed in various computer vision tasks including [object detection](https://www.ultralytics.com/glossary/object-detection), [image recognition](https://www.ultralytics.com/glossary/image-recognition), and automated tagging. To see some practical examples, check the code snippets in the [usage](#usage) section.
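As a minimal, illustrative sketch (the image path below is a placeholder), a trained classification checkpoint can be applied to a single image through the Ultralytics Python API:
```python
from ultralytics import YOLO
# Load a classification model (e.g. a checkpoint produced by the training run above)
model = YOLO("yolo11n-cls.pt")
# Classify one image and print the index of the most likely class
results = model("path/to/image.jpg")
print(results[0].probs.top1)
```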
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@docs@en@datasets@[email protected]@.PATH_END.py
|
{
"filename": "68041e36e11b_adding_new_heraautos_table.py",
"repo_name": "HERA-Team/hera_mc",
"repo_path": "hera_mc_extracted/hera_mc-main/alembic/versions/68041e36e11b_adding_new_heraautos_table.py",
"type": "Python"
}
|
"""add HeraAutos table
Revision ID: 68041e36e11b
Revises: b022867d09e3
Create Date: 2019-12-30 20:56:04.518300+00:00
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "68041e36e11b"
down_revision = "b022867d09e3"
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"hera_autos",
sa.Column("time", sa.BigInteger(), nullable=False),
sa.Column("antenna_number", sa.Integer(), nullable=False),
sa.Column("antenna_feed_pol", sa.String(), nullable=False),
sa.Column("measurement_type", sa.String(), nullable=False),
sa.Column("value", sa.Float(), nullable=False),
sa.PrimaryKeyConstraint("time", "antenna_number", "antenna_feed_pol"),
)
# ### end Alembic commands ###
def downgrade():
op.drop_table("hera_autos")
# ### end Alembic commands ###
|
HERA-TeamREPO_NAMEhera_mcPATH_START.@hera_mc_extracted@hera_mc-main@alembic@versions@[email protected]_END.py
|
{
"filename": "_stylesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/pie/insidetextfont/_stylesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StylesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="stylesrc", parent_name="pie.insidetextfont", **kwargs
):
super(StylesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@pie@insidetextfont@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "mit-ll/spacegym-kspdg",
"repo_path": "spacegym-kspdg_extracted/spacegym-kspdg-main/src/kspdg/private_src/python3_13/Darwin_x86_64/pyarmor_runtime_000000/__init__.py",
"type": "Python"
}
|
# Pyarmor 9.0.6 (trial), 000000, 2024-12-09T10:19:33.924322
from .pyarmor_runtime import __pyarmor__
|
mit-llREPO_NAMEspacegym-kspdgPATH_START.@spacegym-kspdg_extracted@spacegym-kspdg-main@src@kspdg@private_src@python3_13@Darwin_x86_64@pyarmor_runtime_000000@[email protected]_END.py
|
{
"filename": "plot_constraints.py",
"repo_name": "MikeSWang/HorizonGRound",
"repo_path": "HorizonGRound_extracted/HorizonGRound-master/application/pipeline/plot_constraints.py",
"type": "Python"
}
|
"""Plot 2-d parameter constraints from parameter chains.
"""
import warnings
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap, ListedColormap, to_rgb
from matplotlib.ticker import AutoMinorLocator
from scipy.integrate import cumtrapz, simps
from scipy.ndimage import gaussian_filter
try:
from config.program import stylesheet
except ImportError:
# pylint: disable=multiple-imports
import os, sys
current_file_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, "".join([current_file_dir, "/../"]))
from config.program import stylesheet
plt.style.use(stylesheet)
AREA_FILL_ALPHA = 1./3.
ONE_SIGMA_QUANTILES = [0.158655, 0.841345]
SIGMA_LEVELS = [0.864665, 0.393469, 0.000001]
legend_state = ([], [])
def gradient_colour_map(colour, bg_colour='#FFFFFF', name=None):
"""Generate a colour map using gradients between two colours.
Parameters
----------
colour : str
Primary colour as a hex code string.
bg_colour : str
Background colour as a hex code string (default is '#FFFFFF').
name : str
Name of the colour map (default is `None`).
Returns
-------
cmap
Generated colour map.
"""
r0, g0, b0 = to_rgb(bg_colour)
r1, g1, b1 = to_rgb(colour)
colour_dict = {
'red': ((0, r0, r0), (1, r1, r1)),
'green': ((0, g0, g0), (1, g1, g1)),
'blue': ((0, b0, b0), (1, b1, b1))
}
cmap = LinearSegmentedColormap(name, colour_dict)
return cmap
def convert_chains_to_grid(chains, bins=None, smooth=None,
range_x=None, range_y=None):
    Convert 2-parameter chains to the joint posterior binned over a
2-d parameter grid.
Parameters
----------
chains : float, array_like
Parameter chains.
bins : int, array_like, optional
        Number of bins (for each parameter) (default is `None`).
smooth : float or None, optional
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the posterior grid.
If `None` (default), no smoothing is applied.
range_x, range_y : float array_like, optional
Parameter range (default is `None`).
Returns
-------
posterior_grid, x_grid, y_grid : :class:`numpy.ndarray`
Posterior and parameter grids from histogram binning.
"""
x, y = np.transpose(chains)
try:
bin_range = [
[min(range_x), max(range_x)], [min(range_y), max(range_y)]
]
except TypeError:
bin_range = None
posterior_grid, x_edges, y_edges = np.histogram2d(
x.flatten(), y.flatten(), bins=bins, range=bin_range
)
if smooth:
posterior_grid = gaussian_filter(posterior_grid, smooth)
x_grid = (x_edges[1:] + x_edges[:-1]) / 2
y_grid = (y_edges[1:] + y_edges[:-1]) / 2
return posterior_grid, x_grid, y_grid
def plot_2d_contours(posterior, x, y, x_range=None, y_range=None,
estimate=None, x_precision=None, y_precision=None,
cmap=None, alpha=None, linestyle=None, fig=None):
"""Plot 2-d contours from the joint parameter posterior on a grid.
Parameters
----------
posterior : float, array_like
Posterior evaluations.
x, y : float, array_like
Parameter coordinates.
x_range, y_range : sequence or None
Parameter range as a sequence of length 2 (default is `None`).
estimate : {'median', 'maximum', None}, optional
Parameter estimate type, if any (default is `None`).
x_precision, y_precision : int or None, optional
(Deprecated) Parameter precision as a number of decimal places
(default is `None`).
cmap : str or None, optional
Principal colour map (default is `None`).
alpha : str or None, optional
Principal alpha transparency (default is `None`).
fig : :class:`matplotlib.figure.Figure` *or None*, optional
Existing figure object to plot on.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
Plotted figure object.
x_estimate, y_estimate : tuple of float or None
        Parameter estimates with lower and upper uncertainties. `None`
returned if `estimate` is `None`.
patch : :class:`matplotlib.patches.Rectangle`
A colour patch to be used in the legend.
"""
if x_precision is not None or y_precision is not None:
warnings.warn(
"`x_precision` and `y_precision` are deprecated "
"and have no effect.",
DeprecationWarning
)
# Set up the plottable grid.
if x_range:
x_selector = slice(
np.argmin(np.abs(x - x_range[0])),
np.argmin(np.abs(x - x_range[1])) + 1
)
else:
x_selector = slice(None)
if y_range:
y_selector = slice(
np.argmin(np.abs(y - y_range[0])),
np.argmin(np.abs(y - y_range[1])) + 1
)
else:
y_selector = slice(None)
x, y = np.asarray(x)[x_selector], np.asarray(y)[y_selector]
xx, yy = np.meshgrid(x, y, indexing='ij')
posterior = np.asarray(posterior)[x_selector, y_selector]
posterior /= simps([simps(xslice, y) for xslice in posterior], x)
# Set up plottable areas.
if fig is None:
fig = plt.figure()
xy_panel = plt.subplot2grid((4, 4), (1, 0), rowspan=3, colspan=3)
x_panel = plt.subplot2grid((4, 4), (0, 0), colspan=3, sharex=xy_panel)
y_panel = plt.subplot2grid((4, 4), (1, 3), rowspan=3, sharey=xy_panel)
else:
xy_panel, x_panel, y_panel = fig.axes
# Locate posterior contours.
h_flat = np.flip(np.sort(posterior.flatten()))
cum_h = np.cumsum(h_flat)
cum_h /= cum_h[-1]
h_levels = np.zeros_like(SIGMA_LEVELS)
for n_sigma, sigma_level in enumerate(SIGMA_LEVELS):
try:
h_levels[n_sigma] = h_flat[cum_h <= sigma_level][-1]
except IndexError:
h_levels[n_sigma] = h_flat[0]
# Plot posterior contours.
try:
contour = xy_panel.contourf(
xx, yy, posterior, h_levels, antialiased=True,
cmap=cmap, alpha=alpha, zorder=2
)
primary_colour = contour.cmap(contour.cmap.N)
patch = plt.Rectangle(
(0., 0.), 2., 1., ec=None, ls=linestyle,
fc=contour.collections[-1].get_facecolor()[0]
)
except ValueError as error:
if str(error) == "Contour levels must be increasing":
raise ValueError(
"Cannot process posterior values into contours."
) from error
raise ValueError from error
xy_panel.contour(
contour, colors=primary_colour, linestyles=linestyle,
alpha=min(2*alpha, 1.) if isinstance(alpha, float) else 1.,
zorder=3
)
# Marginalise to PDFs.
pdf_x = np.asarray([simps(xslice, y) for xslice in posterior])
pdf_y = np.asarray([simps(yslice, x) for yslice in posterior.T])
cdf_x = cumtrapz(pdf_x, x, initial=0.)
cdf_y = cumtrapz(pdf_y, y, initial=0.)
pdf_x /= cdf_x[-1]
pdf_y /= cdf_y[-1]
cdf_x /= cdf_x[-1]
cdf_y /= cdf_y[-1]
# Plot marginal posteriors.
x_panel.plot(x, pdf_x, c=primary_colour, ls=linestyle, zorder=3)
y_panel.plot(pdf_y, y, c=primary_colour, ls=linestyle, zorder=3)
# Make estimates.
if estimate:
# Determine closest parameter estimate indices.
if estimate == 'maximum':
x_fit_idx, y_fit_idx = np.argmax(pdf_x), np.argmax(pdf_y)
elif estimate == 'median':
x_fit_idx = np.argmin(np.abs(cdf_x - 1./2.))
y_fit_idx = np.argmin(np.abs(cdf_y - 1./2.))
x_lower_idx = np.argmin(np.abs(cdf_x - ONE_SIGMA_QUANTILES[0]))
y_lower_idx = np.argmin(np.abs(cdf_y - ONE_SIGMA_QUANTILES[0]))
x_upper_idx = np.argmin(np.abs(cdf_x - ONE_SIGMA_QUANTILES[-1]))
y_upper_idx = np.argmin(np.abs(cdf_y - ONE_SIGMA_QUANTILES[-1]))
        # Determine closest parameter estimates.
x_fit, x_lower, x_upper = x[[x_fit_idx, x_lower_idx, x_upper_idx]]
y_fit, y_lower, y_upper = y[[y_fit_idx, y_lower_idx, y_upper_idx]]
x_estimate = x_fit, x_lower, x_upper
y_estimate = y_fit, y_lower, y_upper
# Plot estimates.
x_panel.fill_between(
x[x_lower_idx:(x_upper_idx + 1)],
pdf_x[x_lower_idx:(x_upper_idx + 1)],
antialiased=True, facecolor=[primary_colour], edgecolor='none',
alpha=AREA_FILL_ALPHA, zorder=2
)
y_panel.fill_betweenx(
y[y_lower_idx:(y_upper_idx + 1)],
pdf_y[y_lower_idx:(y_upper_idx + 1)],
antialiased=True, facecolor=[primary_colour], edgecolor='none',
alpha=AREA_FILL_ALPHA, zorder=2
)
else:
x_estimate, y_estimate = None, None
return fig, x_estimate, y_estimate, patch
def plot_2d_constraints(chains, bins=None, smooth=None,
range_x=None, range_y=None, label_x='', label_y='',
estimate='median', precision_x=None, precision_y=None,
truth_x=None, truth_y=None, fig=None, figsize=None,
label=None, cmap=None, linestyle=None, alpha=None,
show_estimates=True):
"""Plot 2-d parameter constraints from sample chains.
Parameters
----------
chains : float :class:`numpy.ndarray`
Parameter chains.
bins : int, array_like or None, optional
Number of bins for (both) parameters (default is `None`).
smooth : float or None, optional
The standard deviation for Gaussian kernel passed to
`scipy.ndimage.gaussian_filter` to smooth the posterior grid.
If `None` (default), no smoothing is applied.
range_x, range_y : tuple, optional
        Renormalisation range for the parameters (default is `None`).
label_x, label_y : str, optional
Parameter name as a TeX string (default is '').
estimate : {'maximum', 'median', None}, optional
Parameter estimate type (default is 'median').
precision_x, precision_y : int or None, optional
(Deprecated) Precision for the parameter estimate as a number of
decimal places (default is `None`).
truth_x, truth_y : float or None, optional
Truth value for the parameter (default is `None`).
fig : :class:`matplotlib.figure.Figure` *or None, optional*
Any existing figures to plot on (default is `None`).
figsize : tuple of float or None, optional
Figure size in inches (default is `None`).
label : (sequence of) str or None, optional
Label for the parameter constraint (default is `None`).
cmap : :class:`matplotlib.ScalarMappable` or None, optional
Colour map for constraint contours (default is `None`).
linestyle : str or None, optional
Linestyle for the contour. See
:class:`matplotlib.axes.Axes.contour`.
alpha : float or None, optional
Transparency value for constraint contours (default is `None`).
show_estimates : bool, optional
If `True`, display the estimates if available.
Returns
-------
fig : :class:`matplotlib.figure.Figure`
Any existing figures to plot on.
x_estimate, y_estimate : list of tuple
        Parameter estimate with lower and upper uncertainties for each
        set of likelihood values.
"""
posterior, x, y = convert_chains_to_grid(
chains, bins=bins, smooth=smooth, range_x=range_x, range_y=range_y
)
# Set up plottable areas.
# pylint: disable=global-statement
global legend_state
if fig is None:
fig = plt.figure("2-d constraint", figsize=figsize or (5.5, 5.5))
canvas = plt.subplot2grid((4, 4), (1, 0), rowspan=3, colspan=3)
top_panel = plt.subplot2grid((4, 4), (0, 0), colspan=3, sharex=canvas)
side_panel = plt.subplot2grid((4, 4), (1, 3), rowspan=3, sharey=canvas)
legend_state = ([], [])
else:
canvas, top_panel, side_panel = fig.axes
try:
cmap = ListedColormap(sns.color_palette(cmap))
except (TypeError, ValueError):
pass
range_x = range_x or [np.min(x), np.max(x)]
range_y = range_y or [np.min(y), np.max(y)]
# Fill in plottable areas.
fig, x_estimate, y_estimate, patch = plot_2d_contours(
posterior, x, y, fig=fig, cmap=cmap, linestyle=linestyle, alpha=alpha,
estimate=estimate, x_precision=precision_x, y_precision=precision_y,
x_range=range_x, y_range=range_y
)
legend_state[0].insert(0, patch)
legend_state[1].insert(0, label)
if truth_x is not None:
canvas.axvline(truth_x, c='k', ls='--', zorder=3)
if truth_y is not None:
canvas.axhline(truth_y, c='k', ls='--', zorder=3)
# Adjust plottable areas.
canvas.legend(*legend_state, handlelength=1.6)
canvas.set_xlim(max(np.min(x), range_x[0]), min(np.max(x), range_x[-1]))
canvas.set_ylim(max(np.min(y), range_y[0]), min(np.max(y), range_y[-1]))
canvas.axes.tick_params(axis='x', which='both', direction='in', top=True)
canvas.axes.tick_params(axis='y', which='both', direction='in', right=True)
canvas.xaxis.set_minor_locator(AutoMinorLocator())
canvas.yaxis.set_minor_locator(AutoMinorLocator())
canvas.set_xlabel(r'${}$'.format(label_x), labelpad=8)
canvas.set_ylabel(r'${}$'.format(label_y), labelpad=8)
if show_estimates:
top_panel.legend(
bbox_to_anchor=[1.25, 0.775], loc='center',
handlelength=1.25, labelspacing=0.
)
top_panel.autoscale(axis='y')
top_panel.set_ylim(bottom=0)
top_panel.axes.tick_params(
axis='x', which='both', top=False, bottom=False, labelbottom=False
)
top_panel.axes.tick_params(
axis='y', which='both', left=False, right=False, labelleft=False
)
top_panel.spines['top'].set_visible(False)
top_panel.spines['left'].set_visible(False)
top_panel.spines['right'].set_visible(False)
if show_estimates:
side_panel.legend(
bbox_to_anchor=[0.75, 1.075],
loc='center', handlelength=1.25, labelspacing=0.
)
side_panel.autoscale(axis='x')
side_panel.set_xlim(left=0)
side_panel.axes.tick_params(
axis='x', which='both', top=False, bottom=False, labelbottom=False
)
side_panel.axes.tick_params(
axis='y', which='both', left=False, right=False, labelleft=False
)
side_panel.spines['top'].set_visible(False)
side_panel.spines['bottom'].set_visible(False)
side_panel.spines['right'].set_visible(False)
return fig, x_estimate, y_estimate
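# Illustrative usage (not part of the original module): plot a 2-d constraint
# from mock Gaussian chains.
#
#     chains = np.random.multivariate_normal(
#         [0., 0.], [[1., 0.5], [0.5, 1.]], size=10000
#     )
#     fig, x_est, y_est = plot_2d_constraints(
#         chains, bins=50, smooth=1., label_x='\\alpha', label_y='\\beta'
#     )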
|
MikeSWangREPO_NAMEHorizonGRoundPATH_START.@HorizonGRound_extracted@HorizonGRound-master@application@pipeline@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/smith/imaginaryaxis/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._visible import VisibleValidator
from ._tickwidth import TickwidthValidator
from ._tickvalssrc import TickvalssrcValidator
from ._tickvals import TickvalsValidator
from ._ticksuffix import TicksuffixValidator
from ._ticks import TicksValidator
from ._tickprefix import TickprefixValidator
from ._ticklen import TicklenValidator
from ._tickformat import TickformatValidator
from ._tickfont import TickfontValidator
from ._tickcolor import TickcolorValidator
from ._showticksuffix import ShowticksuffixValidator
from ._showtickprefix import ShowtickprefixValidator
from ._showticklabels import ShowticklabelsValidator
from ._showline import ShowlineValidator
from ._showgrid import ShowgridValidator
from ._linewidth import LinewidthValidator
from ._linecolor import LinecolorValidator
from ._layer import LayerValidator
from ._labelalias import LabelaliasValidator
from ._hoverformat import HoverformatValidator
from ._gridwidth import GridwidthValidator
from ._griddash import GriddashValidator
from ._gridcolor import GridcolorValidator
from ._color import ColorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._visible.VisibleValidator",
"._tickwidth.TickwidthValidator",
"._tickvalssrc.TickvalssrcValidator",
"._tickvals.TickvalsValidator",
"._ticksuffix.TicksuffixValidator",
"._ticks.TicksValidator",
"._tickprefix.TickprefixValidator",
"._ticklen.TicklenValidator",
"._tickformat.TickformatValidator",
"._tickfont.TickfontValidator",
"._tickcolor.TickcolorValidator",
"._showticksuffix.ShowticksuffixValidator",
"._showtickprefix.ShowtickprefixValidator",
"._showticklabels.ShowticklabelsValidator",
"._showline.ShowlineValidator",
"._showgrid.ShowgridValidator",
"._linewidth.LinewidthValidator",
"._linecolor.LinecolorValidator",
"._layer.LayerValidator",
"._labelalias.LabelaliasValidator",
"._hoverformat.HoverformatValidator",
"._gridwidth.GridwidthValidator",
"._griddash.GriddashValidator",
"._gridcolor.GridcolorValidator",
"._color.ColorValidator",
],
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@smith@imaginaryaxis@[email protected]_END.py
|
{
"filename": "_ygap.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/heatmap/_ygap.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YgapValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="ygap", parent_name="heatmap", **kwargs):
super(YgapValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@heatmap@[email protected]_END.py
|
{
"filename": "mock_primitive.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/recipe_system/adcc/client/qap_specviewer/mock_primitive.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
Mock plotSpectraForQA primitive.
This module emulates a pipeline that reproduces how the plotSpectraForQA primitive would
behave in real time.
In practice, it creates JSON data and sends it to the ADCC Server in a timed loop.
"""
import json
import time
import urllib.error
import urllib.request
import numpy as np
from scipy import ndimage
URL = "http://localhost:8777/spec_report"
def main():
"""
Main function.
"""
np.random.seed(0)
args = _parse_arguments()
url = args.url
n_frames = args.n_frames
n_apertures = args.n_apertures
sleep_between_frames = args.sleep_time
data_size = 4000
snr = 10.
pixel_scale = 0.1614
wavelength_min = 300.
wavelength_max = 800.
wavelength_units = "nm"
file_index = 1
program_index = 1
while True:
obj_max_weight = 1000.
obj_continnum = 300. + 0.01 * np.arange(data_size)
noise_level = obj_continnum / snr
wavelength = np.linspace(wavelength_min, wavelength_max, data_size)
dispersion = np.mean(np.diff(wavelength))
data = [create_1d_spectrum(data_size, 20, obj_max_weight) + obj_continnum * i for i in range(n_apertures)]
center = np.random.randint(100, 900, size=n_apertures+1)
lower = np.random.randint(-15, -1, size=n_apertures+1)
upper = np.random.randint(1, 15, size=n_apertures+1)
year = 2020
today = 20200131
program_id = "GX-{}C-Q-{:03d}".format(year, program_index)
group_index = 1
group_id = "{:s}-{:02d}".format(program_id, group_index)
for frame_index in range(n_frames):
data_label = "{:s}-{:03d}".format(group_id, frame_index + 1)
filename = "X{}S{:03d}_frame.fits".format(today, file_index)
if frame_index == 0:
stack_data_label = "{:s}-{:03d}_stack".format(group_id, frame_index + 1)
stack_filename = "X{}S{:03d}_stack.fits".format(today, file_index)
def aperture_generator(i):
delta_center = (np.random.rand() - 0.5) * 0.9 / pixel_scale
center[i] = center[i] + delta_center
center[i] = np.round(center[i])
_data = data[i]
_error = np.random.poisson(_data) + noise_level * (np.random.rand(_data.size) - 0.5)
_aperture = ApertureModel(
int(center[i]), int(lower[i]), int(upper[i]), wavelength_units, dispersion,
wavelength, _data, _error)
return _aperture.__dict__
def stack_aperture_generator(i):
delta_center = (np.random.rand() - 0.5) * 0.9 / pixel_scale
center[i] = center[i] + delta_center
_data = data[i]
_error = np.random.rand(_data.size) - 0.5
_error *= noise_level / (frame_index + 1)
_error += np.random.poisson(_data)
_aperture = ApertureModel(
int(center[i]), int(lower[i]), int(upper[i]), wavelength_units, dispersion,
wavelength, _data, _error)
return _aperture.__dict__
n = np.random.randint(n_apertures-1, n_apertures+1)
apertures = [aperture_generator(i) for i in range(n)]
n = np.random.randint(n_apertures - 1, n_apertures + 1)
stack_apertures = [stack_aperture_generator(i) for i in range(n)]
frame = SpecPackModel(
data_label=data_label,
group_id=group_id,
filename=filename,
is_stack=False,
pixel_scale=pixel_scale,
program_id=program_id,
stack_size=1,
apertures=apertures)
stack = SpecPackModel(
data_label=stack_data_label,
group_id=group_id,
filename=stack_filename,
is_stack=True,
pixel_scale=pixel_scale,
program_id=program_id,
stack_size=frame_index + 1,
apertures=stack_apertures)
json_list = [frame.__dict__, stack.__dict__]
json_data = json.dumps(json_list).encode("utf-8")
print("\n Created JSON for single frame with: ")
print(" Program ID: {}".format(program_id))
print(" Group-id: {}".format(group_id))
print(" Data-label: {}".format(data_label))
print(" Filename: {}".format(filename))
print(" Apertures: {}".format(center))
print(" Performing request...")
try:
post_request = urllib.request.Request(url)
post_request.add_header("Content-Type", "application/json")
postr = urllib.request.urlopen(post_request, json_data)
postr.read()
postr.close()
except urllib.error.URLError:
import sys
print("\n Error trying to open URL: {}".format(url))
print(" Please, check that the server is running "
"and run again.\n")
sys.exit()
print(" Done.")
print(" Sleeping for {} seconds ...".format(sleep_between_frames))
time.sleep(sleep_between_frames)
file_index += 1
program_index += 1
group_index += 1
def _parse_arguments():
"""
Parses arguments received from the command line.
Returns
-------
namespace
all the default and customized options parsed from the command line.
"""
import argparse
parser = argparse.ArgumentParser(
description="A script that simulates a pipeline running the "
"plotSpectraForQA and posting JSON data to the "
"ADCC server.")
parser.add_argument(
'-a', '--apertures',
default=3,
dest="n_apertures",
        help="Number of apertures for each frame",
type=int,
)
parser.add_argument(
'-f', '--frames',
default=3,
dest="n_frames",
help="Number of frames for each Group ID.",
type=int,
)
parser.add_argument(
'-u', '--url',
default=URL,
help="URL of the ADCC server (e.g.: http://localhost:8777/spec_report)",
type=str,
)
parser.add_argument(
'-s', '--sleep',
default=10.,
dest="sleep_time",
help="Sleep time between post requests",
type=float,
)
return parser.parse_args()
def create_1d_spectrum(width, n_lines, max_weight):
"""
Generates a 1D NDArray that simulates a random spectrum.
Parameters
----------
width : int
Number of array elements.
n_lines : int
Number of artificial lines.
max_weight : float
Maximum weight (or flux, or intensity) of the lines.
Returns
-------
sky_1d_spectrum : numpy.ndarray
"""
lines = np.random.randint(low=0, high=width, size=n_lines)
weights = max_weight * np.random.random(size=n_lines)
spectrum = np.zeros(width)
spectrum[lines] = weights
spectrum = ndimage.gaussian_filter1d(spectrum, 5)
return spectrum
class ApertureModel:
def __init__(self, center, lower, upper, wavelength_units, dispersion, wavelength, intensity, error):
wavelength = np.round(wavelength, 3)
intensity = np.round(intensity)
error = np.round(error)
self.center = center
self.lower = lower
self.upper = upper
self.dispersion = dispersion
self.wavelength_units = wavelength_units
self.intensity = [[w, int(d)] for w, d in zip(wavelength, intensity)]
self.stddev = [[w, int(d)] for w, d in zip(wavelength, error)]
class SpecPackModel:
def __init__(self, data_label, group_id, filename, is_stack, pixel_scale, program_id, stack_size, apertures):
self.data_label = data_label
self.group_id = group_id
self.filename = filename
self.msgtype = "specjson"
self.is_stack = is_stack
self.pixel_scale = pixel_scale
self.program_id = program_id
self.stack_size = stack_size
self.timestamp = time.time()
self.apertures = apertures
if __name__ == '__main__':
main()
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@recipe_system@adcc@client@qap_specviewer@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "seeh/surprise",
"repo_path": "surprise_extracted/surprise-master/surprise/__init__.py",
"type": "Python"
}
|
__author__ = 'Sebastian Seehars'
__email__ = '[email protected]'
__version__ = '0.1.0'
__credits__ = 'ETH Zurich, Institute for Astronomy'
#for py27+py33 compatibility
try:
from surprise import Surprise
except ImportError:
from surprise.surprise import Surprise
|
seehREPO_NAMEsurprisePATH_START.@surprise_extracted@surprise-master@surprise@[email protected]_END.py
|
{
"filename": "fast_ffts.py",
"repo_name": "pscicluna/precision",
"repo_path": "precision_extracted/precision-master/image_registration/fft_tools/fast_ffts.py",
"type": "Python"
}
|
######################################################################
#
# This file is from Adam Ginsburg's image_registration
# library (originally released under the MIT license) and
# is used to provide precise image registration. At
# some point, I may replace it with a more elegant
# solution than simply copying the files into the repo
# but we're not there yet. Any copyright pertaining
# to this file remains with Adam Ginsburg.
#
# P.S. 2016 01 14
#
######################################################################
import numpy as np
import warnings
try:
import fftw3
has_fftw = True
def fftwn(array, nthreads=1):
array = array.astype('complex').copy()
outarray = array.copy()
fft_forward = fftw3.Plan(array, outarray, direction='forward',
flags=['estimate'], nthreads=nthreads)
fft_forward.execute()
return outarray
def ifftwn(array, nthreads=1):
array = array.astype('complex').copy()
outarray = array.copy()
fft_backward = fftw3.Plan(array, outarray, direction='backward',
flags=['estimate'], nthreads=nthreads)
fft_backward.execute()
return outarray / np.size(array)
except ImportError:
fftn = np.fft.fftn
ifftn = np.fft.ifftn
has_fftw = False
# I performed some fft speed tests and found that scipy is slower than numpy
# http://code.google.com/p/agpy/source/browse/trunk/tests/test_ffts.py However,
# the speed varied on machines - YMMV. If someone finds that scipy's fft is
# faster, we should add that as an option here... not sure how exactly
def get_ffts(nthreads=1, use_numpy_fft=not has_fftw):
"""
Returns fftn,ifftn using either numpy's fft or fftw
"""
if has_fftw and not use_numpy_fft:
def fftn(*args, **kwargs):
return fftwn(*args, nthreads=nthreads, **kwargs)
def ifftn(*args, **kwargs):
return ifftwn(*args, nthreads=nthreads, **kwargs)
elif use_numpy_fft:
fftn = np.fft.fftn
ifftn = np.fft.ifftn
else:
# yes, this is redundant, but I feel like there could be a third option...
fftn = np.fft.fftn
ifftn = np.fft.ifftn
return fftn, ifftn
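# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Hypothetical helper demonstrating the numpy fallback path of get_ffts(): the
# returned pair should round-trip a real-valued array up to numerical noise.
def _demo_get_ffts():
    fftn_func, ifftn_func = get_ffts(nthreads=1, use_numpy_fft=True)
    data = np.random.random((8, 8))
    roundtrip = ifftn_func(fftn_func(data))
    assert np.allclose(roundtrip.real, data)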
|
psciclunaREPO_NAMEprecisionPATH_START.@precision_extracted@precision-master@image_registration@fft_tools@[email protected]_END.py
|
{
"filename": "lg3_envs.py",
"repo_name": "mit-ll/spacegym-kspdg",
"repo_path": "spacegym-kspdg_extracted/spacegym-kspdg-main/src/kspdg/private_src/python3_12/Darwin_x86_64/kspdg_envs/lbg1/lg3_envs.py",
"type": "Python"
}
|
# Pyarmor 8.5.11 (trial), 000000, non-profits, 2024-12-09T10:19:37.718552
from kspdg.private_src.python3_12.Darwin_x86_64.pyarmor_runtime_000000 import __pyarmor__
__pyarmor__(__name__, __file__, b'PY000000\x00\x03\x0c\x00\xcb\r\r\n\x80\x00\x01\x00\x08\x00\x00\x00\x04\x00\x00\x00@\x00\x00\x00&\x7f\x00\x00\x12\t\x04\x00y3\x95\x8b\x97\x94\xc9Yk\xd6\xeak\xeb\xe9\xed\x84\x00\x00\x00\x00\x00\x00\x00\x00\xdb\x946\xd03x\xb2]Hp\x96\xfc\xa2\x03\x9a\xdc(\x02H\'\xfeH\x04\xa8cz>\xac%^\xc0%;\x11\xad\x1a\xe9\'\xf0zd\x14\xb4&\xa4"\xa2\x99\x84@\xe0\x19\x1e\x1e\x9c\xea\xb5)B\xd0&)\x8dm0A\x80\x9f\x961Uq\xe9~\x81\xed|;\x18\xebBC\x06\xec}\xce\xa5\xf4\xe4\x87gPH\x04\xc4\xee/\x1d\xe8\xf1\xfb& \xf5\xcf\xce\x8fe#\xb9#7\x8c\x87i\xa9\xba\xf8\xf5IAIm\x183\x13^^\xea\x86e\x14\x9b\xbb{\xb2P)\xc4\x8f\x94\x11cb\xa3\xe7.by\x883\xfd\x83R\x1aoJ\xfeD\xb1\xf0\xce\x87C\xe2\xd4\xd0\xaa\xe3\xad\xd9\x98\x16\x1a\xedw^\xcc\x05\x1e \xb0\x9e\xeam\x00\xf0\xe5\x7f[\x13-\xc42\xbe:q\xfeC\x89\xc9\x93\xc11\xbd&"\xcc\xad\xc8\xc8\x19r\x03\xe32s\x15\xc0\xbb\x03Z3\x9a\\rj\xb7IQC\x8f\x99\xfb\xfc\xb0[\x9b?\x19\x9fC<\x13\xce\x8c\xb9\x96\x93\xdb`&,I\xdc\x84\xbe\x80\x89\xfb\x88\xf73\xa7\xae7;\xea\x11d\x89,\x9a\xab%\xaeD\x85\xa6\x01\xf6\x9b\x9c\x90,\x8fs$\x026<\xdfj\xc5_\xc6\xb1\x0b\xb3!\xbd\x98w\xb5\x13\x00[n\x0fJ\xedvz\x82\xf4\x98C\x12\xd6\x1f\x99XQ\xe9|9 \x94\x94\x8c9\xca\xc9p\xba\x8a<\r\x16\x90{O&h4ad1b\x8f=\x00.\\\xe3\x1f\x1e\xfc2\x87\xb0nZ\xe1\xd3\xff\x98N4\xd2P\x02\x98z\xd1q(\xcdWT\x95\x93SG\x11\x1aN\xb2D\xa1\x1c\xd16&\x1c\xac\xc6\xbc\x07\r\xdaV\x835h\x05\xc2{Y\x94n<\xe4\xb8\x0c\x9e\x90\xea]k\xc6v\xe3\xd5O\x11\x94\xe8\x9dHJV\x11\xd4P\xb3\x9b\x92\xb8\xe7\x05\xbf\xbbe\x87x\xab\xfd\xd7\x9b\xaes\x13\xb5\x93@\x15\xe9\xc8\xf0\x90,\\\xdf\xed\xfbS\x0c\xd8\xec\x08h\xdcQ\xcc\xc2e\x82\xef\xe5\\\xdcD0\xbe\xa6V\xaf\xc8!$l\xe4w\x1c\x893W I\xc3\x96\xac\x88\xb9\xd2`\x03sW;\x13\xfb\x04\x95\xcfs\x99\xa9\xb1??\xe0\x17\x94/-\xe7\xb4P\x86WMcf\r\x9f\xdf\x89_w\xc4\'\x7f\x94M-U\xcc\x94\xcas\xfaW(V\xb3\xf0\xd82LF\xfb%\x80\xe7\xcb\xfd\xd9\x106\x95|\x04\xa97\xdc\xf1\x15|\xa4Z>$\xe0*\x00t\x02\xfcj\xbc\x86\x8dX\xe2\x15\xdd\xf2H\xdd\x1b\x86G\xab\x8f\xad\xd9Bi\x06\xbd\xbf\xe9\x92\xb3>\xdc\x90VZ\xbd\x85\x9f\xed\xf7\xaar\xda\x9a\xeal\x07\xb2\xa0\xf7EG\x02p-\x01\x1dbAK`\xe5\x08I\xa6}z\xd9\x08M\x1c\xb4\xb3\xa9\xe1\xb8t\xadfc\x98q\x92\xdcA\xb2\x92\x06\xc0\x92#\xb2{%|%p\\\xdb\x898\xf0\xb7~\x93\x00\xfa6\xa6\xee\xdav\xbf}\xf8sM\x1b\x93a\xe5+i\xc0\x9f\xa5,\n\x8f\xfe\x8f\xe0(4j\xf0\x0eR4g\x9f\xa9\xbdG\xf0\xdc\xf0Q\xd0;\xc9K2.Y\xa1\xd4\xf7@+\x05\xf4)\xdf\x9c\xacl\xb8,<d}\x9dP<\xce`\x97]M\xedE\x84\n\xaa\xa2OyS\xdf#\xb5g\xe90c\xc47\xc3\xa2\x07\x9ec\x9e\x9e\x1a\x8f\xc1W\xf4!\x81\x07a\x00\xd0\xe6\x07\xac\xe8\xebT\x84\x8b\xa0\xba\xf6k\xe8\x82\x03\xf7\xe9\x00RG\xaa\xb7\x9e\xe6j\xd4\xef\x92E\xffE\xe3\x0e\xc8k\xfeD\xec\xd9\xafa\x8dA \xd4v\x98\x07\x99\xf4D4\x18,<\xbc\x8cz\xeam\xf0\xd8-\x19=%/\xb3\x00\xaf\x11N\xf3\x9d\xfc\x14\xb4\xf3m(\x7f\xc9\x82\x90\xdbc\xc4\xd5OR\xce3w\xf0\x0c\x1cv\xb3R\xdaQ\xb5r\x90\xfbT\xe9t\xa5:1\x04C\xd77\xa69<\xf0\xf6\x93\xa5v\xcc\x0f\x17:\xa20p\x8b\xeb#:\xfbgq\xb6\xd3\xee\x87\xf9S\x8e\x87\x8fEp\x99x\x88\xaa\xf0\xa92\x11$A\xc2\x9d\xb6\xe0J\x9a"t\\\xce\xa4;\xa3a8\x05b|vP\xb4>/\x0e\x8c\x12\xd7KYu[\xf1D%\xbc\rl8\x15\'\x8d/(.i]ht\x94\xdc\xd07W\xbc=u^\xca@\x8e\xa1\xb1\x81\xc0\x04\x80\x8e\xc6\xeb{J\xbb|8F\xb2\xd4\xd94\x1e\r"\xdd\xeb\xd7\xec\x83\xa2\xb7\xd5$\xc1\xe7>\x11\xf3\xa8\xbfi7C\x8e\xe73\xec\xee\xa7d\xd4\xab\xc4L\x96\xd6\xce\x93\x16\xe504\x7fWd\xce\xd3~n\x08\xa5\xd7\x1a\xf4\xe9N\x8d*_&~P\x89\xad\x0e&\xbf\x1cT\x9f 
^\xca\xaeB\xc4\xc0\xf6\x92\xb2$\xce\xe4L\x0c\\I\xae\xf6\x18\xe7/\xb3\xf0\x1e\x15\xeb\x0c\x99c\xf1\x8d\x7f\x9d\xc6\x98\xa8\xcf\xd5m\xc2J\x1f\x18H\x12i\xdcLO\xea\x8f\x85\x072\xfcq\x8add\xcb\xd3|\xcdl\xd7\xbb`\'l[\x0b\x8f\xf7\xe7e\\\x9b\xca\x0e\xe9\x9c\xf9}\x85;\xc1z\xe7\xe4k\xe2?6WM\xe6\x13\xc1\xa7\x0c\xcb\x06\xdb \x9b\x12^z\xb2W\xaaq/\xce$\xde\x87\xb6\xd7\xc9\x0e\xbb,\xaa\xfc\xd0jz_J\x91zr\xaa[\xfe\x97|\x1cd\xfeT\xea\x8c\x81\xee\x97R7_\xa8g\x86_f\x9e\xf9H\xdcG\xfb\x83c\x07re\xa6\xe9\x7f<\x14\xe2\x0e\xbe\xbc\xc5\x83\xc1\xcf\x8d\xd8\x97\xc58\x0cD\x95!9\x8e\x08V\xbcX\xe0\xccVH\xa8M\xe7\x016\xf5\x8d\xb7\x9f\xa7\x1c\x0e\x86\x9d\\@4\xef\'\xb2\xd8\xc7\' \xed\x97\x05r,#\xde\xda\x0bF\x05\xd7-\x8a\xe6\xad\xf0\x8c\x1e`eV\xf7\x8e\x93\x11WT\x15?K|\x9e\x1bA\xf2\xb7\xb6RT\xe7L\x93?\xbfF\x92\xb1\xc8\xb2\x84\x00\xf4*5\xd3hc\xe7K\xe5,EN\x02\xc3b\xc5}Plc\xba\x96\xc1\xf2u4n\xe2\xf5pm\xfd\x8f\x98I\x00w\xb9\xdez\x95\xe7,\x9c1aH\xe7\x91(\xe7\x98\xd2\x04\x81TN\x93\xd4\x88\x8d7\'c\xcc\x80K\xc1-\x9b\x17^Nj\x7f\x1e\x88V\xe5\x7f\x82\x16G:@\xfdI2\xa1\x07\xf4\xbe\x01\x8d\\4\xad,\x9c\x9cG\xc4\x96b\x8cu\xa3\xcd\x82\x01\xa5\x96\xaa\x11D\x92g\x16\xebc\x95\xd2\x1a\xf8_\xd2\xca\x03N\x9f\xb6kQ\xd6EE\x9a\x99\xc4ae"\x9a\xba\xc6*\'\xd5]\xdc\x15cs\xd7Z\xa8q\x1aB\x01\xfc\x04;\x02gc\xf6\xdd\xc4\xdb\xe2\x7f\xd2l/\x8b\xcf\x98\x07\x84\xe4\xe6\xaa\xbb\x0f\xa6]\x1e5\x8a"\xd0G\xe9\xdc\x1e\xe1\xaa\xd2\x908\xc82\xce\xfe\x88\xf03]l=\xf6\xbf\x024pE\xf7\xccC\xe8\xd5\xaf\x86\xd8\xc1\xc1\xbb\xf9e\x94Den\x9e&\x82\xa5T\xc7\x13\x08\x9bI9\x13\xd0\x00\xfd\xa1IcR\xac/\xfc\x85\x8bk\xf4\x8ctD\x9c\x0f~\xd6$\xd3\xb1\xf9\x80\xaf\xf2\xf6)@\xa2\x9c\xef!\x91\x0e\xc0\xfb\xc7\x06B\xa1\x167\xf8ux\xc2\xa33D^\x8eo8\xd3\x91\xd4)\xfc\xc3\x9e\x99X\r\xe6\xd9"B\\\xccH\x11\x15\xcf\xecN\x81\n\xbch\xa5\x8a\xceQ/\xfcq5\xb2MS\xa3g\x80\x923M\x03G\'\x96_\xd0\xbc]1]Fe\x04k\x9d\x05\x0f\xc6\x95\x8bW&\\\x83c \xf7\xc9\x97\x02\x85di\x03\x93\x80)\xa3Vn$\x86m\xea<\x13\xb8\xd2\x88\xa0L\xce\x17\xa8_r\x0e\xc9IK\x88\xfd\xa0\x04\x9ex\xdd.e\xbc\x91[\x00hk\x96>v\xf6\x07\xc1\xae\xaf.^0b4\x17By|\x03\xe9\xe0\t\xd59\xbcm\x11\xcb3{nayu\xc8d,\xfe\xbep\xd4\x9c0mO\xfd\xc3F\xe4\x87k4|^\xa4\x89\x88\xf5\x00L3\xdb\xdf\xd7T\xb5;y\xd3\xdf\x9cl\xe1mO\xc0\xc5\xca\x1f$\x17\xb3\x92\xa3C\x0e)q\xd4\xa4>y\xa9\x12 4\t1\x05\xad\xa1\xbb>\x01"=\x136I\xad\xafx\xb6?\x12\xa6V\x86\x16\xdc\x94Q\x9f\xe6%{\x99\x9bU\x18\xdcX\x0f\x9c1\xe9\xd8}N18\xd6E\xa7\xc7\xa8\xf2J\x8ain(\xf8\xb0\xf6\x0e\xe2B\xd6[\x15\xf8@CnApV\xf1\x11\xfd\xe8\x18:se\x8f\x1fPE\xe6\xeePuIAM]f\x88\x1a8\xf3\x19h8\x8b\x88P\x06/\xec\xc6\xf0\x80\xcb\xff\n\xd4[\x9a\x06\x13v!\x85k\x14V\xf9\xd1\x96!\x853`\x0b\xf1/\x97\xc8\xc8\x11!\xe4Z\xf2\xee}\xb1s\xc0\xeb4\x83J\xcb\xbc 
\x19\xb9\x9f@\x0c\xd3\xd9\xa3\x97\x00s\xc9\xfe\xe8\xc5\'\x154\x9a\x1eP*\x827\xe9\xd9or-<\x0f\x88\x0c0\xca\xe9\x00\xbaT\xe49\xd9\xf1y\xe6\xca%\x1e>\xbe!\xf0\x87\x1d\xe4\x05\xf2\xa8\xe1b\r\x8c\xfc7\xb0{[\x87\xddoq?p!+R4\xe1\xb2]x,\xbc\xde\xff\x83\\\'\x86\xf2\x9b\xea\xfc^\x14\xdf\x19\xe4b\x08\xe0\xc6\xc7F\x9b\x80\xf9\xd4\x8f\xfe\x91\xc9{b\x18\x8a\x12\xef\x13\x1d\xc7\xc1\xcc11\xfaP\xa0%*;6\xb8\x04P\xefN\x08\x9c\xfa^\x05M\x1b\x98s\xa48\x1dc\xe6\x9d\xde|\x92\xac\xa8hI\'MY\xc1;\xcd\xd8\xfd\xe0c\xf0\xe8\xcamD\xfc\xc9\x070R3\x9e\xa6\n\x05p\xed\xa5=.\xf9YV\xf5V\xa78\xd8f\x99CF\x1c\xe2\xf4\xc4\x16\x1b\xa3\xf6\xed\x1b(U\xaa\x90\xe8\xd4\x92\xa1\xaa\x7f_\xdc\x993\\F~\xdaRY\xba\x89\x0b\xa8J\x95\x0f\xc4/\xc4\x9e\xf5\x07y!\x0f\x02\xeeg$\x7f\xf0O\xa9\x16nA\xbe\xc2\xf2\xf7\xc5\x04\xf7\xf1\x81\xdf\x0f\x80\xf5\x05\xcb-3\x91\xea\xab\xa6\xf5\xe1T\xc4\x9e}\xfe\n(9\xf2AM\x06\xcdQe$\xa5"q~\x83\x1f\x1aSYl\x11.\x15\x8f35|!{\xe3\xe81I\x87\xb4q\x99\xa7&\xf0\xd9\xea\xaf\xd8\x03\xb0\x04\xfe\xcd\x03u\x99MN\xd9\x91\x1f].T\xc5\xaa\xbd\xeat\x94.7j\x0c\x7fN\x86\x0cm%9\xe0\x1c\xfaeU\xe0\xb3N\xdb\x8c\r\x9c\xb2D\x1e\xd2\xb5ly\xacH_\xc4\xfeL\x14\xb0#\xb9-\xc8_\n\x8d\x12\xd5\xa6\x81\x0e\xdf\x98\xd6\x12\xc6\xcb_\xeb\x02\xa3\x1e\xc7\x81\x9c\xbe\xa7*\xd2%R\xeeD\xa0.\xee\x15O\xdd+6\x81)\xaa\x85z\x15\xbc\xd2TM\xa0\x16\xd9\x88y\xab\xcb\x0bR\x85\xa4\xb1\xa5\xfd8\x94\x11l:\xbe\rV\xa9\x99\x18\xe5%{\xb4\xf7\xd2\xaf\n\xa0\xb0\x8f\xc4~l;\xa3$\x8a\xc3\x1aci\x0b\x81\xe4\x81\x07\xdb\xf40\xd5\xc85\xee\xdd\xf4\x82^\x87\x16q\x91\xd5Po\xe2\xff4\x83\x01w\xbc[=G[y\xa5y\xb4\x98\x03\x08{\x00%5\x15pX\xa3\x97G\x15rc\x95Vy\xca\x93\x82\x18\x92\x82Y\xac\x9bV\xe5\x90\xf8\xf0\x1b\xb8Lk\xb1N\x07\x11\x11{\xce\xbc\xd9\t\xac\xc1\x06]e\xfa;mJ\x81h\xec\x1c\xc8\xc1\xd6\xfd\x05\xa2\x9b)\xb4\x97\xa9\xcf\xa8\xc8\x19Z+\xe0\xca\xf5\x8f\xc9a\x88\x0c\x12Y\xba\xdd\x00\xf8\\\xc04\x95\xe8\x7fJ,\xf8m\xea\xdf\x04c\x83R:i\xd60/\xa50k\xb8\xf1`\x9a}\xbew\xd4\xb6NT\x87\xaa\x87\x1c\xbfp\x8c\x08\xb0\x8dV\xa0\x1dPNd\xdej\x91\xb5\xb8\xa7)\x14T_o\xc5\x8c\xcf\xd5_\xc7\xe9oa\xb6\xa4\r\xef\xf6\x84\xee\xeai\x05\x13\xc5\x96\x85.oV\x80\x1c\x7f\xf8\x92\xaa\x93{\xdf/\x9e\xde\xd2\x00\xae\xd4\x86\'\x01\xd43.C\xd10\xfc\xeaK\x0c\x81\xb2y\xfe\xb5\xf1\x1c,9\xad\xe5"\xca\x10\xc1\xd4\xc0\xed\xd4\xce\x9frL\r\xd2H\xb5\xb0&\x8eVhGM\xdb\xebj^\xbc\xa0 
\x0cS\x96\x7flS>s\xea\xc0\xa3!$\x9e=\xa7E\xea\x1f\x1a\xa8bV\xdd\xcb\xd3\xab\xc6\x08M/\xcdo\xba?\n\xb9\xef\x85\xcd#\xc9\'dC\r\x9cNXs\x8a6\x8f\x07\xc9\xac,\xbe\xb1\x02\xcd\xbe\xccx\x0e\xa3\xa2x(o/\xa5_\x92o\xc0Qfp%$\x82\xc6\xfb\xf5\x93\x0e\xd4\x0c\xfd.\x7f\xb9B\xf9A\xfaC!?2\x8e\xeb03J\xdd|\x95\xc4\xdd)\xfb\x95\xc3\xaa\x94WV\xf4J$\xe1\x98\xa2\x8aO\x143\x81\x0fm\x8c>N\xbe\x1b\xab\xcdEjL\xe0\x11\x97V\x9f\xeb\x03\x8b\x00n*\xdfFq\x08\xa5~\xa7\xdb3\x96#\xf9\x904#\x10<\x9aK\xceY\x14\xc8!934\xa4\t\xbc\xc8\xb7G\x05u{\xf5YX_\x16\xffP\xc1\x82\xbe\x05g\xed\x10\xb5\xdf\xa1f\\\x88Vt\xbb\x9b{\xf1v\x7fO\xc8!\xac\xe4\xb2F\xaf\xday\xcd\xf2$\x1b\x97)\x1bi\xfd\x0b>Q\xfc\xfc_\xd0\x8f\xf9\xd2\xba\x17yd(\x93.\xa9\xb7\xaf\xc0k\x9c\xcfG\xe3\xfc\x0f\xa6\x16\xbb\xd9"\x03\xc8\xd3q%\x9e\xceX\x85\xcf\xfdR,\x9a8\x89\xf3\xce\x18\\m\xa1\xdb.\x7f\x83\xa26\xc07\xb7\xe3\xfe\x95M\xaa\x96i.\xfc\xdel\r\x16\xf4\x14\xbe7=\x01"P8]m\xce\x18q~\x03\x12\xc8\x8b\x89\xcc\x85\r\x93\x07H>\xc0\x85\xea\xbcy\x10C\x16\xe7\xf0\r<Pv\x05\xe1\xfdy\x89>{8\x97\xa7r\xf5\xc3i\x96\xc1z\x8e59c<\xf5\xd1\x95\x8cy<\xc5\x87:\x9c\x99\xc8I\n\xec\xaf\xf9\x8boS>\x8aa\x02\x94};\xc09\xec\x82\x03\xed\x01\x81Nv,\xae\xc3K\x90z\xe2\x8b4\\\xb82\xca\x02\x8b\x18\xf9\xa4lVx\xe0v3\x04c\xdcI@\xcd3\x10\xd1yO%v\x90\xee\xe1\xb6\xb1\xe6\xdbj\xc5\xa0j\x0b\x15\x08\x136\xe9\x12\x9b\xd3\xeeQ\xf6\x90]\x0b8n\x83\x7f\xef\x95>\xd9\xc5\x90\x85\xe8\x92eL}g\xd7\x92\x0e\x85l\xf9\xbc\xbd\x9do\xf1-\xb5\xbc\xbe\xd2\xa3&\xa6\x89\x07\xb7\xee\x04 \xf4\xbc@\xd2P\x12\xc0\xef\xd3\x8d\xc0.i#?\xc6M\xfa\xb8qF\x8b\xdfi\xe1t\x84\xaem0\xf3\x01\xd6\x04\xa3\n\n\xc9 \xc7\xbf.6\x81\x8d.wM\x98T$\x99\x8c\x0b\x9bB\x04\x84\x02T\x97)T\xc8\xcb\x1ea\\\xd9\x8c\xa5\x89N6h\x9e\x15/\xa7\x9buD\x07\x99\x84{\x103\xd5\x152\xf01\x96\x17= \xd9\xc8\xb1\xa8\xd5b\xf4\x08R\x0b=\x91\xb8\x1bDR\x8eR\xc5\xbaK|\xc1\t]{\x0f\xd3^\x15\x1bZ\xc4\xb1\xa9\'KjA\x98rbya\xe0\x16~\xe7\xb3t\xad\x1f\x90\x18@\xbes\x94\x11*\xe8\xe1\xd3\xf5\x1f\xa7`\x8fc$\x13\x0b\x12\xa3\xd7\x06\xbc\xb9\xae\xa2\xc8m~\x145\x8a\x11G\xac\xcd\x92/\x0e\x94\xd9#4\x12\x05.T\xf2}\xa3P\xee~y!\x1cE\x0b\xd8\xa9\x89\xa0\x1d\x9a\xe0>$\xe8\xc8\xbc\xba\x10{\x1b\x93y~\x18c\x00\x80\x91|76b#.\xb99\xc6<\xa2\x98\x1d\x8c\x1c\x9fT,\xfd\xf6\x8cp~wI\x1e\xa3\x17j$\x88J\xecU\x8b\x98\x8e!\x82\xce2\xd6\x12\x18\xdf\xb6\xbek\xbc\x1d{\xf2\x1d0\xfe\x80\x9ce\xe9\xd27\xf1\xcav\xfd\x96\xe3\xf4&\x1d\x9e\xd7\x12\xde\xbc)\x81\xc4\'\x19\x1b\x8a\xe9eG\xa2\xff\xe1g\x99\xd0\xc2\xf0\xd9\x02\x85\xb9\xf8\xddl\xe9z\xa7X\x8b\xa5\xb7S\xed\x8ar\x037\xef\xcd\xe9\xa6<\xf5!\x05\xd0\xcc\xba\xf5\xb4MV\xec\xf9H\xe4\x18\xa1\xbf\x03v541\xbd\x02\x1b\xf4\xd1\xbeZ\xd1\xc6dbr\x91\xe9\t\x10\xdc\xe1\xa8\xe7\x03xG\n\x06\xdc\xbc\x140s\x19\xfb$3Y\xda\xa6_\xc9\x7fP64\x11\xf1S\xab\xcf-\xd3\xef\xc2\x04\xfaY\xbe\xc1\xc6v\x8c\x92M\x00\xe4\xd3\x8f\x04\xff\x85\x83\x88=\xf31\xc3\x86\xa9\xa9\xebs]\xf9\x15\xb8\xe3]\xf66\xad_\xa2\xdcN\xe4\x05DY\xf9\xdc\xed\xc8\xfb\xf5\xe2u\xe1\x9b\x98\xd9u\xe5\xf6b\xd5a\xf1J\xcea\x02\x9b8\xd1\x85\x13\x9c\xfb\x18\xac^0\xab+\xa3\r\xf3\x85\xe5\x9f\\\xc2\xda\x88\xc7<\xbd<i\xc8B#f\xce\xb7\x86\x08q\xdap\xb3\xaf\xe3\xc8\xd0F\xb0\x8e\x19\xec\x1d\xfa\x0b%C\x84\x84\xc2\x99\x10Y\xc9\xcbh\x06b\xaeo{\x9d\x10\x93\xee 
\xc4\x85\xe2\x1b46\xa3\xb1\xbaR|P\xbbW\xc6m\xed\xe6x\xaeL\xaf\xcb1yU\xde\x198\xe5\x10\xdf\xaaHQ\xe3G\x8c?b\x15\xa3"\xcag\xfc\xff5\xb4R\xef\xa9\xfa{\x08\xb8\x80\x04\xb0\xe4\xedi\xd8\xbc\xf2)W\x8a\x0b\xfa_>E\xee\x9bA\xe8\xbf\x00g\x95\xdem\r\xf7\xb9\xfe\x8dJ\x84!\xc7\x11\xf9\xa2\xc4!`\xbf\xfae\xd5\'1X\xe16{\xec\x19^\xee\xcf\xe6\xfb_\x06\xd8\xe0^\xdb7\xb6EG\xc6\xce\xcc]\xf4r<}\x07fI\xc3\xac\x8d4ze+\xf0\xe3\xc6\x8e\xda\xd6\xd9!\x8b\xac\x93]\xe9/\x83\x80)\xc4v\x99\xc5T<\xe0\x8e\x1e{\t\x98\xc3\x19\tzW\xf6V\x80\xe7\x95\r\xa8\xaaf\x04\x0b\xeb\x8b.\xd4\xf3(O\xd3\x8b\xcdY\xfbF\xdc\xccD\xd1"\x8f!S\n\xc2\xb4\xfe#}\xcb\xf2\xaekI\xae\xc9\'9\x1b\x88\x0e\xf1\xd8\xb2+<\n\x1d\xf6\xee\xe9|\x89\x1b\x11\x83\xc5\xc9\xfc\xd4\xd8\xd9\xe9\xae\n\xf5\x08\xffcWu\xe8\x9b\xf5\x0fV\x8a"n/_\x10\x85\xdb\xee3\xe8j\xc0X>^\xe4Y\x19\xc1\xb7?\xe2\xbe\xe3\x8c4fcD%\xcb\x8c7\xee\xf5\xca/\x0eQ}\xf8\xd1\xe3\xaet\x8b\t\x9f\x17\xff&\xaa\x03\xef\xdb\xaf\xe0\xc6R\x15\xc5y\xadDj\x17\x8c`;I\xba\xa1t-\x8b\x94\x9d\x9a\xdch\x03\x0e<\xae\x81[\x86\x82\xcc\xbc\x86H\xb1\x0bu\xc4\xcd\xd8\xb8a\xaap_\xb2\x01"\xd4\xab\xc4sz\xc4(Y\xdb\xe2uJ\xc5t\x0fjr!!\x96S\xc7\xe3\xb5n\xa9\x06\xf8\xee\x7fn\xe5\x8a\xdd\xef\x9d \xc5\xba\xee49\x07\x04\xa8\xbd\xff\x83\xe4\xea/\xc9o\x11\xaf"\xc0\x0fv@)(\x10\x8c\x19\x0f|\xb7\xc1\xacJ|pTTba\x88\x8d\xd1\x07EyK(\x1a\x00\xd8\xc7B\xb7\x92\x04k\x1f\xae\xec}\xe8\xf0\xbf.\x13\x14A\x9e_\xf8F\xf3\xf5\x89\x8c\xce$\xd4\xe3\x93\x97\xf3\xe8\x1aT\xc2\xc0\xd1\xf83\x9f\x88~\xc3\xf3\x16\xd7[j\xb9\xf0\xa95\xab\x90\xbc\xaa\t%\xc0=\xb2\x85\xa6\xdfM\xa6\x87+\x98\xc8\x0eI\x81\xdf\x0f\x19^o\x87\x1b\xf8\xf4-l\x00V\xfc\xd6\x1b\xde<e\xca\xc951(\xc0\xb9\x14\nE\xbc\x7f\xfbF\x17B\xcf8\xd4\xf74H\xfb4i[A\xe0B\x94}\xa2\x1c\xabx\xea\\\xf9\xe3\xa5g+mY\xe9\x06;\x91u\xff\\\xc7\xa2z\xd1\xf6\x8fT\xe1\xe6\xb2\xb4e\x02\xc3\x8b\t\xd4N\xf9XJ\xa3\x10\x8a\x04\x13\xe5\xbf\xd8\xb2\xc1\xc7,.\x95\xc4)T\xaeF2\xe3\xc1\x1f\xc8\x1e\xe0\xd5\xe2\xfa\xb2\x16\x1fYQ\xaf\x02\xcfC2\x01\xa6\xa1\xaa\xb7\xc9\xdd\xd9E\x05\xa0\x85?\xa2(\\\x12\xd8\xb4\xc71|\xf7\xa2\xc5~\x0f9\xd1#P\x91\xfb\xdd\x8b\xcf\x9csk\xac\xd0\xcbp\xc9\x19}\xb2\x16\x0b\xc8E\xb1E\x83\x8b\xf3\x92\r\x08\x1c\xd8\xc2\xcf\xc5\xe7j\xb80\x06f`\x0e\xb7\xb2\x17\x8e}\x82\xbf\xc73Bi\xc7\x92\xe1s\xe0~\x96\xb3\xc2\xd5)~\\\x86h(m\xd7_\xca\x8a^\xce\xa5\xfa\x90@\r\xe4\xff\x93(.N\xe7\x8c\xeb\x1b\x93\x1e\x0c_BOCw\x13\xa2{5\x00a\xf7r\x93\xde3J-x\x082t%/ \x97\xe1S\x8f:\xb3/(\x92\'\xe5^\x84\xca\xecL\xd1\xed\xbb\x80\xba\x00\x17\xdc?\xfco\x9cy\x14\xa3\x11\xa5y\xf13\xc8\xa9\n\xd3{\x8a\xe6\x8aA\x89\xe8\xe6\xcb`\xf2\xcf\xd9\xfa\x0c\xa5B\xf8L P\xeb\x8c\xf1\xe5\xb4\xb2\xb8\xcef\xc4u\xbb\xdc\xe7\xaa\n\x9e\xee\xb6s\xd2=V75\x9a;\xa1\xb1p\xfa&\x85\x0e\xd9\ri\xb5/lp\x88i\xaa\xff\x0e]\x95\t\xf5\xa6\xa9\x97W\xbfE\xe8\x7f\xc3Cb\xa5\x8a\x19\xe2vh\xa3g\xf5T*\x82> 
\xc4?\x83\x98\x80!U#\xf46\xfd\x8c\x03z*&\x15i\xa3i\xe7X$\x12i\xc6=\x17s\x88OX\\q\xd9\xc5\xbaC\x7f\xe6\x07\xde\x03d\xff\xc2\xb5q\x12R\xbe\r\xd0\x0f\x11\x86\xc0\xdaAvP\x1b\x11\xe3\x9f\x05\x13\x03\xfb\x86\xdcy\x9d\xdaI\x94\x9a\xfb\xfd\x8d\xc6v\xae(T\xe4>\xdb\xa0\x12\xa8~\x17)U\xa8\x19\xc1\xbfz\x94+w\xb2}\xee*\xbe\xe6\xa4\xc3\xa1)\xe4\x97\x1ds\xd0\x84\xdbV\xc3\r\xde\xbc\x17^\xf8\xcc>\xa9|T\xae\xeb\xd9\xf0\x07\xa6Q\xee\xa9\xef\xb9w\x8d\xbb(\xea\xd6t\xb8\xe9\x8fn\xcd\xdce\xc0l\xb8\xd8O\x81C\xa1M\x83:\xca\xfd\xddt$MY\xefpH1\xcd\xea\x0fA\x87\xddvbCG\xb3<\xea\x07Y\xfa\x90\xab)\nT6\'\xba\xc7sG\xe7N\xd3%\xf01\xc2\xcf\x91\x1ak\x93[\xbf\xdaO\xd9\x86\x95qEro;\xbd)\x04\x19\x99\xbaU\xcbM\x0cV\xef\x12\xe7\xf3f*Fk\xe4w\x98\xe4\x88\x05\x81+}f\xd2\xff;\xd2u\xf3Y!\x03\xb3\xb93\xcd0g\x12\xfc\xbe-\xab\x8b\xcem\x94"\xbd5\x96\xbd\xceMX#\xcf\xbe\xcf\xe3\x9d\x97\x8f\x9a;x-\xa1hV\xd7\x0eV\xec\x11\x1c\xc0\xa0\x07\x9a\x88\xebM\x0c\xe6\xd2M\x02\x9b\\\xc5\x07\x86C\xac)v\xd1\xfe\xcc\xba\xac\xf6{G[\x19\xd3\xa8\x0e\xf49\xe8\x12I\x7f\xdf\t\xf1\xe9\xe7\x06\xc5\xc8\xb1\xbe\xe6\xf0\xaaG\xae\xb8\xb9\x7f/\x89]/^\x8b\xb94\xbfg\xab\xcde\'9`\xd9\xdf\xa9\xc6\xa1\xb7\x91\x05\xe2J|C?.\x0c\x1b+\x8d\xcb\x8c\xcd\xc0\xc3\xb9\xe5\xb9\x7f\x94l(\xff\xd0\xaa\x88#\xed\xb9#\xc4@\xefz/Q\xefy\x8d4\x05\xdb\xea\xc2\xdb\xab%\xc9@\xec\xe9\xf0\x8a\x95}\x9bD\xcb\xc8$\xea\x87\xd7\xc8a\x0f\x9a\x04\x97i\x9f\xdb`\xfa6\xbb\x7f9\x0f\x04\x18d\xace8\xc6\xa3#\xbdj\xee\x06Z\xb19X\xdf\x7fDb\xa8\x15\x059\xb43"B\xeeK\xc4\xca\xca\xec\xf0\x0e\xa7J\x14\x17Q\x02Q4\xa4(\xe2\xa5B\xb0i/\x97(O\x8f\xa5J\x0b]\xe8w\x11O\xc4\x00\xbf\x186\x00{\x8eIC\xe1X\xa2\x90\x059=\xae\x97H\xae\x91R\x18\xac\xc8"\xb0\xd6\x92,\x88<\x82*A\x1c\x89\x81<\xacM\x94[^\xe4\xe3o\x9d`\x1e63\xc6\x94\x99\x83W\x99 >\xc5?b<u\xb9\t\x8c9h\xa8\x05\x16\xdd\xe3:\x8e\x00\xa7\x89\n{\x9b"j\xfb\x8a\x88DA\x9e\x1d\x90\x14\x1e5\x87!\x9eq\xab_\xa1\xe1e_\x05\x8ct\xd7r\x1bt\xb4\xc4\xdd]\x7f\xfdx\xf4\xe1\x9bK\x1b\xd0#N\x14\x01K6\xe0|8\xff\xab\xa4\xe5V{g\xef\x1e\x03\xa1GG\xba\xea5\x93w\x9d\xa0\xe1}\xa7@fh\x12\x0e\xc9Q\xfbET\r\xf7\xbdB\xbd\x03\xe9\xcd\xc7\x0b\xb2`\xe7\xf7\xf7\x84\x8e\x89\x99\x9f\xca`\xf4\x84n\x80\x8e)\xce\x01\x99\xd3{\x16\xb3sD\x97\xa4W\x89\x12\x0et\xa5\x14\xff\x1c\xf1\x0f\xea\xcc\xa9\xd2\xaf&H\xff\x97{\x88\x8c\x12\xc3\x81\xcd\xf2\xeb$T\xb6\xed\xbc\x86^\xd0\x82\xecKdK\x06a\xd6\xe6he\x84\x8f\xec\xc3\x86Aub\x08\xfb\xf7`\xf2\xf5\x0f\xc9\xa1\xb6z\x04\x93O\xfeW\x86\x07J\xd7\xf0\xfeB\xcb\xea1\x05\xbb-+\xb9\x0bif\xeb\xa3\x7f\x8c\x04\x85\xeeG\xf3-\x9f1T\x1d\x1faeF\xa0\xe2%4\xd433\x00\x8a\xe2\x1a\xd5\x04\xed\xdb\xc6?\x19\xe6\xa1\xa46\xf1\xa2\x85\xe3\xa5Ts4\x84B\x16\xe0\x9e\x90\x0b\x8cSw\xb1\x1b\xc8A\x8c\xf7\xea\xa6\xb1#\x15Fz\xa5\x13\xe1m^<\x81\xc0\xd4\x10\xd9\x02q\xff\xf8)\x0b4\xb5\xff\x9f\x9d#\x05[f\xb8X\xb6E\xe0\xe5;<v\x80\xd3\xea=P\xd0<\x0f\xd2#@F\xb7\x16\x9a\xe2\x87\xb6\xe5\xe9\xac\x00\x94\'\xdc\xbb\'\x89\xd3\x96Qrq\xfeG*/A2j\t\xf2\xa9!\\y\x86\xdc\xe2\x97\xa1\x13W\xca\xd9\xe9<\x81\xdb\x08\xe5\xb0\xbf\x07Mo\x85M\xc8\x85\xb7\xf5\xa5\r\xe9\xfa\xfb\x86\xde\xfds\x8a\xbb\x95\x0e\xfc\xe2\xf3\x9c\xfct,\xfc<\x1c\x19O\xe6\x07\xef5q\x02b(\xc8:\xcbwz\x1b\x9dLRAu\x84u/\xd1C\xc9\x98\x03<.\xe0\xed\x01H\xc8\x17e\xf2z`P\xb3\x0e\xa8\xc8\x1e$$;*\xf6\x91\xa6f\x07\x80\x82\x95\xa9\xa6A+\xf6%\x9e}g\xb9N\xa82\x14\'\xaeh\xd6~\x92r\xcbf\xa9\x8b\xf7\xb7R\xf1\x1f\xa1>\x07\x8a\xd3\x11\xff\x00\x8c\xb8\x1e\xc0\x84\xf6\xe5\x17\xefR\xe7\xa7\xcf\xacv\xe2\xf3\xe3\xa0}9\x82\x0cl\x07\x08\x15\xfe\xe1\xd2v\t&i\xf6#\xe0\xf5Z}\xf3\xbb\x90>\xc6\xed=\x07\xa8\xb0\xdd\x85\x8f3\x84\xb1;A[\xdfz\xbbC\xccL\x98\xcaO\x14\xfe\xcea?\xb7\xd29I(yI<\x0c\xac\x03\xd1ybQ\
xc0\xf0\xd8\x8b\x0bG\x80\xf3\xa7\x17\xab}\x07\x1f"}?\xc2Q\x1e\xd4)\xe2Z\x96g\x8b\xe6j*\x02\xbb\x90\x90-\x00\xd8\x80f\x8c\x15\xa5y\xf720\xcc[\x97|\xf7#tPC\xc8`\x07\xa5gn\xbb\xfbU\xf1\\Y\xf9\x92\neZ\xb8\x95gw&\x16\xe6.\xd7\x1d<;\xf3^\xce\x02\xfa\x8a\xc8\x063\x15\x16\xf4]\xa4\xdc\x8b3\xa9\x80\xd7Kx\x06\xe0\x87\xec\x82\xc3\xcfX\xd7:\x97@\xeft\x1e\xde0\xb2\xc9\x9aP:k\xe62[i>r\xaa\xf2\x18 \xcdT5\xb5\xe3\x87\xcd\x8b\x18g<p\x97q\xe9\x0c\xea\xb8\xe4\xe4;\x14\x042\xacL\xa6\x8cK\xf9\rY\xc7\x05\xb5\xaa\x9b{0h\x99\xbb\xd5\xe3\xc0\xc3\x08\x83N\x16%\xb7\xdb>\xfa\x1f\\\xf9\xfb\'i\xaa6pe\x96i\x8eq\x89N\xbc\xc4\xe3\x16\xd5\x8b\xb8\xadx\xcfo\x11[\xd9\xa1\xe6\xd3Y|\x8e\x97\xf7\x9f:\xf2\x0fs\x86\xfacW\x01\xb4k\xa3\x83\xd0\x9e\xee\xe0j\x8d\xa7\xc4u\xfe\t\x92\x92\xad\xfa\x11\xf4\x1fI\'\xf6\xc5-\xb4\t\xdc\xf0\x84p\x8b\x84\x03\x83\x0eMkjdAL\xe1K-\x8d\xe6\xc7\x01c\x85s\xdb%\xcb\xf1Q\xa8\x90\x9dt\xa6M\xbeb\xbd*.Ey<N];\xcf\xfcXO?S\x01\xa0\x01\xe2\xe0\x18\x91&\xb3\x01I\xbbd\xe3.\x86\x8d7+\x98l\x99r\xbd\xa8\xd8\xca\xd5\x8a\x19b\x07\xf0E\x0ca\xa1\xa2=\x8eLu\x00\x82\x99\xe2\xde\rU\xaa\xb8\xa6\x96\x9f\xd4\xe9\xf1\x19\r_+\x8b\x89)\x9c\x0f\x9a;\x0f\xda\x7f\x93p~B\x97\xde\xc0\xe6\x12%wt"\x99v\x0e\xa1\x17[\xe8^\xb3\x04\x96u\xfb\x1c\x11\xc6:\x8aA\x0f$P\xe0\xdf\xb6\xe26\x04\x0b\xce\xac\xdb8\x9a\xa1\x9a\xa0n\xa1\xba\x166\x90\x1f\xf3e\xdb\xea\xb8\xf2\xde;\x08w\xffX\n\xc1\x14\'\\\xd7\xa27xo>P\xacr\x19y\xc4\xd8z~\x18\x94\x8d{\x17\xd4\xa0\xa5\xe2\x01\x16%\xe0S}\xf5\xd5\xed\xa6\xba\xf9%\x9d@t\xd0r\x85,\xe1\xd6\x95\xd9F(\x966\xfa+v6\x80?\x93\x80\xef4>$\xd2\x01gU2{?\xe0N\x02\xa7E\x9e;\x94Z\xd9\xfbl!h\xb9B,\x0c\'\xbd\x80\x16\x13\xf9l\x82Z\xa0PB\x13\xf8g]\x1d\x14\xbf\xec"\xb5F\xc0\xf8-\x8b\x80(1.\xb3\x96\xbb\xca\x84\xa4|.\xf8Nz\x8d\x92nrh\xf9_\xf18\xd0o}Y#\xce\x19\xbc@\x036\xc4\t\xa8\xc0\x90Q\x08z\x00Y%\xbaiZsZHJ\x99\xd8\x05\x9cc\xee\xd8\x88=l\xfa\xad\xcbZ\x0b\xe5\xa7\x89\x89\x82\x9e\x92\xc4t!8\xd3\xc80\xc6 b+\x0b\xad>|\xc3&2\xdf\x943\x1a=\xbd\xf8#\x07\x1e(\xa1V\xbcP\xef\xdf@\x93\xe2\x96\xc7t\x11\x8e\xac\xa7\xf6\xe9\xcf\xae\xee\xe1\xab\xbc\x97rO7 \xa9\xd6\x98f\xd6\xb4\x85\xc6=YU#\xfd~5"\x9c\\\xc3\x13+\xbd\x83\xde\xcdR\xa8\xca\x95\xa8e\xb5\xaa\'w\xa8E7f/+U\xe4W\xcb\x02\xb7as!\xd3\x0e\xde(\x9cH\x0b8D\xc2\xb94@sV\xddrE\x8cqv\xdb=\x92\xa0M\xde@\xd1\xf7\x9f\xfbI\x8ck6 \xa2;\xfa\xe6\xa1}\x10[\xd6\xa1{P\xbb\xa5\x86\xc5\xd2\xfc\xbaj|\xfa5\x18U"\xeaGh\x00\x8e@\xae\xb5r\xc7\x1a\xcfm \xfc\xb7\xa1!\x92\x1c4\xb5*.\x86\x90i\xe3\x05\xf3\x02\x19U\xe7+\xdf\x17v?\n\xc9nS%\xcc\x80\xe6g\x9e_\x1d\xc71B\x15\\\x1ac\xad\xb4\x03\x86\x92\xc4R%\xc2\xd6a\xe6\t\xa1\xbc5\xbf\xc0\x1a\x10&\xf5\x0f\xd6c\r-\x1e$\'A\xed\xeb\xf6=_\xc0\x93$\xec,u\xe2\xdf\xd1\xb6w\x18\x18\xa8J\x80\xa4\xf0\x9d\xd5\xbaS\x9ab\t\xb2\xedr\xf5\x92Vo\xadN\x12\x7f\xe1\xe1\xd7\xbc\x85\xb2\xfa\x9e&\x00qRLk8\xac30\x011\xd5d#\xdeIBu\xc2\xcb\xae\xb9\xae\x03\xf9\xbc\xdd\xf9\xdb\xbd\x8a:\xb8\xa0\x88+Wv+\xb8\x99\xd4\xa0\xa6o\x80\xdd\xf0\xc9\xe7\x05\xf3X\xe5m\n\x92)\xcc/\x179\xa2\x85\x9d 
6j\xc4\xc0\xdf\xa8\x1b\xd6qz9!\x04\xecLF\xe5\xa48\x18/\x8f\xeam\xa8\xd0\x9a\xad/\x0e7\x8e\xf7R\x0f\xb8\xe6\x99\x9b\xd43\x93\x9b1\xf4\x90\x0b\xdcY\xf3A\x89Q\x87\\\xca^6\xa5U\xede\xc3\xa6cI\x01]\xe37\x9c;\xcb{\xb0>\xf8\xd4\x14\x8f\xce_1\xfc\x1a\xbb[\x9d\xe1\x9f\xc4e\nE\x7f\xd4!:\xb5\x03\x8c9\r\xd2\xe2\xac\xc9\xa3\xb5?2[\x06\xc8\xf4\x8c\xf2v\x11\xb3\xe8\xc9ySor\xc4\xe1\xe9\xc5\xa4.\x17\xb6\xbc\xca\xb0\xcd`RSi\xa5vC\x7fPa\xc4\xb34\x0f\xaa\xbd\x07\x7f\x00\x9c@0Q\x13e\xccJ\x99>9%\x89J\xa4\xc8[\x12\x15V\x1dY\xda\xdc}j\xdb\x0f\xf8\xa3\xac\x10\xe4\x83\xa7\x928\xe3\xef\x0es9\x0b\xa3b\x91g:\xf2<q\x90\x83ae\x0b\xc4\x8f\xce\xc7\x9a\xaex\x8b\x01\xb8\x88\x9e\xa4M\xb5\xb6\x9a\x94Bf\x02%D\x8d\xa0\x98[\xfa\x15\xf8s\xbeK\xea&\x1a\x19H\xd75y\xb8:\n\xef\xbe\xd4\xe0\x07W\xfa\xdc\xf1\x05.\xf4g\x003\xc0\xb8\xd8\xfc\xaf\xb9\x9eX\x0e1\xa6\xa9:\xd6"\xe87\xa2\xbcSP\xc0\x83\x18\x1e\x91\xe0W\x99\x01\x0bq\xcf\xa4\xfe\xc0o0p\xc6\xadQ\x16\xa3A\xf1\xec*\x7f\x96\xf7\xd9\xe1\x97\x91w*"\x04n\t\xcf\xe2~U\x86.\xc46\x0b\x18"\xf7\x14\x05;\x17\xaa\xefD\x92\x064\x82\xf0\x84\xc1\xc7\xf0\xdeXR\x9c=\xeb\x8d\xe6\xbeQ\xfa\xd5}\xa8\x01\x1f\x1efBt\x0e"\x8dt\xd7\x10\xe3\xb4^1\x14\xf3^\xbf>\xd5\xe3\xf8\x1d\x99\x02\xa5\xabV\x98\x06\xb5\xc8\x8d\xdc]k\xe8\x9b\x00\xea_d\xd5\xe3\x07\xef]\x86\x1a\xec\xab.u\xfd\xf3}P\xcb\xf1\x08\x93\xe0\xe1\xf2\xdc$KY\x1d\tP\x1d\xdap\xb9\xcaB\n?\xc5[7\xb68}-\x00z\x1e\x89^\x82\xac\x99\xf6o>\xd6\xbf\x9bUi\xa0tB\x9b>6\x07\xb3\xd3\xbe[K]\x19\xad?z\xc7il\xb6\xf5,\xcb8\xd8\xe8\xda\x9b\xc5\xe4v\x01Er\x1ao`\xa8sex\xab\xeb0%m\xf7h\x85[\xb5\xf8Onh\x1e\xa7k\xf2Lo\xa3\xf7Q\xbf\x00\xb5\x8c\x9e\xed2\xd0\xbd\xa7\xbc_+B1A\x10\x97\xc1NX\xe2\xd5I\xe9y\xa8s\xd6l7\x9cUF\xe3x\xe8\xe0\x0b\xeb\x83\xb7\xb5,\xf3\x81\xfe`\xc0)\xb8\xcc\xbfFx\xe1\xd6x\xe9\xca\xa6\xd7G\n\xc0\xa2\x9d>\xdbnSt\xc8\xa3[Z\x12\xa7\x86eQ8}\xefP\xcf\xe6\xd8\xb8y\xd0\xe1v\x0ck\xc3\x83\xf2\x01\x7f]99\xa3\xf6\x80\xc7\x17\xbfJB\x88\xd5X\x7f\xab8\xdcJ\x9b\x8f\xf8U\x8f\x97\t,=\xef\xa2\x05e\xc4\x07ZUN\xd7M\x1e\x06\xfc\x044\xf5\x9e\xb9\xf0\xe5\xf2p\xd6\xea\x14\xae\xd32.\xac\xc0\xee\x80=\x96\xb8\xef.MJ{\xb2h#\xe4_O9\x82\xdc\x86\x02?\xa1\x8ai\x96\xad\x83\x89`\xac\xd6\t\x92\xb3E\xcb4\x83Ark\x07\x8b:o\x9f\x86\xdcQ\xbcGQ\xba\xa0\xc5\x08a\x08\x88\x8az\xa22n\xe85 \x0f\x88\xf6\x918O\x99\xc7\x07\x8e}\xe8/z\xb9\xec\x10\xdb\x18\x87F\x7f\xa0b)\xf3\x9e\xb4\xd6\xea\xffyB\\+y1\xdd\xa79q\x8a\xe0a\xa1\xd9\x87\x96{\xbb\xdb\xe5F\xa0\xbf\x81\xda\x95\x92Ti!\tk\xc7N\x17\xdf\xbb6G\x1a\xe2\x84\x08\xf3\xaaK\xabC#*E\xc3)\x84/f\xe1\xef-\xe0\x92\xe7\xfc\x9a\x81\xcd\xfc\xd9zgU\xc0U\xb3\xfe~q\x93\xd4\x81\xdf o^\x8a\xf7\x95\xe9z\xde\xe3\x1dc\xdd\xf9\x14\x1aI80\ne\rxC\x8e\xf4\xfc\x8fs\xc2W\xb1\xd8[#\xa0\x84$\xca\xc9\xf2\xac\xf8\xb0\xb6\xb5x\xd0\'1\xa1\xc9w\x8c\xf2\x98\x06\xc2\x00\x87\xa6s\xb8m\xf6\x0c\x80\x8b\xef\xbdn\xf5j\xa4\x86\xa3%\xaf\x12\xd0\xd7sFF\x9bf\xc5\xae\x1a1\xfe\xb8{\xa0\xb2\xf7\x04\x9c^\x8c\xf7\xfc\xfeJ\x7fa\xde\xe7\xc9Zae\x9e\xd6\xd5BI\xb7\xa2\x1b\xbf\xc2\x0b\x03\x01S\x12\xf4\xbf"C#5\xe3\x0e\xe6\xf0\xb4\xdd\xf7\xad\xf37\xe1*\x0e\x83\x0c\xcf\xfa\xae\x15\xa9\x1e\xe9\x06<\x14\x1b\xf6\xa5\x8ef\x10\xb9(<H3\x04\xe4\xee)/\x86\x97;Lig\xeaUx\x0e+V\x83\xd1\t\r+[\xb0\xcd\xecs\xcds\x8e\xc4\xa5\xd7\x8b\xbc\xb8YU9\xda\x8a\xef\x03xxaz\xde\xc1R\x83\x89>\xcd\xba\xd1\x14\n\xf4S\xb6\x19^\xad^ \xf6\xa4l\x90)\x1e\x83\xde\x8b|\x8e*c\xef\x10\x88J\xb7va)*\\\xed/\x1f@k\xcb\xb7\x83\x1c\x13\xb5.\x10,\x8a\'\xb5\x05\x94\xb3\x9c\x06k\x82\xed\x16{\xf7\x1e\xa8\xe4\x91J\xe7\xcehg\xd3\x169\x99\x8d`;[\xbf\xb3\x939\x93-\x1a\xce\x01}\xd6\x95\x0e\x0e\xe5<\xc9\xc4\x1cr\xb2r\x91\xb0\x1c\xd4\xb8 .\x12\xc4\x0fW\xe4\xabhN\xaf 
<\x16\x03\r\x0bA\xd5\xfb6k\xd8s\x1f\x82U\xb0\x82U\x94-\x03\xb2\xc9/\xdd\xb6\x1b\xb2\xfcN\x89\x9c\x11\xda\x82\xcb+\xe6A\x0bU\x86\ngo\xdb\'T\x81J\xc8*\x1f\xd9a9\xd1e\xe7\xd4Cy\x81\x05\xf2Ynt\xb5EC\xc9?DJ\x9c\xa8\x88\xc3\xc6\x8b\t\x9b\x15d\x96\x90\xef)OI\x1d\xb8\x9d\xf2\x8f\xf7G[=\xf6~\xf7?j{\xfcZ\xea\xa8\xe5\xa9\x03\x18\xeb\xa5\x087\x83|\xbal\xd5\xe0\xa9\xa9\xfb\xf8:cc\x0c\xb4\x1e/\x82\xf2`\x91\x99z}\xa9e\xdc}#\xfc\xbe\x88\xbe\xb8=\xd2\xb0\xbbV-\x10\xabL\xdc\xb8D/\xe7\x9a\x1aD|\x1d\xf0\xd5\xb4\xc7\x12\x10\x02\x9f,\x8e\x90\x8bU\xdf\x17\xb7}\xf3^\x1e\x9aX\x80\xea\x7f\x9d\\}R\x96\xc8\x16\xa8Q\x95M\xe9\x8dVv\xdf\xde\xf1z\xf2\xcdQ\xf7\t\xdc];4[>\x00\xdf\x1dq\xd0\xa9\xc57\xc1\xdc\xf1\xa1o\xde5_\xf6@]\x19<Q\xd1\x02@\x94\xa2\x12\xb1e2\x83\x1a\x9e\xa83\x81\xe9\xff\x0f3\x9f\xf0\xb1\xc7\xcf\xa4{\x84"EPi\x13\x8b\xcdV\x0b\xbf\xfc\x06\xfa%\xfdu9\xe0\x1a\xbb\xf5\x83a\x17\x93\x12\x12\x84\xa2F=\xa2\xb9\xd0\xd7.\xc8YVyg\x99\xcd\xfev\xf8\xcdG\x1d\xaf\x7f\xdb\x17eQ\xdc}\xae|\t\x0b\xe7e\xf6\x1c\x12\xcc\xdf\xeeZ\xb8|\xadn\xba\xc8!\xfb\x05\xf1\x81\x16\xabXB\xfd\x00\x1c>\xda\x19\x151\xda\xdf\xbc2\x81\xccYfY\x16\x9bW5\xa81\xe4\xb4\r\xac=\xd4\x00\xeet^j`w\x163\xcb\xf9~\xdcUMjEJQ;-\x97\xb9\xea\x8d\xc8\xf4[\x9d\xb2x\xef;u%FC\xeb\xd7\x9fC\xaba\x8c>\x05eQ\xe2.1H\xdb6k\x1a\xc1\x8d\xf3_/1N\xf9\x0e\xcc\xa6FVn]\x83\xa7R\xfb\xee\xf5\x8e\x9c/fORu\xf7\xa6\xfc\x92>=\x91\xa8\x18zI\x18i\xa2\xa5\x9c\xb2w\x10\xd0E\'\xe7\x98\x0f\nI\xd2\xb4\xe2\x02S\xdd.Ysr\x82P\xc0U\xca\x98>@JM\xe3\xd2\xefX8k\xa4R\x82\xec\x91\xd7\x81\xd2\x98\x8dh\xcdh\xd7\x11\x7fw\xd1\x9b\x13\x97\xa4x\x08\x97\x11\x0fP\xec\xaeV\xa4~J\xea\x8a\xf2\x8c\xdf\xb0\xe9\xa0\xf7z\xe4\x18\x03\x0f\x9e\x8f.\xc5\xd3\x13Ni\x1c=\xd4w\x9e\xd7U\x83na\x8a\xe9\x86S\xa4xm\xa05\x116\xae\xf7\xa3\x1e\xbc\xa2\x16\xf5mh\x89\xcc\xd2\xf8R\xfc\x99\x84\xc6\xf8&\xcd`\x8c\xd4\xa1\xf3\xeb\xba+\xac\x94wt\rXudV\x94\xe1U\xbd\xdfh\xf0T\x05R\xfd\xf9\xfe\x05\xb2\xa1\x0b\xcd\x05\x80\x838\x1c\x84}\xb3\xdc\xb5\x9b\x1c\x1d\xbb\xcc?\xac9$\x10\xbaj\xab\xed\xdf{\x89\xb9#\xc2x\x89\xb4i]\xc7\xec\xf1vO(\xbf]pL\xfel\x99\xaep\xdcN\xcb#\x98\xe2\xe1\xff{\xeb\xf4\x11\xc7\r\x91\xa6_\x94\xb4\xd6B\xe79V\xea4`H)\\\x16\xe5a\x9bwT\x1cM\x11\xfe\x82y6bo\xb4\x97\xe8\xd5K\x8b\xc7\x1f\n\x18\xeb0I\x9f\xa7-\xb9\xca\x14\xca6\x07\x1eet\xa0\xbe\xf9C\xaa\x93\x9d\\\x02\xe4\xb9\x97\xfb\xc6`\x00\x8b\xe4S\xf7\xf7788\xc4\x9fZ\xa4\x17\x17\x8b\x05\xe8,\xa9\x17\x84\xcd\x05\x035\x8c\xbe:\x94\xf3\xffEY\x90\xf5\xb2O\x98^\xee\xa3\x1d\x934>\xe3\xda\xb1"\x89\xedd\x0eH \x00<q\xe4\xf5h]vb\x94\x08\n3\x9cK\xc9\xab^\xe0\xe6\'e\x8b#\x8e\x84\x12\xbc:\xde\xf3\xe8\x8a\xb3\xce\xd4s\x8a\xd4\x84\x90\xdf\xb8\x00\x0b6\xa7T\xca\xe5\x879E\x88\xbd\xf6.\xec\xdb\x0c\x8b\xa7\x12\x7fN\x0b\x9e8\xdf\xf1\xed\xd3\xa3O\xbd\xdeM\x19*\xa8,\x9a\x0f\xa8\x8b\x88\x9a\xe4f\x16F6\xfa\xb2[\xbf\xfc0\xe9\xea\xf0\xc7PI\xf11U\x0c\xcf\xf0v\xa9h\x04B\x83=\x96\xc3\xf8\x88\x9d\x11#\x8a\x08i\xa9A6\xf5\x83 
\x9bJ\xb7\xa1p7\xb1\xb8\x00\xff\x1d\xc5YT\x91\x91\x9b\xd5#\x14\x8fq\xb0\x1ckARm\x8c\xac\x05*\x03i\xc702\xce\xbf\x9d\xa3\xa5f\xa4\xf4q0\xda\xe3\xa4\x99\xfa\xb2\\\x99\xc0P\xa7\xa2\xfbX\xc0\x02\x06\xd8G(\xe6\xe7\x07z\x7f\xf1\xa5\xf1r#\x94\xf5=\xbfB"\xcdqi\x96\xac\x9c\xde\x07\x1e\x18\xcb\xce)|\xcc\x84\x02#\xfc\x84\x03\x1b\xee\xa1"Yp\x84\xcd3\xe6\xd1\x05:\xb6\x8b\n\x02\xb0\x020\xde\xdff\xd5V.n\x85\x91Z\x8d\xc21\xb9\x88\xee\x8c\xaa\xf8t@U\xf2\x15Ir\x8fZY\xcd\xe6v\xdc\x90\'\xfc\x8du3\x19<u\xcd\xfcx`\xa5,#\xaa\x81\xac\x9a\xbbcr\x96\x01\xe2np\xe8\xbd\x87\xfc\xff\xfd\xa0\xa2\xaa\x10\xd7D\xc9@\xdb\xfe\x95\xc8!\xaa-Tr66!\x16\xaa\xa2\xd1EM\xb4~+\xdc"x\xd8\xf4;=\xe8\x9f\x11V\xfc\x14:\x8a\xce\x12\x913\xebH$\xfa\xe2\xd2\x9fB"!\x96\xd6\xf7\xaf\xadm\xa5\xbb\x9a*."\x02G\\.i\xc4\xe6\xd7\x9b\x18\xcbz\xf6\\\xca\x0bT\xd4\x109l=o\x03\x93\xb9Y\xb8\x97\x17\xdd\x1ad\x9b\xcc\x10\xff\x16Z\x9b\xfa;\x1e\xf7n/,cv\x19\xf7l\x90v\xec\x90\'\x9b\x01\xbb\xfa\xe4\xd5\xd9\xac1\xa9\t\xed\x98QubU\xd3\x1b\x15\x9b\xa5\xbf\x867iU\x12\x0ea\x8e\x17A\xdb\xe9\x9a\xed\xb6\x93:\xe8 U&\x17\xd7\xcc\x94\xa5\xcd\xed\x0fb,\x1a*S\x17z\xc9\x07\xd8\x86b\xfd\xad\xae\x01?\xeeDbMi\x06Ja\xbb\xfb\xfd\xd3h\x83\xee \x8e\x93XK\xd2\xc9\x14\xbb\xd7(C\x178\xa9\xa90e\x85\x11\xb2\xb9\x91}r9D\xbbl\x16\xf4/\x8d\xc0\xf4\x8fH\xf4\xe6\xce\x80\n\xb3:\x00\xbfa\x1fS\xf3\xd7\xe1\xf8\x1037\xc3w\xb0\x80T\xc9\x85\xb6$I\xfat\xd3:w\xba\x1a\x86\x18L\xac/\xb5\xba\xb5\xd8U\xe6\x17\xee\xe8\x173\xb4\xf3\xcd\xf5,X8\x06\xb93\xff\x16\x05Y\xdbq\xc5XN\xd9\x9d2Uu\x92\xdfZ\x9e\x12h\x18{\xf6\x1a\xfa\x1f\x10\xe8\xeeFC\xce\xbc\t\x01O\x7f3\xdd\xe7\x99\x06\xd2\x1b\x04q\xd7s\xddc<c\x8a-\xdc\xc6\xbb\xd7x\x02D\x9e\xba\x15\xdb\x19T\xebA\xad~\x03\x98|\xed+\xb2U\xb0I\xd3F\xc9\x1a\xfb+M\xe3C\xfe\xeb@\x1e\x8e|\xe5\xc6\xf4\xa4\xcb\xd1\xe2\xfb#\xc6\xc9\xef\x80\xa2\xe1D\xfc\x10Z\xc1\x85fn~\xfe\xd4-p+\xf0f%\x83F\xefG$\x99\xf1k\x1d\x95%\x9dy\xf5NI\t2\xf6\xfe!L\x00Z\xdc\xf8f\xffG\xd5k\xb6\x84\x83\'@y\xc7\xf9k3\xea\xe1a\xe6 G\x86\xfa_^\xe2\xdc\xeb\xf4BNC_\xd7RG\xf8\xe6\r\x8a6\xad\xde\xf4\xf94\x05RM\xf3\xb8\x98\x81(\xb9\x90RS\xe2\n\xbcVrd^\xcf\xd2H\xe1\xb0\xb8M;\xb1%\'\x93\xff$\x1fOE(_\xf1b\x85\xf9\xabU\\\xe4C 
H\xa1\x10<\xcd\x1c\xd1_]\xbb\x11sn\xb6\xe3\x96:\xa5\xf7\xd2{\x01\xa5\x95\xdb\x864\xf5\xef\xa9\xe0\xcc\xa7\r\xc0O\xbd\xe2i\xc4g-\x99\x9e\x12\x00\xcf\x83\xdd\xe0\xaa\\!4<5\xc6\xe6\xb3\x13G\xd6\x8d\xa8\xc9\xa1O\nV~{\xfd&\x17\x0f\x17._H\x18\x0b\xc6\x11\x85\x8fy#/\x00\x9a\x9dO\xc3<0\xa7\xd5\xe3\x10s\xa0~\'{\xac\xbc\xb7Q~\xcaB\x08#-\xbb\x18\xee\x9d\x15\xf0\xbb\x03V\xd2\x05\x03h\xf2\x9a\xce.C\x92u\x0e-\xfa)\x80t\xff\xf2\xe9\xd7\xbc\xd56`\x12\xaa\x10\x1e[2\xa6\xc8oW^\x084G\x0c\xa9Y\xaf\x9f\x05\xbf\xc6l\x8diEF\xe5\xbe\x0eq\xbau<>)\xca\x17\x9b\xf3\x9f\xb4\xb8\x10\xeblgk\x19\xe6\xacm\xe3@k\r\xf9\xb0\xd3\xf6v\x1b\x17\x03B\xc26\x1be\x1aX\x87\xf5w\x82]\x9b\xfe)\x11\xcf!\xec9\x1e\xb5J\xa9\xfde^\x86\xf9\xb7\xe4\x83\x13\xb1m\xa0\xb1\x08@A\xc5\xb6\xe9\x14\xa3.\xd0\x1f6\xe2Y\r\x08=\x163*j\xcaA\xf0\xf5l`\xa1\xa8"\x9d\x0bYt:\x03T\t\x95\x8c37\x18K\xa6-/\x82n\xb5\\\x95B\x0c.q\xc3%}y\x00\xf6\x83\x8e\x88K0o\x94+\xd5yv|\x8f\x15\xf5\xd5r\xe3\x05\x01\r1w\x1e\xbe1\xb6\xf3\x83\xe2f4u\xb6m\xc9l\x9c\x85\r=\x83E\x0f\x82<\xec.\xbd\x88\xe3\xf2\x00\x98\xf3\x05g\x86\xc7QE.E`r\xa1\xd2\x01\x12\x8f\xc8!d\x83\xb4Z\xfae\x13`\xda\xe5htQH\xf9E.[Ph\xff\xdew\xc9\xe4\xb4C\xdd\xea#\xc8\xb5r\x82\t\x01\xca3\x1d\x92\xb1?\r\x86\xbb\xc4\x86\x805*\x1f%\x87\xd8w\xef\xfe\xfc\xfd\xc6]\xfc\xf9\x1da\xfa"\xbbk\xd8\xc5\xfb\x18\xa9\x96%p\xc1\xb7`d\xa8\x1d\xaeLN-\x19\x8b\xeeE\xb4G\xf9\xd4\x9f\xf6miz\xda\xaa\x9a\x9aDY{-\x05`\xcc\x1f3\x10\xe4<Y\xa4`~\xeeR9H\'%\x99\xd9!L\x00\xbf\x87]\xb2\xf0e\xa4\xcd\x1b\xb1OZ\x8d\x9a\x17\x86\xcd2\xf8\xa2\xc9a\x99\xb92\xf7\xcb\x1e\x86R\x92a\xb69\xd0\x9a^\xec\xaeU\xfe\x0b7\xde\x17\xe2\x16\xa3Txo\r\x0e\x02$\x1d\xb5\x0c\x1do\x87\x04\x18\xab\x93rt\x80\x11\xfd\xb4\xf3se)\xd2\xe0\r\xd2H\xdfe\xf8<\xc9v\xdc\xea\xcb\xbbT\xfa\x95\x9dk/[\x97\x8aV\xfaE\xd6\x19HUfM\xc8\xbf"\xc2\xcec*\xd3\xc8"\x81\xaa\xb7,#Y\xad\xe8\x81\xc6\xb2\xf9\x9bAC\xe3\xe4J\x9e\xaf\xe8\xc1"\xd0X\x10\xc3M7t\xc8\x89\xbb2/\r\xee\x00\xc1Vk\xb1U\xda\x9e\xbdl\xe5\xdd\xa8?g\xb9\r$\x14gE\xces=\x0e\x0f\r\xa1\x15\x1e\x08\xec\xc7\xcc\xa0\x7f\xdf`\xd3\x91\xa9\xb0p%\x06_\x8b\x9dBF1\xc8\x9a#kU\xe8#N@\xc5\x87\xa3N\xf0\x13\xac\x08\x9d\x98\xd3\xce\xa2vY\x9d\xb4&\xfc\x1b\xcb\xc7\xcf7\xa3\xac\xeb\xc4@\xba\xa4\xb7\xd9\x8e\xb9*\x92\x9a\xfa\xf0*;4\x10lZiU\xee\xab\x96Q\x87\xa6\x00U\x9e\x13XG\x89.\x11\xe7\xba\xa1\xf4;J\x9b\x0c\x86\xe8\x99]Q\x81\x9b\xe2\x14+\x07Gf)d]\xf0\xf7\x8c\x940N\x8e\x1cRf\x03\xac\x9d\xb0\xaf\xf6\x13\xe0\x02\x1d\xcc\x0f7?\xab]&c\x0ch6\xa0\xc8er/\x9a\x1f\xcav\xc6\t\xb5\xe57\x9f8e\xbd\x10{\xd1\xbc\xd0~\x1e,\xb1fO\x0fa\xd1\xfba\xcd\xf8\xfb\xbfk1\xa1\xee\xfeTy\x9b\xc7\xf0\xfa\xcd|\x06;-\xee\x04\xed\xe3\xd8\x00b\xc39\xe7|\xea,\xe1\xa6Fa\xd28\x9f\xd3\x82?\x08P\x02\xdck\x935\x9b\x03f_\x96\xeb\xce^}\\\xcf\x04l\x9aWc\xcf\xd3\x8d\x07\xbc\x06+\t\x94\xa0\xcd\x13\x8cK\x82\xfc\xcf\x1d\xf9\xa1\x1b9U-\xa6%\x8c\xa7p\x96\x14\xcd.\xa2\x14\x1a\x1c\x88\xb3\xa3\xa7&\xb5\x90\x1b\xaa\xd3sjBrWJ\x94\xfc\x0cSw\xd6B\xd3=\x13\xd5\xc5\x94\xb9\xc2_s55,$\xc1\x1c\xa1e\xd4\x8cMQ\xf7\xd2\xfd\x18\x0c\xcb\x16\xa8X\x14\x87w\xb3\xcb\x12i\xee\xb6\x10\xcb\xd4c\xe9o\xdeo\xde6\x99Z\x9c\xa8vx E\xbc\xaf\x9b\xbc\x10\xf9\xff\xa9\xf4Id 
\x11\xd0`\x0c\xf9m\xcen\x95W\x18\x08\xd6\xe1\x85\xa7H\\\x1aj\xd0\x1ffV\xf3\x97\xfd_\xfd\xedR\x98z\xb3\xee\x1d\x18\x1d\x14\xfe|he\x97}\x8dP\xd7\nhW%\xff8\x97\xff\x03\x0e[\xfd\xc6\x8c:(\xdb\x0c~\xb4^\xf9v\xaa\xb8p\x9f\x8e\x8b\xee|d\xc0X/7:6hj\xde\xb2-\xb7Xl\xa4|\x04\xa6\xeepy\xe9\xee\x1e\x82SSB5F\xb8\xbc\xc7\xeb\x9dZO\xc5\x1c\x0c\xfa\xa6W\xb9\x9d\x94\xdd\xdf3\xe5\x936\x1f.\xad\x19[i=\xfa\xc3\xfa\x02J\n\xd8\xbf\xa9y\x92\xcba\r\xd7\x83\xf4\x8b\x01>R.\xe5r\xed\x05\x05\xb5\x0e\x13\xa2=\xe9\xd8\x99\xdb1\xaa^\x0e`2\x0fj\x95\x83Y\xf0\x84\x11(\xdeH\x9f\x91\xb0\x07s\xba\xb3N\x104\xcf\xda\x83\x7fa\x11Z\xe6\xf3\xb4\xb6\xc0V\x03\x9c \x90\x16\xf9x$\xb9\xf44[\xc4\xcc\xf0y\xdb\x02\x90U\xba\xe8\x95\x90(a \x1d\xa1\xe4g\xf1@\t\xd5\xae\xbb\xe0\xc3\x1dY_\xb3&\x83\x80\xa3V\xeebZa\xfa_\xf4\xb89\x14\xfd\x9e\x84\xb5\xd6*\x08\xceC\xf3*\xce\xd0\x8f\xec \xb9\xfc\xd3"\x02w%E\x8b\x17?zE\xbc\x0c\xc1\xb3\xc1\xa9\xe7\x01\xbc\xc2.\xa0\x03\x1b\xf2K\xcb\xa2O\xfdrr\xd6\xd8\xf1\xd0\x92\xbd\x97K8\x83\x8bM\x13W\x9e\xded\xa3\x0e \xbf\xdcn\x00\x00\xd1u-$\xcf\x06\xfb\xed<H\xd6\xc4Ao\xc0r1+\n\x07\xb9\x88\x1b8L\x12\xce\xab\xde\xb4\xe5I\xd0\x04\x1e\x99\t\x88$\xa2E\xb5\xf8\x987BV:\'6\xc5\xa9\'\xe1\x80\x0b\x16eb\xff\x98\xc4\xa9\xb7s\x08\xed\xad\x00\x98\xe6_~mo\xf8\xe4t\xc3*\xa25\x99\xc4\xcd\xe4A\x0e\xeawN.\x80\xa6\xa9\x83\xde\xed\xb5b\xa3\xc8pw\x84c\xd21\x07l\x9f0\xb6m\xde\xed2\xe5W\xbe\xb26s*,\xbf\xbc\x1bwXT\x8b\xfb?\x126rj\x7f\x00\xdcq`+\x92\xebo\xe6\x00\xb1zK\x80\x90\xc5u\xb3\x1f;=}z4\xb4\xad\xfc\xcb\x0ec\xdfB\x8f\xd5\x80\xa5U\xe3\x9dv\t\xab\xe8\xb6\xc1\xbc\x96|\xe7\x0f\x9b\xf4[\x86\x84r?\x8c\xab\xeb\xe5\xfdd\xee!\x90\x98\x96^m\xa9t\x12\x90\x11:\x911\x1d\xdd\xfb\xbd_\xd7\x18\xcd\xe3SUj\x8a\xd5M\xeft\x1fE\x85\xdc\xbeq}\x18\xe0J\x0fi?L,\x108`^\x03\xa6\xeaT\x93;\x06\x1f[\xb0=\xf8\xf2i\xfc\xf4\xe2/\xc5k\x0bi\xea\xf5\xdd<Z\xbe\x81\xdf\xfdw\xea\xf6\xef0\x8c \x9d\xff\xa9\n\xaa\x86\x90XEr\xc7\xf2\xb9\x02)\x8d\x9b\x81\xa3\x1ak\x92\xd1\xb4\x87M\x15\xc5\x9ah7\x04\x99\xcf\x04\xb4<\xb30Z$\\\x1b\xd4\x0c`\xe9G\xba\x0c\xb8\xfa\xd1\xb7\x9f\x1f\x7f\xd0<_\x0f\xfao%\xbd\xe2\x0f\x85\x92>B\xb3\xec\x933(\xa0,3\x84\xc8\x7f\xd6\x91\x17\xe2\x04!\xb9l\xa0\xf7\xe1\xc2\xc4\x90]\x83\x90[\x01\x93\x8fv\xdc \x98\xd4\xb2\x08 \xbf\x06O2lI\x94\x04\x97\xe1\xf8B&\xf3v\xfd\x0c\xd5\x1di\x15\x15~\xe9\xd0)\xc2\xa8\x03-\xcd\x03\nj\xbe\xd9\xfbK/,\x1e\xcc\xd7\xdd\xda\x01\n\x95\x01IB-gpINB\xd7:\xd6\x8e\xc4b\xa4\x89N\xf7\x91\xd6\x01\xda\xde4\xa4\xdasS\x8f7\x8e\xdf\x97\t\xb3\x1e\x7f\xf9\x15R\xe9\xb6b\xc4\xf6\x80\xf2\xf1`\x1a 
\xa3\xad\xde\xa1\x1f\xa1i,\xc7\x05e[\x04\xf3\x8e\n\x85+\x97\xed\xb7\xf8\x16\xbcd\xda\xbf\x9f\x04>\xe4\x8c\xd9\x99\xbcnO"\xe5\xb4\x0c\xcc\x96\x1e\xd8\xab\xeb\xb8\xbe7\xb0I-\x9dR\x17|\x8f4\xe5#\xd6\x90AplGqIK\xeb\xcdI\xd5\xf8\x8f/\x05\xb6\x06\xebH\xa7\xde\xa5\x8e8;\x86$\x80\xb8V\xb7gHQF_c\x06\xe6\xd9\xe6\x81\xa0\xf2\x84\xa4Uv\x98{\xd8\xc5\xd9`o\xdc\x7f\xd0\xf7\x96?\x8bd<%r\xec\x96\xff\\\xb6Ey\x99\xa87\xfa\xa3z\xde\xb6/\xaa)P(h\xbc*L/\x17\x97Z-\xfc\x81\x82`\xe0\xa6\xb0\x1a\xd6K\x8d\xc8i\xe9\xbb\x94(F\x86\x92>\x0f\xc5"~\xe5\xb3\x05\xf2\x0c\xb3\x93\xb8\xaa\xa5X\x15\xbd.gv\x0ey\x8f\x80\xbaP\xe6\x0c\xfe\x88\xd4\nBZ\x1a\xc9\x11\x1d\x94\xe7\xfc\xf5Y\xa6x\xc7s\x90\xeb\xa4\x12/8\xba\x91\xc7\x8aF\x93\xfdh&\xe4\x98F\x0e\xe9\x13\x8ddx\xbf\\F\xd3\xffB\x91\xed\x99\x88YnmD\x9c,#\x8d\xc3\x1f\x18l+\\\xf7#Vm"\xf4Fc5>\xc8\xb0!\x8e22\xb7\xe0\xe5\xbc\xe1\x87\x9f/\xc8\x17\xf3\x1a\xcdM\x82$\xd3\xab\xf2\xa2U\xde\x06\xae0\x0f\x18#RJt\x99\x9d\xbc\xa4/\xd7\x8c\xb2]\xd4\xde\x12s,\x8b\xcf\xaa(\x1b\xe0\\\x01r{\xec\xfa\x1a=\x8aa\xa7wN\xd8\xa2\xda\xac\xf1mr\xd7\xd7\xa3\xd2\x99\x97\xa5\xc2\xbb\xa5\xb3\xf9\xfeY\xbc1g:\xc0\xaf\xab\x81\x97v\x9c\xbd\x07\xe9\x05Q\x89\xfaS{Wj\x9b6A\xc6\xec\x01\x99\x91\x80\x0eWp\x1b5P:\xc6\xfb \x97\xb7\xfc\x02\xf3V\x85\xe7_\xf5{\x9e\xffMb\xdf<#\x13\xa0\xb3b3\x8fP\xd2`\xdd\\I*b\x1d\'{#\x8d+:~\xb4,\xfc\xe5P\x00G\xf0\x842z\xef{\xab\t\xc3\x99\x9d\xd5\xd5\x9f|[7\xa7\x8b\xd7*\xfb$1t\x93rG\x1b\xc9\xae\xdf5\xb3Z\x86*t\x11\xe9\xda\x9d\x0f\xc1\x8c\xaf\x96\xac\xfb\xca\x860m\xd9N]\xdbC"\x03sZ\x96\xd3\xc3\xc4\x8bGHh\x047%J\xfa\xa0aA\xca\xcb\xab\x94<\x8bT\x00N\x89 \x07\x0e\xa7\xe6(!\x7f\xb8\x1f.,>\xeb\\\x12\xe3\xc4L\xce&\t\xd1\x88i\x89\xedu+\x9a\xb6p\xd6B\xa2W\x8b\xf69\x8b\xe7\xb9\xff$WC\x89\x81bn\xb4\x1f(\xed\xd1\x89x\x86G?\x9aP_\xe7i\x02P\xc6\x10\xc2\x1b\x90\xc7\xbd\x99\xf1Yk3\xf5\xb4\xd019|:w\xd8\x85\x9c\xf8:I\xbc7\xd7t\x9b\xc1WF\x1b \x87\xf1\xad{\x82\x1fL\xb6\x10G\x976\xc3\xe9\xdb3\xb3\x0b\xa3\xfc\xd6\t\xb0\x9d(k\xc7\xe2\x87\xb8:a\xbe\xe4\xe6+\xef\x1b[`\xc0\xda\t\x08`g\xbexV\xd8\x9e\x92W\xec\x19\xb8\xec\xb6M\xfc\xd2\xcd\xdf\xc0\x82i\xe7Q\xcc\x87\x18\x90?=\x84;\xb3\xb6\x06\xd9y\xd2\xa0Y\xeaT\x97s\xc1\xbb\xbe\xb0%\xb3\x85:D\xb2\x9f\xb4\xf2%(\xc7\xcc\x18#\xf8m\xfa\xe1\x1a\x1b\x15\x06VB^6\xf4\x15\x87\x90\x00\xbbCP\x8b\x8f\x81\xdei\xc9\xfe<%\xb0\xa8\xc1\xd0"Ye\x10\xcf\x1d*\x7fmp\xd6\x10\xd4\x84fXl\x03\x99~\x03\xb5^}\xe1\x13;\x1aS\x0b_T\xa71\x04X\xbd\xe8\xf5\xe6\xa5\x19\xaa\xc3y$\x8a\x0fras\xd7\x15\xb5"3\xd2U\\\xa4N\xf7\x1a\x16rHM\x989\xbf\xff\x03\xc1C\xd2<XD:\xd8^\xf4\x8er\xd6\x9c\'`\xad\xcf6\xb6\x8dZ\xd0%\x92%\xc3\xc75\xf3\xd4*;\x82\xde\xb0a\xf8 
\x15z\xd0\xc5\xfd\x9f\xb4\xaf\xbe&+*\xbb\xac\x8c\xe1\x1af\xdcv\xab\x15\x01$A\xc6q\xd8\x10\xaf\x06\xb9\x1d\x13\xfb\x1fk\xb0e>\xb7}\x86\xfdD\xbaK\x06\x7f\x04\xaeV\x16lV;\xef\x14\x96\xbfc\x9a\xd3\xbdK\r\xdf\x8f\xb2\x1c\xed9(\x98\xc3\x06.\xfd\x12A\x80\x9a\xeeM\xc3c\xe2\xcd\xb6\xc8\x08\x84\xf4\x87\x81\xc3\x07\xd7r\x98\xfc\xef6\x95il\xa5\xf6\xf2_\xd2\xddC\xe1q\x1b\xe5Op\xe6\xaa\xc7\xe6AY6;"Y\x9e\xa2\xba\xf1QFQ\x8e\x00\xd2\x14\xaee3\x9aQ\xed.\x85c\x12\xc8\xa3\xfd\xda\xf20\xbb\xd3\x00\x0b\x83\xb4\x05:\xd0\xfd\xe1Z\xc9\xb0\xc3\x96\xc26\x93\xb3\xcdGQ8\xe1\xcf\x12@\xa5\x84j>\xc5\x8e,\xdf\xad2\x17a\xf5B\x93nn\xc1E\x13\xe9\xfdu\xe1^e\xdf\x12\x8cN\x07U%Sf`,K\x1c\x1e\x15\x15\xafn\x9c6\xf8\x80\x8cWnp\x1c\'9o\xcbB\x1809\x9e\x91!\xae\xc72P\x8d\xc6\xc8\x05\xd6\x8e\xbe\x04\x8e\xb3\xa4V4\x08\xca\xd6\xf7O\xcc\xa6eY\xd4\xa8\xc9Um,*\x04\x17\x81t;^\x1f\x98\x0f\xf9\xe9B\xfc\xc9&\xd9@-\xf33\x96\xb4\x8b\x04\xe20\xa8y\x8b\xf0\xec*\x9c)\xd5\xc1\xce\xf3\xbenH\x92\xb9\xb5C\xefV^\xe7\xa3\xa5\x12Y\xea\xb9$\xe5\xe9\xdf\x91V_\x06\xc4\x1e\x15\xd8\x16{\xf6\x9b|\xaaR~t\x17\xd4\xb4:{\xea\x0e4=T\x1d{\x8aT\xeb\xf5=\xa5rs]\xa0\xa6\xe6b\x0c\xec\x02\xa31q\xdcsq\x17\'4\xef\x92\x11\xb2s\xfa\xb4\xa5\xbe^\xe8\xb5o+\x0e\xc1ah\xbd\x90"?Z\x1b\x982\x82\xbd\xc218KI\xfa\x06^\x1aCs\xdb\x91\xa9\xb1\x1e\xfa\xa4U\xc1j\xb5\xe1\xa3\x8d\x95\xb0\xd7m\xc8\x8d\xd7,%\xf2o\x95\x87\xa7\xf8\x9c\xb0\xc2\\[N\x12\xfe\xafE\x99\xb3\xd9\xca\x0f\x1eu\x14g4Y\xde(re\xd0&\xb7^TQ\x87\xe6sh\xdb\xa0\xa3\xdc5\x9f\x84\xc2\xbdM\xf4\xf1s\x85\x05\x1f\xb7\x1bM\xf3\xa4\xdd\xf8\x10u\xee\xa8t\xba\xf9d\xb2\xde\xfe\xec*\xf3\x0b\x1f\x8a\x9a!\n\x12\x99\x9d\xa1\x99Oc\x8a\xd7\xa2\xc1k\x1aw\xbf\x90\x95\x85\xa7\x08i\xe1\xea\x01S\xbe\xd2\x96\xc7f\xbc\xe3\xc6T\x80\xdf\xd3_,\xb8\x07\xf4\xfez\x95\xff\xb1s\xb8\x84\x97Z!CR\xee\xbb\xde\x0b$\xe9\x01-u(\xed\x9c\x86$\xffS\xfbZ\xf2\xfa\x96?\x18o\x1e\xaf\xc3\x19k\x8d\x02\xf2\x99*7\xab\x8c\xee\x1f\x19~\x1a\x93\xb5\xc6\xcb;\xe8-\xc8\xbd\x00\xe8\xf5m\xb8\x1aWz\x04e\x12\xae\xdcp{\x1e\x95\xd1\x9b\t0\xda\xe9\xabj\x8b\xbc\xb7\xe2\xe5\xa5\xdb\n\xaf\xf0\x8a\x88N\x0e:\xde\xec\x0e?\x1c\xf3\xb1\xb6\xa6\xba\xc9\xa7\n6\xe1\xa6\xcdU\xc0`\xb8+\x07\x1bi\xc6(\xed\xe1&H\xa0\x11fi\x8b\xa4]\x07\xd8\xa6\xdd~\xef\n;T\xdb\xec\xbdW\xbf\xce\xb5\xbc\xb5t\x1c\x95\xb2\'\x18|x\x85\x8a\xa4F\x135\x11\xf5\xd4.\x17\xb2\x9e3t\xd4\xa1\xd5\x11\xa4o\x8b\x11C\x08\xb7;h\x98\x1bxb\xdb\x9f)\xce\x91\x9c\x9b\x86\xfe\x0e7\xa1\\\x03\x1d\xf4\xad3\x07\xdf?J\xabz\x90\xcd\xb0\xc3\xe9\x13\xe2"I\x98\xb8\x06\x00\xe7U\x98\x126\x83B\xfeV\x0c\x911\x95bHD\t\xda\xd8\xe6\xd3\xd8\x0fl\x7f]\x85\x1fV*<p\xd3[l7\xceOX\x9c2\x0fa\x16{\x84\xd0A\xa9A\xfbC\x8e\xd6^\x9d@\xb48;\x96\x87\x8c\n\xd5\xa5j\x10\xd3@*\xbbIw\x05\xed\xc1\xeb`0\xa7y4\x1a\xafJF\xba\xaf\xafU#\x07\xcc\xf6\xfaY\xc6m\x84\xfcCtS]2\x84\xfa\x12\xb6\x80\xae\x03\xe6\xe4a\xecr|Fp\x0f\xcbl\x8bD\x9f\xa2\xd5\xa27\xe3\xb4[\xf7r\xaf\x9cg\x17\x1aG\x12iL\xec\xe0\xf1\xc8v\xde\xf5\x1e\xb4\x9c/\x16`xw6\x95\xb8\xa6ir\x82PT\xe9\x0b3I\xd1)*\xca\x12\xcb\x8d\xff\xf8\xc4\xbfd\x99}\x036\xa30[\x92\xf7\x06i\t\x93\xed\x8d\xf2\xea\xd0dI`\xc5QJ\xcc\x0fz\x14QTR\xed\x84Y\xac\x08\xab\xc2\xb9\x0c^\x9ak\xf9\xfb\xfb\xb9}\xa0\xa92\x10\x07P\x94\xfe\x1aK\x88\xb1\xe8M~Fw\xf6zs\x86\xb4)\xfc,\x82#\xdb\xf7"\x1d\xf5\x8f\xa93t\x91\x16o\xd2y\x8b\xe6\x96\rJ\xa5\x8e\xea\xd4\x94\x15\x14\'\x15J\xa2R>\xbfH\x90$X\x19!\xe0\xd6E\xb4\xdf\xbf\x0c~!\xab\x91\x17\x03\xe3\xfcg\xb7&\x8d\x0c\xa0\xd5@v\x99V\xac\xc2\x082\xc2\xe19\xf1\x9f\xc8\xfc\xff\xd9\xa9\x0e\xb1yb\xf4P\xc6\x97\xacLR\x06\xdc\x91\xd1\x945$I\xf1\x15\x05Y\xf6\xf3u\xaa\xee\xf1\x97k<KY:\xfe\x9d\xb6\x90\xb8b*2\xf5\xec\xa2xo\xdc\x83]\xeb\x9e6\xeb-G\x
9aY]\x9c.\x9b1\xec\x7f\x9b1\xa4BK\xb6L\xa9\x8dE\x7fqeZc>\xcc$\x08\xe1XY;\xb4o\x96-$\xe4\x99\x92pV]I\xaa\xffD<\x9e+\x152 \xd5X\xbdE\xe9!\xafV\x8f\rDN-\x18+\xfa\xa1\xae\xc2\x03\xd5\x90[h\xad\x84#\xc9\xebQ)%\xda~+n;m*\x98\x82]E\x87vG\x92(Yt\x15\x98\xfd\xb2\x8dS\xad\xaf 4zh\xa5\xb9\xca\xd0\x85\xc0o\x17#\x1faE\xc7L\xcew\xab6\xfc\xe9\x96\n\xfa/1\x05\xd8\xd0\xd5\x13\xca\xfc)h\xecA]\x8d;\xa6n\xe6\x043Wo\xf7R\x84Sr\x0e\xe8\x99\x88\x7f\x1e?\x05\xb8\xd2\xd8@\xd2-\xa4\xa0\xf4\r\xa8\xc0\xcd\xc6\x0e\xfbl\xa9\x84x\xc7\x0c\xa4(!\x16@\xcb\x9b\x13\xab\xd6\x92\xbdU\xcbd\x04*\x99\x1c\xd9qt\x95\x18t/\xee\x94\x8dL\xc2\x01"\x14\xd7\xf1\xd9\xb1\xcb\xf2\xcf\xbe|\xe4;\x03\xa8\xe0\xec\xc2j\x97\xeb\xfb\x10]\x9d\x118\x18UL\x9art\xfa"-\xfe\xda\xba\xd1G\x9f\x8d\xa6k\x98\x7f=\xea\xf2\x1cl>3\x86\x12>\xf5\xb4\xcf\xac\xd4\xec\xaf\xeb\xfb\x99Z\xc8\xdc\xf7]F\x85\xfb\xb7\xffcB\xcaf\xd8d\xa5\xb6\x10^F\x97\xb5\x82p\xc2\xd5aB\x1a\xe3\x9d\x16\xb1\xa4\x88?\xd6yRCgcV\xc6Ps:{\x1d\x08\x06\xf2\xa8\xb2\xc9\xae\x90\xb7\xed\xf5i\xeb\x06\xa2\xd2\xc3\xc4\x90\xf1&5P\xbe\x8a+%\xfd\x92\xa8\xe8\x19c66\x03Kp\xa8\x15\x8f\x17\xf2\x1c\x96\xf4E\xd2m\x90\x007A\xb4\xef\x08Q\xc3r\xcd\n\xa4\xd2V\x11\xd2\x8bt\'\xb3\x9a\x7fH\x07]\xa3\xd7\xabt`\xe4"[\xaa"\xc2]P\x98\xf8\xbf\x1eULY\x82I^h\x8e\x1b\x01I\xabOmB|\x15\xfb\xec<\xc4Ym\xf7K\xf5\x10X8\x9bc\'b\xbd\x05]\xf5@\tM3\x8bH\x87H\x02X\x89Z\xf6\xf5\xd8\xf1\xae\xb1\xd8>8wQ\xcdp_zi\x88\x88mdO\xd9\xc5\xbd\x19\xfa5P0\xc8\xae\xaa3\x94,\x0cp\xdf\xab\xbb\x04\x11\xec\x11\xd2.AX\xe9\x05\xbaLp\xb2\xf6ji;\x92E\x90\x18uEp\xef\xb2\xc7u\xa2\xe3V\x08E\xf4\xaa\xfb\xf8\xf2\x99\xf7~\xa8\xa5{\xbcMx?`C\xe1+\x1dyk\xdc\x01?|\x1di\n\xec\x8d\xeb\'\x11N%Ua8\x82\xed~\xc5\xc1\xd5]\x8a]\'{~u(\x92\x00\xbc\xc5N\xcd\xa8*P\xcf\xa4d\xc8\xea\x90\xdf\xec\xf7\t\xd8b\x05\xa3\xfd\xb0\xc8\x96y\x98W\x0c\x1f\xa87@=\x8a2\xd1\xadM\xf9D\xcf\xf8\xee\x8e\xc46>\x87F\xa9\r;e\x8e\xdaH\x1e\x161t\x15\x0bT\x9c\xa6\x8e\x05_\xc9\x0e\xc2\xa6_\xda\xf2\xe1\xe4\xa4\xa7\xb2;\xcd\x9d\xc4\xa3\xd6\x84\xf5\x93\xe5C2\xba\xa8\xb1@\xfa>\xa7\xc0@[\xc5\xb3\x99Q\x00\rxsa\x8f\x1etPk5\xf4\x0e\\\x81\xb68\x19\x1e\xc3\xab\x1co\xe9i\xaa\xe6\x02\xa6\xf3\x91?g\x98\xea\x81\x00\xccd\xe2\x8c/\xef\xb8\xc8\xecU\xb6\xc8\xf8\x9b\xb5=LQ2\xbepp\x03\x08\xadT\x10\x90I\xa5\xa3\x9c\x80\xa9,\x1f\x91K\xae\x9f\x18\xed\xcd\xc4\x06lC\x96u:4\xb3K\xbcE\xf7ey`\xaf?c\xfa\x9a\xea\xfa0\x10\t\xc4\x9f\xaf\xd2\xbc\x92\xa3\xc22\n5i\x9f@s\x85\xb6j\xe3^\xd7Ue\xd8\xba\xe9\x95:\xffx\xdd8\x14 \xbc\r\xa4\x05\xa4\x8c"#|\xbcR\xc6@Pj\xc7\xbb\xa9B\xfbN\xa6\xee<\xb6\xb1\x9a\xacs&r\xe7>L\xfc\x07\x17\x98\xac\xb7\xea\x94\x99\x8b\xd5_0\'\xa6i\x8e\x9eb\x9fO\x84)\x11\xf4IF\xfa\xb7\xf8\x1b\njU\xf8\x12\xbf\t\x1f\xdc\xba@\x9bEv\x1b\xb1\xdf\x94\xa6\xa4Ny\xfa^& \xd1;q\x8e\x86\x94\xec(\xf7X\x17\xb4\xe1\x1a\xc4\x8e\xacn\xee\xf8\xdc\xb0^{\x88I\xec\xf1\xa9\x0c\xdcY\xa3\xbb\xf4\x01wo\xc9\x93\t\xaa\xd4\x9cw\xbaB\x8d^(\xa7)\xdc~-8+\xf4e 
\x07\xa4\x1c%\xb1\xc4\x88\x0b\xcco#\x95\xff\xd8\xed\x83B\x9f^\x16R\xc4\xde\xf3\x99g>\x16C\n;\xac\x06]t~\xdb\xf7n\xb9\x1f\'\x8bm\\\xec\xae?4\x06r\x05v\xd2H\x0e\xff:\x94\xe6UW\xcatXoF\xac4\x8e+\xa9\x97\xb6\xea\xd6\x08T}\xc7$\n\xf5\x85\xb8O\x9e\'\xf9E\xc1o\xb8\xf9\xf0\xd9\'\xc2\xf4C\xf8\x17j\xb8\xf5\xf2\xb7\xec\r\x84\xd4\xe0@RD\x80\xee\xf8\x83\r\xb1\x93W#Q-\xbba\xc4D\x88\xb6hqq"\xc7\xee\xe8\xdep\x83c\x89,\xb3\xee\xc7\xd8\x1bQ\xaeOL\xaeO\xdeH_M&\xeb\\\x85\xa4\xcb\xee/\x9d\xab\xec\xcb7\xc6p\x9cK\x01?\x9f\x8c/_s\xaa\xcc\xd9\x1c\xf9\x88\xcd\xa7\x9b0\x1e\xaa\xd1};9\xc8E\xb9\xc0\xb7\x84\xd4\x96\x86C-\x81\x8e\x0b\xa5\x00R}F:O\xf2\x14h9\xfc#C\xb6\x16\x1f\xe8\x9f2\xf2\xb8y\x80$\xec\x03\xc2\xd1\xe1\x8epO\xdc\xdc\xb3\x8c\x88\xdb,\x05\x8cf\xbc\xd4i\xe61Ox\x11\xf7ig285\x86\xceL\x0c~\xf7\x06\xba\xc00q[\x1a\x06\xe1\x8f\xff\xc2\xed\x1e\xf4#\xcd\x9a+\xa3*M?`\xb7*\xe4FY\x93\xb9\x0ch\xea9S\xd6\x0b%\x18\x8fG\xd6j\xf6\xca\x14D$$3\x8b\x04\x93\x8d\x9e\xd2\xa3\xe2\x06f\x1f\x0e\x84}>\x01u\xf8\r|\xfaz\x0f\x99\x9a5_\x01\xb2cc\x89\x96\xc6\x06i\xb8)\\\xa8l8\x8e\xe8T7C5\xc8\r\xc0V0c\rb\x01\x16P\x15\x92O\x94#\x84\xa0\x1d\xeb\xd49!\xf4\x1c\xd7\xdf\xaf/\'\x94\x9fh\xe7\x87\xb8\x11\xcf\xf5\xce\x8eE\\\x8f\x1f][p\xe4\xc3\x95\x8d\xf3\xc2}\x8e4\xed\x94\xd3E)\x1c\xffa\xc3f\x1c\xce\xf9\x1d\xbe\x84 )\x04EG`H!\x03\xd4\x85)Ur\x18\x82IU %\x12\x10P\x1e\xf3v\xa7\x94\xb7\x86)\x12\x16\xed*)\xd31\xc2\xe9\x98\xcd>\xf4\xe7\xb5_\xb3\xc5Hb\xf9z\x9a\xd3\xcc\xec}\x1b\xf4\x88\xbbk\x87\xf2\xc1\x9d\x91@f\x06$^\xf6\xa2\xd0\xf5`\n\xc9\xc3\xf9&\xa8\x92\x99Ui\xfb\n\x8a\xc9\xdb\x17\x1c\x9f\xd9\x9f|~\xf6\xa2K\x19\xdd\xbeRa\x88\xf2\xb2\x1b\xd1mi5#\xcam\x940\\Y\xa1\xf8\x1f\x04\xa1\xfc\x8a\x9c\xce\n\xe6\xdc\xf1\xf4\xe2\x13\x80\xa27\xedQAev\x0b\xc2\xbdP\xd01\xf6\x04~\xae\x11\xb9geZ\xc8\x93h\xab9\xd9>\x82\xea?r\xa7Y\x81\xcbDl\xb2\xc7\xc6\x0b\xdbC\x04:\x94S%\xaezD\x19+\xf5\xbf\x82\x8e\xea%~#\xff8Sect\xa8p\xf0\xa1\x98~\xba}\xe3:\xc8h\x0f(2\xb6\x15/|&zP}3\xaf\xd0\xbd\xfa\\\xb2\xd9.z`\xff=\xb8\xea(\xea\xf2\x9e\x88gkXq\nV!\x8b\xd5\xdd\xc4\x9a\x06\xdfm\x87\x97\xef\xdc6\x8e\x13\xda\xa1\x19\xec\xcb\xf7M\xf2Rm\xc9)\t\xac\'\xa5X~:\x89\x11\xd4\xb2\'\xd8\x12S\x07\x86U<RD\xa8\xa2\xcf\r\x8b;\x93\x19\x8aq\xb9\xdf\xb5\xce\xaa&\x802\xb8\x8eLC\xcc//\x906\xfe\xe8=m\x1c\x91\xc1\xa9\x15}\x8f\xff35\x03\xc9\x12\x16^\x1e\x80\xf5\xa4\xf9\xa0 \xc4\x84\xfc\x9f\x98E\x98\x11\x90\xa3\xfd.\xe9k\xdb\xb9\x8e\x8c\x8b\x9a\x8d\x9b\xde\xfb!gIb\xa2b\x9cg\xc1\xf3\xaf\xfa\xa9\xc8\x9d\x84\x0c8L\xa2\xb3\x91\x8f<\xf5_G\xe2\x0e\xb9\xd5\xc2Jo\x88k@\x0c\xd6\xc4<\xf6\xc2u\x06S\xf6\xe7\xb0CF\xf6\x93j\xf6e\xda\x14\xca\x82E;\x12Y\xdb\x9a^\x8c\x0bXC\xca\xdf\xbd\xc1\xec\xc1\xc1\xccE\x17\x03\x1b\xcar/X\xea\x81`\x1b\xc0\xbcx\x14\xebR\xf8R\x08R\x80y\x82\xcd\x17\x0fS\xa2\xb1\xf4\xb6\xc9Q\r\xf34\x1f\xeb\xd5\x02\x8c\x87\xe0fZ\xfc\x98\xb2c\xe28]\x80\xdc\x84\x89\xd2\x8al\x88\xac\x13O\x19i\xcc%\xab\xf7\xf6\xaa`\xe1\xf0o*t\xad\xc3\xb6\xda\xb7=G\xa9\x95[\xe3r\xcf\x01\x91\xb2-\x0b\x06\xf3\xfe\x18\x0c\xf0\xec\xcb\r\xe0\x8c\x89~\xba\xcd\xfc\tozr~`H]d\r2\x1b\xb4QU5\xe2\x9cZ\xbe\x98k\xb0\xf4\xe4\xc8Sm"k\x0c\xc5Z|\xf2\xc0\xd1\x86\x11\xfd\xf5\x9f&\x06\x8c`\xe9\xfa\x9d\xc8\x84\x13N\xf3\x98\xf7\xd9.C\x18\xcc/\xcb]\x87X\xbe\xaeb\xf3\x19\xa8\xfd\x8b0\x9ehDF\xea?\x95\x0cS\xa7<\xb7;\x7f\x00p\xf5\xc0\x83}\xbe\x87\x16\x94\x99\xbe\xfc\x84^S\xd3\xd0\xdf\xe4\x7f\x80}p\xa3\xa4\xec\xa6M$\xec\xff\x95\xa2N\xd7\xc0=\xcf\xa6V5*\xdc;M\xe0D\x87\xe8T\xca i\xab\x9b\xd6\xdd|\x88 
\x7f\xbe\x91\x8e\x14[z\xf5\xa5N\x1ci\x0b\xea\xf75\xc2\xc7\xb0\xadDS\xa4\x8c2&[\xa2\x91\xd5\xb4\xea\xe6jt\x0f\x87d3\x80Rn\xedd\x9f\xff\x80/.\x96\t\xd0\x80\xee\x91^\x1c\r\xf1A\x84\x16\xb4@\xaa\x1d4Z\x19\\\x14\x9d\xb4\xc1-\x96\xbd\xc6\x194\xc3\xe4>\x97T\xf8\xa4\x02\x85\xa0Db\x0cw\x9a\xc5\xea8kC4y\xc3)4\xbc\xdaI\x0b\xce\xa1w\xd1Q\x8c\x05q\x80\x8e\x1b\x83_\x14\xdd_BU\xc5oS9\x7f\x1e\xdc\xd1\xcf{\xee;\n{\xa7\xa3>\xee\x9f0H\xfdY\xcf\x0b\xd1J\xa34,x\xa6c\xd9\x17\xf7\xfd\xeb}\xb4\xf3\xed\x89ew\xdc\xf5\x88\xda\xe2t>\x03\xbd\xf1O#t\xfa.\x87D\x96\xd7\x9dm\x8b\x0c\xc6\xd5\x89cmS\xe1\xe3R\xf1u\xf6\xe4\xc2\x1b\xf3\'j\xe8\x92\x96BB\x89\xe7\xc0\x97\xf8\x8b\x99\xea\x11$4\x89\xf3\x00n\xea\xb9\xf39s\x90\xff6\x1a\xa9}\xf0\xf4w\xf5\x13)\x080\x07Z\xae3r\x8a\xf1x\xe9D\r\x98\xa4w[\xb2\xb4j\xc0\xfc@\x7f\x97\xbd\xdb\xe0\x93F@\x9f\xbfc*\xc4\xfe\xd1\xd9\xfbc\xc53t|\xfeDW\x13\xac\xd9\xdcB\xa2\x9a&\xbf\x8a\x98\xcbh.\x03\xd2\x1e\xcd\xefU\x1bT\x8f\x8a\x04)A~\x00\x9c (\x00\xe9\xdd\xd02\xa9\xf6\x8d\x94\xf7\xac^:\xea\xf1\x8f\x08\xb1\x1a\x08U7\x07\x06B\x8cQ\xdaX\xf2\xd8"\x96K\xa3\nm\xbb/^~)v\xceU\x83\xf3\x93\xaa?\xe4\x14\xb6r\xb1\xb4KV\xe6a\x84\xca\xbf\xa2\xba\r\x91y\x94A\xf0A\xc0\x7f\xc0\x98\xf3\x89?q]J+\xb9ee\xff\xa5\x81Do\xf4\xd4\x05W\x8f\x07\xfd\x95\\\xea\x9f\xf2\x9a\x97w\xe5Qa\x80\t\x90\x02\x9e\x08,5\xc2A\xb8\x0c)\x9d\xa7\xe5\xddI)\xa4K\xff\xb5\x9a\x1aG\xf8k7%\xd7\xa5\x9e@\xd8\x80|\x83g\xd8Q\xef\xc9\xfc^\xc6\xc3\x94\x0f\x03\xf8\xe8\x93\x8a\x1c\xc1\xb7\x1d&\xdb#\xa4Z\xfb&\xacC\x04\x01\x18\xab\xbf$\x11\x18\xaa^\xd6\x17\x12\xda>w\xa9a\xd7\x13\xffP\xa4\xb3\xe5\xd9\xc3@u\xc9\xc0\xf4\x8e\x9b\xd7\xe1\xd1\x13\x99xT!G\xd7\x0f\xb8\xd8\x92\x1b\x0b\x07\x08\xf5 k\x01\r\x15\x02h\xa3\x80T\xe7m\xdeH\x14]\x9a\xb8\x92\x0e\x9a!\xb3\xad5\x13\xfd\x1e\xa1\xbbA\xfd\xf7\xc2\x7f\xd4\xd0\xe7\xef\xee\x817\x854\x10\xfa\xca\xbe\x14\xbbI\xae\xf9\x06\xc0\x01^\x01/\xc0\x00\x05\x80\x07\xbb\xd8\x8f\x8b\xc3\x08P\x81\x0e(:\x87l3\xb8#\xf4fp^\xb1\xe7^\x8a\x1e\\\x8c\x1e\xe8\xb4\xf0!\xf5\x86$Y\xe0$/s\xf3Q\x97&\xd9\xf5U\x00E\x13\x86r\xbb1\xb3\t\xb4\xf2k\x94\x16 \n<\xba;\n\x97\xa1 $B\x0e\xf9v\x7f|\xd5\x02\x998\xca\xb7\xb6\xd3\xfe\xf1\x13m\x1e\xfaF\xd3\\\xa6\x92.\x11\x00\n\xe9\xa6\x8e\xcaHGR\xc7\x12\x18O\x1f\x0c\xb9\x0c:\x1fF\xff\xbb\xc8\xb3\x89h\xefZ$\x18.\xa1}EF\xc6\x1d\x9d\x07\x8f+9&\xaf\x86\xc7\x9d5\xc3\x99\xf5*\xa9X\x1e\x8d\x9d\x9f\xcf\xe0\x1e\xca\xd0Ct\x84f?\xa6\x1aN\xa9\x94r\x8aqEA8X\x8b{\x86\xa6/\xda\x8e\xab\x7f8\x01\xeb\xdf\xfb\xb77s\xa2a)\x1f\x16D\xf6,Q\x9c\xd2\xe1\x05\x16\xfb\x9a\x12\x0b\'PP\t\xdc\xc9l]\xd4\x94\x10\xc0O)D*\xd9 k\x96\xca\xdf\xd4\xd7\xd4Xg\x89t\xf1e\x18i\nbZ\x95\xe5g\xa8\xc0\xb0\x83\xed\xbc\x01\xe8\x0f\xfd\xd0\x10K\xe7\xd1\xde#\xdb\xe3E\\\xe4\x80\x1d\xb8/\x11t+\xc4q\xcf\xed\xdc\xc1z\xe6\\\xba\\\xf1\x05Q\xda,#\xfa\xbb\xd7\xbd\xf1\xc1\x99!\xe3kt\x84\xe4\x16Txh;8\xe3\x99\xda\xa4`\xbaR\xf0\x1e\x04\xd7!5)\x82\xc8g)\x81\x92\xeaZ\xdf\x8f\xbd)\x16<\x12\x8c\xf3C\xa9\x84h~2\xb9MX\xbe\x8ei\x9bN?\x06[\xa7{\x82\x87\xac\x08\xffK\xa1\xe9\xee\xb0.;W\x9a\xea\xaf\x81S\xb1\xf3\xfa\xbc\x1aZ.\x84X\x88\x11\xa0\x8e9\r\xb7\'\xfc\x7f\x96!\xb8{\x9e\xf2k#\x13\xf3\x14\x85\xac6\xe3Zr\x0f\x0f^\xf2\xdak3\xbd\xc3\x86\x126\x8c\x1a4\x9c\xc67\xb3\x19\xddx,]"\x06V\xc7\xe5\xdbp\x1f.\xfca\xcb\xfd\x0fF\x1bPN\xb6\xac\xacioZ\x9d\xf73\xe9T\x1c\x08\x82\xfcQ5\x8e\xc5k\xcc\x02\xecQ\xca\x99\x89\xb7q\x18\xa8F0\xc7\xae\xdc\xe9\xe56\x18\xb4i\x06\xfc&\x8a\xe7\xdf\xa3\xb1C\xe7#\xe5\xf8l\xd9\x10P\xda\xaap\x81\xa9\x07\xc1]f\x85)!^x\x82\xd9\xe0\xdek\x87\xd5\x80\x05\xb3gj\xb9\xe4Zk\x1c$\xc1\xbb+\x8e\xf3\x18I\x85\x8f}\x11\xf5\xdb2\xc0>\x94\xdb\x90\xc2\x87! 
|
mit-llREPO_NAMEspacegym-kspdgPATH_START.@spacegym-kspdg_extracted@spacegym-kspdg-main@src@kspdg@private_src@python3_12@Darwin_x86_64@kspdg_envs@lbg1@[email protected]_END.py
|
{
"filename": "interval.py",
"repo_name": "threeML/threeML",
"repo_path": "threeML_extracted/threeML-master/threeML/utils/interval.py",
"type": "Python"
}
|
import copy
import re
from operator import attrgetter, itemgetter
import numpy as np
from threeML.io.logging import setup_logger
log = setup_logger(__name__)
class IntervalsDoNotOverlap(RuntimeError):
pass
class IntervalsNotContiguous(RuntimeError):
pass
class Interval:
def __init__(self, start: float, stop: float, swap_if_inverted: bool = False):
self._start: float = float(start)
self._stop: float = float(stop)
# Note that this allows to have intervals of zero duration
if self._stop < self._start:
if swap_if_inverted:
self._start: float = stop
self._stop: float = start
else:
log.exception(
"Invalid time interval! TSTART must be before TSTOP and TSTOP-TSTART >0. "
"Got tstart = %s and tstop = %s" % (start, stop)
)
raise RuntimeError()
@property
def start(self) -> float:
return self._start
@property
def stop(self) -> float:
return self._stop
@classmethod
def new(cls, *args, **kwargs):
return cls(*args, **kwargs)
def _get_width(self) -> float:
return self._stop - self._start
@property
def mid_point(self) -> float:
return (self._start + self._stop) / 2.0
def __repr__(self):
return " interval %s - %s (width: %s)" % (
self.start,
self.stop,
self._get_width(),
)
def intersect(self, interval):
# type: (Interval) -> Interval
"""
Returns a new time interval corresponding to the intersection between this interval and the provided one.
:param interval: a TimeInterval instance
:type interval: Interval
:return: new interval covering the intersection
:raise IntervalsDoNotOverlap : if the intervals do not overlap
"""
if not self.overlaps_with(interval):
log.exception("Current interval does not overlap with provided interval")
raise IntervalsDoNotOverlap()
new_start = max(self._start, interval.start)
new_stop = min(self._stop, interval.stop)
return self.new(new_start, new_stop)
def merge(self, interval):
# type: (Interval) -> Interval
"""
Returns a new interval corresponding to the merge of the current and the provided time interval. The intervals
must overlap.
:param interval: a TimeInterval instance
:type interval : Interval
:return: a new TimeInterval instance
"""
if self.overlaps_with(interval):
new_start = min(self._start, interval.start)
new_stop = max(self._stop, interval.stop)
return self.new(new_start, new_stop)
else:
raise IntervalsDoNotOverlap("Could not merge non-overlapping intervals!")
def overlaps_with(self, interval):
# type: (Interval) -> bool
"""
Returns whether the current time interval and the provided one overlap or not
:param interval: a TimeInterval instance
:type interval: Interval
:return: True or False
"""
if interval.start == self._start or interval.stop == self._stop:
return True
elif interval.start > self._start and interval.start < self._stop:
return True
elif interval.stop > self._start and interval.stop < self._stop:
return True
elif interval.start < self._start and interval.stop > self._stop:
return True
else:
return False
def to_string(self) -> str:
"""
returns a string representation of the time interval that is like the
        argument of many interval reading functions
:return:
"""
return "%f-%f" % (self.start, self.stop)
def __eq__(self, other):
if not isinstance(other, Interval):
# This is needed for things like comparisons to None or other objects.
# Of course if the other object is not even a TimeInterval, the two things
# cannot be equal
return False
else:
return self.start == other.start and self.stop == other.stop
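# A minimal usage sketch, assuming the Interval definition above:
#   a = Interval(0.0, 5.0)
#   b = Interval(3.0, 8.0)
#   a.overlaps_with(b)   # True
#   a.intersect(b)       # -> interval 3.0 - 5.0 (width: 2.0)
#   a.merge(b)           # -> interval 0.0 - 8.0 (width: 8.0)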
class IntervalSet:
"""
A set of intervals
"""
INTERVAL_TYPE = Interval
def __init__(self, list_of_intervals=()):
self._intervals = list(list_of_intervals)
@classmethod
def new(cls, *args, **kwargs):
"""
Create a new interval set of this type
:param args:
:param kwargs:
:return: interval set
"""
return cls(*args, **kwargs)
@classmethod
def new_interval(cls, *args, **kwargs):
"""
Create a new interval of INTERVAL_TYPE
:param args:
:param kwargs:
:return: interval
"""
return cls.INTERVAL_TYPE(*args, **kwargs)
@classmethod
def from_strings(cls, *intervals):
"""
These are intervals specified as "-10 -- 5", "0-10", and so on
:param intervals:
:return:
"""
list_of_intervals = []
for interval in intervals:
imin, imax = cls._parse_interval(interval)
list_of_intervals.append(cls.new_interval(imin, imax))
return cls(list_of_intervals)
@staticmethod
def _parse_interval(time_interval):
# The following regular expression matches any two numbers, positive or negative,
# like "-10 --5","-10 - -5", "-10-5", "5-10" and so on
tokens = re.match(
"(\-?\+?[0-9]+\.?[0-9]*)\s*-\s*(\-?\+?[0-9]+\.?[0-9]*)", time_interval
).groups()
return [float(x) for x in tokens]
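    # For illustration, assuming the regular expression above:
    #   _parse_interval("-10 - -5") -> [-10.0, -5.0]
    #   _parse_interval("0-10")     -> [0.0, 10.0]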
@classmethod
def from_starts_and_stops(cls, starts, stops):
"""
Builds a TimeIntervalSet from a list of start and stop times:
start = [-1,0] -> [-1,0], [0,1]
stop = [0,1]
:param starts:
:param stops:
:return:
"""
assert len(starts) == len(
stops
), "starts length: %d and stops length: %d must have same length" % (
len(starts),
len(stops),
)
list_of_intervals = []
for imin, imax in zip(starts, stops):
list_of_intervals.append(cls.new_interval(imin, imax))
return cls(list_of_intervals)
@classmethod
def from_list_of_edges(cls, edges):
"""
Builds a IntervalSet from a list of time edges:
edges = [-1,0,1] -> [-1,0], [0,1]
:param edges:
:return:
"""
# sort the time edges
edges.sort()
list_of_intervals = []
for imin, imax in zip(edges[:-1], edges[1:]):
list_of_intervals.append(cls.new_interval(imin, imax))
return cls(list_of_intervals)
def merge_intersecting_intervals(self, in_place=False):
"""
merges intersecting intervals into a contiguous intervals
:return:
"""
# get a copy of the sorted intervals
sorted_intervals = self.sort()
new_intervals = []
while len(sorted_intervals) > 1:
# pop the first interval off the stack
this_interval = sorted_intervals.pop(0)
            # see if that interval overlaps with the next one
if this_interval.overlaps_with(sorted_intervals[0]):
# if so, pop the next one
next_interval = sorted_intervals.pop(0)
# and merge the two, appending them to the new intervals
new_intervals.append(this_interval.merge(next_interval))
else:
# otherwise just append this interval
new_intervals.append(this_interval)
# now if there is only one interval left
# it should not overlap with any other interval
# and the loop will stop
# otherwise, we continue
# if there was only one interval
# or a leftover from the merge
# we append it
if sorted_intervals:
assert (
len(sorted_intervals) == 1
), "there should only be one interval left over, this is a bug" # pragma: no cover
# we want to make sure that the last new interval did not
# overlap with the final interval
if new_intervals:
if new_intervals[-1].overlaps_with(sorted_intervals[0]):
new_intervals[-1] = new_intervals[-1].merge(sorted_intervals[0])
else:
new_intervals.append(sorted_intervals[0])
else:
new_intervals.append(sorted_intervals[0])
if in_place:
self.__init__(new_intervals)
else:
return self.new(new_intervals)
def extend(self, list_of_intervals):
self._intervals.extend(list_of_intervals)
def __len__(self):
return len(self._intervals)
def __iter__(self):
for interval in self._intervals:
yield interval
def __getitem__(self, item):
return self._intervals[item]
def __eq__(self, other):
for interval_this, interval_other in zip(self.argsort(), other.argsort()):
if not self[interval_this] == other[interval_other]:
return False
return True
def pop(self, index):
return self._intervals.pop(index)
def sort(self):
"""
Returns a sorted copy of the set (sorted according to the tstart of the time intervals)
:return:
"""
if self.is_sorted:
return copy.deepcopy(self)
else:
return self.new(np.atleast_1d(itemgetter(*self.argsort())(self._intervals)))
def argsort(self):
"""
Returns the indices which order the set
:return:
"""
# Gather all tstarts
tstarts = [x.start for x in self._intervals]
return [x[0] for x in sorted(enumerate(tstarts), key=itemgetter(1))]
def is_contiguous(self, relative_tolerance=1e-5):
"""
Check whether the time intervals are all contiguous, i.e., the stop time of one interval is the start
time of the next
:return: True or False
"""
starts = [attrgetter("start")(x) for x in self._intervals]
stops = [attrgetter("stop")(x) for x in self._intervals]
return np.allclose(starts[1:], stops[:-1], rtol=relative_tolerance)
@property
def is_sorted(self):
"""
Check whether the time intervals are sorted
:return: True or False
"""
return np.all(self.argsort() == np.arange(len(self)))
def containing_bin(self, value):
"""
        finds the index of the interval containing the given value
:param value:
:return:
"""
# Get the index of the first ebounds upper bound larger than energy
# (but never go below zero or above the last channel)
idx = min(max(0, np.searchsorted(self.edges, value) - 1), len(self))
return idx
def containing_interval(self, start, stop, inner=True, as_mask=False):
"""
returns either a mask of the intervals contained in the selection
or a new set of intervals within the selection. NOTE: no sort is performed
:param start: start of interval
:param stop: stop of interval
:param inner: if True, returns only intervals strictly contained within bounds, if False, returns outer bounds as well
:param as_mask: if you want a mask or the intervals
:return:
"""
# loop only once because every call unpacks the array
# we need to round for the comparison because we may have read from
# strings which are rounded to six decimals
starts = np.round(self.starts, decimals=6)
stops = np.round(self.stops, decimals=6)
start = np.round(start, decimals=6)
stop = np.round(stop, decimals=6)
condition = (starts >= start) & (stop >= stops)
if not inner:
# now we get the end caps
lower_condition = (starts <= start) & (start <= stops)
upper_condition = (starts <= stop) & (stop <= stops)
condition = condition | lower_condition | upper_condition
# if we just want the mask
if as_mask:
return condition
else:
return self.new(np.asarray(self._intervals)[condition])
@property
def starts(self):
"""
        Return the starts of the set
:return: list of start times
"""
return [interval.start for interval in self._intervals]
@property
def stops(self):
"""
Return the stops of the set
:return:
"""
return [interval.stop for interval in self._intervals]
@property
def mid_points(self):
return np.array([interval.mid_point for interval in self._intervals])
@property
def widths(self):
return np.array([interval._get_width() for interval in self._intervals])
@property
def absolute_start(self):
"""
the minimum of the start times
:return:
"""
return min(self.starts)
@property
def absolute_stop(self):
"""
the maximum of the stop times
:return:
"""
return max(self.stops)
@property
def edges(self):
"""
return an array of time edges if contiguous
:return:
"""
if self.is_contiguous() and self.is_sorted:
edges = [
interval.start
for interval in itemgetter(*self.argsort())(self._intervals)
]
edges.append(
[
interval.stop
for interval in itemgetter(*self.argsort())(self._intervals)
][-1]
)
else:
raise IntervalsNotContiguous(
"Cannot return edges for non-contiguous intervals"
)
return edges
def to_string(self):
"""
        returns a set of string representations of the intervals
:return:
"""
return ",".join([interval.to_string() for interval in self._intervals])
@property
def bin_stack(self):
"""
get a stacked view of the bins [[start_1,stop_1 ],
[start_2,stop_2 ]]
:return:
"""
return np.vstack((self.starts, self.stops)).T
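# A minimal usage sketch, assuming the IntervalSet definition above:
#   s = IntervalSet.from_strings("0-1", "1-2")
#   s.is_contiguous()    # True
#   s.edges              # [0.0, 1.0, 2.0]
#   s.widths             # array([1., 1.])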
|
threeMLREPO_NAMEthreeMLPATH_START.@threeML_extracted@threeML-master@threeML@[email protected]@.PATH_END.py
|
{
"filename": "test_testing.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/tests/test_testing.py",
"type": "Python"
}
|
from unittest import SkipTest
import matplotlib
from yt.testing import requires_backend
active_backend = matplotlib.get_backend()
inactive_backend = ({"gtkagg", "macosx", "wx", "tkagg"} - {active_backend}).pop()
def test_requires_inactive_backend():
@requires_backend(inactive_backend)
def foo():
return
try:
foo()
except SkipTest:
pass
else:
raise AssertionError(
"@requires_backend appears to be broken (skip was expected)"
)
def test_requires_active_backend():
@requires_backend(active_backend)
def foo():
return
try:
foo()
except SkipTest:
raise AssertionError(
"@requires_backend appears to be broken (skip was not expected)"
) from None
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@tests@[email protected]_END.py
|
{
"filename": "pota2clus_novetonocoll.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/scripts/mock_tools/pota2clus_novetonocoll.py",
"type": "Python"
}
|
#standard python
import sys
import os
import shutil
import unittest
from datetime import datetime
import json
import numpy as np
import fitsio
import glob
import argparse
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
from desitarget.io import read_targets_in_tiles
from desitarget.mtl import inflate_ledger
from desitarget import targetmask
from desitarget.internal import sharedmem
from desimodel.footprint import is_point_in_desi
from desitarget import targetmask
import LSS.main.cattools as ct
import LSS.common_tools as common
import LSS.mocktools as mocktools
#import LSS.mkCat_singletile.fa4lsscat as fa
#from LSS.globals import main
import logging
logger = logging.getLogger('mkCat')
logger.setLevel(level=logging.INFO)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info('run started')
if os.environ['NERSC_HOST'] == 'perlmutter':
scratch = 'PSCRATCH'
else:
    print('NERSC_HOST is not perlmutter but is '+os.environ['NERSC_HOST'])
sys.exit('NERSC_HOST not known (code only works on NERSC), not proceeding')
parser = argparse.ArgumentParser()
parser.add_argument("--tracer", help="tracer type to be selected")
parser.add_argument("--realization",type=int)
parser.add_argument("--prog", default="DARK")
#parser.add_argument("--mockdir", help="directory when pota mock data is",default='/global/cfs/cdirs/desi/users/acarnero/y1mock/SecondGen/clustering/')
parser.add_argument("--base_dir", help="base directory for input/output",default='/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/')
parser.add_argument("--random_dir",help="where to find the data randoms",default='/dvs_ro/cfs/cdirs/desi/survey/catalogs/Y1/LSS/random')
parser.add_argument("--mockver", default='AbacusSummit_v4_1', help = "which mocks to use")
parser.add_argument("--outloc", default = None)
parser.add_argument("--minr", help="minimum number for random files",default=0,type=int)
parser.add_argument("--maxr", help="maximum for random files",default=18,type=int)
parser.add_argument("--par", default = 'y',help='whether to run random steps in parallel or not')
args = parser.parse_args()
print(args)
rm = int(args.minr)
rx = int(args.maxr)
notqso = ''
#if args.notqso == 'y':
# notqso = 'notqso'
tracer = args.tracer
if tracer == 'LRG':
zmin = 0.4
zmax = 1.1
elif tracer == 'ELG_LOP':
zmin = 0.8
zmax = 1.6
elif tracer == 'QSO':
zmin = 0.8
zmax = 2.1
else:
sys.exit('tracer type '+args.tracer+' not supported (yet)')
mockdir = args.base_dir+args.mockver+'/mock'+str(args.realization)+'/'
in_data_fn = mockdir+'pota-'+args.prog+'.fits'
logger.info(in_data_fn)
if args.outloc is None:
outdir = os.getenv(scratch)+'/'+args.mockver+'/mock'+str(args.realization)+'/'
if args.outloc == 'prod':
outdir = mockdir
out_data_fn = outdir+tracer+'_complete_novetonocoll_clustering.dat.fits'
out_data_froot = outdir+tracer+'_complete_novetonocoll_'
cols = ['LOCATION',
'FIBER',
'TARGETID',
'RA',
'DEC','RSDZ',
'PRIORITY_INIT',
'PRIORITY',
'DESI_TARGET','BRICKID','NOBS_G',
'NOBS_R',
'NOBS_Z',
'MASKBITS','ZWARN',
'COLLISION',
'TILEID']
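# The columns above are read from the potential-assignments ("pota") file; RSDZ carries
# the redshift-space redshift that is renamed to Z further below.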
mock_data = fitsio.read(in_data_fn.replace('global','dvs_ro'),columns=cols)
#selcoll = mock_data['COLLISION'] == False
#mock_data = mock_data[selcoll]
if args.prog == 'DARK':
bit = targetmask.desi_mask[args.tracer]
desitarg='DESI_TARGET'
ndattot = len(mock_data)
seltar = mock_data[desitarg] & bit > 0
mock_data = mock_data[seltar]
logger.info('length before/after cut to target type '+args.tracer)
logger.info(str(ndattot)+' '+str(len(mock_data)))
'''
PUT IN SOMETHING HERE TO MASK TO GOODHARDLOC AS AN OPTION
'''
selz = mock_data['RSDZ'] > zmin
selz &= mock_data['RSDZ'] < zmax
mock_data = mock_data[selz]
mock_data = Table(mock_data)
mock_data = unique(mock_data,keys=['TARGETID'])
mock_data = common.addNS(mock_data)
logger.info('length after cutting to redshift and unique targetid '+str(len(mock_data)))
mock_data.rename_column('RSDZ', 'Z')
mock_data['WEIGHT'] = np.ones(len(mock_data))
common.write_LSS_scratchcp(mock_data,out_data_fn)
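# Split a clustering catalog into Galactic caps: rows with galactic latitude b > 0
# go to the NGC file, the rest to the SGC file.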
def splitGC(flroot,datran='.dat',rann=0):
import LSS.common_tools as common
from astropy.coordinates import SkyCoord
import astropy.units as u
app = 'clustering'+datran+'.fits'
if datran == '.ran':
app = str(rann)+'_clustering'+datran+'.fits'
fn = Table(fitsio.read(flroot+app))
c = SkyCoord(fn['RA']* u.deg,fn['DEC']* u.deg,frame='icrs')
gc = c.transform_to('galactic')
sel_ngc = gc.b > 0
outf_ngc = flroot+'NGC_'+app
common.write_LSS_scratchcp(fn[sel_ngc],outf_ngc,logger=logger)
outf_sgc = flroot+'SGC_'+app
common.write_LSS_scratchcp(fn[~sel_ngc],outf_sgc,logger=logger)
splitGC(out_data_froot,'.dat')
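# Randoms: each random realization gets Z and WEIGHT drawn from the data, resampled
# separately per photometric region (N/S, and additionally the DES footprint for QSOs),
# then split into NGC/SGC in the same way as the data.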
ran_samp_cols = ['Z','WEIGHT']
def ran_col_assign(randoms,data,sample_columns,tracer):
data.rename_column('TARGETID', 'TARGETID_DATA')
def _resamp(selregr,selregd):
for col in sample_columns:
randoms[col] = np.zeros_like(data[col],shape=len(randoms))
rand_sel = [selregr,~selregr]
dat_sel = [ selregd,~selregd]
for dsel,rsel in zip(dat_sel,rand_sel):
inds = np.random.choice(len(data[dsel]),len(randoms[rsel]))
#logger.info(str(len(data[dsel]),len(inds),np.max(inds))
dshuf = data[dsel][inds]
for col in sample_columns:
randoms[col][rsel] = dshuf[col]
rdl = []
for dsel,rsel in zip(dat_sel,rand_sel):
rd = np.sum(randoms[rsel]['WEIGHT'])/np.sum(data[dsel]['WEIGHT'])
rdl.append(rd)
rdr = rdl[0]/rdl[1]
logger.info('norm factor is '+str(rdr))
randoms['WEIGHT'][rand_sel[1]] *= rdr
des_resamp = False
if 'QSO' in tracer:
des_resamp = True
selregr = randoms['PHOTSYS'] == 'N'
selregd = data['PHOTSYS'] == 'N'
_resamp(selregr,selregd)
rand_sel = [selregr,~selregr]
dat_sel = [ selregd,~selregd]
for dsel,rsel in zip(dat_sel,rand_sel):
rd = np.sum(randoms[rsel]['WEIGHT'])/np.sum(data[dsel]['WEIGHT'])
logger.info('data/random weighted ratio after resampling:'+str(rd))
if des_resamp:
logger.info('resampling in DES region')
from regressis import footprint
import healpy as hp
foot = footprint.DR9Footprint(256, mask_lmc=False, clear_south=True, mask_around_des=False, cut_desi=False)
north, south, des = foot.get_imaging_surveys()
th_ran,phi_ran = (-randoms['DEC']+90.)*np.pi/180.,randoms['RA']*np.pi/180.
th_dat,phi_dat = (-data['DEC']+90.)*np.pi/180.,data['RA']*np.pi/180.
pixr = hp.ang2pix(256,th_ran,phi_ran,nest=True)
selregr = des[pixr]
pixd = hp.ang2pix(256,th_dat,phi_dat,nest=True)
selregd = des[pixd]
_resamp(selregr,selregd)
rand_sel = [selregr,~selregr]
dat_sel = [ selregd,~selregd]
for dsel,rsel in zip(dat_sel,rand_sel):
rd = np.sum(randoms[rsel]['WEIGHT'])/np.sum(data[dsel]['WEIGHT'])
logger.info('data/random weighted ratio after resampling:'+str(rd))
return randoms
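# Build one random clustering catalog: read the random pota file, keep unique TARGETIDs,
# attach N/S labels, resample Z/WEIGHT from the data, write it out and split by cap.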
def _mkran(rann):
in_ran_fn = args.random_dir+str(rann)+'/pota-'+args.prog+'.fits' #need to go back to pota to get randoms without collisions
out_ran_fn = out_data_froot+str(rann)+'_clustering.ran.fits'
ran = Table(fitsio.read(in_ran_fn,columns=['RA','DEC','TARGETID']))
ran = unique(ran,keys=['TARGETID'])
ran = common.addNS(ran)
ran = ran_col_assign(ran,mock_data,ran_samp_cols,args.tracer)
common.write_LSS_scratchcp(ran,out_ran_fn,logger=logger)
splitGC(out_data_froot,'.ran',rann)
return True
inds = np.arange(rm,rx)
if args.par == 'y':
from multiprocessing import Pool
with Pool() as pool:#processes=nproc
res = pool.map(_mkran, inds)
else:
for rn in inds:#range(rm,rx):
_mkran(rn)
#for rann in range(rm,rx):
# in_ran_fn = args.random_dir+'QSO_'+str(rann)+'_full_noveto.ran.fits' #type isn't important, all noveto have same ra,dec
# out_ran_fn = out_data_froot+str(rann)+'_clustering.ran.fits'
# ran = Table(fitsio.read(in_ran_fn,columns=['RA','DEC','PHOTSYS','TARGETID']))
# ran = ran_col_assign(ran,mock_data,ran_samp_cols,args.tracer)
# common.write_LSS(ran,out_ran_fn)
# splitGC(out_data_froot,'.ran',rann)
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@scripts@mock_tools@[email protected]_END.py
|