metadata (dict) | text (stringlengths 0–40.6M) | id (stringlengths 14–255)
---|---|---
{
"filename": "_tickvals.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/coloraxis/colorbar/_tickvals.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self, plotly_name="tickvals", parent_name="layout.coloraxis.colorbar", **kwargs
):
super(TickvalsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@layout@coloraxis@colorbar@[email protected]_END.py
|
{
"filename": "test_ps_run.py",
"repo_name": "valboz/RTModel",
"repo_path": "RTModel_extracted/RTModel-main/tests/end_to_end_tests/test_ps_run.py",
"type": "Python"
}
|
import tempfile
import zipfile
from pathlib import Path
from RTModel import RTModel
def test_ps_run():
temporary_directory = Path(tempfile.gettempdir())
event_zip_path = Path(__file__).parent.joinpath('test_ps_run_resources/example_event.zip')
with zipfile.ZipFile(event_zip_path, 'r') as zip_file_handle:
zip_file_handle.extractall(temporary_directory)
rtm = RTModel(str(temporary_directory.joinpath('event001')))
rtm.Reader()
rtm.InitCond()
rtm.launch_fits('PS')
rtm.ModelSelector('PS')
|
valbozREPO_NAMERTModelPATH_START.@RTModel_extracted@RTModel-main@tests@end_to_end_tests@[email protected]_END.py
|
{
"filename": "_x.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/contour/colorbar/_x.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="x", parent_name="contour.colorbar", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
max=kwargs.pop("max", 3),
min=kwargs.pop("min", -2),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@contour@colorbar@[email protected]_END.py
|
{
"filename": "retrieve_fake_data.py",
"repo_name": "ideasrule/platon",
"repo_path": "platon_extracted/platon-master/misc/retrieve_fake_data.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
import corner
from platon.fit_info import FitInfo
from platon.transit_depth_calculator import TransitDepthCalculator
from platon.retriever import Retriever
Rs = 7e8
g = 9.8
Rp = 7.14e7
logZ = 0
CO = 0.53
log_cloudtop_P = 3
temperature = 1200
depth_calculator = TransitDepthCalculator(Rs, g)
wavelength_bins = []
stis_wavelengths = np.linspace(0.4e-6, 0.7e-6, 30)
for i in range(len(stis_wavelengths) - 1):
wavelength_bins.append([stis_wavelengths[i], stis_wavelengths[i+1]])
wfc_wavelengths = np.linspace(1.1e-6, 1.7e-6, 30)
for i in range(len(wfc_wavelengths) - 1):
wavelength_bins.append([wfc_wavelengths[i], wfc_wavelengths[i+1]])
wavelength_bins.append([3.2e-6, 4e-6])
wavelength_bins.append([4e-6, 5e-6])
depth_calculator.change_wavelength_bins(wavelength_bins)
wavelengths, transit_depths = depth_calculator.compute_depths(Rp, temperature, logZ=logZ, CO_ratio=CO, cloudtop_pressure=1e3)
#wavelengths, depths2 = depth_calculator.compute_depths(71414515.1348402, P_prof
retriever = Retriever()
fit_info = retriever.get_default_fit_info(Rs, g, 0.99*Rp, 0.9*temperature, logZ=2, CO_ratio=1, add_fit_params=True)
errors = np.random.normal(scale=50e-6, size=len(transit_depths))
transit_depths += errors
result = retriever.run_dynesty(wavelength_bins, transit_depths, errors, fit_info)
np.save("samples.npy", result.samples)
np.save("weights.npy", result.weights)
np.save("logl.npy", result.logl)
print(fit_info.fit_param_names)
fig = corner.corner(result.samples, weights=result.weights, range=[0.99] * result.samples.shape[1], labels=fit_info.fit_param_names)
fig.savefig("multinest_corner.png")
|
ideasruleREPO_NAMEplatonPATH_START.@platon_extracted@platon-master@misc@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "ebachelet/pyLIMA",
"repo_path": "pyLIMA_extracted/pyLIMA-master/pyLIMA/outputs/__init__.py",
"type": "Python"
}
|
ebacheletREPO_NAMEpyLIMAPATH_START.@pyLIMA_extracted@pyLIMA-master@pyLIMA@outputs@[email protected]_END.py
|
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter/selected/marker/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="size", parent_name="scatter.selected.marker", **kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter@selected@marker@[email protected]_END.py
|
{
"filename": "tests.py",
"repo_name": "plazar/TOASTER",
"repo_path": "TOASTER_extracted/TOASTER-master/webtoaster/app/tests.py",
"type": "Python"
}
|
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.failUnlessEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
|
plazarREPO_NAMETOASTERPATH_START.@TOASTER_extracted@TOASTER-master@webtoaster@[email protected]@.PATH_END.py
|
{
"filename": "_pattern.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/barpolar/marker/_pattern.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Pattern(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "barpolar.marker"
_path_str = "barpolar.marker.pattern"
_valid_props = {
"bgcolor",
"bgcolorsrc",
"fgcolor",
"fgcolorsrc",
"fgopacity",
"fillmode",
"shape",
"shapesrc",
"size",
"sizesrc",
"solidity",
"soliditysrc",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
When there is no colorscale, sets the color of background
pattern fill. Defaults to a `marker.color` background when
`fillmode` is "overlay". Otherwise, defaults to a transparent
background.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# fgcolor
# -------
@property
def fgcolor(self):
"""
When there is no colorscale, sets the color of foreground
pattern fill. Defaults to a `marker.color` background when
`fillmode` is "replace". Otherwise, defaults to dark grey or
white to increase contrast with the `bgcolor`.
The 'fgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["fgcolor"]
@fgcolor.setter
def fgcolor(self, val):
self["fgcolor"] = val
# fgcolorsrc
# ----------
@property
def fgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `fgcolor`.
The 'fgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["fgcolorsrc"]
@fgcolorsrc.setter
def fgcolorsrc(self, val):
self["fgcolorsrc"] = val
# fgopacity
# ---------
@property
def fgopacity(self):
"""
Sets the opacity of the foreground pattern fill. Defaults to
0.5 when `fillmode` is "overlay". Otherwise, defaults to 1.
The 'fgopacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fgopacity"]
@fgopacity.setter
def fgopacity(self, val):
self["fgopacity"] = val
# fillmode
# --------
@property
def fillmode(self):
"""
Determines whether `marker.color` should be used as a default
to `bgcolor` or a `fgcolor`.
The 'fillmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['replace', 'overlay']
Returns
-------
Any
"""
return self["fillmode"]
@fillmode.setter
def fillmode(self, val):
self["fillmode"] = val
# shape
# -----
@property
def shape(self):
"""
Sets the shape of the pattern fill. By default, no pattern is
used for filling the area.
The 'shape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['', '/', '\\', 'x', '-', '|', '+', '.']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["shape"]
@shape.setter
def shape(self, val):
self["shape"] = val
# shapesrc
# --------
@property
def shapesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shape`.
The 'shapesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shapesrc"]
@shapesrc.setter
def shapesrc(self, val):
self["shapesrc"] = val
# size
# ----
@property
def size(self):
"""
Sets the size of unit squares of the pattern fill in pixels,
which corresponds to the interval of repetition of the pattern.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# solidity
# --------
@property
def solidity(self):
"""
Sets the solidity of the pattern fill. Solidity is roughly the
fraction of the area filled by the pattern. Solidity of 0 shows
only the background color without pattern and solidity of 1
shows only the foreground color without pattern.
The 'solidity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["solidity"]
@solidity.setter
def solidity(self, val):
self["solidity"] = val
# soliditysrc
# -----------
@property
def soliditysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `solidity`.
The 'soliditysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["soliditysrc"]
@soliditysrc.setter
def soliditysrc(self, val):
self["soliditysrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
When there is no colorscale, sets the color of
background pattern fill. Defaults to a `marker.color`
background when `fillmode` is "overlay". Otherwise,
defaults to a transparent background.
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
fgcolor
When there is no colorscale, sets the color of
foreground pattern fill. Defaults to a `marker.color`
background when `fillmode` is "replace". Otherwise,
defaults to dark grey or white to increase contrast
with the `bgcolor`.
fgcolorsrc
Sets the source reference on Chart Studio Cloud for
`fgcolor`.
fgopacity
Sets the opacity of the foreground pattern fill.
Defaults to 0.5 when `fillmode` is "overlay".
Otherwise, defaults to 1.
fillmode
Determines whether `marker.color` should be used as a
default to `bgcolor` or a `fgcolor`.
shape
Sets the shape of the pattern fill. By default, no
pattern is used for filling the area.
shapesrc
Sets the source reference on Chart Studio Cloud for
`shape`.
size
Sets the size of unit squares of the pattern fill in
pixels, which corresponds to the interval of repetition
of the pattern.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
solidity
Sets the solidity of the pattern fill. Solidity is
roughly the fraction of the area filled by the pattern.
Solidity of 0 shows only the background color without
pattern and solidity of 1 shows only the foreground
color without pattern.
soliditysrc
Sets the source reference on Chart Studio Cloud for
`solidity`.
"""
def __init__(
self,
arg=None,
bgcolor=None,
bgcolorsrc=None,
fgcolor=None,
fgcolorsrc=None,
fgopacity=None,
fillmode=None,
shape=None,
shapesrc=None,
size=None,
sizesrc=None,
solidity=None,
soliditysrc=None,
**kwargs,
):
"""
Construct a new Pattern object
Sets the pattern within the marker.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.barpolar.marker.Pattern`
bgcolor
When there is no colorscale, sets the color of
background pattern fill. Defaults to a `marker.color`
background when `fillmode` is "overlay". Otherwise,
defaults to a transparent background.
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
fgcolor
When there is no colorscale, sets the color of
foreground pattern fill. Defaults to a `marker.color`
background when `fillmode` is "replace". Otherwise,
defaults to dark grey or white to increase contrast
with the `bgcolor`.
fgcolorsrc
Sets the source reference on Chart Studio Cloud for
`fgcolor`.
fgopacity
Sets the opacity of the foreground pattern fill.
Defaults to 0.5 when `fillmode` is "overlay".
Otherwise, defaults to 1.
fillmode
Determines whether `marker.color` should be used as a
default to `bgcolor` or a `fgcolor`.
shape
Sets the shape of the pattern fill. By default, no
pattern is used for filling the area.
shapesrc
Sets the source reference on Chart Studio Cloud for
`shape`.
size
Sets the size of unit squares of the pattern fill in
pixels, which corresponds to the interval of repetition
of the pattern.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
solidity
Sets the solidity of the pattern fill. Solidity is
roughly the fraction of the area filled by the pattern.
Solidity of 0 shows only the background color without
pattern and solidity of 1 shows only the foreground
color without pattern.
soliditysrc
Sets the source reference on Chart Studio Cloud for
`solidity`.
Returns
-------
Pattern
"""
super(Pattern, self).__init__("pattern")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.barpolar.marker.Pattern
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.marker.Pattern`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("fgcolor", None)
_v = fgcolor if fgcolor is not None else _v
if _v is not None:
self["fgcolor"] = _v
_v = arg.pop("fgcolorsrc", None)
_v = fgcolorsrc if fgcolorsrc is not None else _v
if _v is not None:
self["fgcolorsrc"] = _v
_v = arg.pop("fgopacity", None)
_v = fgopacity if fgopacity is not None else _v
if _v is not None:
self["fgopacity"] = _v
_v = arg.pop("fillmode", None)
_v = fillmode if fillmode is not None else _v
if _v is not None:
self["fillmode"] = _v
_v = arg.pop("shape", None)
_v = shape if shape is not None else _v
if _v is not None:
self["shape"] = _v
_v = arg.pop("shapesrc", None)
_v = shapesrc if shapesrc is not None else _v
if _v is not None:
self["shapesrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
_v = arg.pop("solidity", None)
_v = solidity if solidity is not None else _v
if _v is not None:
self["solidity"] = _v
_v = arg.pop("soliditysrc", None)
_v = soliditysrc if soliditysrc is not None else _v
if _v is not None:
self["soliditysrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
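# Usage sketch (illustrative only, not part of the generated plotly module):
# the Pattern properties documented above can be set on a barpolar trace via
# nested dicts. Assumes plotly is installed.
if __name__ == "__main__":
    import plotly.graph_objects as go

    fig = go.Figure(
        go.Barpolar(
            r=[1, 2, 3],
            theta=[0, 45, 90],
            # hatch the bars using the pattern properties described above
            marker=dict(pattern=dict(shape="/", solidity=0.4, fgcolor="black")),
        )
    )
    fig.show()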
|
[email protected][email protected]@packages@python@plotly@plotly@graph_objs@barpolar@marker@[email protected]_END.py
|
{
"filename": "generate_toml.py",
"repo_name": "j0r1/GRALE2",
"repo_path": "GRALE2_extracted/GRALE2-master/pygrale/grale_editor_cppqt/generate_toml.py",
"type": "Python"
}
|
#!/usr/bin/env python
import sys
import site
import pprint
import os
import subprocess
import glob
import platform
template = '''
# Specify sip v6 as the build system for the package.
[build-system]
requires = ["sip >=6, <7", "PyQt-builder >=1.9, <2" ]
build-backend = "sipbuild.api"
# Specify the PEP 566 metadata for the project.
[tool.sip.metadata]
name = "grale_editor_cppqt"
version = "{}"
[tool.sip]
project-factory = "pyqtbuild:PyQtProject"
# Configure the building of the fib bindings.
[tool.sip.bindings.grale_editor_cppqt]
headers = [ "cppqt.h" ]
include-dirs = {}
library-dirs = [ "{}" ]
libraries = [ "Qt5Widgets{}", "Qt5Core{}", "Qt5Gui{}" ]
'''
def getQtCoreTomlPath():
qtCoreToml = []
sitePack = site.getsitepackages()
for p in sitePack:
corePath = os.path.join(p, "PyQt5", "bindings", "QtCore", "QtCore.toml")
if os.path.exists(corePath):
qtCoreToml.append(corePath)
if not qtCoreToml:
raise Exception("No QtCore.toml could be located in {}".format(sitePack))
if len(qtCoreToml) > 1:
raise Exception("More than one QtCore.toml found in {}".format(sitePack))
return qtCoreToml[0]
def getLibDir():
o = subprocess.check_output(["qmake", "-v"])
o = o.splitlines()
# QMake version
assert o[0].startswith(b"QMake version "), "Unexpected output of qmake, can't detect version"
qmakeVersion = o[0].strip().split()[2]
print("Detected qmake version", qmakeVersion)
# Using Qt version
qtVStart = b"Using Qt version "
assert o[1].startswith(qtVStart), "Unexpected output of qmake, can't get qt version"
vPart = o[1][len(qtVStart):]
idx = vPart.find(b" ")
assert idx > 0, "Can't find end of version string in '{}'".format(vPart)
qtVersion = vPart[:idx]
print("Qt version detected is", qtVersion)
libPart = vPart[idx:]
assert libPart.startswith(b" in "),"Unexpected output of qmake, can't get lib dir"
libDir = libPart[4:].replace(b"\r", b"").replace(b"\n", b"")
print("Lib in '{}'".format(libDir))
extraLibSuffix = ""
if "CONDA_PREFIX" in os.environ and platform.system() == "Windows":
# It appears that on windows, with conda-forge, eg qt5widgets_conda.lib is used
widgets = glob.glob(os.path.join(libDir, b"Qt5Widgets*.lib"))
if len(widgets) == 1:
extraLibSuffix = os.path.basename(widgets[0])[10:-4].decode()
print("Detected extra library suffix", extraLibSuffix)
elif len(widgets) == 0:
print("WARNING: No Qt5Widgets lib found, assuming no extra suffix")
else:
print("WARNING: More than one Qt5Widgets lib found, can't deduce extra suffix")
pprint.pprint(widgets)
return qmakeVersion, qtVersion, libDir, extraLibSuffix
def getIncDirs(libDir):
incDir = os.path.join(os.path.dirname(libDir), b"include", b"qt")
assert os.path.exists(os.path.join(incDir, b"QtCore", b"QtCore")), "Can't locate Qt include dir based on lib dir"
return [
incDir,
os.path.join(incDir, b"QtCore"),
os.path.join(incDir, b"QtWidgets"),
os.path.join(incDir, b"QtGui"),
]
def genToml(version):
qmakeVersion, qtVersion, libDir, extraLibSuffix = getLibDir()
incDirs = getIncDirs(libDir)
incDirs = [ i.decode() for i in incDirs ]
incDirs.append(os.getcwd())
toml = template.format(version,incDirs, libDir.decode(), extraLibSuffix, extraLibSuffix, extraLibSuffix)
open("pyproject.toml", "wt").write(toml)
print("Wrote to pyproject.toml")
def main():
genToml(sys.argv[1])
if __name__ == "__main__":
main()
|
j0r1REPO_NAMEGRALE2PATH_START.@GRALE2_extracted@GRALE2-master@pygrale@grale_editor_cppqt@[email protected]_END.py
|
{
"filename": "zero_shot_GPT_4o.ipynb",
"repo_name": "dtanoglidis/zero-shot-astro",
"repo_path": "zero-shot-astro_extracted/zero-shot-astro-main/zero_shot_GPT_4o.ipynb",
"type": "Jupyter Notebook"
}
|
## **Zero-shot image classification with GPT-4o**
We perform zero-shot classification of astronomical images using the proprietary GPT-4o Large Multimodal Model, accessed through OpenAI's API.
[Introducing GPT-4o](https://openai.com/index/hello-gpt-4o/)
We use two different datasets:
- LSBGs vs Artifacts, from [Tanoglidis et al. 2021](https://arxiv.org/abs/2011.12437)
- [GalaxyMNIST](https://github.com/mwalmsley/galaxy_mnist), which has four different morphological categories (smooth and round, smooth and cigar-shaped, edge-on disk, and unbarred spiral).
From each dataset, we select 500 images to classify.
#### **Import basic packages**
```python
# Import basic packages
import numpy as np
import pandas as pd
import random
import json
from time import sleep
from IPython.display import Image
# ================================================
# Matplotlib, seaborn and plot pretty
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
%matplotlib inline
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
# Adjust rc parameters to make plots pretty
def plot_pretty(dpi=200, fontsize=9):
plt.rc("savefig", dpi=dpi) # dpi resolution of saved image files
plt.rc('text', usetex=False) # do not use LaTeX to process labels
plt.rc('font', size=fontsize) # fontsize
plt.rc('xtick', direction='in') # make axes ticks point inward
plt.rc('ytick', direction='in')
plt.rc('xtick.major', pad=10)
plt.rc('xtick.minor', pad=5)
plt.rc('ytick.major', pad=10)
plt.rc('ytick.minor', pad=5)
plt.rc('lines', dotted_pattern = [0.5, 1.1]) # fix dotted lines
return
plot_pretty()
```
```python
import base64
import requests
```
```python
# scikit-learn
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
```
**Define function to make nice plots of the confusion matrix**
```python
# Function that makes nice plots of the confusion matrix
def plot_confusion_matrix(y_true, y_pred, rot, l_size, ax_size, size, svname, class_names):
"""
Inputs: y_true: array of true values
y_pred: array of predicted values
rot: label rotation
l_size: label size
ax_size: axis title size
size: size of the confusion matrix
svname: name, in order to save as a pdf
class_names: the names to be used as class names in the confusion matrix
Output: Plot of the confusion matrix
"""
# Get the confusion matrix
con_mat = confusion_matrix(y_true, y_pred)
# Dimension / number of classes
dim = len(class_names)
# Get annotations and normalized confusion matrix
# Normalized confusion matrix
con_mat_normalized = np.zeros([dim,dim])
annots = np.zeros([dim,dim],dtype='U6')
# Populate normalized confusion matrix and annotations
for i in range(dim):
con_mat_normalized[i] = con_mat[i]/np.sum(con_mat[i])
for j in range(dim):
annots[i,j] = "("+"{:.2f}".format(con_mat_normalized[i,j])+")"
# Plot the matrix now
plt.figure(figsize = (size,size))
sns.heatmap(con_mat_normalized.T, square=True, annot=annots.T, annot_kws={'va':'top',"size": 19},cmap='Blues',fmt='', cbar=False,
cbar_kws={'shrink':0.94})
g= sns.heatmap(con_mat.T, square = True, annot=True,annot_kws={'va':'bottom',"size": 21},fmt='.0f', cmap='Blues', cbar=False,
xticklabels= class_names,
yticklabels= class_names)
g.set_xticklabels(class_names, fontsize = 15)
g.set_yticklabels(class_names, fontsize = 15)
# Ticks for colorbar
cax = plt.gcf().axes[-1]
if (rot!=None):
cax.tick_params(labelsize=l_size,labelrotation=rot)
else:
cax.tick_params(labelsize=l_size)
# =========================================
# =========================================
plt.xlabel('True label',fontsize=ax_size)
plt.ylabel('Predicted label',fontsize=ax_size)
plt.tight_layout()
# Save in .png and .pdf formats
plt.savefig("Images/results/"+svname+".png")
plt.savefig("Images/results/"+svname+".pdf")
plt.show()
```
### Connect to Google Drive
```python
# Mount drive to upload/download datasets
from google.colab import drive
drive.mount('/content/drive')
```
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).
### **Download Datasets**
- **LSBGs vs Artifacts**
```python
# Remove
#!rm -rf UPenn_DS_Hangout
!git clone https://github.com/dtanoglidis/UPenn_DS_Hangout
```
fatal: destination path 'UPenn_DS_Hangout' already exists and is not an empty directory.
```python
# Change to that directory where the data lie
%cd UPenn_DS_Hangout
```
/content/UPenn_DS_Hangout
```python
#Concatenate the data
# Data
X_1 = np.load("X_data_1.npy");X_2 = np.load("X_data_2.npy");X_3 = np.load("X_data_3.npy")
X_4 = np.load("X_data_4.npy");X_5 = np.load("X_data_5.npy")
X_data = np.concatenate((X_1,X_2,X_3,X_4,X_5))
# Labels
y_lab = np.load("y_labels.npy")
```
```python
# Change to the LMM directory
%cd ../drive/MyDrive/Multimodal/
```
/content/drive/MyDrive/Multimodal
- **galaxyMNIST**
Uncomment the clone command the first time you run this notebook, to download the galaxyMNIST dataset.
Then you can load either the low-resolution ($64\times64$ pixel) or the high-resolution ($256\times256$ pixel) images. Here we opt to use the low-resolution images, for a lower payload when invoking the GPT-4o pipeline.
**Note**: you may have to restart the kernel after installing the package
```python
#!git clone https://github.com/mwalmsley/galaxy_mnist
!pip install -e galaxy_mnist
```
Obtaining file:///content/drive/MyDrive/Multimodal/galaxy_mnist
Preparing metadata (setup.py) ... [?25l[?25hdone
Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from galaxyMNIST==0.1.0) (2.0.3)
Requirement already satisfied: scikit-learn in /usr/local/lib/python3.10/dist-packages (from galaxyMNIST==0.1.0) (1.2.2)
Requirement already satisfied: h5py in /usr/local/lib/python3.10/dist-packages (from galaxyMNIST==0.1.0) (3.9.0)
Requirement already satisfied: numpy>=1.17.3 in /usr/local/lib/python3.10/dist-packages (from h5py->galaxyMNIST==0.1.0) (1.25.2)
Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.10/dist-packages (from pandas->galaxyMNIST==0.1.0) (2.8.2)
Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.10/dist-packages (from pandas->galaxyMNIST==0.1.0) (2023.4)
Requirement already satisfied: tzdata>=2022.1 in /usr/local/lib/python3.10/dist-packages (from pandas->galaxyMNIST==0.1.0) (2024.1)
Requirement already satisfied: scipy>=1.3.2 in /usr/local/lib/python3.10/dist-packages (from scikit-learn->galaxyMNIST==0.1.0) (1.11.4)
Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn->galaxyMNIST==0.1.0) (1.4.2)
Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn->galaxyMNIST==0.1.0) (3.5.0)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil>=2.8.2->pandas->galaxyMNIST==0.1.0) (1.16.0)
Installing collected packages: galaxyMNIST
Running setup.py develop for galaxyMNIST
Successfully installed galaxyMNIST-0.1.0
```python
from galaxy_mnist import GalaxyMNIST
# Load the 64x64 pixel images
dataset = GalaxyMNIST(
root='galaxy_mnist',
download=True,
train=True # by default, or set False for test set
)
```
```python
# images and labels here
images, labels = dataset.data, dataset.targets
# convert to numpy
images_np = images.numpy()
labels_np = labels.numpy()
# rearrange the images in a channel-last format
images_ra = np.moveaxis(images_np, source=1, destination=3)
```
## Function for zero-shot classification through API calls
We define a function that calls the OpenAI API; it takes the prompt and the image as inputs.
We also need to define a function that encodes the images into the required base64 format.
```python
# Your OpenAI API key here
api_key = ""
```
```python
# Function to encode the image
def encode_image(image_path):
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode('utf-8')
```
```python
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"
}
```
```python
# Zero-shot function
def gpt_4o_zero_shot(inp_prompt,enc_image):
"""
Function that returns zero shot prediction from GPT-4o
Inputs: inp_prompt (input prompt)
enc_image (base 64 encoded image)
Output: output prediction of the GPT-4o model
"""
# Define payload
payload = {
"model": "gpt-4o",
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": inp_prompt
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{enc_image}"
}
},
{
"type": "text",
"text": "Look well the image above. To which category does it belong? Respond only with a number"
}
]
}
],
"max_tokens": 500,
"temperature": 0.0,
"n": 1
}
# Get response
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
# Return final result
return response.json()['choices'][0]['message']['content']
```
## **I) LSBG vs Artifacts**
- **Make a plot of examples**
```python
X_pos = X_data[y_lab==1.][0:50] #Positives - LSBGs
X_neg = X_data[y_lab==0.][0:50] #Negatives - Artifacts
```
```python
# Plot Examples of LSBGs
n_rows = 4
n_cols = 5
plt.figure(figsize=(4*n_cols*0.4, 4*n_rows*0.4))
for i in range(n_rows*n_cols):
if (i==3):
plt.title("Examples of LSBGs",fontsize=18)
plt.subplot(n_rows, n_cols, i+1)
plt.imshow(X_pos[i])
plt.axis('off')
plt.subplots_adjust(wspace=0.01, hspace=0.03)
plt.tight_layout()
# Save in .png and .pdf formats
plt.savefig("Images/results/LSBG_examples.png")
plt.savefig("Images/results/LSBG_examples.pdf")
plt.show()
```

```python
# Plot examples of artifacts
plt.figure(figsize=(4*n_cols*0.4, 4*n_rows*0.4))
for i in range(n_rows*n_cols):
if (i==3):
plt.title("Examples of Artifacts",fontsize=18)
plt.subplot(n_rows, n_cols, i+1)
plt.imshow(X_neg[i])
plt.axis('off')
plt.subplots_adjust(wspace=0.01, hspace=0.03)
plt.tight_layout()
# Save in .png and .pdf formats
plt.savefig("Images/results/Artifact_examples.png")
plt.savefig("Images/results/Artifact_examples.pdf")
plt.show()
```

**Define classification prompt**
```python
prompt_LSBG = """
You are tasked to classify astronomical images. Each image belongs to one of two categories:
Low Surface Brightness Galaxies (LSBG, Category 1) or non-LSBG (Category 0).
Note that category 0 may also include parts or off-centered LSBGs.
Here are some characteristics of the two categories:
Category 1, LSBG: The image is dominated by a well-centered,
circular or elliptical, diffuse galaxy-like light source. There may be some
smaller, more compact light sources nearby.
Category 0: In this category belong all the images that do not fit into the
category 1 definiton. Examples may include: off-centered bright galaxies, diffuse light,
bright stars, light reflections.
To what category does the image belong?
"""
```
### Run in a loop
```python
n = 500
# initialize an array of predictions
y_pred_LSBG = np.zeros(n)
# Run
for i in range(n):
# Save image in directory; PNG format
plt.imsave('Images/Example_Image.png', X_data[i])
# encode it
encoded = encode_image('Images/Example_Image.png')
# Get response
resp = gpt_4o_zero_shot(prompt_LSBG,encoded)
y_pred_LSBG[i] = int(resp)
if (i%10==0):
sleep(3)
if (i%50==0):
print(i)
```
0
50
100
150
200
250
300
350
400
450
```python
# Save the predictions array, so we don't have to run the predictions every time
np.save('preds_LSBGs',y_pred_LSBG)
# We can load the array if we want to here
# y_pred_LSBG = np.load('preds_LSBGs.npy')
```
- Make a plot of the confusion matrix now:
```python
class_names = ['LSBG','Artifact']
plot_confusion_matrix(y_lab[:n], y_pred_LSBG, rot=None, l_size=16, ax_size = 19, size=5.0, svname='LSBG_vs_Artifact', class_names=class_names)
```

- Print metrics now:
```python
print("Overall accuracy:",accuracy_score(y_lab[:n], y_pred_LSBG))
print("Precision:", precision_score(y_lab[:n],y_pred_LSBG))
print("Recall:", recall_score(y_lab[:n],y_pred_LSBG))
```
Overall accuracy: 0.828
Precision: 0.8345864661654135
Recall: 0.8409090909090909
## **II) Galaxy Morphology**
- **Make plots of examples**
Let's first get some examples of each of the four categories
```python
images_smooth_round = images_ra[labels_np==0][0:50]
images_smooth_cigar = images_ra[labels_np==1][0:50]
images_edge_on = images_ra[labels_np==2][0:50]
images_edge_spiral = images_ra[labels_np==3][0:50]
```
```python
# Plot Examples of smooth_round
n_rows = 4
n_cols = 5
plt.figure(figsize=(4*n_cols*0.4, 4*n_rows*0.4))
for i in range(n_rows*n_cols):
if (i==3):
plt.title("Smooth Round galaxies",fontsize=18)
plt.subplot(n_rows, n_cols, i+1)
plt.imshow(images_smooth_round[i])
plt.axis('off')
plt.subplots_adjust(wspace=0.01, hspace=0.03)
# Save in .png and .pdf formats
plt.savefig("Images/results/Smooth_Round_examples.png")
plt.savefig("Images/results/Smooth_Round_examples.pdf")
plt.show()
```

```python
# Plot examples of smooth Cigar-shaped galaxies
plt.figure(figsize=(4*n_cols*0.4, 4*n_rows*0.4))
for i in range(n_rows*n_cols):
if (i==3):
plt.title("Smooth Cigar-shaped galaxies",fontsize=18)
plt.subplot(n_rows, n_cols, i+1)
plt.imshow(images_smooth_cigar[i])
plt.axis('off')
plt.subplots_adjust(wspace=0.01, hspace=0.03)
# Save in .png and .pdf formats
plt.savefig("Images/results/Smooth_Cigar_examples.png")
plt.savefig("Images/results/Smooth_Cigar_examples.pdf")
plt.show()
```

```python
# Plot examples of Edge-on galaxies
plt.figure(figsize=(4*n_cols*0.4, 4*n_rows*0.4))
for i in range(n_rows*n_cols):
if (i==3):
plt.title("Edge-on Disk galaxies",fontsize=18)
plt.subplot(n_rows, n_cols, i+1)
plt.imshow(images_edge_on[i])
plt.axis('off')
plt.subplots_adjust(wspace=0.01, hspace=0.03)
# Save in .png and .pdf formats
plt.savefig("Images/results/Edge_on_examples.png")
plt.savefig("Images/results/Edge_on_examples.pdf")
plt.show()
```

```python
# Plot examples of Unbarred spiral galaxies
plt.figure(figsize=(4*n_cols*0.4, 4*n_rows*0.4))
for i in range(n_rows*n_cols):
if (i==3):
plt.title("Unbarred Spiral galaxies",fontsize=18)
plt.subplot(n_rows, n_cols, i+1)
plt.imshow(images_edge_spiral[i])
plt.axis('off')
plt.subplots_adjust(wspace=0.01, hspace=0.03)
# Save in .png and .pdf formats
plt.savefig("Images/results/Unbarred_Spiral_examples.png")
plt.savefig("Images/results/Unbarred_Spiral_examples.pdf")
plt.show()
```

**Define Classification prompt**
Now we are ready to classify, so we define the classification prompt
```python
prompt_morphology = """
You are tasked to classify images depicting galaxies, into four morphological categories:
Category 0: smooth and round galaxy. Should not have signs of spires.
Category 1: smooth and cigar-shaped galaxy, looks like being seen edge on. This should not have signs of spires of a spiral galaxy
Category 2: edge-on-disk/spiral galaxy. This disk galaxy should have signs of spires, as seen from an edge-on perspective
Category 3: unbarred spiral galaxy. Has signs of a disk and/or spires
Note that categories 1 and 2 tend to be very similar to each other.
To categorize them, ask yourself the following question: Is this galaxy very smooth, maybe with a small bulge? Then it
belongs to category 1. Does it have irregularities/signs of structure? Then it belongs to category 2.
To which category does the image belong?
"""
```
### Run classification in a loop
```python
n = 500
y_pred_morphology = np.zeros(n)
for i in range(n):
# Save image in directory; PNG format
plt.imsave('Example_Image.png', images_ra[i])
# encode it
encoded = encode_image('Example_Image.png')
# Get response
resp = gpt_4o_zero_shot(prompt_morphology,encoded)
y_pred_morphology[i] = int(resp)
if (i%10==0):
sleep(5)
if (i%50==0):
print(i)
```
0
50
100
150
200
250
300
350
400
450
```python
# Save the predictions array, so we don't have to run the predictions every time
np.save('preds_morphology',y_pred_morphology)
# We can load the array if we want to here
#y_pred_morphology = np.load('preds_morphology.npy')
```
- Make plot of the **confusion matrix**
```python
class_names = ['Round', 'Cigar-shape', 'Edge-On','Spiral']
plot_confusion_matrix(labels_np[:n], y_pred_morphology, rot=None,l_size=17, ax_size=20.5, size=6.5,svname='morphology', class_names=class_names)
```

Print metrics now
```python
print("Overall accuracy:",accuracy_score(labels_np[:n], y_pred_morphology))
#print("Precision:", precision_score(y_lab[:n],y_pred_morphology))
#print("Recall:", recall_score(y_lab[:n],y_pred_morphology))
```
Overall accuracy: 0.674
Combine them in three categories now
```python
# Predictions
y_pred_3 = np.copy(y_pred_morphology)
y_pred_3[(y_pred_3==1)|(y_pred_3==2)] = 1
y_pred_3[y_pred_3>2] = 2
# True values
y_true_3 = np.copy(labels_np[:n])
y_true_3[(y_true_3==1)|(y_true_3==2)] = 1
y_true_3[y_true_3>2] = 2
```
Make new confusion matrix now
```python
class_names = ['Round', 'Elongated','Spiral']
plot_confusion_matrix(y_true_3, y_pred_3, rot=None, l_size=17, ax_size=20.5, size=6.5, svname='morphology', class_names=class_names)
```

|
dtanoglidisREPO_NAMEzero-shot-astroPATH_START.@zero-shot-astro_extracted@zero-shot-astro-main@[email protected]_END.py
|
{
"filename": "index.md",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/docs/en/guides/index.md",
"type": "Markdown"
}
|
---
comments: true
description: Master YOLO with Ultralytics tutorials covering training, deployment and optimization. Find solutions, improve metrics, and deploy with ease!
keywords: Ultralytics, YOLO, tutorials, guides, object detection, deep learning, PyTorch, training, deployment, optimization, computer vision
---
# Comprehensive Tutorials to Ultralytics YOLO
Welcome to the Ultralytics YOLO 🚀 Guides! Our comprehensive tutorials cover various aspects of the YOLO [object detection](https://www.ultralytics.com/glossary/object-detection) model, ranging from training and prediction to deployment. Built on [PyTorch](https://www.ultralytics.com/glossary/pytorch), YOLO stands out for its exceptional speed and [accuracy](https://www.ultralytics.com/glossary/accuracy) in real-time object detection tasks.
Whether you're a beginner or an expert in [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl), our tutorials offer valuable insights into the implementation and optimization of YOLO for your [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) projects. Let's dive in!
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/96NkhsV-W1U"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Ultralytics YOLO11 Guides Overview
</p>
## Guides
Here's a compilation of in-depth guides to help you master different aspects of Ultralytics YOLO.
- [YOLO Common Issues](yolo-common-issues.md) ⭐ RECOMMENDED: Practical solutions and troubleshooting tips to the most frequently encountered issues when working with Ultralytics YOLO models.
- [YOLO Performance Metrics](yolo-performance-metrics.md) ⭐ ESSENTIAL: Understand the key metrics like mAP, IoU, and [F1 score](https://www.ultralytics.com/glossary/f1-score) used to evaluate the performance of your YOLO models. Includes practical examples and tips on how to improve detection accuracy and speed.
- [Model Deployment Options](model-deployment-options.md): Overview of YOLO [model deployment](https://www.ultralytics.com/glossary/model-deployment) formats like ONNX, OpenVINO, and TensorRT, with pros and cons for each to inform your deployment strategy.
- [K-Fold Cross Validation](kfold-cross-validation.md) 🚀 NEW: Learn how to improve model generalization using K-Fold cross-validation technique.
- [Hyperparameter Tuning](hyperparameter-tuning.md) 🚀 NEW: Discover how to optimize your YOLO models by fine-tuning hyperparameters using the Tuner class and genetic evolution algorithms.
- [SAHI Tiled Inference](sahi-tiled-inference.md) 🚀 NEW: Comprehensive guide on leveraging SAHI's sliced inference capabilities with YOLO11 for object detection in high-resolution images.
- [AzureML Quickstart](azureml-quickstart.md) 🚀 NEW: Get up and running with Ultralytics YOLO models on Microsoft's Azure [Machine Learning](https://www.ultralytics.com/glossary/machine-learning-ml) platform. Learn how to train, deploy, and scale your object detection projects in the cloud.
- [Conda Quickstart](conda-quickstart.md) 🚀 NEW: Step-by-step guide to setting up a [Conda](https://anaconda.org/conda-forge/ultralytics) environment for Ultralytics. Learn how to install and start using the Ultralytics package efficiently with Conda.
- [Docker Quickstart](docker-quickstart.md) 🚀 NEW: Complete guide to setting up and using Ultralytics YOLO models with [Docker](https://hub.docker.com/r/ultralytics/ultralytics). Learn how to install Docker, manage GPU support, and run YOLO models in isolated containers for consistent development and deployment.
- [Raspberry Pi](raspberry-pi.md) 🚀 NEW: Quickstart tutorial to run YOLO models on the latest Raspberry Pi hardware.
- [NVIDIA Jetson](nvidia-jetson.md) 🚀 NEW: Quickstart guide for deploying YOLO models on NVIDIA Jetson devices.
- [DeepStream on NVIDIA Jetson](deepstream-nvidia-jetson.md) 🚀 NEW: Quickstart guide for deploying YOLO models on NVIDIA Jetson devices using DeepStream and TensorRT.
- [Triton Inference Server Integration](triton-inference-server.md) 🚀 NEW: Dive into the integration of Ultralytics YOLO11 with NVIDIA's Triton Inference Server for scalable and efficient deep learning inference deployments.
- [YOLO Thread-Safe Inference](yolo-thread-safe-inference.md) 🚀 NEW: Guidelines for performing inference with YOLO models in a thread-safe manner. Learn the importance of thread safety and best practices to prevent race conditions and ensure consistent predictions.
- [Isolating Segmentation Objects](isolating-segmentation-objects.md) 🚀 NEW: Step-by-step recipe and explanation on how to extract and/or isolate objects from images using Ultralytics Segmentation.
- [Edge TPU on Raspberry Pi](coral-edge-tpu-on-raspberry-pi.md): [Google Edge TPU](https://coral.ai/products/accelerator) accelerates YOLO inference on [Raspberry Pi](https://www.raspberrypi.com/).
- [View Inference Images in a Terminal](view-results-in-terminal.md): Use VSCode's integrated terminal to view inference results when using Remote Tunnel or SSH sessions.
- [OpenVINO Latency vs Throughput Modes](optimizing-openvino-latency-vs-throughput-modes.md): Learn latency and throughput optimization techniques for peak YOLO inference performance.
- [Steps of a Computer Vision Project](steps-of-a-cv-project.md) 🚀 NEW: Learn about the key steps involved in a computer vision project, including defining goals, selecting models, preparing data, and evaluating results.
- [Defining A Computer Vision Project's Goals](defining-project-goals.md) 🚀 NEW: Walk through how to effectively define clear and measurable goals for your computer vision project. Learn the importance of a well-defined problem statement and how it creates a roadmap for your project.
- [Data Collection and Annotation](data-collection-and-annotation.md) 🚀 NEW: Explore the tools, techniques, and best practices for collecting and annotating data to create high-quality inputs for your computer vision models.
- [Preprocessing Annotated Data](preprocessing_annotated_data.md) 🚀 NEW: Learn about preprocessing and augmenting image data in computer vision projects using YOLO11, including normalization, dataset augmentation, splitting, and exploratory data analysis (EDA).
- [Tips for Model Training](model-training-tips.md) 🚀 NEW: Explore tips on optimizing [batch sizes](https://www.ultralytics.com/glossary/batch-size), using [mixed precision](https://www.ultralytics.com/glossary/mixed-precision), applying pre-trained weights, and more to make training your computer vision model a breeze.
- [Insights on Model Evaluation and Fine-Tuning](model-evaluation-insights.md) 🚀 NEW: Gain insights into the strategies and best practices for evaluating and fine-tuning your computer vision models. Learn about the iterative process of refining models to achieve optimal results.
- [A Guide on Model Testing](model-testing.md) 🚀 NEW: A thorough guide on testing your computer vision models in realistic settings. Learn how to verify accuracy, reliability, and performance in line with project goals.
- [Best Practices for Model Deployment](model-deployment-practices.md) 🚀 NEW: Walk through tips and best practices for efficiently deploying models in computer vision projects, with a focus on optimization, troubleshooting, and security.
- [Maintaining Your Computer Vision Model](model-monitoring-and-maintenance.md) 🚀 NEW: Understand the key practices for monitoring, maintaining, and documenting computer vision models to guarantee accuracy, spot anomalies, and mitigate data drift.
- [ROS Quickstart](ros-quickstart.md) 🚀 NEW: Learn how to integrate YOLO with the Robot Operating System (ROS) for real-time object detection in robotics applications, including Point Cloud and Depth images.
## Contribute to Our Guides
We welcome contributions from the community! If you've mastered a particular aspect of Ultralytics YOLO that's not yet covered in our guides, we encourage you to share your expertise. Writing a guide is a great way to give back to the community and help us make our documentation more comprehensive and user-friendly.
To get started, please read our [Contributing Guide](../help/contributing.md) for guidelines on how to open up a Pull Request (PR) 🛠️. We look forward to your contributions!
Let's work together to make the Ultralytics YOLO ecosystem more robust and versatile 🙏!
## FAQ
### How do I train a custom object detection model using Ultralytics YOLO?
Training a custom object detection model with Ultralytics YOLO is straightforward. Start by preparing your dataset in the correct format and installing the Ultralytics package. Use the following code to initiate training:
!!! example
=== "Python"
```python
from ultralytics import YOLO
model = YOLO("yolo11n.pt") # Load a pre-trained YOLO model
model.train(data="path/to/dataset.yaml", epochs=50) # Train on custom dataset
```
=== "CLI"
```bash
yolo task=detect mode=train model=yolo11n.pt data=path/to/dataset.yaml epochs=50
```
For detailed dataset formatting and additional options, refer to our [Tips for Model Training](model-training-tips.md) guide.
### What performance metrics should I use to evaluate my YOLO model?
Evaluating your YOLO model performance is crucial to understanding its efficacy. Key metrics include [Mean Average Precision](https://www.ultralytics.com/glossary/mean-average-precision-map) (mAP), [Intersection over Union](https://www.ultralytics.com/glossary/intersection-over-union-iou) (IoU), and F1 score. These metrics help assess the accuracy and [precision](https://www.ultralytics.com/glossary/precision) of object detection tasks. You can learn more about these metrics and how to improve your model in our [YOLO Performance Metrics](yolo-performance-metrics.md) guide.
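For a quick check, you can compute these metrics with the `val` mode. The snippet below is a minimal sketch; the weights file and the dataset YAML path are placeholders to adapt to your own project:
!!! example
=== "Python"
```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # load a trained or pre-trained model
metrics = model.val(data="path/to/dataset.yaml")  # evaluate on the validation split
print(metrics.box.map)  # mAP50-95
print(metrics.box.map50)  # mAP50
```
=== "CLI"
```bash
yolo task=detect mode=val model=yolo11n.pt data=path/to/dataset.yaml
```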
### Why should I use Ultralytics HUB for my computer vision projects?
Ultralytics HUB is a no-code platform that simplifies managing, training, and deploying YOLO models. It supports seamless integration, real-time tracking, and cloud training, making it ideal for both beginners and professionals. Discover more about its features and how it can streamline your workflow with our [Ultralytics HUB](https://docs.ultralytics.com/hub/) quickstart guide.
### What are the common issues faced during YOLO model training, and how can I resolve them?
Common issues during YOLO model training include data formatting errors, model architecture mismatches, and insufficient [training data](https://www.ultralytics.com/glossary/training-data). To address these, ensure your dataset is correctly formatted, check for compatible model versions, and augment your training data. For a comprehensive list of solutions, refer to our [YOLO Common Issues](yolo-common-issues.md) guide.
### How can I deploy my YOLO model for real-time object detection on edge devices?
Deploying YOLO models on edge devices like NVIDIA Jetson and Raspberry Pi requires converting the model to a compatible format such as TensorRT or TFLite. Follow our step-by-step guides for [NVIDIA Jetson](nvidia-jetson.md) and [Raspberry Pi](raspberry-pi.md) deployments to get started with real-time object detection on edge hardware. These guides will walk you through installation, configuration, and performance optimization.
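As a minimal sketch (the weights file is a placeholder; choose the export format that matches your target device):
!!! example
=== "Python"
```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # load your trained model
model.export(format="engine")  # TensorRT engine, e.g. for NVIDIA Jetson
# model.export(format="tflite")  # TFLite, e.g. for Raspberry Pi
```
=== "CLI"
```bash
yolo export model=yolo11n.pt format=engine
```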
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@docs@en@[email protected]@.PATH_END.py
|
{
"filename": "plot_photxerror.py",
"repo_name": "splus-collab/codes-dr4-paper",
"repo_path": "codes-dr4-paper_extracted/codes-dr4-paper-main/codes/plot_photxerror.py",
"type": "Python"
}
|
#!/usr/bin/env python3
import argparse
import os
import glob
import matplotlib.pyplot as plt
import pandas as pd
import dask.dataframe as dd
import itertools
import multiprocessing as mp
import numpy as np
os.environ["DASK_MEMORY_LIMIT"] = "32G"
def load_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description=" ".join([
'Plot the photometry of a given field.']))
parser.add_argument('--work_dir', type=str,
help='Working directory. Default: current directory.',
default=os.getcwd())
parser.add_argument('--fields_list', type=str,
help='List of fields to get the photometry.',
default='DR4_pointings.csv')
parser.add_argument('--nprocs', type=int,
help='Number of processes to use. Default: 1.',
default=1)
parser.add_argument('--npoints', type=int,
help='Number of points to plot. Use -1 to plot the entire table. Default: 10000',
default=10000)
parser.add_argument('--nbins', type=int,
help='Number of bins for shadow regions. Default: 20.',
default=20)
parser.add_argument('--plot', action='store_true',
help='Plot the photometry.')
parser.add_argument('--prefix', type=str,
help='Prefix of the csv files. Default: rand10',
default='rand10')
parser.add_argument('--savefig', action='store_true',
help='Save the plot.')
parser.add_argument('--clobber', action='store_true',
help='Overwrite existing files.')
return parser.parse_args()
def load_csv_data(csv_file):
"""Load the data from a csv file."""
field = csv_file.split('/')[-1].split('_')[0]
try:
data = pd.read_csv(csv_file)
except FileNotFoundError:
print(f'File {csv_file} not found.')
return pd.DataFrame(columns=['RA', 'DEC', 'mag', 'e_mag', 'SEX_FLAGS', 'Field'])
if not any(['SEX_FLAGS' in col for col in data.columns]):
data['SEX_FLAGS'] = [-1]*len(data)
data['Field'] = [field]*len(data)
data.columns = ['RA', 'DEC', 'mag', 'e_mag', 'SEX_FLAGS', 'Field']
return data
def process_csvfiles(workdir, modes, filters, nprocs, args):
for mode, f in itertools.product(modes, filters):
outputcsv = os.path.join(workdir, f'{mode}/{args.prefix}_{f}.csv')
if os.path.exists(outputcsv) and not args.clobber:
print(f'File {outputcsv} exists. Skipping.')
continue
print(f'Processing mode {mode} and filter {f}...')
nfiles2combine = args.prefix.strip('rand')
print(f'Combining {nfiles2combine} files.')
list_csvs = glob.glob(os.path.join(
workdir, mode, f'*_{f}.csv'))[:int(nfiles2combine)]
pool = mp.Pool(nprocs)
dfs = pool.map(load_csv_data, list_csvs)
pool.close()
pool.join()
alldf = pd.concat(dfs, ignore_index=True)
print(f'Saving file {outputcsv}.')
alldf.to_csv(outputcsv, index=False)
def plot_photometry(workdir, modes, filters, args):
"""Plot the photometry."""
colours = {'u': 'indigo', 'j0378': 'darkviolet', 'j0395': 'navy',
'j0410': 'b', 'j0430': 'dodgerblue', 'j0515': 'lime',
'g': 'turquoise', 'r': 'limegreen', 'j0660': 'y',
'i': 'darkorange', 'j0861': 'orangered', 'z': 'darkred'}
mode_names = {'dual_auto': 'Dual auto', 'dual_PStotal': 'PStotal',
'single_auto': 'Single auto', 'psf_psf': 'PSF'}
fig, ax = plt.subplots(12, 4, figsize=(10, 12))
ax = ax.ravel()
iter_modes_filters = itertools.product(filters, modes)
for i, (f, m) in enumerate(iter_modes_filters):
mode_dir = os.path.join(workdir, f'{m}')
allcsv = os.path.join(mode_dir, f'{args.prefix}_{f}.csv')
if os.path.exists(allcsv):
if args.npoints == -1:
try:
df = dd.read_csv(allcsv, blocksize=25e6)
except ValueError:
print(
f'An error occurred while reading the file {allcsv}.')
continue
else:
try:
df = dd.read_csv(allcsv, blocksize=25e6).head(
n=args.npoints)
except ValueError:
print(
f'An error occurred while reading the file {allcsv}.')
continue
else:
print(f'File {allcsv} not found.')
continue
df['mag'] = df['mag'].astype(float)
df['e_mag'] = df['e_mag'].astype(float)
df['mag'] = df['mag'].round(3)
df['e_mag'] = df['e_mag'].round(3)
mask = (df['mag'] > 10) & (df['mag'] < 30)
mask &= (df['e_mag'] > 0) & (df['e_mag'] < 10.9)
if m in ['dual_auto', 'dual_PStotal', 'single_auto']:
mask &= df['SEX_FLAGS'] == 0
# completeness = len(df[mask]) / len(df)
xlims = [10.1, 26.9]
ylims = [-0.1, 1.9]
xticks = [12, 16, 20, 24]
yticks = np.linspace(min(ylims) + 0.2, max(ylims) - 0.2, 5)
df = df[mask]
df = df.sort_values(by='mag')
df['mag_bin'] = pd.qcut(df['mag'], q=25, labels=False)
percentiles = [.16, .50, .84]
mag_bins = df.groupby('mag_bin')[['mag', 'e_mag']].quantile(
percentiles).unstack()
mag_intersect = mag_bins['mag'][percentiles[1]][
mag_bins['e_mag'][percentiles[1]] < 0.3619].max()
ax[i].scatter(df['mag'], df['e_mag'], s=1, alpha=0.1, color='gray')
print(i, f, m, allcsv, min(df['mag']), max(df['mag']), min(
df['e_mag']), max(df['e_mag']), sum(mask), len(df))
ax[i].plot(mag_bins['mag'][percentiles[0]],
mag_bins['e_mag'][percentiles[0]],
'o-', lw=2, color=colours[f], ms=2)
ax[i].fill_between(mag_bins['mag'][percentiles[0]],
mag_bins['e_mag'][percentiles[0]],
mag_bins['e_mag'][percentiles[2]],
color=colours[f], alpha=0.3)
if mag_intersect <= max(df['mag']):
ax[i].axvline(mag_intersect, color='k', ls='--', lw=1)
if i in [3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47]:
ax[i].text(mag_intersect+0.1, 0.05, r'$\mathrm{%.2f}$' %
mag_intersect, fontsize=6)
else:
# ax[i].text(mag_intersect+0.1, 9, r'$\mathrm{%.2f}$' %
# mag_intersect, fontsize=6)
ax[i].text(mag_intersect - 2.5, max(ylims) - 0.3,
r'$\mathrm{%.2f}$' % mag_intersect, fontsize=6)
if i in [44, 45, 46, 47]:
ax[i].set_xlabel(r'$\mathrm{Mag}$', fontsize=12)
else:
ax[i].set_xticklabels(ax[i].get_xticklabels(), visible=False)
if i in [0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44]:
ax[i].set_ylabel(r'$\mathrm{e_{Mag}}$', fontsize=12)
if i in [3, 7, 11, 15, 19, 23, 27, 31, 35, 39, 43, 47]:
ax2 = ax[i].twinx()
if f in ['u', 'g', 'r', 'i', 'z']:
fname = f'{f}'
else:
fname = f'{f}'.upper()
ax2.set_ylabel(r"$%s$" % fname, rotation=90,
labelpad=10, fontsize=12)
ax2.set_yticklabels(ax2.get_yticklabels(), visible=False)
ax2.set_yticks([])
ax[i].set_xlim(10.1, 24)
ax[i].set_xticks(xticks)
ax[i].set_ylim(-0.02, 0.51)
ax[i].set_yticks([0, 0.1, 0.2, 0.3, 0.4])
else:
ax[i].set_xlim(xlims)
ax[i].set_ylim(ylims)
ax[i].set_xticks(xticks)
ax[i].set_yticks(yticks)
if i in [0, 1, 2, 3]:
ax[i].set_title(f'{mode_names[m]}', fontsize=12)
ax[i].grid(alpha=0.5)
bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
ax[i].text(0.05, 0.9, r'%i' % len(df),
transform=ax[i].transAxes,
fontsize=6, verticalalignment='top', bbox=bbox_props)
plt.tight_layout()
plt.subplots_adjust(wspace=0.2, hspace=0.02)
if args.savefig:
figpath = os.path.join(workdir, 'dr4-photxerrors.png')
print(f'Saving figure {figpath}.')
plt.savefig(figpath, format='png', dpi=300)
plt.show()
plt.close()
def main(args):
workdir = args.work_dir
modes = ['dual_auto', 'dual_PStotal', 'single_auto', 'psf_psf']
filters = ['u', 'j0378', 'j0395', 'j0410', 'j0430', 'g', 'j0515', 'r',
'j0660', 'i', 'j0861', 'z']
nprocs = args.nprocs
process_csvfiles(workdir, modes, filters, nprocs, args)
if args.plot:
plot_photometry(workdir, modes, filters, args)
if __name__ == '__main__':
args = load_args()
main(args)
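# Example invocation (paths and options below are illustrative, not part of the
# original script):
#   python plot_photxerror.py --work_dir /path/to/photometry \
#       --fields_list DR4_pointings.csv --nprocs 8 --npoints 10000 --plot --savefig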
|
splus-collabREPO_NAMEcodes-dr4-paperPATH_START.@codes-dr4-paper_extracted@codes-dr4-paper-main@codes@[email protected]_END.py
|
{
"filename": "makeMassFunctionPlotsCCL.py",
"repo_name": "simonsobs/nemo",
"repo_path": "nemo_extracted/nemo-main/examples/SOSims/validationScripts/makeMassFunctionPlotsCCL.py",
"type": "Python"
}
|
"""
Plot the mass function in z bins.
The range is adjusted to drop the last bin, which may be incomplete in the sense that it may not
cover the full mass bin (whereas all other bins are guaranteed to cover theirs by definition).
"""
import os
import sys
import astropy.table as atpy
import astropy.io.fits as pyfits
import IPython
import numpy as np
from nemo import plotSettings, completeness, signals
import pylab as plt
from scipy import stats
from astLib import *
import pyccl as ccl
from colossus.lss import mass_function
#------------------------------------------------------------------------------------------------------------
# Options
SNRCut=4.0
selFnDir="../MFMF_SOSim_3freq_tiles/selFn"
footprintLabel=None
massCol='M200m'
zBinEdges=[0.2, 0.5, 0.9, 1.2]
zMin=min(zBinEdges)
zMax=max(zBinEdges)
log10MBinEdges=np.linspace(13.8, 15.5, 18)
# Handling different mass definitions
if massCol == 'M500c':
delta=500
rhoType="critical"
elif massCol == 'M200m':
delta=200
rhoType="matter"
else:
raise Exception("Unsupported massCol - should be M500c or M200m")
deltaLabel="%d%s" % (delta, rhoType[0])
log10MBinCentres=(log10MBinEdges[1:]+log10MBinEdges[:-1])/2
# Set up Websky cosmology
H0, Om0, Ob0, sigma_8, ns = 68.0, 0.31, 0.049, 0.81, 0.965
selFn=completeness.SelFn(selFnDir, SNRCut, footprintLabel = footprintLabel, zStep = 0.02,
delta = delta, rhoType = rhoType)
scalingRelationDict=selFn.scalingRelationDict
selFn.update(H0, Om0, Ob0, sigma_8, ns, scalingRelationDict = scalingRelationDict)
print("Total area = %.3f square degrees" % (selFn.totalAreaDeg2))
# Cut to just the halos in the survey mask
cutTabFileName="halosInMask.fits"
if os.path.exists(cutTabFileName) == False:
print("Cutting halos catalog to the survey mask")
tab=atpy.Table().read('../halos.fits')
checkMask=selFn.checkCoordsInAreaMask(tab['RADeg'], tab['decDeg'])
tab=tab[checkMask]
tab.write(cutTabFileName, overwrite = True)
print("Reading %s" % (cutTabFileName))
tab=atpy.Table().read(cutTabFileName)
# On-the-fly mass conversion, as it is quick with CCL
if massCol == "M500c":
print("Converting M200m to M500c")
M500c=[]
count=0
M200mDef=ccl.halos.MassDef200m(c_m='Bhattacharya13')
M500cDef=ccl.halos.MassDef(500, "critical")
for row in tab:
M500c.append(M200mDef.translate_mass(selFn.mockSurvey.cosmoModel, row['M200m'], 1/(1+row['z']), M500cDef))
tab['M500c']=M500c
# Bit of preprocessing to make life easier
tab['fixed_SNR']=100.0
tab.rename_column('z', 'redshift')
tab[massCol]=tab[massCol]/1e14
## Example (not used here) - N(z) with M500c > 5e13 MSun - with selection function applied
#predMz=selFn.compMz*selFn.mockSurvey.clusterCount
#countsByRedshift=predMz[:, np.greater(selFn.mockSurvey.log10M, np.log10(5e13))].sum(axis = 1)
# All the analysis first ------------------------------------------------------------------------------------
# WARNING: We're using halo catalogs, so the completeness correction is disabled
results={}
predMz=selFn.mockSurvey.clusterCount
for i in range(len(zBinEdges)-1):
zMin=zBinEdges[i]
zMax=zBinEdges[i+1]
label='%.1f < z < %.1f' % (zMin, zMax)
fSky=selFn.mockSurvey.areaDeg2/(4*np.pi*(180/np.pi)**2)
shellVolumeMpc3=fSky*(selFn.mockSurvey._comovingVolume(zMax)-selFn.mockSurvey._comovingVolume(zMin))
zMask=np.logical_and(selFn.mockSurvey.z >= zMin, selFn.mockSurvey.z < zMax)
countsByMass=predMz[zMask, :].sum(axis = 0)
predCounts=np.zeros(len(log10MBinEdges)-1)
predNumDensity=np.zeros(len(log10MBinEdges)-1)
obsCounts=np.zeros(len(log10MBinEdges)-1)
obsCountsErr=np.zeros(len(log10MBinEdges)-1)
obsNumDensity=np.zeros(len(log10MBinEdges)-1)
obsNumDensityErr=np.zeros(len(log10MBinEdges)-1)
h=H0/100.
binTab=tab[np.logical_and(tab['redshift'] >= zMin, tab['redshift'] < zMax)]
obsLog10Ms=np.log10(binTab[massCol]*1e14)
for j in range(len(log10MBinEdges)-1):
mMin=log10MBinEdges[j]
mMax=log10MBinEdges[j+1]
mMask=np.logical_and(selFn.mockSurvey.log10M >= mMin, selFn.mockSurvey.log10M < mMax)
predCounts[j]=countsByMass[mMask].sum()
obsMask=np.logical_and(obsLog10Ms >= mMin, obsLog10Ms < mMax)
obsCounts[j]=obsMask.sum()
obsCountsErr[j]=np.sqrt(obsCounts[j])
predNumDensity[j]=predCounts[j]/shellVolumeMpc3
obsNumDensity[j]=obsCounts[j]/shellVolumeMpc3
#complCorr[j]=selFn.compMz[zMask, :].mean(axis = 0)[mMask].mean()
validMask=(obsCounts > 0)
results[label]={'log10MBinCentres': log10MBinCentres[validMask],
'predCounts': predCounts[validMask],
'obsCounts': obsCounts[validMask],
'obsCountsErr': obsCountsErr[validMask],
'predNumDensity': predNumDensity[validMask],
'obsNumDensity': obsNumDensity[validMask],
'obsNumDensityErr': (obsCountsErr[validMask]/obsCounts[validMask])*obsNumDensity[validMask]}
# Counts comparison plot (just N as a function of mass) -----------------------------------------------------
plotSettings.update_rcParams()
plt.figure(figsize=(9,6.5))
ax=plt.axes([0.15, 0.12, 0.84, 0.85])
for key in results.keys():
plotLog10MBinCentres=results[key]['log10MBinCentres']
pred=results[key]['predCounts']
obs=results[key]['obsCounts']
obsErr=results[key]['obsCountsErr']
plt.errorbar(plotLog10MBinCentres, obs, yerr = obsErr,
elinewidth = 3, fmt = 'D', ms = 6, zorder = 900, label = key)
plt.plot(plotLog10MBinCentres, pred, 'k-')
plt.semilogy()
plt.ylim(0.1, 5e5)
plt.xlim(14.0, log10MBinEdges.max())
plt.xlabel("log$_{10}$($M^{\\rm true}_{\\rm %s}$ / $M_{\odot}$)" % (deltaLabel))
plt.ylabel("$N$")
plt.legend()
plt.savefig("%s_counts.png" % (massCol))
plt.close()
# Counts per unit volume (N per Mpc^3) ----------------------------------------------------------------------
plotSettings.update_rcParams()
plt.figure(figsize=(9,6.5))
ax=plt.axes([0.15, 0.12, 0.84, 0.85])
for key in results.keys():
plotLog10MBinCentres=results[key]['log10MBinCentres']
pred=results[key]['predNumDensity']
obs=results[key]['obsNumDensity']
obsErr=results[key]['obsNumDensityErr']
plt.errorbar(plotLog10MBinCentres, obs, yerr = obsErr,
elinewidth = 3, fmt = 'D', ms = 6, zorder = 900, label = key)
plt.plot(plotLog10MBinCentres, pred, 'k-')
plt.semilogy()
#plt.ylim(0.1, 5e5)
plt.xlim(14.0, log10MBinEdges.max())
plt.xlabel("log$_{10}$($M^{\\rm true}_{\\rm %s}$ / $M_{\odot}$)" % (deltaLabel))
plt.ylabel("$N$ (Mpc$^{3}$)")
plt.legend()
plt.savefig("%s_numDensity.png" % (massCol))
plt.close()
|
simonsobsREPO_NAMEnemoPATH_START.@nemo_extracted@nemo-main@examples@SOSims@[email protected]@.PATH_END.py
|
{
"filename": "_x.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/streamtube/lightposition/_x.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="x", parent_name="streamtube.lightposition", **kwargs
):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 100000),
min=kwargs.pop("min", -100000),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@streamtube@lightposition@[email protected]_END.py
|
{
"filename": "timezones.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/_plotly_future_/timezones.py",
"type": "Python"
}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@[email protected]@.PATH_END.py
|
|
{
"filename": "A03compare_triggers.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioReco/examples/StandAloneScripts/A03compare_triggers.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
import logging
from NuRadioReco.utilities import units
import NuRadioReco.framework.channel
import NuRadioReco.framework.station
import NuRadioReco.framework.event
import NuRadioReco.modules.channelResampler
import NuRadioReco.modules.channelGenericNoiseAdder
import NuRadioReco.modules.channelBandPassFilter
import NuRadioReco.modules.channelSignalReconstructor
import NuRadioReco.modules.channelLengthAdjuster
import NuRadioReco.utilities.diodeSimulator
import NuRadioReco.modules.ARA.triggerSimulator
import NuRadioReco.modules.trigger.highLowThreshold
from NuRadioReco.modules.base import module
from NuRadioReco.detector import detector
from NuRadioReco.framework.parameters import channelParameters as chp
det = detector.Detector(json_filename='../example_data/dummy_detector.json')
logger = module.setup_logger(level=logging.WARNING)
channelGenericNoiseAdder = NuRadioReco.modules.channelGenericNoiseAdder.channelGenericNoiseAdder()
channelGenericNoiseAdder.begin(debug=False)
channelResampler = NuRadioReco.modules.channelResampler.channelResampler()
channelSignalReconstructor = NuRadioReco.modules.channelSignalReconstructor.channelSignalReconstructor()
channelSignalReconstructor.begin(debug=False)
channelBandPassFilter = NuRadioReco.modules.channelBandPassFilter.channelBandPassFilter()
channelLengthAdjuster = NuRadioReco.modules.channelLengthAdjuster.channelLengthAdjuster()
channelLengthAdjuster.begin(number_of_samples=400, offset=50)
diodeSimulator = NuRadioReco.utilities.diodeSimulator.diodeSimulator()
noise_mean, noise_std = diodeSimulator.calculate_noise_parameters(
amplitude=20 * units.mV,
min_freq=50 * units.MHz,
max_freq=1000 * units.MHz,
sampling_rate=1. * units.GHz
)
triggerSimulator_ARA = NuRadioReco.modules.ARA.triggerSimulator.triggerSimulator()
triggerSimulator_ARIANNA = NuRadioReco.modules.trigger.highLowThreshold.triggerSimulator()
triggerSimulator_ARIANNA.begin()
event_ARA = NuRadioReco.framework.event.Event(1, 1)
event_ARIANNA = NuRadioReco.framework.event.Event(1, 1)
station_ARA = NuRadioReco.framework.station.Station(101)
channel_ARA = NuRadioReco.framework.channel.Channel(0)
station_ARIANNA = NuRadioReco.framework.station.Station(101)
channel_ARIANNA = NuRadioReco.framework.channel.Channel(0)
# Switch between cosmic ray pulse (CoREAS) and Askaryan parameterization
CR = False
# TYPE_SNR = 'integrated_power'
TYPE_SNR = 'peak_amplitude'
if CR:
    # Use numpy array with band-pass limited pulse [30-1000] MHz, sampled at 10 GHz
data = np.load("example_data/Test_data_8.npy")
test_pulse = data[2, :]
# Normalize test pulse to 1
else:
    # Parameters for the Askaryan pulse
energy = 1e19 * units.eV
fhad = 0.5
viewing_angle = 54 * units.degree
n_samples = 2**12
dt = 0.1 * units.ns
n_index = 1.5
R = 100 * units.m
from NuRadioMC.SignalGen import parametrizations as signalgen
test_pulse = signalgen.get_time_trace(energy * fhad, viewing_angle, n_samples, dt, 'HAD', n_index, R, 'Alvarez2000')
test_pulse /= np.max(np.abs(test_pulse))
n_scaling = 50
result_ARA = np.zeros((n_scaling, 2))
result_ARIANNA = np.zeros((n_scaling, 2))
i = 0
n_iter = 20
for scaling in np.linspace(10 * units.mV, 200 * units.mV, n_scaling):
test_pulse_sc = test_pulse * scaling
n_trigger_ARA = 0
n_trigger_ARIANNA = 0
SNR_ARA = 0
SNR_ARIANNA = 0
max = []
for n in range(n_iter):
channel_ARA.set_trace(test_pulse_sc, 10 * units.GHz)
station_ARA.add_channel(channel_ARA)
station_ARA.remove_triggers()
channel_ARIANNA.set_trace(test_pulse_sc, 10 * units.GHz)
station_ARIANNA.add_channel(channel_ARIANNA)
station_ARIANNA.remove_triggers()
channelBandPassFilter.run(
event_ARIANNA,
station_ARIANNA,
det, passband=[50 * units.MHz, 1000 * units.MHz],
filter_type='rectangular'
)
channelBandPassFilter.run(
event_ARA,
station_ARA,
det,
passband=[50 * units.MHz, 1000 * units.MHz],
filter_type='rectangular'
)
channelGenericNoiseAdder.run(
event_ARA,
station_ARA,
det,
amplitude=20 * units.mV,
min_freq=50 * units.MHz,
max_freq=1000 * units.MHz,
type='perfect_white'
)
channelGenericNoiseAdder.run(
event_ARIANNA,
station_ARIANNA,
det,
amplitude=20 * units.mV,
min_freq=50 * units.MHz,
max_freq=1000 * units.MHz,
type='perfect_white'
)
channelResampler.run(event_ARA, station_ARA, det, sampling_rate=1 * units.GHz)
channelResampler.run(event_ARIANNA, station_ARIANNA, det, sampling_rate=1 * units.GHz)
channelSignalReconstructor.run(event_ARIANNA, station_ARIANNA, det)
channelSignalReconstructor.run(event_ARA, station_ARA, det)
channelLengthAdjuster.run(event_ARIANNA, station_ARIANNA, det)
channelLengthAdjuster.run(event_ARA, station_ARA, det)
triggerSimulator_ARA.run(
event_ARA,
station_ARA,
det,
power_threshold=6.5,
coinc_window=110 * units.ns,
number_concidences=1,
triggered_channels=[0, 1, 2, 3, 4, 5, 6, 7],
power_mean=noise_mean,
power_std=noise_std
)
triggerSimulator_ARIANNA.run(
event_ARIANNA,
station_ARIANNA,
det,
threshold_high=36 * units.mV,
threshold_low=-36 * units.mV,
high_low_window=20 * units.ns,
coinc_window=32 * units.ns,
number_concidences=1,
triggered_channels=[0, 1, 2, 3]
)
#
SNR_ARA += station_ARA.get_channel(0)[chp.SNR][TYPE_SNR]
SNR_ARIANNA += station_ARIANNA.get_channel(0)[chp.SNR][TYPE_SNR]
max.append(np.max(np.abs(station_ARA.get_channel(0).get_trace())))
if station_ARA.has_triggered():
n_trigger_ARA += 1.
if station_ARIANNA.has_triggered():
n_trigger_ARIANNA += 1.
result_ARA[i, 0] = SNR_ARA / n_iter
result_ARA[i, 1] = n_trigger_ARA / n_iter
result_ARIANNA[i, 0] = SNR_ARIANNA / n_iter
result_ARIANNA[i, 1] = n_trigger_ARIANNA / n_iter
i += 1
plt.figure()
plt.plot(result_ARA[:, 0], result_ARA[:, 1], linestyle='None', marker='o', label="ARA, power_threshold 6.5")
plt.plot(result_ARIANNA[:, 0], result_ARIANNA[:, 1], linestyle='None', marker='s', label="ARIANNA, 3 sigma")
plt.ylabel("Trigger efficiency on one antenna")
plt.xlabel(TYPE_SNR)
plt.legend()
plt.show()
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioReco@examples@StandAloneScripts@[email protected]_END.py
|
{
"filename": "_stream.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/icicle/_stream.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "icicle"
_path_str = "icicle.stream"
_valid_props = {"maxpoints", "token"}
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.icicle.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.icicle.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.icicle.Stream`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
_v = maxpoints if maxpoints is not None else _v
if _v is not None:
self["maxpoints"] = _v
_v = arg.pop("token", None)
_v = token if token is not None else _v
if _v is not None:
self["token"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
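if __name__ == "__main__":
    # Minimal usage sketch (not part of the generated module): construct a
    # Stream for an icicle trace; the token below is a placeholder value.
    import plotly.graph_objects as go
    stream = go.icicle.Stream(maxpoints=50, token="placeholder-token")
    print(stream)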
|
[email protected][email protected]@packages@python@plotly@plotly@graph_objs@icicle@[email protected]_END.py
|
{
"filename": "_title.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmap/colorbar/_title.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(
self, plotly_name="title", parent_name="choroplethmap.colorbar", **kwargs
):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Title"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Defaults to
"top" when `orientation` if "v" and defaults
to "right" when `orientation` if "h". Note that
the title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
""",
),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmap@colorbar@[email protected]_END.py
|
{
"filename": "_family.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermap/marker/colorbar/title/font/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="family",
parent_name="scattermap.marker.colorbar.title.font",
**kwargs,
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@scattermap@marker@colorbar@title@font@[email protected]_END.py
|
{
"filename": "ab_initio_solids.py",
"repo_name": "geodynamics/burnman",
"repo_path": "burnman_extracted/burnman-main/misc/benchmarks/ab_initio_solids.py",
"type": "Python"
}
|
from __future__ import absolute_import
from __future__ import print_function
from burnman.minerals import DKS_2013_solids
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from burnman.tools.eos import check_eos_consistency
"""
SOLIDS
"""
phases = [
[
"stishovite",
DKS_2013_solids.stishovite(),
[10, 18, -25, 175],
[10, 18, -2400, -1800],
],
[
"perovskite",
DKS_2013_solids.perovskite(),
[14.5, 27.5, 0, 344],
[14.5, 27.5, -3600, -2000],
],
[
"periclase",
DKS_2013_solids.periclase(),
[6.5, 14, -25, 275],
[6.5, 14, -1200, -560],
],
]
temperatures = [1000.0, 2000.0, 3000.0, 4000.0, 5000.0, 6000.0, 8000.0]
for name, phase, PVT_range, EVT_range in phases:
phase.params["G_0"] = 0.0 # just for consistency checking
phase.params["Gprime_0"] = 1.3 # just for consistency checking
phase.params["eta_s_0"] = 0.0 # just for consistency checking
print(
"EoS consistent for {0} model: {1}".format(
name, check_eos_consistency(phase, tol=1.0e-4)
)
)
vmin = PVT_range[0]
vmax = PVT_range[1]
fig = plt.figure()
ax_P = fig.add_subplot(1, 2, 1)
ax_E = fig.add_subplot(1, 2, 2)
fig1 = mpimg.imread("figures/" + name + "_PVT.png")
ax_P.imshow(fig1, extent=PVT_range, aspect="auto")
volumes = np.linspace(PVT_range[0] * 1.0e-6, PVT_range[1] * 1.0e-6, 101)
pressures = np.empty_like(volumes)
for temperature in temperatures:
for i, volume in enumerate(volumes):
pressures[i] = phase.method.pressure(temperature, volume, phase.params)
ax_P.plot(
volumes * 1e6,
pressures / 1e9,
linewidth=2,
label="{0:.0f} K".format(temperature),
)
ax_P.set_xlim(PVT_range[0], PVT_range[1])
ax_P.set_xlabel("Volume (cm^3/mol)")
ax_P.set_ylabel("Pressure (GPa)")
fig1 = mpimg.imread("figures/" + name + "_EVT.png")
ax_E.imshow(fig1, extent=EVT_range, aspect="auto")
volumes = np.linspace(EVT_range[0] * 1.0e-6, EVT_range[1] * 1.0e-6, 101)
energies = np.empty_like(volumes)
for temperature in temperatures:
for i, volume in enumerate(volumes):
energies[i] = phase.method.molar_internal_energy(
0.0, temperature, volume, phase.params
)
ax_E.plot(
volumes * 1e6,
energies / 1e3,
linewidth=2,
label="{0:.0f} K".format(temperature),
)
ax_E.legend(loc="upper right")
ax_E.set_xlim(EVT_range[0], EVT_range[1])
ax_E.set_xlabel("Volume (cm^3/mol)")
ax_E.set_ylabel("Internal energy (kJ/mol)")
plt.show()
|
geodynamicsREPO_NAMEburnmanPATH_START.@burnman_extracted@burnman-main@misc@benchmarks@[email protected]_END.py
|
{
"filename": "prior.py",
"repo_name": "joezuntz/cosmosis",
"repo_path": "cosmosis_extracted/cosmosis-main/cosmosis/runtime/prior.py",
"type": "Python"
}
|
#coding: utf-8
u"""Implementation of :class:`Prior` and the concrete probability distribution specializations.
The specializations are :class:`UniformPrior`, :class:`GaussianPrior`,
:class:`TruncatedGaussianPrior`, :class:`ExponentialPrior`,
:class:`TruncatedExponentialPrior`, :class:`TruncatedOneoverxPrior`,
and :class:`DeltaFunctionPrior`.
Applications can get by only knowing about the :class:`Prior` superclass.
"""
from . import config
import numpy as np
import math
from scipy import interpolate
import copy
class Prior(object):
u"""This class serves as an abstract base for possible prior distributions, and provides a concrete distribution factory method.
At high level, each concrete prior distribution is a function which
takes a value and returns a probability of that value occurring;
actually, the logarithm of the probability density is returned as this
is more useful in a Bayesian framework where the value of evidence
takes the form of a log-likelihood. There are additionally methods
which
* Return a value corresponding to a given cumulated probability,
i.e. the inverse of the prior distribution.
* Obtain the value of a number of random samples from the
distribution.
* Optionally, a method for returning a truncated version of the
distribution.
The application calls a single function, :func:`Prior.load_priors`, to
obtain a concrete set of all the priors the user specifies in the
configuration files, and then abstractly calls the above methods on
those priors to get the work done.
"""
def __init__(self):
u"""Do nothing."""
pass
def sample(self, n):
u"""Return an array of `n` samples (default one) from the distribution in the derived object.
The generic case implemented here in the base class is an
*extremely* expensive operation in almost all cases. Other cases
(such as uniform and delta functions) are specially implemented to
run much quicker. Moral: think hard about how you are using this
method. If you have to deal with truncated distributions, maybe
this sledge-hammer approach is the only one feasible.
"""
if n is None:
n = 1
Y = np.random.uniform(0., 1.0, n)
return np.array([self.denormalize_from_prior(y) for y in Y])
@classmethod
def parse_prior(cls, value):
u"""Produce a concrete :class:`Prior` object based on a line of a .ini ifile.
This is part of the implementation of :func:`load_priors` and
should be considered private to the class.
"""
prior_type, parameters = value.split(' ', 1)
prior_type = prior_type.lower()
try:
# Most of the priors require float
# parameters, but not all of them -
# the InverseTransform prior needs
# a string argument (file name).
parameters_flt = []
for p in parameters.split():
try:
p = float(p)
except:
pass
parameters_flt.append(p)
parameters = parameters_flt
if prior_type.startswith("uni"):
return UniformPrior(*parameters)
elif prior_type.startswith("gau") or \
prior_type.startswith("nor"):
return GaussianPrior(*parameters)
elif prior_type.startswith("exp"):
return ExponentialPrior(*parameters)
elif prior_type.startswith("one"):
return TruncatedOneoverxPrior(*parameters)
elif prior_type.startswith("tab") or \
prior_type.startswith("loa"):
return TabulatedPDF(*parameters)
else:
raise ValueError("Unable to parse %s as prior" %
(value,))
except TypeError:
raise ValueError("Unable to parse %s as prior" %
(value,))
@classmethod
def load_priors(cls,prior_files):
u"""Produce a dictionary of `(section,name)` -> :class:`Prior` objects as per the instructions in `prior_files`.
The dictionary values are concrete :class:`Prior` types, and this
method is the applicationʼs sole constructor of :class:`Prior`
objects.
Can also pass in an ini file directly
"""
priors = {}
for f in prior_files:
if isinstance(f, config.Inifile):
ini = f
else:
ini = config.Inifile(f)
for option, value in ini:
if option in priors:
raise ValueError("Duplicate prior identified")
priors[option] = cls.parse_prior(value)
return priors
class TabulatedPDF(Prior):
u"""Load from a 2-column ASCII table containing values for x, pdf(x).
    pdf(x) is not interpolated (assumed to be zero) outside the range of x.
Heavily 'inspired' by DistDeviate from GalSim
"""
def __init__(self, function_filename=None, lower=None, upper=None):
u"""Create an object representing the distribution specified in function_filename"""
self.function_filename = function_filename
# Basic input checking and setups
if function_filename is None:
raise TypeError('You must specify a function_filename for TabulatedPDF!')
# Load the probability distribution function, pdf(x)
xarray, pdf = np.loadtxt(function_filename, unpack=True)
        if lower is None:
self.lower = xarray.min()
else:
self.lower = lower
        if upper is None:
self.upper = xarray.max()
else:
self.upper = upper
pdf = pdf[(xarray >= self.lower)*(xarray <= self.upper)]
xarray = xarray[(xarray >= self.lower)*(xarray <= self.upper)]
# Set up pdf, so cumsum basically does a cumulative trapz integral
# On Python 3.4, doing pdf[1:] += pdf[:-1] the last value gets messed up.
# Writing it this way works. (Maybe slightly slower though, so if we stop
# supporting python 3.4, consider switching to the += version.)
pdf_x = copy.copy(pdf)
pdf[1:] = pdf[1:] + pdf[:-1]
pdf[1:] *= np.diff(xarray)
pdf[0] = 0
# Check that the probability is nonnegative
if not np.all(pdf >= 0.):
            raise ValueError('Negative probability found in TabulatedPDF.', function_filename)
# Compute the cumulative distribution function = int(pdf(x),x)
cdf = np.cumsum(pdf)
# Quietly renormalize the probability if it wasn't already normalized
totalprobability = cdf[-1]
cdf /= totalprobability
self.inverse_cdf_interp = interpolate.interp1d(cdf, xarray, kind='linear')
self.cdf_interp = interpolate.interp1d(xarray, cdf, kind='linear')
self.pdf_interp = interpolate.interp1d(xarray, pdf_x, kind='linear')
def __call__(self, x):
u"""Return the logarithm of the probability density."""
if x<self.lower:
return -np.inf
elif x>self.upper:
return -np.inf
return np.log(self.pdf_interp(x))
def sample(self, n):
u"""Use interpolation of inverse CDF to give us `n` random samples from a the distribution."""
if n is None:
n = 1
return self.inverse_cdf_interp(np.random.rand(n))
def denormalize_from_prior(self, y):
u"""Get the value for which the cumulated probability is `y`."""
return self.inverse_cdf_interp(y)
def __str__(self):
u"""Tersely describe ourself to a human mathematician."""
return "Tabulated transform from {0} on range [{1}, {2}]".format(self.function_filename, self.lower, self.upper)
def truncate(self, lower, upper):
u"""Return a new distribution whose range is the intersection of ours with [`lower`, `upper`].
A :class:`ValueError` will be thrown if the arguments supplied do
not make sense.
"""
lower = max(self.lower, lower)
upper = min(self.upper, upper)
if lower>upper:
raise ValueError("One of your priors is inconsistent with the range described in the values file")
return TabulatedPDF(self.function_filename, lower, upper)
class UniformPrior(Prior):
u"""Statistical distribution in which all values in a range have equal probability of occurring."""
def __init__(self, a, b):
u"""Create an object which encapsulates a Uniform distribution over the interval [`a`, `b`]."""
self.a=a
self.b=b
self.norm = -np.log(b-a)
super(UniformPrior,self).__init__()
def __call__(self, x):
u"""Return the logarithm of the probability density, a constant value independent of `x` so long as `x` is in the proper range."""
if x<self.a or x>self.b:
return -np.inf
return self.norm
def sample(self, n):
u"""Use NumPy to obtain a random number in the range [`a`, `b`]."""
return np.random.uniform(self.a, self.b, n)
def denormalize_from_prior(self, y):
u"""Interpolate the cumulated probability `y` to the corresponding value in the interval [`a`, `b`]."""
if y<0.0:
x = np.nan
elif y>1.0:
x = np.nan
else:
x = y * (self.b-self.a) + self.a
return x
def __str__(self):
u"""Tersely describe ourself to a human mathematician."""
return "U({}, {})".format(self.a,self.b)
def truncate(self, lower, upper):
u"""Return a new Uniform distribution whose range is the intersection of ours with [`lower`, `upper`].
A :class:`ValueError` will be thrown if the arguments supplied do
not make sense.
"""
a = max(lower, self.a)
b = min(upper, self.b)
if a>b:
raise ValueError("One of your priors is inconsistent with the range described in the values file")
return UniformPrior(a, b)
class GaussianPrior(Prior):
u"""Encapsulation of a Normal distribution function."""
def __init__(self, mu, sigma):
u"""Make a Normal distribution object with mean `mu` and deviation `sigma`."""
self.mu = mu
self.sigma = sigma
self.sigma2 = sigma**2
self.norm=0.5*np.log(2*np.pi*self.sigma2)
super(GaussianPrior,self).__init__()
def __call__(self, x):
u"""Return the logarithm of the probability density at `x`."""
return -0.5 * (x-self.mu)**2 / self.sigma2 - self.norm
def sample(self, n):
u"""Use NumPy to give us `n` random samples from a Normal distribution."""
if n is None:
n = 1
return np.random.normal(self.mu, self.sigma, n)
def denormalize_from_prior(self, y):
u"""Obtain the value such that the cumulated probability of obtaining a lesser value is `y`.
Note that this is a very expensive function to call.
"""
x_normal = normal_ppf(y)
x = x_normal*self.sigma + self.mu
return x
def __str__(self):
u"""Tersely describe ourself to a human mathematician."""
return "N({}, {} ** 2)".format(self.mu, self.sigma)
def truncate(self, lower, upper):
u"""Return a :class:`TruncatedGaussianPrior` object, with our mean and variance and the given `lower` and `upper` limits of non-zero probability."""
return TruncatedGaussianPrior(self.mu, self.sigma, lower, upper)
class TruncatedGaussianPrior(Prior):
u"""A Normal distribution, except that probabilities outside of a specified range are truncated to zero."""
def __init__(self, mu, sigma, lower, upper):
u"""Get a concrete object representing a distribution with mode at `mu`, ‘shape width’ `sigma`, and limits of non-zero probability `lower` and `upper`."""
self.lower = lower
self.upper = upper
self.mu = mu
self.sigma = sigma
self.sigma2 = sigma**2
self.a = (lower-mu)/sigma
self.b = (upper-mu)/sigma
self.phi_a = normal_cdf(self.a)
self.phi_b = normal_cdf(self.b)
self.norm = np.log(self.phi_b - self.phi_a) + 0.5*np.log(2*np.pi*self.sigma2)
super(TruncatedGaussianPrior,self).__init__()
def __call__(self, x):
u"""Get the logarithm of the probability density at the value `x`."""
if x<self.lower:
return -np.inf
elif x>self.upper:
return -np.inf
return -0.5 * (x-self.mu)**2 / self.sigma2 - self.norm
def denormalize_from_prior(self, y):
u"""Get the value for which the cumulated probability is `y`.
This is a very expensive function.
"""
x_normal = truncated_normal_ppf(y, self.a, self.b)
x = x_normal*self.sigma + self.mu
return x
def __str__(self):
u"""Return a terse description of ourself."""
return "N({}, {} ** 2) [{} < x < {}]".format(self.mu, self.sigma, self.lower, self.upper)
def truncate(self, lower, upper):
u"""Produce a new :class:`Prior` object representing a Normal distribution whose range of non-zero probability is the intersection of our own range with [`lower`, `upper`]."""
lower = max(self.lower, lower)
upper = min(self.upper, upper)
return TruncatedGaussianPrior(self.mu, self.sigma, lower, upper)
class ExponentialPrior(Prior):
u"""The Exponential Distribution, zero probability of any value less than zero."""
def __init__(self, beta):
u"""Create object representing distribution with ‘width’ `beta`."""
self.beta = beta
self.log_beta = np.log(beta)
super(ExponentialPrior,self).__init__()
def __call__(self, x):
u"""Return logarithm of probability density at `x`."""
if x<0.0:
return -np.inf
return -x/self.beta - self.log_beta
def sample(self,n):
u"""Use NumPy to obtain random sample of `n` values from Exponential Distribution with our width `beta`."""
if n is None:
n = 1
return np.random.exponential(self.beta,n)
def denormalize_from_prior(self, y):
u"""Return value for which cumulated probability of lesser values occurring is `y`."""
#y = 1 - exp(-x/beta)
#exp(-x/beta) = 1 - y
#-x/beta = log(1-y)
#x = -beta * log(1-y)
return -self.beta * np.log(1-y)
def __str__(self):
u"""Give a terse description of ourself."""
return "Expon({})".format(self.beta)
def truncate(self, lower, upper):
u"""Return a :class:`Prior` object representing the distribution you get when you take the current distribution but set probability to zero everywhere outside the range [`lower`, `upper`], and re-normalize."""
return TruncatedExponentialPrior(self.beta, lower, upper)
class TruncatedExponentialPrior(Prior):
u"""Like the Exponential prior, but truncated."""
def __init__(self, beta, lower, upper):
u"""Create a distribution with ‘half-life’ `beta`, `lower` bound of non-zero probability, and `upper` bound."""
self.beta = beta
self.log_beta = np.log(beta)
if lower<0:
lower = 0.0
self.lower = lower
self.upper = upper
self.a = lower/beta
self.b = upper/beta
self.phi_a = exponential_cdf(self.a)
self.phi_b = exponential_cdf(self.b)
self.norm = np.log(self.phi_b - self.phi_a) + self.log_beta
super(TruncatedExponentialPrior,self).__init__()
def __call__(self, x):
u"""Return the logarithm of probability density at `x`."""
#(1/beta)*exp(-x/beta)
if x<self.lower:
return -np.inf
if x>self.upper:
return -np.inf
return -x/self.beta - self.norm
def denormalize_from_prior(self, y):
u"""Return the value at which the cumulated probability is `y`."""
x_normal = truncated_exponential_ppf(y, self.a, self.b)
x = x_normal * self.beta
return x
def __str__(self):
u"""Give a terse description of ourself."""
return "Expon({}) [{} < x < {}]".format(self.beta, self.lower, self.upper)
def truncate(self, lower, upper):
u"""Return a :class:`Prior` like ourself but with range of non-zero probability further restricted to `lower` and `upper` bounds."""
lower = max(lower, self.lower)
upper = min(upper, self.upper)
return TruncatedExponentialPrior(self.beta, lower, upper)
class TruncatedOneoverxPrior(Prior):
u"""The 1/x distribution, which is a uniform distribution in ln(x). As ln(x) diverges in both directions, we only provide the truncated option."""
def __init__(self, lower, upper):
u"""Create a distribution with 1/x, `lower` bound of non-zero probability, and `upper` bound."""
if lower<=0:
lower = np.nextafter(0, 1)
self.lower = lower
self.upper = upper
self.ln_lower = np.log(lower)
self.ln_upper = np.log(upper)
        # Normalization: \int_lower^upper (1/x) dx = ln(upper) - ln(lower)
self.norm = self.ln_upper-self.ln_lower
self.ln_norm = np.log(self.norm)
super(TruncatedOneoverxPrior,self).__init__()
def __call__(self, x):
u"""Return the logarithm of probability density at `x`."""
if x<self.lower:
return -np.inf
if x>self.upper:
return -np.inf
return -np.log(x) - self.ln_norm
def sample(self, n):
u"""Obtain random sample of `n` values from 1/x distribution within `lower` and `upper` bounds."""
if n is None:
n = 1
return np.exp(np.random.uniform(self.ln_lower, self.ln_upper, n))
def denormalize_from_prior(self, y):
u"""Return the value at which the cumulated probability is `y`."""
        # CDF: int_a^x (1/z) (1/N) dz = (ln(x) - ln(a)) / N; set this equal to y
        # => ln(x) = y*N + ln(a)
return np.exp(y*self.norm + self.ln_lower)
def __str__(self):
u"""Give a terse description of ourself."""
return "1/x [{} < x < {}]".format(self.lower, self.upper)
def truncate(self, lower, upper):
u"""Return a :class:`Prior` like ourself but with range of non-zero probability further restricted to `lower` and `upper` bounds."""
lower = max(lower, self.lower)
upper = min(upper, self.upper)
return TruncatedOneoverxPrior(lower, upper)
class DeltaFunctionPrior(Prior):
u"""Probability distribution with non-zero probability at a single value."""
# In case this is useful later on
def __init__(self, x0):
u"""Create object with atom of probability at `x0`."""
self.x0 = x0
super(DeltaFunctionPrior,self).__init__()
def __call__(self, x):
u"""The log-density is zero when `x` is `x0`, minus infinity otherwise."""
if x==self.x0:
return 0.0
return -np.inf
def sample(self, n=None):
u"""Just return `x0` `n` times."""
if n is None:
n = 1
return np.repeat(self.x0, n)
def __str__(self):
u"""Terse description of ourself."""
return "delta({})".format(self.x0)
def denormalize_from_prior(self, x):
u"""Just return `x0`; itʼs the only value with any probability."""
return self.x0
# Helper functions
def inverse_function(f, y, xmin, xmax, *args, **kwargs):
"Find x in [xmin,xmax] such that f(x)==y, in 1D, with bisection"
import scipy.optimize
def g(x):
return f(x, *args, **kwargs) - y
x = scipy.optimize.bisect(g, xmin, xmax)
return x
SQRT2 = np.sqrt(2.)
def normal_cdf(x):
# return 0.5*math.erf(x) + 0.5
return 0.5*(math.erf(x/SQRT2) + 1)
def normal_ppf(y):
if y<0:
return np.nan
if y>1:
return np.nan
return inverse_function(normal_cdf, y, -20.0, 20.0)
def truncated_normal_cdf(x, a, b):
if x<a:
return np.nan
if x>b:
return np.nan
phi_a = normal_cdf(a)
phi_b = normal_cdf(b)
phi_x = normal_cdf(x)
return (phi_x - phi_a) / (phi_b - phi_a)
def truncated_normal_ppf(y, a, b):
if y<0:
return np.nan
if y>1:
return np.nan
return inverse_function(truncated_normal_cdf, y, a, b, a, b)
def exponential_cdf(x):
if x<0.0:
return np.nan
return 1 - np.exp(-x)
def truncated_exponential_cdf(x, a, b):
if x<a:
return np.nan
if x>b:
return np.nan
phi_a = exponential_cdf(a)
phi_b = exponential_cdf(b)
phi_x = exponential_cdf(x)
return (phi_x - phi_a) / (phi_b - phi_a)
def exponential_ppf(y):
#y = 1 - exp(-x)
# exp(-x) = 1-y
# x = -log(1-y)
return -np.log(1-y)
def truncated_exponential_ppf(y, a, b):
if y<0:
return np.nan
if y>1:
return np.nan
return inverse_function(truncated_exponential_cdf, y, a, b, a, b)
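if __name__ == "__main__":
    # Minimal usage sketch (not part of the cosmosis runtime itself); the
    # numbers below are purely illustrative.
    u = UniformPrior(0.0, 2.0)
    g = GaussianPrior(1.0, 0.5)
    print(u, u(0.5), u.sample(3))
    print(g, g(1.2), g.sample(3))
    tg = g.truncate(0.0, 1.5)  # returns a TruncatedGaussianPrior
    print(tg, tg(1.0))
    # parse_prior mirrors the ini-file syntax, e.g. "uniform 0.0 2.0"
    print(Prior.parse_prior("uniform 0.0 2.0"))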
|
joezuntzREPO_NAMEcosmosisPATH_START.@cosmosis_extracted@cosmosis-main@cosmosis@[email protected]@.PATH_END.py
|
{
"filename": "kin_likelihood.py",
"repo_name": "sibirrer/hierArc",
"repo_path": "hierArc_extracted/hierArc-main/hierarc/Likelihood/LensLikelihood/kin_likelihood.py",
"type": "Python"
}
|
__author__ = "sibirrer"
from lenstronomy.Util import constants as const
import numpy as np
class KinLikelihood(object):
"""Likelihood to deal with IFU kinematics constraints with covariances in both the
model and measured velocity dispersion."""
def __init__(
self,
z_lens,
z_source,
sigma_v_measurement,
j_model,
error_cov_measurement,
error_cov_j_sqrt,
normalized=True,
sigma_sys_error_include=False,
):
"""
:param z_lens: lens redshift
:param z_source: source redshift
:param sigma_v_measurement: numpy array, velocity dispersion measured
:param j_model: numpy array of the predicted dimensionless dispersion on the IFU's
:param error_cov_measurement: covariance matrix of the measured velocity dispersions in the IFU's
:param error_cov_j_sqrt: covariance matrix of sqrt(J) of the model predicted dimensionless dispersion on the IFU's
:param normalized: bool, if True, returns the normalized likelihood, if False, separates the constant prefactor
(in case of a Gaussian 1/(sigma sqrt(2 pi)) ) to compute the reduced chi2 statistics
:param sigma_sys_error_include: bool, if True will include a systematic error in the velocity dispersion
measurement (if sampled from), otherwise this sampled value is ignored.
"""
self._z_lens = z_lens
self._j_model = np.array(j_model, dtype=float)
self._sigma_v_measured = np.array(sigma_v_measurement, dtype=float)
self._error_cov_measurement = np.array(error_cov_measurement, dtype=float)
self._error_cov_j_sqrt = np.array(error_cov_j_sqrt, dtype=float)
self.num_data = len(j_model)
self._normalized = normalized
self._sigma_sys_error_include = sigma_sys_error_include
def log_likelihood(
self,
ddt,
dd,
kin_scaling=None,
sigma_v_sys_error=None,
sigma_v_sys_offset=None,
):
"""
Note: kinematics + imaging data can constrain Ds/Dds. The input of Ddt, Dd is transformed here to match Ds/Dds
:param ddt: time-delay distance
:param dd: angular diameter distance to the deflector
:param kin_scaling: array of size of the velocity dispersion measurement or None, scaling of the predicted
dimensionless quantity J (proportional to sigma_v^2) of the anisotropy model in the sampling relative to the
anisotropy model used to derive the prediction and covariance matrix in the init of this class.
:param sigma_v_sys_error: float (optional) added error on the velocity dispersion measurement in quadrature
:param sigma_v_sys_offset: float (optional) for a fractional systematic offset in the kinematic measurement
such that sigma_v = sigma_v_measured * (1 + sigma_v_sys_offset)
:return: log likelihood given the single lens analysis
"""
ds_dds = np.maximum(ddt / dd / (1 + self._z_lens), 0)
if kin_scaling is None:
scaling_ifu = 1
else:
scaling_ifu = kin_scaling
sigma_v_predict = self.sigma_v_model(ds_dds, scaling_ifu)
delta = self.sigma_v_measurement_mean(sigma_v_sys_offset) - sigma_v_predict
cov_error = self.cov_error_measurement(
sigma_v_sys_error
) + self.cov_error_model(ds_dds, scaling_ifu)
try:
cov_error_inv = np.linalg.inv(cov_error)
except:
return -np.inf
lnlikelihood = -delta.dot(cov_error_inv.dot(delta)) / 2.0
if self._normalized is True:
sign_det, lndet = np.linalg.slogdet(cov_error)
if sign_det < 0:
raise ValueError(
"error covariance matrix needs to be positive definite"
)
lnlikelihood -= 1 / 2.0 * (self.num_data * np.log(2 * np.pi) + lndet)
return lnlikelihood
def sigma_v_measurement_mean(self, sigma_v_sys_offset=None):
"""
:param sigma_v_sys_offset: float (optional) for a fractional systematic offset in the kinematic measurement
such that sigma_v = sigma_v_measured * (1 + sigma_v_sys_offset)
:return: corrected measured velocity dispersion
"""
if sigma_v_sys_offset is None:
return self._sigma_v_measured
else:
return self._sigma_v_measured * (1 + sigma_v_sys_offset)
def sigma_v_model(self, ds_dds, kin_scaling=1):
"""Model predicted velocity dispersion for the IFU's.
:param ds_dds: Ds/Dds
:param kin_scaling: scaling of the anisotropy affecting sigma_v^2
:return: array of predicted velocity dispersions
"""
sigma_v_predict = np.sqrt(self._j_model * ds_dds * kin_scaling) * const.c / 1000
return sigma_v_predict
def cov_error_model(self, ds_dds, kin_scaling=1):
"""
:param ds_dds: Ds/Dds
:param kin_scaling: scaling of the anisotropy affecting sigma_v^2
:return: covariance matrix of the error in the predicted model (from mass model uncertainties)
"""
        scaling_matrix = np.outer(np.sqrt(kin_scaling), np.sqrt(kin_scaling))
        return self._error_cov_j_sqrt * scaling_matrix * ds_dds * (const.c / 1000) ** 2
def cov_error_measurement(self, sigma_v_sys_error=None):
"""
:param sigma_v_sys_error: float (optional) added error on the velocity dispersion measurement in quadrature
:return: error covariance matrix of the velocity dispersion measurements
"""
if self._sigma_sys_error_include and sigma_v_sys_error is not None:
return self._error_cov_measurement + np.outer(
self._sigma_v_measured * sigma_v_sys_error,
self._sigma_v_measured * sigma_v_sys_error,
)
else:
return self._error_cov_measurement
def sigma_v_prediction(self, ddt, dd, kin_scaling=1):
"""Model prediction mean velocity dispersion vector and model prediction
covariance matrix.
:param ddt: time-delay distance
:param dd: angular diameter distance to the deflector
:param kin_scaling: array of size of the velocity dispersion measurement or
None, scaling of the predicted dimensionless quantity J (proportional to
sigma_v^2) of the anisotropy model in the sampling relative to the
anisotropy model used to derive the prediction and covariance matrix in the
init of this class.
:return: model prediction mean velocity dispersion vector and model prediction
covariance matrix
"""
ds_dds = np.maximum(ddt / dd / (1 + self._z_lens), 0)
sigma_v_predict = self.sigma_v_model(ds_dds, kin_scaling)
cov_error_predict = self.cov_error_model(ds_dds, kin_scaling)
return sigma_v_predict, cov_error_predict
def sigma_v_measurement(self, sigma_v_sys_error=None, sigma_v_sys_offset=None):
"""
:param sigma_v_sys_error: float (optional) added error on the velocity dispersion measurement in quadrature
:param sigma_v_sys_offset: float (optional) for a fractional systematic offset in the kinematic measurement
such that sigma_v = sigma_v_measured * (1 + sigma_v_sys_offset)
:return: measurement mean (vector), measurement covariance matrix
"""
return self.sigma_v_measurement_mean(
sigma_v_sys_offset
), self.cov_error_measurement(sigma_v_sys_error)
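if __name__ == "__main__":
    # Minimal usage sketch (not part of hierArc itself); all numbers below are
    # made up for illustration and do not describe a real lens.
    sigma_v_measurement = np.array([250.0, 230.0])  # km/s
    j_model = np.array([3.0e-7, 2.5e-7])  # dimensionless J per aperture
    error_cov_measurement = np.diag([15.0 ** 2, 15.0 ** 2])
    error_cov_j_sqrt = np.diag([(0.05 * np.sqrt(3.0e-7)) ** 2,
                                (0.05 * np.sqrt(2.5e-7)) ** 2])
    likelihood = KinLikelihood(0.5, 2.0, sigma_v_measurement, j_model,
                               error_cov_measurement, error_cov_j_sqrt)
    # Evaluate the log-likelihood for an illustrative (ddt, dd) pair in Mpc
    print(likelihood.log_likelihood(ddt=5000.0, dd=1200.0))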
|
sibirrerREPO_NAMEhierArcPATH_START.@hierArc_extracted@hierArc-main@hierarc@Likelihood@LensLikelihood@[email protected]_END.py
|
{
"filename": "_easing.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/transition/_easing.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class EasingValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="easing", parent_name="layout.transition", **kwargs):
super(EasingValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
values=kwargs.pop(
"values",
[
"linear",
"quad",
"cubic",
"sin",
"exp",
"circle",
"elastic",
"back",
"bounce",
"linear-in",
"quad-in",
"cubic-in",
"sin-in",
"exp-in",
"circle-in",
"elastic-in",
"back-in",
"bounce-in",
"linear-out",
"quad-out",
"cubic-out",
"sin-out",
"exp-out",
"circle-out",
"elastic-out",
"back-out",
"bounce-out",
"linear-in-out",
"quad-in-out",
"cubic-in-out",
"sin-in-out",
"exp-in-out",
"circle-in-out",
"elastic-in-out",
"back-in-out",
"bounce-in-out",
],
),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@layout@transition@[email protected]_END.py
|
{
"filename": "test_sdes.py",
"repo_name": "AlexandreAdam/score_models",
"repo_path": "score_models_extracted/score_models-master/tests/test_sdes.py",
"type": "Python"
}
|
from score_models.sde import VESDE, VPSDE, TSVESDE
import numpy as np
import torch
def get_trajectories(sde, B=10, N=100, x0=5):
dt = 1/N
t = torch.zeros(B) + sde.epsilon
x0 = torch.ones(B) * x0
x = torch.clone(x0)
trajectories = [x]
marginal_samples = [x]
for step in range(N):
t += dt
f = sde.drift(t, x)
g = sde.diffusion(t, x)
dw = torch.randn_like(x) * dt**(1/2)
x = x + f * dt + g * dw
trajectories.append(x)
marginal_samples.append(sde.sample_marginal(t, x0))
trajectories = np.stack(trajectories)
marginal_samples = np.stack(marginal_samples)
return trajectories, marginal_samples
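# Note: the loop above is a plain Euler-Maruyama discretization of
# dx = f(t, x) dt + g(t, x) dW with dW ~ N(0, dt), while marginal_samples
# draws directly from the analytic time marginals via sde.sample_marginal.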
# Visual test that marginals of the trajectories are as expected.
if __name__ == "__main__":
import matplotlib.pyplot as plt
B = 100
N = 1000
x0 = 1e2
sde1 = VESDE(sigma_min=1e-1, sigma_max=100)
sde2 = VPSDE(beta_min=1e-2, beta_max=20)
sde3 = TSVESDE(sigma_min=1e-6, sigma_max=1e9, t_star=0.4, beta=30, beta_fn="relu")
sde4 = TSVESDE(sigma_min=1e-4, sigma_max=1e6, t_star=0.4, beta=20, beta_fn="silu")
sde5 = TSVESDE(sigma_min=1e-4, sigma_max=1e6, t_star=0.4, beta=20, beta_fn="hardswish")
text = ["", "", "relu", "silu", "hardswish"]
for i, sde in enumerate([sde1, sde2, sde3, sde4, sde5]):
trajectories, marginal_samples = get_trajectories(sde, B, N, x0=x0)
fig, axs = plt.subplots(2, 2, figsize=(8, 4), sharex=True)
fig.suptitle(sde.__class__.__name__ + " " + text[i], y=0.96)
axs[0, 0].set_title("Trajectories")
axs[0, 1].set_title("Samples from the time marginals")
axs[1, 0].set_xlabel("t")
axs[1, 1].set_xlabel("t")
axs[0, 0].set_ylabel("x")
axs[1, 0].set_ylabel("x")
t = np.linspace(0, 1, N+1)
for b in range(B):
axs[0, 0].plot(t, trajectories[:, b])
axs[1, 0].plot(t, trajectories.std(axis=1), "k-", alpha=0.5, label=r"Empirical $\sigma(t)$")
axs[1, 0].plot(t, trajectories.mean(axis=1), "r-", alpha=0.5, label=r"Empirical $\mu(t)$")
mu, sigma = sde.marginal_prob_scalars(torch.tensor(t))
axs[1, 0].plot(t, sigma, "k--", label=r"Expected $\sigma(t)$")
axs[1, 0].plot(t, mu * x0, "r-", label=r"Expected $\mu(t)$")
# axs[1, 0].legend()
for b in range(B):
axs[0, 1].plot(t, marginal_samples[:, b])
axs[1, 1].plot(t, marginal_samples.std(axis=1), "k-", alpha=0.5, label=r"Empirical $\sigma(t)$")
axs[1, 1].plot(t, marginal_samples.mean(axis=1), "r-", alpha=0.5,label=r"Empirical $\mu(t)$")
axs[1, 1].plot(t, sigma, "k--", label=r"Expected $\sigma(t)$")
axs[1, 1].plot(t, mu * x0, "r-", label=r"Expected $\mu(t)$")
axs[1, 1].legend(bbox_to_anchor=(1.1, 1.05))
fig.tight_layout()
plt.show()
|
AlexandreAdamREPO_NAMEscore_modelsPATH_START.@score_models_extracted@score_models-master@tests@[email protected]_END.py
|
{
"filename": "downloader_tools.py",
"repo_name": "henrysky/astroNN",
"repo_path": "astroNN_extracted/astroNN-master/src/astroNN/shared/downloader_tools.py",
"type": "Python"
}
|
# ---------------------------------------------------------#
# astroNN.shared.downloader_tools: shared download tools
# ---------------------------------------------------------#
import hashlib
from tqdm import tqdm
class TqdmUpTo(tqdm):
"""
NAME:
sha256_checksum
PURPOSE:
Provides `update_to(n)` which uses `tqdm.update(delta_n)`.
INPUT:
OUTPUT:
HISTORY:
2017-Oct-25 - Written - Henry Leung (University of Toronto)
"""
def update_to(self, b=1, bsize=1, tsize=None):
"""
b : int, optional
Number of blocks transferred so far [default: 1].
bsize : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
            Total size (in tqdm units). If None (the default), the total remains unchanged.
"""
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n) # will also set self.n = b * bsize
def filehash(filename, block_size=65536, algorithm="sha256"):
"""
Computes the hash value for a file by using a specified hash algorithm.
:param filename: filename
:type filename: str
:param block_size: blocksize used to compute file hash
:type block_size: int
:param algorithm: hash algorithms like 'sha256' or 'md5' etc.
:type algorithm: str
    :return: the hash value (hex digest) of the file
:History: 2019-Mar-12 - Written - Henry Leung (University of Toronto)
"""
algorithm = algorithm.lower()
if algorithm not in hashlib.algorithms_guaranteed:
raise ValueError(f"{algorithm} is an unsupported hashing algorithm")
func_algorithm = getattr(hashlib, algorithm)()
with open(filename, "rb") as f:
for block in iter(lambda: f.read(block_size), b""):
func_algorithm.update(block)
return func_algorithm.hexdigest()
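if __name__ == "__main__":
    # Minimal usage sketch (not part of astroNN's public API); the URL and
    # file name below are placeholders.
    from urllib.request import urlretrieve
    url = "https://example.com/somefile.dat"
    filename = "somefile.dat"
    with TqdmUpTo(unit="B", unit_scale=True, miniters=1, desc=filename) as t:
        urlretrieve(url, filename, reporthook=t.update_to)
    print(filehash(filename, algorithm="sha256"))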
|
henryskyREPO_NAMEastroNNPATH_START.@astroNN_extracted@astroNN-master@src@astroNN@shared@[email protected]_END.py
|
{
"filename": "_offsetgroup.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnel/_offsetgroup.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OffsetgroupValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="offsetgroup", parent_name="funnel", **kwargs):
super(OffsetgroupValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@funnel@[email protected]_END.py
|
{
"filename": "analysis.py",
"repo_name": "erusseil/MvSR-analysis",
"repo_path": "MvSR-analysis_extracted/MvSR-analysis-main/analysis.py",
"type": "Python"
}
|
import numpy as np
import sys, os
import csv
import mvsr as mvsr
import shutil
import json
import pandas as pd
import pyoperon as Operon
import time
import sympy as sp
from sympy import sin, exp, sqrt, log, Abs
import string
import re
from sympy import symbols, lambdify
from iminuit import Minuit
from iminuit.cost import LeastSquares
import argparse
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import seaborn as sns
import matplotlib.pyplot as plt
def refit_and_plot(
folder,
func,
func_str,
initial_guess,
Xlim,
ylim,
labels,
saveas,
limits=None,
):
"""
Refits a func to all examples of the specified folder.
Plots the result (needs to be 2D).
Parameters
---------
folder: str
Path to the folder of examples
func: Python function
Function to fit
func_str: str
String of the function (used as title)
initial_guess: dict
Initial parameter values for the minimization
Xlim: list
[Xmin, Xmax] bounds to display
ylim: list
[ymin, ymax] bounds to display
labels: list
[x_label, y_label] used to legend the axis
saveas: str
Name of the file to save
limits: list
List of [lower, upper] bounds to use for the minimization.
Needs to be in the same order as initial_guess.
Optional
Returns
-------
List of r2 error of the fit
"""
smooth = [np.linspace(Xlim[0], Xlim[1], 500).T]
color_palette = sns.color_palette("tab10")
all_sets = np.sort([x for x in os.listdir(folder) if "csv" in x])
fig, axes = plt.subplots(1, 1, figsize=(16, 8))
all_sets = all_sets[: len(color_palette)]
errors = []
for idx, file in enumerate(all_sets):
df = pd.read_csv(f"{folder}/{file}")
X = df.iloc[:, :-1].values.T
y = df.yaxis.values
least_squares = LeastSquares(X, y, 1, func)
fit = Minuit(least_squares, **initial_guess)
if limits is not None:
for k in range(len(limits)):
fit.limits[list(initial_guess)[k]] = limits[k]
fit.migrad()
y_pred = func(X, *fit.values)
errors.append(r2_score(y, y_pred))
sx = np.sort(X, axis=0)
dic = fit.values.to_dict()
display = [f"{x}: {dic.get(x):.2f}" for x in dic]
display = ", ".join([str(item) for item in display])
plt.scatter(X.flatten(), y, label=display, color=color_palette[idx], s=60)
plt.plot(
smooth[0],
func(smooth, *fit.values).flatten(),
color=color_palette[idx],
alpha=0.6,
linewidth=3,
)
plt.ylim(ylim[0], ylim[1])
plt.xlim(min(smooth[0]), max(smooth[0]))
title = f"f(X1) = {func_str}".replace("X1", "X")
plt.title(title, fontsize=20)
plt.xlabel(labels[0], fontsize=18)
plt.ylabel(labels[1], fontsize=18)
for axis in ["top", "bottom", "left", "right"]:
axes.spines[axis].set_linewidth(2)
axes.tick_params(width=2, labelsize=17)
plt.legend(fontsize=17)
plt.savefig(f"plots/{saveas}.png", bbox_inches="tight")
return errors
def save_2D_example(X, y, path):
"""
Save 2D examples to the correct format to be used by MvSR
Parameters
---------
X: array
y: array
path: str
Path of the folder to stores examples
"""
header = ['Xaxis0', 'yaxis']
example = np.vstack((X, y)).T
with open(path, 'w', encoding='UTF8', newline='') as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(example)
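# Illustrative usage (editor's sketch, not part of the original script): write a
# noiseless linear toy example in the CSV layout expected by MvSR; the output
# path is a placeholder.
def _example_save_linear(path="toy_data/linear/perfect/example_00.csv"):
    X_toy = np.linspace(0.0, 1.0, 50)
    y_toy = 2.0 * X_toy + 1.0
    save_2D_example(X_toy, y_toy, path)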
def refit_solution(func, path, initial_guess):
"""
Fits a python function to an example and returns MSE
Parameters
---------
func: Python function
Function to fit
path: str
Path of the example to fit
initial_guess: dict
Initial parameter values for the minimization (iminuit)
Returns
-------
Mean square error of the fit
"""
data = pd.read_csv(path)
npoints = len(data)
if npoints < len(initial_guess):
return np.nan
X, y = data.iloc[:, :-1].values, data.yaxis.values
X = X.T
least_squares = LeastSquares(X, y, 0.1, func)
try:
fit = Minuit(least_squares, **initial_guess)
fit.migrad()
    except Exception:
        print('Minimization error: check the inputted function')
        fit = Minuit(least_squares, **initial_guess)
        fit.migrad()
y_pred = func(X, *fit.values)
y_pred = np.where(y_pred<1e50, y_pred, 1e50)
MSE_mvsr = mean_squared_error(y, y_pred)
return MSE_mvsr
def convert_string_to_func(SR_str, n_variables):
"""
    Converts a string produced by pyOperon into a Python function
Parameters
---------
SR_str: str
Result of pyoperon
n_variables: int
Dimensionality of X
"""
alphabet = list(string.ascii_uppercase)
    parameter_names = alphabet + [k + i for k in alphabet for i in alphabet]  # flat list: A..Z, AA, AB, ...
parameters_dict = {}
function_str = str(sp.N(sp.sympify(SR_str), 50))
floatzoo = 99.9
# Zoo detection :
while "zoo" in function_str:
function_str = function_str.replace("zoo", str(floatzoo), 1)
floatzoo += 1
function_str = function_str.replace("re", "")
function_str = function_str.replace("im", "")
if "I" in function_str:
function_str = function_str.replace("**I", "**1")
function_str = function_str.replace("*I", "*1")
function_str = function_str.replace("/I", "/1")
function_str = function_str.replace("+ I", "+ 0")
function_str = function_str.replace("- I", "- 0")
function_str = str(sp.N(sp.sympify(function_str), 50))
# Remove scientific notation
    function_str = re.sub(
        r"e\d+", "", re.sub(r"e\+\d+", "", re.sub(r"e-\d+", "", function_str))
    )
# Make sure sqrt are not mistaken for parameters up to 5 sqrt intricated
for i, code in enumerate(["one", "two", "three", "four", "five"]):
function_str = function_str.replace(f"**{str(0.5**(i+1))}", f"**sqrt{code}")
    all_floats = re.findall(r"\d+\.\d+", function_str) + ["0"]
if len(all_floats) > len(parameter_names):
print("WARNING WAY TOO BIG FUNCTIONS")
return function_str, False
n_parameters = 0
for idx, one_float in enumerate(all_floats):
if one_float in function_str:
if one_float == "0":
for zzz in [
i for i, letter in enumerate(function_str) if letter == "0"
]:
if not function_str[zzz - 1].isnumeric():
n_parameters += 1
function_str = function_str.replace(
one_float, parameter_names[idx], 1
)
parameters_dict[parameter_names[idx]] = float(one_float)
else:
n_parameters += 1
function_str = function_str.replace(one_float, parameter_names[idx], 1)
parameters_dict[parameter_names[idx]] = float(one_float)
    # Revert temporary sqrt protection
for i, code in enumerate(["one", "two", "three", "four", "five"]):
function_str = function_str.replace(f"**sqrt{code}", f"**{str(0.5**(i+1))}")
used_params = parameter_names[:n_parameters]
X = sp.IndexedBase("X")
param_symbols = {k: sp.Symbol(k) for k in used_params}
param_symbols["X"] = X
tempo_function_str = function_str
for i in range(n_variables):
tempo_function_str = tempo_function_str.replace(f"X{i+1}", f"X[{i}]")
try:
func = sp.lambdify(
["X"] + used_params,
eval(tempo_function_str, globals(), param_symbols),
modules=[
"numpy",
{"exp": np.exp, "log": np.log, "sin": np.sin, "abs": np.abs},
],
)
    except Exception:
        print("Original:", SR_str)
        print("After:", function_str)
        raise
return func, function_str, parameters_dict
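# Illustrative usage (editor's sketch, not in the original script): turn a
# pyoperon expression string into a callable and refit it on a single example
# file; the expression and path below are placeholders.
def _example_refit(expression="1.5*X1 + 0.3",
                   example_path="toy_data/linear/perfect/example_00.csv"):
    func, func_str, initial_guess = convert_string_to_func(expression, n_variables=1)
    return refit_solution(func, example_path, initial_guess)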
def create_folders(name, noises, settings):
"""
    Creates the folders associated with the function
    Parameters
---------
name: str
Name of the function's folder
noises: list (of floats or str)
List of the noise levels to consider
"""
if not os.path.exists("toy_results"):
os.makedirs("toy_results")
if not os.path.exists(f"toy_results/{name}"):
os.makedirs(f"toy_results/{name}")
for noise in noises:
if not os.path.exists(f"toy_results/{name}/{noise}"):
os.makedirs(f"toy_results/{name}/{noise}")
for maxL in settings["maxL"]:
# Delete previous data if it exists
if os.path.isdir(f"toy_results/{name}/{noise}/max{maxL}"):
shutil.rmtree(f"toy_results/{name}/{noise}/max{maxL}")
if not os.path.exists(f"toy_results/{name}/{noise}/max{maxL}"):
os.makedirs(f"toy_results/{name}/{noise}/max{maxL}")
def run_mvsr(name, nseeds, settings, use_single_view=None):
"""
    Run the main MvSR analysis for a given toy dataset at different noise levels.
    Saves results inside the "toy_results" folder.
    Parameters
---------
name: str
Name of the function's folder
nseeds: int
Number of repetition of the experiment
settings: dict
Parameters of the MvSR function.
Only 4 values will be changed in the main analysis namely:
settings = {'generations': generations,
'maxL': maxL, 'maxD': maxD,
'OperationSet': OperationSet}
use_single_view: None or int
If None, run MvSR normally
If int, run normal SR using only example number "use_single_view".
In that case the expression found is still evaluated on all examples
"""
noises = os.listdir(f"toy_data/{name}")
examples = sorted([x for x in os.listdir(f"toy_data/{name}/perfect") if "csv" in x])
n_variables = np.shape(pd.read_csv(f"toy_data/{name}/perfect/{examples[0]}"))[1] - 1
results = pd.DataFrame(
data=np.empty(shape=(nseeds, 2)),
columns=["expression", "losses"],
dtype="object",
)
for noise in noises:
for seed in range(nseeds):
result = mvsr.MultiViewSR(
f"toy_data/{name}/{noise}",
verbose=0,
seed=seed,
use_single_view=use_single_view,
**settings,
)
conversion = convert_string_to_func(result[0], n_variables)
# Case where the expression was too big to be fitted realistically
if not conversion[1]:
results.iloc[seed] = [conversion[0], np.nan]
else:
func, func_str, initial_guess = conversion
mse_refit = []
for example in examples:
perfect_path = f"toy_data/{name}/perfect/{example}"
refit = refit_solution(
func, perfect_path, initial_guess
)
mse_refit.append(refit)
results.iloc[seed] = [func_str, mse_refit]
if use_single_view is not None:
results.to_csv(
f"toy_results/{name}/{noise}/max{settings['maxL']}/example{use_single_view}_results.csv",
index=False,
)
else:
results.to_csv(
f"toy_results/{name}/{noise}/max{settings['maxL']}/MvSR_results.csv",
index=False,
)
def run_single_view(name, nseeds, settings):
path = f"toy_data/{name}/perfect/"
all_examples = sorted([x for x in os.listdir(path) if "csv" in x])
for example in range(len(all_examples)):
run_mvsr(name, nseeds, settings, use_single_view=example)
def run_analysis(name, nseeds, settings):
noises = os.listdir(f"toy_data/{name}")
create_folders(name, noises, settings)
with open(f"toy_results/{name}/settings.txt", "w") as f:
save_settings = settings.copy()
save_settings["OperationSet"] = str(save_settings["OperationSet"])
f.write(json.dumps(save_settings))
for maxL in settings["maxL"]:
setting = settings.copy()
setting["maxL"] = maxL
run_mvsr(name, nseeds, setting)
run_single_view(name, nseeds, setting)
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"--maxL", nargs="*", type=int, default=[5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25], help="maxL list"
)
arg_parser.add_argument("--opset", default="common", type=str, help="common or sin")
arg_parser.add_argument(
"--function", required=True, type=str, help="Function to extract"
)
arg_parser.add_argument("--nseeds", default=100, type=int, help="Number of seeds")
args = arg_parser.parse_args()
common_operation_set = (
Operon.NodeType.Square | Operon.NodeType.Exp | Operon.NodeType.Sqrt
)
if args.opset == "common":
operation_set = common_operation_set
elif args.opset == "sin":
operation_set = common_operation_set | Operon.NodeType.Sin
common_setting = {
"generations": 1000,
"maxL": args.maxL,
"maxD": 5,
"OperationSet": operation_set,
}
run_analysis(args.function, args.nseeds, common_setting)
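    # Example invocation (editor's note; the function name is a placeholder):
    #   python analysis.py --function polynomial --maxL 5 7 --nseeds 10 --opset common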
|
erusseilREPO_NAMEMvSR-analysisPATH_START.@MvSR-analysis_extracted@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "astropy/photutils",
"repo_path": "photutils_extracted/photutils-main/photutils/aperture/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains tools to perform aperture photometry.
"""
from .bounding_box import * # noqa: F401, F403
from .circle import * # noqa: F401, F403
from .converters import * # noqa: F401, F403
from .core import * # noqa: F401, F403
from .ellipse import * # noqa: F401, F403
from .mask import * # noqa: F401, F403
from .photometry import * # noqa: F401, F403
from .rectangle import * # noqa: F401, F403
from .stats import * # noqa: F401, F403
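def _example_circular_photometry():
    # Illustrative sketch (editor's addition, not part of the original subpackage):
    # sum counts inside a circular aperture using names re-exported above,
    # assuming the documented CircularAperture / aperture_photometry API.
    import numpy as np
    data = np.ones((100, 100))
    aperture = CircularAperture([(50.0, 50.0)], r=5.0)
    return aperture_photometry(data, aperture)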
|
astropyREPO_NAMEphotutilsPATH_START.@photutils_extracted@photutils-main@photutils@aperture@[email protected]_END.py
|
{
"filename": "composite_tensor_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/framework/composite_tensor_test.py",
"type": "Python"
}
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.composite_tensor."""
import gc
import sys
import weakref
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import googletest
from tensorflow.python.util import nest
class CTSpec(type_spec.TypeSpec):
"""A generic CompositeTensor TypeSpec, used for constructing tests."""
def __init__(self, component_specs, metadata=None):
self.component_specs = component_specs
self.metadata = metadata
value_type = property(lambda self: CT)
_component_specs = property(lambda self: self.component_specs)
def _serialize(self):
return (self.component_specs, self.metadata)
def _to_components(self, value):
return value.components
def _from_components(self, tensor_list):
return CT(tensor_list, self.metadata)
class CT(composite_tensor.CompositeTensor):
"""A generic CompositeTensor, used for constructing tests."""
_type_spec_class = CTSpec
def __init__(self, components, metadata=None):
if isinstance(components, list):
components = tuple(components)
self.components = components
self.metadata = metadata
@property
def _type_spec(self):
component_specs = nest.map_structure(type_spec.type_spec_from_value,
self.components)
return self._type_spec_class(component_specs, self.metadata)
def __repr__(self):
return '%s(%r, %r)' % (type(self).__name__, self.components, self.metadata)
def __eq__(self, other):
return (type(self) is type(other) and
self.components == other.components and
self.metadata == other.metadata)
# Another test CompositeTensor class. `tf.nest` should treat different CT
# classes without common supertypes as different structure types
# (e.g. for assert_same_structure).
class CTSpec2(CTSpec):
pass
class CT2(CT):
_type_spec_class = CTSpec2
# CompositeTensors with a common supertype are considered to be the same
# structure by tf.nest (e.g. for assert_same_structure).
class CT3(CT):
_type_spec_class = CTSpec
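# Illustrative note (editor's addition, not part of the original test file): with
# expand_composites=True, tf.nest flattens a CT down to its components, e.g.
#   nest.flatten(CT(['a', 'b']), expand_composites=True)  # -> ['a', 'b']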
@test_util.run_all_in_graph_and_eager_modes
class CompositeTensorTest(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters([
{'structure': CT(0),
'expected': [0],
'paths': [('CT',)]},
{'structure': CT('a'),
'expected': ['a'],
'paths': [('CT',)]},
{'structure': CT(['a', 'b', 'c']),
'expected': ['a', 'b', 'c'],
'paths': [('CT', 0), ('CT', 1), ('CT', 2)]},
{'structure': CT({'x': 'a', 'y': 'b', 'z': 'c'}),
'expected': ['a', 'b', 'c'],
'paths': [('CT', 'x'), ('CT', 'y'), ('CT', 'z')]},
{'structure': [{'k1': CT('a')}, CT(['b', {'x': CT({'y': 'c'})}])],
'expected': ['a', 'b', 'c'],
'paths': [(0, 'k1', 'CT'), (1, 'CT', 0), (1, 'CT', 1, 'x', 'CT', 'y')]},
{'structure': CT(0),
'expand_composites': False,
'expected': [CT(0)],
'paths': [()]},
{'structure': [{'k1': CT('a')}, CT(['b', {'x': CT({'y': 'c'})}])],
'expand_composites': False,
'expected': [CT('a'), CT(['b', {'x': CT({'y': 'c'})}])],
'paths': [(0, 'k1'), (1,)]},
]) # pyformat: disable
def testNestFlatten(self, structure, expected, paths, expand_composites=True):
result = nest.flatten(structure, expand_composites=expand_composites)
self.assertEqual(result, expected)
result_with_paths = nest.flatten_with_tuple_paths(
structure, expand_composites=expand_composites)
self.assertEqual(result_with_paths, list(zip(paths, expected)))
string_paths = ['/'.join(str(p) for p in path) for path in paths] # pylint: disable=g-complex-comprehension
result_with_string_paths = nest.flatten_with_joined_string_paths(
structure, expand_composites=expand_composites)
self.assertEqual(result_with_string_paths,
list(zip(string_paths, expected)))
flat_paths_result = list(
nest.yield_flat_paths(structure, expand_composites=expand_composites))
self.assertEqual(flat_paths_result, paths)
@parameterized.parameters([
{'s1': [1, 2, 3],
's2': [CT(['a', 'b']), 'c', 'd'],
'expand_composites': False,
'expected': [CT(['a', 'b']), 'c', 'd'],
'paths': [(0,), (1,), (2,)]},
{'s1': [CT([1, 2, 3])],
's2': [5],
'expand_composites': False,
'expected': [5],
'paths': [(0,)]},
{'s1': [[CT([9, 9, 9])], 999, {'y': CT([9, 9])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expand_composites': False,
'expected': [CT([1, 2, 3]), 100, CT([CT([4, 5]), 6])],
'paths': [(0, 0), (1,), (2, 'y')]},
{'s1': [[CT([9, 9, 9])], 999, {'y': CT([CT([9, 9]), 9])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([5, 6])}],
'expand_composites': False,
'expected': [CT([1, 2, 3]), 100, CT([5, 6])],
'paths': [(0, 0), (1,), (2, 'y')]},
]) # pyformat: disable
def testNestFlattenUpTo(self, s1, s2, expected, paths,
expand_composites=True):
result = nest.flatten_up_to(s1, s2, expand_composites=expand_composites)
self.assertEqual(expected, result)
result_with_paths = nest.flatten_with_tuple_paths_up_to(
s1, s2, expand_composites=expand_composites)
self.assertEqual(result_with_paths, list(zip(paths, expected)))
@parameterized.parameters([
{'structure': CT(0),
'sequence': [5],
'expected': CT(5)},
{'structure': CT(['a', 'b', 'c']),
'sequence': ['A', CT(['b']), {'x': 'y'}],
'expected': CT(['A', CT(['b']), {'x': 'y'}])},
{'structure': [{'k1': CT('a')}, CT(['b', {'x': CT({'y': 'c'})}])],
'sequence': ['A', 'B', 'C'],
'expected': [{'k1': CT('A')}, CT(['B', {'x': CT({'y': 'C'})}])]},
{'structure': [{'k1': CT('a')}, CT(['b', {'x': CT({'y': 'c'})}])],
'sequence': ['A', 'B'],
'expand_composites': False,
'expected': [{'k1': 'A'}, 'B']},
{'structure': CT(0, metadata='abc'),
'sequence': [5],
'expected': CT(5, metadata='abc')},
]) # pyformat: disable
def testNestPackSequenceAs(self,
structure,
sequence,
expected,
expand_composites=True):
result = nest.pack_sequence_as(
structure, sequence, expand_composites=expand_composites)
self.assertEqual(result, expected)
@parameterized.parameters([
{'s1': CT('abc'), 's2': CT('xyz')},
{'s1': CT(['a', 'b', 'c']), 's2': CT(['d', 'e', 'f'])},
{'s1': [1, CT([10]), CT(200, metadata='xyz')],
's2': [8, CT([55]), CT(100, metadata='xyz')]},
{'s1': CT('abc'), 's2': CT3('xyz')},
{'s1': CT(['a', 'b', 'c']), 's2': CT3(['d', 'e', 'f'])},
{'s1': [1, CT([10]), CT(200, metadata='xyz')],
's2': [8, CT([55]), CT3(100, metadata='xyz')]},
]) # pyformat: disable
def testNestAssertSameStructure(self, s1, s2, expand_composites=True):
nest.assert_same_structure(s1, s2, expand_composites=expand_composites)
nest.assert_shallow_structure(s1, s2, expand_composites=expand_composites)
@parameterized.parameters([
{'s1': CT(0), 's2': CT(['x'])},
{'s1': CT([1]), 's2': CT([1, 2])},
{'s1': CT({'x': 1}), 's2': CT({'y': 1})},
{'s1': CT(0), 's2': CT(0, metadata='xyz')},
{'s1': CT(0, metadata='xyz'), 's2': CT(0)},
{'s1': CT(0, metadata='xyz'), 's2': CT(0, metadata='abc')},
{'s1': CT(['a', 'b', 'c']), 's2': CT(['d', 'e'])},
{'s1': [1, CT(['a']), CT('b', metadata='xyz')],
's2': [8, CT([55, 66]), CT(100, metadata='abc')]},
{'s1': CT(0), 's2': CT2(0)},
]) # pyformat: disable
def testNestAssertSameStructureCompositeMismatch(self,
s1,
s2,
error=ValueError):
# s1 and s2 have the same structure if expand_composites=False; but
# different structures if expand_composites=True.
nest.assert_same_structure(s1, s2, expand_composites=False)
nest.assert_shallow_structure(s1, s2, expand_composites=False)
with self.assertRaises(error): # pylint: disable=g-error-prone-assert-raises
nest.assert_same_structure(s1, s2, expand_composites=True)
@parameterized.parameters([
# Note: there are additional test cases in testNestAssertSameStructure.
{'s1': [1], 's2': [CT(1)]},
{'s1': [[CT([1, 2, 3])], 100, {'y': CT([5, 6])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expand_composites': False},
{'s1': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([5, 6])}],
'expand_composites': False},
]) # pyformat: disable
def testNestAssertShallowStructure(self, s1, s2, expand_composites=True):
nest.assert_shallow_structure(s1, s2, expand_composites=expand_composites)
@parameterized.parameters([
# Note: there are additional test cases in
# testNestAssertSameStructureCompositeMismatch.
{'s1': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([5, 6])}]},
{'s1': CT([1, 2, 3]),
's2': [1, 2, 3],
'check_types': False},
]) # pyformat: disable
def testNestAssertShallowStructureCompositeMismatch(self,
s1,
s2,
check_types=True):
with self.assertRaises((TypeError, ValueError)): # pylint: disable=g-error-prone-assert-raises
nest.assert_shallow_structure(
s1, s2, expand_composites=True, check_types=check_types)
@parameterized.parameters([
{'structure': CT(1, metadata=2),
'expected': CT(11, metadata=2)},
{'structure': CT({'x': 1, 'y': [2, 3]}, metadata=2),
'expected': CT({'x': 11, 'y': [12, 13]}, metadata=2)},
{'structure': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expected': [[CT([11, 12, 13])], 110, {'y': CT([CT([14, 15]), 16])}]},
]) # pyformat: disable
def testNestMapStructure(self, structure, expected, expand_composites=True):
func = lambda x: x + 10
result = nest.map_structure(
func, structure, expand_composites=expand_composites)
self.assertEqual(result, expected)
@parameterized.parameters([
{'s1': [[CT([1, 2, 3])], 100, {'y': 4}],
's2': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expected': [[CT([11, 12, 13])], 110, {'y': CT([CT([4, 5]), 6])}]}
]) # pyformat: disable
def testNestMapStructureUpTo(self, s1, s2, expected):
func = lambda x: x + 10 if isinstance(x, int) else x
result = nest.map_structure_up_to(s1, func, s2, expand_composites=True)
self.assertEqual(result, expected)
@parameterized.parameters([
{'structure': CT('a'),
'expected': CT('CT:a')},
{'structure': CT(['a', 'b']),
'expected': CT(['CT/0:a', 'CT/1:b'])},
{'structure': [[CT([1, 2, 3])], 100, {'y': CT([CT([4, 5]), 6])}],
'expected': [
[CT(['0/0/CT/0:1', '0/0/CT/1:2', '0/0/CT/2:3'])],
'1:100',
{'y': CT([CT(['2/y/CT/0/CT/0:4', '2/y/CT/0/CT/1:5']),
'2/y/CT/1:6'])}]},
]) # pyformat: disable
def testNestMapStructureWithPaths(self,
structure,
expected,
expand_composites=True):
def func1(path, x):
return '%s:%s' % (path, x)
result = nest.map_structure_with_paths(
func1, structure, expand_composites=expand_composites)
self.assertEqual(result, expected)
# Use the same test cases for map_structure_with_tuple_paths.
def func2(tuple_path, x):
return '%s:%s' % ('/'.join(str(v) for v in tuple_path), x)
result = nest.map_structure_with_tuple_paths(
func2, structure, expand_composites=expand_composites)
self.assertEqual(result, expected)
@parameterized.parameters([
{'s1': [[CT([1, 2, 3])], 100, {'y': [4, 5]}],
's2': [[CT([1, 2, 3])], 100, {'y': [CT([4, 5]), 6]}],
'expected': [
[CT(['0/0/CT/0:1', '0/0/CT/1:2', '0/0/CT/2:3'])],
('1:100'),
{'y': ['2/y/0:CT((4, 5), None)', '2/y/1:6']}]},
]) # pyformat: disable
def testNestMapStructureWithTuplePathsUpTo(self, s1, s2, expected):
def func(tuple_path, x):
return '%s:%s' % ('/'.join(str(v) for v in tuple_path), x)
result = nest.map_structure_with_tuple_paths_up_to(
s1, func, s2, expand_composites=True)
self.assertEqual(result, expected)
def testNestGetTraverseShallowStructure(self):
func = lambda t: not (isinstance(t, CT) and t.metadata == 'B')
structure = [CT([1, 2], metadata='A'), CT([CT(3)], metadata='B')]
result = nest.get_traverse_shallow_structure(
func, structure, expand_composites=True)
expected = [CT([True, True], metadata='A'), False]
self.assertEqual(result, expected)
def testMemoryIsFreed(self):
# Note: we use `np.array` values for CT and `set` values for
# metadata because we need to construct weakrefs to them. Other builtin
# types, such as `list` and `tuple`, do not support weakrefs.
ct1 = CT(np.array([1, 2]), set(['no', 'leaks']))
ct2 = CT(np.array([3, 4]), set(['no', 'leaks']))
ct3 = CT(np.array([5, 6]), set(['other', 'metadata']))
# Note: map_structure exercises flatten, pack_sequence_as, and
# assert_same_structure.
func = lambda x, y: x + y
ct4 = nest.map_structure(func, ct1, ct2, expand_composites=True)
# Check that the exception-raising path in assert_same_structure
# doesn't leak any objects.
with self.assertRaises(ValueError):
nest.map_structure(func, ct2, ct3, expand_composites=True)
if hasattr(sys, 'exc_clear'):
sys.exc_clear() # Remove any references in exception stack traces.
refs = []
for ct in [ct1, ct2, ct3, ct4]:
refs.append(weakref.ref(ct))
refs.append(weakref.ref(ct.components))
refs.append(weakref.ref(ct.metadata))
del ct # pylint: disable=undefined-loop-variable
for ref in refs:
self.assertIsNotNone(ref())
del ct1, ct2, ct3, ct4
gc.collect()
for ref in refs:
self.assertIsNone(ref())
# pylint: disable=g-long-lambda
@parameterized.named_parameters([
('IndexedSlicesNoDenseShape', lambda: indexed_slices.IndexedSlices(
constant_op.constant([1, 2, 3]), constant_op.constant([2, 8, 4]))),
('IndexedSlicesInt32DenseShape', lambda: indexed_slices.IndexedSlices(
constant_op.constant([1, 2, 3]), constant_op.constant([2, 8, 4]),
constant_op.constant([10], dtypes.int32))),
('IndexedSlicesInt64DenseShape', lambda: indexed_slices.IndexedSlices(
constant_op.constant([[1, 2], [3, 4]]), constant_op.constant([2, 8]),
constant_op.constant([10, 2], dtypes.int64))),
('RaggedTensorRaggedRank1',
lambda: ragged_factory_ops.constant([[1, 2], [3]])),
('RaggedTensorRaggedRank2',
lambda: ragged_factory_ops.constant([[[1, 2], [3]], [[6, 7, 8]]])),
('SparseTensor',
lambda: sparse_tensor.SparseTensor([[3], [7]], ['a', 'b'], [10])),
('Nested structure', lambda: {
'a':
indexed_slices.IndexedSlices(
constant_op.constant([1, 2, 3]),
constant_op.constant([2, 8, 4])),
'b': [
ragged_factory_ops.constant([[1, 2], [3]]),
sparse_tensor.SparseTensor([[3], [7]], ['a', 'b'], [10])
]
}),
])
def testAssertSameStructureWithValueAndTypeSpec(self, value_func):
value = value_func()
spec = nest.map_structure(type_spec.type_spec_from_value, value,
expand_composites=False)
nest.assert_same_structure(value, spec, expand_composites=True)
def testConvertVariablesToTensors(self):
ct = CT(1)
result = ct._convert_variables_to_tensors()
self.assertIs(result, ct)
result2 = composite_tensor.convert_variables_to_tensors(ct)
self.assertIs(result2, ct)
if __name__ == '__main__':
googletest.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@framework@[email protected]_END.py
|
{
"filename": "setup.py",
"repo_name": "hahnec/torchimize",
"repo_path": "torchimize_extracted/torchimize-master/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
__author__ = "Christopher Hahne"
__email__ = "[email protected]"
__license__ = """
Copyright (c) 2022 Christopher Hahne <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from setuptools import setup, find_packages
from torchimize import __version__
from docutils import core
from pathlib import Path
path = Path.cwd()
# parse description section text
with open(str(path / 'README.rst'), 'r') as f:
data = f.read()
readme_nodes = list(core.publish_doctree(data))
for node in readme_nodes:
if node.astext().startswith('Description'):
long_description = node.astext().rsplit('\n\n')[1]
# parse package requirements from text file
with open(str(path / 'requirements.txt'), 'r') as f:
req_list = f.read().split('\n')
setup(
name='torchimize',
version=__version__,
description='Optimization Algorithms using Pytorch',
long_description=long_description,
long_description_content_type='text/x-rst',
url='http://github.com/hahnec/torchimize',
author='Christopher Hahne',
author_email='[email protected]',
license='GNU GPL V3.0',
keywords='pytorch torch optimization mathematical linear programming gauss newton levenberg marquardt',
packages=find_packages(),
install_requires=req_list,
include_package_data=True,
zip_safe=False,
)
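# Editor's note (illustrative, not part of the original setup script): the package
# can be installed locally from the repository root with, e.g., `pip install .`.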
|
hahnecREPO_NAMEtorchimizePATH_START.@torchimize_extracted@[email protected]@.PATH_END.py
|
{
"filename": "webgl-text-and-annotations.md",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/doc/unconverted/python/webgl-text-and-annotations.md",
"type": "Markdown"
}
|
---
jupyter:
jupytext:
notebook_metadata_filter: all
text_representation:
extension: .md
format_name: markdown
format_version: '1.1'
jupytext_version: 1.1.1
kernelspec:
display_name: Python 2
language: python
name: python2
plotly:
description: How to add webGL based text labels and annotations to plots in python
display_as: advanced_opt
has_thumbnail: false
language: python
layout: base
name: WebGL Text and Annotations
order: 2
page_type: example_index
permalink: python/webgl-text-and-annotations/
thumbnail: thumbnail/webgl-text-and-annotations.jpg
---
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!!
### Heatmap with Annotations
```python
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
from plotly.figure_factory import create_annotated_heatmap
n = 250
y = [[i]*n for i in range(12)]
y = [item for sublist in y for item in sublist]
trace = dict(type='heatmap', z=np.random.randint(1, 10, (12, n)), colorscale='Viridis')
data = [trace]
# Here's the key part - Scattergl text!
data.append({'type': 'scattergl',
'mode': 'text',
'x': list(range(n))*12,
'y': y,
'text': np.random.choice(list('ATGC'), 12*250),
'textfont': {
'size': 20
}})
steps = [{'args': ['xaxis', {'range': [-0.5 + e, 30.5 + e]}], 'method': 'relayout'} for e in range(n-30)]
sliders = [dict(
active = 0,
steps = steps
)]
layout = dict(sliders=sliders)
layout['xaxis'] = {'range': [-0.5, 30.5]}
fig = dict(data=data, layout=layout)
py.iplot(fig, validate=False)
```
### Reference
See https://plot.ly/python/reference/#scattergl for more information and chart attribute options!
```python
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'webgl-text-and-annotations.ipynb', 'python/webgl-text-and-annotations/', 'WebGL Text and Annotations',
'How to add webGL based text labels and annotations to plots in python',
title = 'WebGL Text and Annotations | plotly',
name = 'WebGL Text and Annotations',
has_thumbnail='false', thumbnail='thumbnail/webgl-text-and-annotations.jpg',
language='python',
page_type='example_index', display_as='style_opt', order=2,
ipynb= '~notebook_demo/219', uses_plotly_offline=False)
```
```python
```
|
[email protected][email protected]@doc@unconverted@[email protected]@.PATH_END.py
|
{
"filename": "test_rectangle.py",
"repo_name": "macrocosme/shwirl",
"repo_path": "shwirl_extracted/shwirl-master/shwirl/extern/vispy/visuals/tests/test_rectangle.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Tests for RectPolygonVisual
All images are of size (100,100) to keep a small file size
"""
from vispy.scene import visuals, transforms
from vispy.testing import (requires_application, TestingCanvas,
run_tests_if_main, raises)
from vispy.testing.image_tester import assert_image_approved
@requires_application()
def test_rectangle_draw():
"""Test drawing rectpolygons without transform using RectPolygonVisual"""
with TestingCanvas() as c:
rectpolygon = visuals.Rectangle(center=(50, 50, 0), height=40.,
width=80., color='red',
parent=c.scene)
assert_image_approved(c.render(), 'visuals/rectpolygon1.png')
rectpolygon.parent = None
rectpolygon = visuals.Rectangle(center=(50, 50, 0), height=40.,
width=80., radius=10., color='red',
parent=c.scene)
assert_image_approved(c.render(), 'visuals/rectpolygon2.png')
rectpolygon.parent = None
rectpolygon = visuals.Rectangle(center=(50, 50, 0), height=40.,
width=80., radius=10., color='red',
border_color=(0, 1, 1, 1),
parent=c.scene)
assert_image_approved(c.render(), 'visuals/rectpolygon3.png')
rectpolygon.parent = None
rectpolygon = visuals.Rectangle(center=(50, 50, 0), height=40.,
width=80., radius=10.,
border_color='white',
parent=c.scene)
assert_image_approved(c.render(), 'visuals/rectpolygon4.png',
min_corr=0.5)
rectpolygon.parent = None
rectpolygon = visuals.Rectangle(center=(50, 50, 0), height=60.,
width=80., radius=[25, 10, 0, 15],
color='red', border_color=(0, 1, 1, 1),
parent=c.scene)
assert_image_approved(c.render(), 'visuals/rectpolygon5.png')
@requires_application()
def test_rectpolygon_draw():
"""Test drawing transformed rectpolygons using RectPolygonVisual"""
with TestingCanvas() as c:
rectpolygon = visuals.Rectangle(center=(0., 0.), height=20.,
width=20., radius=10., color='blue',
parent=c.scene)
rectpolygon.transform = transforms.STTransform(scale=(2.0, 3.0),
translate=(50, 50))
assert_image_approved(c.render(), 'visuals/rectpolygon6.png')
rectpolygon.parent = None
rectpolygon = visuals.Rectangle(center=(0., 0.), height=20.,
width=20., radius=10.,
color='blue', border_color='red',
parent=c.scene)
rectpolygon.transform = transforms.STTransform(scale=(2.0, 3.0),
translate=(50, 50))
assert_image_approved(c.render(), 'visuals/rectpolygon7.png')
rectpolygon.parent = None
rectpolygon = visuals.Rectangle(center=(0., 0.), height=60.,
width=60., radius=10.,
border_color='red',
parent=c.scene)
rectpolygon.transform = transforms.STTransform(scale=(1.5, 0.5),
translate=(50, 50))
assert_image_approved(c.render(), 'visuals/rectpolygon8.png',
min_corr=0.5)
rectpolygon.parent = None
rectpolygon = visuals.Rectangle(center=(0., 0.), height=60.,
width=60., radius=[25, 10, 0, 15],
color='blue', border_color='red',
parent=c.scene)
rectpolygon.transform = transforms.STTransform(scale=(1.5, 0.5),
translate=(50, 50))
assert_image_approved(c.render(), 'visuals/rectpolygon9.png')
@requires_application()
def test_reactive_draw():
"""Test reactive RectPolygon attributes"""
with TestingCanvas() as c:
rectpolygon = visuals.Rectangle(center=(50, 50, 0), height=40.,
width=80., color='red',
parent=c.scene)
rectpolygon.radius = [20., 20, 0., 10.]
assert_image_approved(c.render(),
'visuals/reactive_rectpolygon1.png')
rectpolygon.center = (60, 60, 0)
assert_image_approved(c.render(),
'visuals/reactive_rectpolygon2.png')
rectpolygon.color = 'blue'
assert_image_approved(c.render(),
'visuals/reactive_rectpolygon3.png')
rectpolygon.border_color = 'yellow'
assert_image_approved(c.render(),
'visuals/reactive_rectpolygon4.png')
rectpolygon.radius = 10.
assert_image_approved(c.render(),
'visuals/reactive_rectpolygon5.png')
@requires_application()
def test_attributes():
"""Test if attribute checks are in place"""
with TestingCanvas() as c:
rectpolygon = visuals.Rectangle(center=(50, 50, 0), height=40.,
width=80., color='red',
parent=c.scene)
with raises(ValueError):
rectpolygon.height = 0
with raises(ValueError):
rectpolygon.width = 0
with raises(ValueError):
rectpolygon.radius = [10, 0, 5]
with raises(ValueError):
rectpolygon.radius = [10.]
with raises(ValueError):
rectpolygon.radius = 21.
run_tests_if_main()
|
macrocosmeREPO_NAMEshwirlPATH_START.@shwirl_extracted@shwirl-master@shwirl@extern@vispy@visuals@tests@[email protected]_END.py
|
{
"filename": "test.py",
"repo_name": "bolverk/huji-rich",
"repo_path": "huji-rich_extracted/huji-rich-master/convergence/simple_waves_2d/lagrangian_second_order/test.py",
"type": "Python"
}
|
#! /usr/bin/python
def goodness_of_fit(a1, a2):
import math
diff2 = [(x-y)**2 for x,y in zip(a1,a2)]
return math.sqrt(sum(diff2)/(max(a1)-min(a1))/len(a1))
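# Illustrative check (editor's note, not in the original test): identical arrays
# give a perfect fit, e.g. goodness_of_fit([1., 2., 3.], [1., 2., 3.]) == 0.0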
def main():
import numpy
# Simulation results
rawd = numpy.loadtxt('x_prof_initial.txt')
xr0 = rawd[:,0]
dr0 = rawd[:,1]
pr0 = rawd[:,2]
vr0 = rawd[:,3]
cr0 = rawd[:,4]
time = numpy.loadtxt('time.txt')
xs0 = numpy.sort(xr0)
ids0 = numpy.argsort(xr0)
ds0 = [dr0[i] for i in ids0]
ps0 = [pr0[i] for i in ids0]
vs0 = [vr0[i] for i in ids0]
cs0 = [cr0[i] for i in ids0]
rawd = numpy.loadtxt('x_prof_final.txt')
xr = rawd[:,0]
dr = rawd[:,1]
pr = rawd[:,2]
vr = rawd[:,3]
cr = rawd[:,4]
xs = numpy.sort(xr)
ids = numpy.argsort(xr)
ds = [dr[i] for i in ids]
ps = [pr[i] for i in ids]
vs = [vr[i] for i in ids]
cs = [cr[i] for i in ids]
# Analytic results
xa = [x+time*(c+v) for x,v,c in zip(xs0,vs0,cs0)]
# Prepare for interpolation
x_inside = [z for z in xa if z<numpy.max(xs) and
z>numpy.min(xs)]
d_analytic = [ds0[i] for i in range(len(xa))
if xa[i]<numpy.max(xs) and
xa[i]>numpy.min(xs)]
p_analytic = [ps0[i] for i in range(len(xa))
if xa[i]<numpy.max(xs) and
xa[i]>numpy.min(xs)]
v_analytic = [vs0[i] for i in range(len(xa))
if xa[i]<numpy.max(xs) and
xa[i]>numpy.min(xs)]
#d_analytic = numpy.interp(x_inside,xa,ds0)
d_numeric = numpy.interp(x_inside,xs,ds)
#p_analytic = numpy.interp(x_inside,xa,ps0)
p_numeric = numpy.interp(x_inside,xs,ps)
#v_analytic = numpy.interp(x_inside,xa,vs0)
v_numeric = numpy.interp(x_inside,xs,vs)
gof1 = goodness_of_fit(d_numeric,d_analytic)
gof2 = goodness_of_fit(p_numeric,p_analytic)
gof3 = goodness_of_fit(v_numeric,v_analytic)
f = open('gradesheet.txt','w')
f.write(str(gof1)+'\n')
f.write(str(gof2)+'\n')
f.write(str(gof3)+'\n')
f.close()
return gof1<0.2 and \
gof2<0.61 and \
gof3<0.3
import sys
if __name__ == '__main__':
    print(main())
|
bolverkREPO_NAMEhuji-richPATH_START.@huji-rich_extracted@huji-rich-master@convergence@simple_waves_2d@[email protected]@.PATH_END.py
|
{
"filename": "misc.py",
"repo_name": "thomasorb/orb",
"repo_path": "orb_extracted/orb-master/orb/utils/misc.py",
"type": "Python"
}
|
#!/usr/bin/python
# *-* coding: utf-8 *-*
# Author: Thomas Martin <[email protected]>
# File: misc.py
## Copyright (c) 2010-2020 Thomas Martin <[email protected]>
##
## This file is part of ORB
##
## ORB is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## ORB is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
##
## You should have received a copy of the GNU General Public License
## along with ORB. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import logging
import numpy as np
import math
import warnings
import sys
import orb.cutils
import pyregion
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
import orb.utils.io
import fnmatch
def find(pattern, path):
"""Find a file from a pattern at a given path
https://stackoverflow.com/questions/1724693/find-a-file-in-python
"""
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
# example usage: find('*.txt', '/path/to/dir')
def aggregate_pixels(pixel_list, radius=1.42):
"""Aggregate neighbouring pixels into a set of sources. Two
    neighbours are found if their distance is smaller than a given
radius (in pixels).
:param pixel_list: A list of pixel position as returned by a
function like numpy.nonzero.
:param radius: (Optional) Max separation between two pixels of the
same source (default 1.42).
:returns: A list of pixel list. Each item of the list corresponds
to a source and each source is itself a list of pixel positions
(x,y).
"""
def get_neighbours(ix, iy, pixel_list, agg_list_ok, radius):
_x = pixel_list[0]
_y = pixel_list[1]
_r = np.sqrt((_x - ix)**2. + (_y - iy)**2.)
_r[agg_list_ok == 1] = 2. * radius
nei_x = _x[_r < radius]
nei_y = _y[_r < radius]
agg_list_ok[_r < radius] = True
neighbours = list()
for inei in range(len(nei_x)):
neighbours.append((nei_x[inei], nei_y[inei]))
return neighbours, agg_list_ok
sources = list()
agg_list_ok = np.zeros(len(pixel_list[0]), dtype=bool)
x = pixel_list[0]
y = pixel_list[1]
for isource in range(len(x)):
ii = x[isource]
ij = y[isource]
sys.stdout.write(' '*10)
sys.stdout.write('\r {}/{}'.format(isource, len(x)))
if not agg_list_ok[isource]:
agg_list_ok[isource] = True
new_source = list()
new_source.append((ii,ij))
more = True
while more:
for pix in new_source:
more = False
neighbours, agg_list_ok = get_neighbours(
pix[0], pix[1], pixel_list, agg_list_ok, radius)
if len(neighbours) > 0:
more = True
for inei in neighbours:
new_source.append((inei))
if len(new_source) > 1:
sources.append(new_source)
sys.stdout.flush()
print(' '*20)
logging.info('{} sources detected'.format(len(sources)))
return sources
def get_axis_from_hdr(hdr, axis_index=1):
"""Return axis from a classic FITS header
:param hdr: FITS header
:param axis_index: (Optional) Index of the axis to retrieve
(default 1)
"""
naxis = int(hdr['NAXIS{}'.format(axis_index)])
crpix = float(hdr['CRPIX{}'.format(axis_index)])
crval = float(hdr['CRVAL{}'.format(axis_index)])
cdelt = float(hdr['CDELT{}'.format(axis_index)])
return (np.arange(naxis, dtype=float) + 1. - crpix) * cdelt + crval
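# Illustrative usage (editor's sketch, not part of the original module): build a
# wavelength axis from a minimal header-like mapping; the values are placeholders.
def _example_axis():
    hdr = {'NAXIS1': 100, 'CRPIX1': 1.0, 'CRVAL1': 350.0, 'CDELT1': 0.5}
    return get_axis_from_hdr(hdr, axis_index=1)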
def get_mask_from_ds9_region_file(reg_path, x_range, y_range,
integrate=True, header=None):
"""Return a mask from a ds9 region file or from a ds9-like region
string.
:param reg_path: Path to a ds9 region file or ds9-like region string
:param x_range: Range of x image coordinates
considered as valid. Pixels outside this range are
rejected..
:param y_range: Range of y image coordinates
considered as valid. Pixels outside this range are
rejected.
:param integrate: (Optional) If True, all pixels are integrated
into one mask, else a list of region masks is returned (default
True)
:param header: (Optional) Header containing the WCS transformation
if the region file is in celestial coordinates (default None).
.. note:: The returned array can be used like a list of
indices returned by e.g. numpy.nonzero().
.. note:: Coordinates can be celestial or image coordinates
(x,y). if coordinates are celestial a header must be passed to
the function.
"""
### Warning: pyregion works in 'transposed' coordinates
### We will work here in python (y,x) convention
if os.path.exists(reg_path):
_regions = pyregion.open(reg_path)
else:
_regions = pyregion.parse(reg_path)
if not _regions.check_imagecoord():
if header is None: raise Exception('DS9 region file is not in image coordinates. Please change it to image coordinates or pass a astropy.io.fits.Header instance to the function to transform the actual coordinates to image coordinates.')
else:
wcs = pywcs.WCS(header, naxis=2, relax=True)
#_regions = _regions.as_imagecoord(wcs.to_header())
# WCS does not export NAXIS1, NAXIS2 anymore...
h = wcs.to_header(relax=True)
h.set('NAXIS1', header['NAXIS1'])
h.set('NAXIS2', header['NAXIS2'])
_regions = _regions.as_imagecoord(h)
shape = (np.max(y_range), np.max(x_range))
mask = np.zeros(shape, dtype=float)
hdu = pyfits.PrimaryHDU(mask)
mask_list = list()
for _region in _regions:
if len(_regions) > 1:
sys.stdout.write('\r loading region: {}'.format(_region))
imask2d = pyregion.get_mask([_region], hdu)
imask2d[:np.min(x_range), :] = 0
imask2d[:, :np.min(y_range)] = 0
imask = np.nonzero(imask2d)
mask[imask] = 1
if integrate:
mask[imask] = True
else:
mask_list.append([imask[1], imask[0]]) # transposed to
# return
if len(_regions) > 1:
sys.stdout.flush()
if len(_regions) > 1:
print('\n')
if integrate:
return np.nonzero(mask.T) # transposed to return
else:
return mask_list
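# Illustrative usage (editor's sketch, not from the original module): a ds9-like
# region string in image coordinates can be passed directly, e.g.
#   mask = get_mask_from_ds9_region_file('image;circle(100,100,20)',
#                                        [0, 200], [0, 200])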
def compute_obs_params(nm_min_filter, nm_max_filter,
theta_min=5.01, theta_max=11.28):
"""Compute observation parameters (order, step size) given the
filter bandpass.
:param nm_min_filter: Min wavelength of the filter in nm.
:param nm_max_filter: Max wavelength of the filter in nm.
:param theta_min: (Optional) Min angle of the detector (default
5.01).
:param theta_max: (Optional) Max angle of the detector (default
11.28).
:return: A tuple (order, step size, max wavelength)
"""
def get_step(nm_min, n, cos_min):
return int(nm_min * ((n+1.)/(2.*cos_min)))
def get_nm_max(step, n, cos_max):
return 2. * step * cos_max / float(n)
cos_min = math.cos(math.radians(theta_min))
cos_max = math.cos(math.radians(theta_max))
n = 0
order_found = False
while n < 200 and not order_found:
n += 1
step = get_step(nm_min_filter, n, cos_min)
nm_max = get_nm_max(step, n, cos_max)
if nm_max <= nm_max_filter:
order_found = True
order = n - 1
step = get_step(nm_min_filter, order, cos_min)
nm_max = get_nm_max(step, order, cos_max)
return order, step, nm_max
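# Illustrative usage (editor's sketch, not in the original module): compute the
# folding order and step size for a filter bandpass; the wavelengths below are
# placeholder values in nm.
def _example_obs_params():
    return compute_obs_params(647.0, 685.0)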
def correct_bad_frames_vector(bad_frames_vector, dimz):
"""Remove bad indexes of the bad frame vector.
:param bad_frames_vector: The vector of indexes to correct
:param dimz: Dimension of the cube along the 3rd axis.
"""
if (bad_frames_vector is None
or np.size(bad_frames_vector) == 0):
return bad_frames_vector
bad_frames_vector= np.array(np.copy(bad_frames_vector))
bad_frames_vector = [bad_frames_vector[badindex]
for badindex in range(bad_frames_vector.shape[0])
if (bad_frames_vector[badindex] >= 0
and bad_frames_vector[badindex] < dimz)]
return bad_frames_vector
def restore_error_settings(old_settings):
"""Restore old floating point error settings of numpy.
"""
np.seterr(divide = old_settings["divide"])
np.seterr(over = old_settings["over"])
np.seterr(under = old_settings["under"])
np.seterr(invalid = old_settings["invalid"])
def get_cfht_odometer(path):
"""Return the odometer of a cfht FITS file from its path."""
return int(os.path.splitext(os.path.split(path.strip())[-1])[0][:-1])
def sort_image_list(file_list, image_mode, cube=True):
"""Sort a list of fits files.
:param file_list: A list of file names
:param image_mode: Image mode, can be 'sitelle' or 'spiomm'.
:param cube: If True, image list is considered as a cube
list. Headers are used to get the right order based on step
number instead of file path (default True).
"""
file_list = [path for path in file_list if
(('.fits' in path) or ('.hdf5' in path))]
if len(file_list) == 0: return None
if image_mode == 'spiomm':
file_list = [path for path in file_list
if not '_bias.fits' in path]
# get all numbers
file_seq = [re.findall("[0-9]+", path)
for path in file_list if
(('.fits' in path) or ('.hdf5' in path))]
try:
file_keys = np.array(file_seq, dtype=int)
except Exception as e:
raise Exception('Malformed sequence of files: {}:\n{}'.format(
e, file_seq))
# get changing column
test = np.sum(file_keys == file_keys[0,:], axis=0)
if np.min(test) > 1:
logging.warning('Images list cannot be safely sorted. Two images at least have the same index')
column_index = np.nan
else:
column_index = np.argmin(test)
# get changing step (if possible)
steplist = list()
if cube:
for path in file_list:
sys.stdout.write('\rreading {}'.format(path))
if '.fits' in path:
try:
hdr = orb.utils.io.read_fits(
path, return_hdu_only=True).header
if 'SITSTEP' in hdr:
steplist.append(int(hdr['SITSTEP']))
except Exception: pass
sys.stdout.write('\n')
if len(steplist) == len(file_list):
_list = list()
for i in range(len(file_list)):
_list.append({'path':file_list[i], 'step':steplist[i]})
_list.sort(key=lambda x: x['step'])
file_list = [_path['path'] for _path in _list]
elif not np.isnan(column_index):
file_list.sort(key=lambda x: float(re.findall("[0-9]+", x)[
column_index]))
else:
raise Exception('Image list cannot be sorted.')
return file_list
def read_instrument_value_from_file(path):
"""Read the instrument value form an hdf5/fits file
:param path: path to an hdf5/fits file.
"""
instrument = None
if 'hdf' in path:
with orb.utils.io.open_hdf5(path, 'r') as f:
if 'instrument' in f.attrs:
instrument = f.attrs['instrument']
elif 'INSTRUME' in f.attrs:
instrument = f.attrs['INSTRUME'].lower()
elif 'fit' in path:
hdu = orb.utils.io.read_fits(path, return_hdu_only=True)
_hdr = hdu.header
if 'INSTRUME' in _hdr:
instrument = _hdr['INSTRUME'].lower()
return instrument
def convert_camera_parameter(param):
"""Convert camera parameter to an integer value
"""
if param == 'MERGED_DATA': return 0
elif '1' in str(param): return 1
elif '2' in str(param): return 2
elif '0' in str(param): return 0
else:
raise ValueError('camera parameter {} not understood'.format(param))
|
thomasorbREPO_NAMEorbPATH_START.@orb_extracted@orb-master@orb@[email protected]@.PATH_END.py
|
{
"filename": "_separatethousands.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergeo/marker/colorbar/_separatethousands.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SeparatethousandsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name="separatethousands",
parent_name="scattergeo.marker.colorbar",
**kwargs,
):
super(SeparatethousandsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@scattergeo@marker@colorbar@[email protected]_END.py
|
{
"filename": "image_array.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/data_objects/image_array.py",
"type": "Python"
}
|
import numpy as np
from unyt import unyt_array
from yt.config import ytcfg
from yt.visualization.image_writer import write_bitmap, write_image
class ImageArray(unyt_array):
r"""A custom Numpy ndarray used for images.
This differs from ndarray in that you can optionally specify an
info dictionary which is used later in saving, and can be accessed with
ImageArray.info.
Parameters
----------
input_array: array_like
A numpy ndarray, or list.
Other Parameters
----------------
info: dictionary
Contains information to be stored with image.
Returns
-------
obj: ImageArray object
Raises
------
None
See Also
--------
numpy.ndarray : Inherits
Notes
-----
References
----------
Examples
--------
These are written in doctest format, and should illustrate how to
use the function. Use the variables 'ds' for the dataset, 'pc' for
a plot collection, 'c' for a center, and 'L' for a vector.
>>> im = np.zeros([64, 128, 3])
>>> for i in range(im.shape[0]):
... for k in range(im.shape[2]):
... im[i, :, k] = np.linspace(0.0, 0.3 * k, im.shape[1])
>>> myinfo = {
... "field": "dinosaurs",
... "east_vector": np.array([1.0, 0.0, 0.0]),
... "north_vector": np.array([0.0, 0.0, 1.0]),
... "normal_vector": np.array([0.0, 1.0, 0.0]),
... "width": 0.245,
... "units": "cm",
... "type": "rendering",
... }
>>> im_arr = ImageArray(im, info=myinfo)
>>> im_arr.save("test_ImageArray")
Numpy ndarray documentation appended:
"""
def __new__(
cls,
input_array,
units=None,
registry=None,
info=None,
bypass_validation=False,
):
obj = super().__new__(
cls, input_array, units, registry, bypass_validation=bypass_validation
)
if info is None:
info = {}
obj.info = info
return obj
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
super().__array_finalize__(obj)
self.info = getattr(obj, "info", None)
def write_hdf5(self, filename, dataset_name=None):
r"""Writes ImageArray to hdf5 file.
Parameters
----------
filename : string
The filename to create and write a dataset to
dataset_name : string
The name of the dataset to create in the file.
Examples
--------
>>> im = np.zeros([64, 128, 3])
>>> for i in range(im.shape[0]):
... for k in range(im.shape[2]):
... im[i, :, k] = np.linspace(0.0, 0.3 * k, im.shape[1])
>>> myinfo = {
... "field": "dinosaurs",
... "east_vector": np.array([1.0, 0.0, 0.0]),
... "north_vector": np.array([0.0, 0.0, 1.0]),
... "normal_vector": np.array([0.0, 1.0, 0.0]),
... "width": 0.245,
... "units": "cm",
... "type": "rendering",
... }
>>> im_arr = ImageArray(im, info=myinfo)
>>> im_arr.write_hdf5("test_ImageArray.h5")
"""
if dataset_name is None:
dataset_name = self.info.get("name", "image")
super().write_hdf5(filename, dataset_name=dataset_name, info=self.info)
def add_background_color(self, background="black", inline=True):
r"""Adds a background color to a 4-channel ImageArray
This adds a background color to a 4-channel ImageArray, by default
doing so inline. The ImageArray must already be normalized to the
[0,1] range.
Parameters
----------
background:
This can be used to set a background color for the image, and can
take several types of values:
* ``white``: white background, opaque
* ``black``: black background, opaque
* ``None``: transparent background
* 4-element array [r,g,b,a]: arbitrary rgba setting.
Default: 'black'
inline : boolean, optional
If True, original ImageArray is modified. If False, a copy is first
created, then modified. Default: True
Returns
-------
out: ImageArray
The modified ImageArray with a background color added.
Examples
--------
>>> im = np.zeros([64, 128, 4])
>>> for i in range(im.shape[0]):
... for k in range(im.shape[2]):
... im[i, :, k] = np.linspace(0.0, 10.0 * k, im.shape[1])
>>> im_arr = ImageArray(im)
>>> im_arr.rescale()
>>> new_im = im_arr.add_background_color([1.0, 0.0, 0.0, 1.0], inline=False)
>>> new_im.write_png("red_bg.png")
>>> im_arr.add_background_color("black")
>>> im_arr.write_png("black_bg.png")
"""
assert self.shape[-1] == 4
if background is None:
background = (0.0, 0.0, 0.0, 0.0)
elif background == "white":
background = (1.0, 1.0, 1.0, 1.0)
elif background == "black":
background = (0.0, 0.0, 0.0, 1.0)
# Alpha blending to background
if inline:
out = self
else:
out = self.copy()
for i in range(3):
out[:, :, i] = self[:, :, i] * self[:, :, 3]
out[:, :, i] += background[i] * background[3] * (1.0 - self[:, :, 3])
out[:, :, 3] = self[:, :, 3] + background[3] * (1.0 - self[:, :, 3])
return out
def rescale(self, cmax=None, amax=None, inline=True):
r"""Rescales the image to be in [0,1] range.
Parameters
----------
cmax : float, optional
Normalization value to use for rgb channels. Defaults to None,
corresponding to using the maximum value in the rgb channels.
amax : float, optional
Normalization value to use for alpha channel. Defaults to None,
corresponding to using the maximum value in the alpha channel.
inline : boolean, optional
Specifies whether or not the rescaling is done inline. If false,
a new copy of the ImageArray will be created, returned.
Default:True.
Returns
-------
out: ImageArray
The rescaled ImageArray, clipped to the [0,1] range.
Notes
-----
This requires that the shape of the ImageArray to have a length of 3,
and for the third dimension to be >= 3. If the third dimension has
a shape of 4, the alpha channel will also be rescaled.
Examples
--------
>>> im = np.zeros([64, 128, 4])
>>> for i in range(im.shape[0]):
... for k in range(im.shape[2]):
... im[i, :, k] = np.linspace(0.0, 0.3 * k, im.shape[1])
>>> im = ImageArray(im)
>>> im.write_png("original.png")
>>> im.rescale()
>>> im.write_png("normalized.png")
"""
assert len(self.shape) == 3
assert self.shape[2] >= 3
if inline:
out = self
else:
out = self.copy()
if cmax is None:
cmax = self[:, :, :3].sum(axis=2).max()
if cmax > 0.0:
np.multiply(self[:, :, :3], 1.0 / cmax, out[:, :, :3])
if self.shape[2] == 4:
if amax is None:
amax = self[:, :, 3].max()
if amax > 0.0:
np.multiply(self[:, :, 3], 1.0 / amax, out[:, :, 3])
np.clip(out, 0.0, 1.0, out)
return out
def write_png(
self,
filename,
sigma_clip=None,
background="black",
rescale=True,
):
r"""Writes ImageArray to png file.
Parameters
----------
filename : string
Filename to save to. If None, PNG contents will be returned as a
string.
sigma_clip : float, optional
Image will be clipped before saving to the standard deviation
of the image multiplied by this value. Useful for enhancing
images. Default: None
background:
This can be used to set a background color for the image, and can
take several types of values:
* ``white``: white background, opaque
* ``black``: black background, opaque
* ``None``: transparent background
* 4-element array [r,g,b,a]: arbitrary rgba setting.
Default: 'black'
rescale : boolean, optional
If True, will write out a rescaled image (without modifying the
original image). Default: True
Examples
--------
>>> im = np.zeros([64, 128, 4])
>>> for i in range(im.shape[0]):
... for k in range(im.shape[2]):
... im[i, :, k] = np.linspace(0.0, 10.0 * k, im.shape[1])
>>> im_arr = ImageArray(im)
>>> im_arr.write_png("standard.png")
>>> im_arr.write_png("non-scaled.png", rescale=False)
>>> im_arr.write_png("black_bg.png", background="black")
>>> im_arr.write_png("white_bg.png", background="white")
>>> im_arr.write_png("green_bg.png", background=[0, 1, 0, 1])
>>> im_arr.write_png("transparent_bg.png", background=None)
"""
if rescale:
scaled = self.rescale(inline=False)
else:
scaled = self
if self.shape[-1] == 4:
out = scaled.add_background_color(background, inline=False)
else:
out = scaled
if filename is not None and filename[-4:] != ".png":
filename += ".png"
if sigma_clip is not None:
clip_value = self._clipping_value(sigma_clip, im=out)
return write_bitmap(out.swapaxes(0, 1), filename, clip_value)
else:
return write_bitmap(out.swapaxes(0, 1), filename)
def write_image(
self,
filename,
color_bounds=None,
channel=None,
cmap_name=None,
func=lambda x: x,
):
r"""Writes a single channel of the ImageArray to a png file.
Parameters
----------
filename : string
Note filename not be modified.
Other Parameters
----------------
channel: int
Which channel to write out as an image. Defaults to 0
cmap_name: string
Name of the colormap to be used.
color_bounds : tuple of floats, optional
The min and max to scale between. Outlying values will be clipped.
cmap_name : string, optional
An acceptable colormap. See either yt.visualization.color_maps or
https://scipy-cookbook.readthedocs.io/items/Matplotlib_Show_colormaps.html .
func : function, optional
A function to transform the buffer before applying a colormap.
Returns
-------
scaled_image : uint8 image that has been saved
Examples
--------
>>> im = np.zeros([64, 128])
>>> for i in range(im.shape[0]):
... im[i, :] = np.linspace(0.0, 0.3 * i, im.shape[1])
>>> myinfo = {
... "field": "dinosaurs",
... "east_vector": np.array([1.0, 0.0, 0.0]),
... "north_vector": np.array([0.0, 0.0, 1.0]),
... "normal_vector": np.array([0.0, 1.0, 0.0]),
... "width": 0.245,
... "units": "cm",
... "type": "rendering",
... }
>>> im_arr = ImageArray(im, info=myinfo)
>>> im_arr.write_image("test_ImageArray.png")
"""
if cmap_name is None:
cmap_name = ytcfg.get("yt", "default_colormap")
if filename is not None and filename[-4:] != ".png":
filename += ".png"
# TODO: Write info dict as png metadata
if channel is None:
return write_image(
self.swapaxes(0, 1).to_ndarray(),
filename,
color_bounds=color_bounds,
cmap_name=cmap_name,
func=func,
)
else:
return write_image(
self.swapaxes(0, 1)[:, :, channel].to_ndarray(),
filename,
color_bounds=color_bounds,
cmap_name=cmap_name,
func=func,
)
def save(self, filename, png=True, hdf5=True, dataset_name=None):
"""
Saves ImageArray.
Arguments:
filename: string
This should not contain the extension type (.png, .h5, ...)
Optional Arguments:
png: boolean, default True
Save to a png
hdf5: boolean, default True
Save to hdf5 file, including info dictionary as attributes.
"""
if png:
if not filename.endswith(".png"):
filename = filename + ".png"
if len(self.shape) > 2:
self.write_png(filename)
else:
self.write_image(filename)
if hdf5:
if not filename.endswith(".h5"):
filename = filename + ".h5"
self.write_hdf5(filename, dataset_name)
def _clipping_value(self, sigma_clip, im=None):
# return the max value to clip with given a sigma_clip value. If im
# is None, the current instance is used
if im is None:
im = self
nz = im[:, :, :3][im[:, :, :3].nonzero()]
return nz.mean() + sigma_clip * nz.std()
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@data_objects@[email protected]_END.py
|
{
"filename": "utils.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/gammapy/visualization/utils.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging as log
import numpy as np
from scipy.interpolate import CubicSpline
from scipy.optimize import curve_fit
from scipy.stats import norm
from astropy.visualization import make_lupton_rgb
import matplotlib.axes as maxes
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
__all__ = [
"add_colorbar",
"plot_contour_line",
"plot_map_rgb",
"plot_theta_squared_table",
"plot_distribution",
]
ARTIST_TO_LINE_PROPERTIES = {
"color": "markeredgecolor",
"edgecolor": "markeredgecolor",
"ec": "markeredgecolor",
"facecolor": "markerfacecolor",
"fc": "markerfacecolor",
"linewidth": "markeredgewidth",
"lw": "markeredgewidth",
}
def add_colorbar(img, ax, axes_loc=None, **kwargs):
"""
Add colorbar to a given axis.
Parameters
----------
img : `~matplotlib.image.AxesImage`
The image to plot the colorbar for.
ax : `~matplotlib.axes.Axes`
Matplotlib axes.
axes_loc : dict, optional
Keyword arguments passed to `~mpl_toolkits.axes_grid1.axes_divider.AxesDivider.append_axes`.
kwargs : dict, optional
Keyword arguments passed to `~matplotlib.pyplot.colorbar`.
Returns
-------
cbar : `~matplotlib.pyplot.colorbar`
The colorbar.
Examples
--------
.. testcode::
from gammapy.maps import Map
from gammapy.visualization import add_colorbar
import matplotlib.pyplot as plt
map_ = Map.read("$GAMMAPY_DATA/cta-1dc-gc/cta-1dc-gc.fits.gz")
axes_loc = {"position": "right", "size": "2%", "pad": "10%"}
kwargs_colorbar = {'label':'Colorbar label'}
# Example outside gammapy
fig = plt.figure(figsize=(6, 3))
ax = fig.add_subplot(111)
img = ax.imshow(map_.sum_over_axes().data[0,:,:])
add_colorbar(img, ax=ax, axes_loc=axes_loc, **kwargs_colorbar)
# `add_colorbar` is available for the `plot` function here:
fig = plt.figure(figsize=(6, 3))
ax = fig.add_subplot(111)
map_.sum_over_axes().plot(ax=ax, add_cbar=True, axes_loc=axes_loc,
kwargs_colorbar=kwargs_colorbar) # doctest: +SKIP
"""
kwargs.setdefault("use_gridspec", True)
kwargs.setdefault("orientation", "vertical")
axes_loc = axes_loc or {}
axes_loc.setdefault("position", "right")
axes_loc.setdefault("size", "5%")
axes_loc.setdefault("pad", "2%")
axes_loc.setdefault("axes_class", maxes.Axes)
divider = make_axes_locatable(ax)
cax = divider.append_axes(**axes_loc)
cbar = plt.colorbar(img, cax=cax, **kwargs)
return cbar
def plot_map_rgb(map_, ax=None, **kwargs):
"""
Plot RGB image on matplotlib WCS axes.
This function is based on the `~astropy.visualization.make_lupton_rgb` function. The input map must
contain 1 non-spatial axis with exactly 3 bins. If this is not the case, the map has to be resampled
before using the `plot_map_rgb` function (e.g. as shown in the code example below).
Parameters
----------
map_ : `~gammapy.maps.WcsNDMap`
WCS map. The map must contain 1 non-spatial axis with exactly 3 bins.
ax : `~astropy.visualization.wcsaxes.WCSAxes`, optional
WCS axis object to plot on.
**kwargs : dict
Keyword arguments passed to `~astropy.visualization.make_lupton_rgb`.
Returns
-------
ax : `~astropy.visualization.wcsaxes.WCSAxes`
WCS axis object.
Examples
--------
>>> from gammapy.visualization import plot_map_rgb
>>> from gammapy.maps import Map, MapAxis
>>> import astropy.units as u
>>> map_ = Map.read("$GAMMAPY_DATA/cta-1dc-gc/cta-1dc-gc.fits.gz")
>>> axis_rgb = MapAxis.from_energy_edges(
... [0.1, 0.2, 0.5, 10], unit=u.TeV, name="energy", interp="log"
... )
>>> map_ = map_.resample_axis(axis_rgb)
>>> kwargs = {"stretch": 0.5, "Q": 1, "minimum": 0.15}
>>> plot_map_rgb(map_.smooth(0.08*u.deg), **kwargs) #doctest: +SKIP
"""
geom = map_.geom
if len(geom.axes) != 1 or geom.axes[0].nbin != 3:
raise ValueError(
"One non-spatial axis with exactly 3 bins is needed to plot an RGB image"
)
data = [data_slice / np.nanmax(data_slice.flatten()) for data_slice in map_.data]
data = make_lupton_rgb(*data, **kwargs)
ax = map_._plot_default_axes(ax=ax)
ax.imshow(data)
if geom.is_allsky:
ax = map_._plot_format_allsky(ax)
else:
ax = map_._plot_format(ax)
# without this the axis limits are changed when calling scatter
ax.autoscale(enable=False)
return ax
def plot_contour_line(ax, x, y, **kwargs):
"""Plot smooth curve from contour points."""
xf = x
yf = y
# close contour
if not (x[0] == x[-1] and y[0] == y[-1]):
xf = np.append(x, x[0])
yf = np.append(y, y[0])
# curve parametrization must be strictly increasing
# so we use the cumulative distance of each point from the first one
dist = np.sqrt(np.diff(xf) ** 2.0 + np.diff(yf) ** 2.0)
dist = [0] + list(dist)
t = np.cumsum(dist)
ts = np.linspace(0, t[-1], 50)
# 1D cubic spline interpolation
cs = CubicSpline(t, np.c_[xf, yf], bc_type="periodic")
out = cs(ts)
# plot
if "marker" in kwargs.keys():
marker = kwargs.pop("marker")
else:
marker = "+"
if "color" in kwargs.keys():
color = kwargs.pop("color")
else:
color = "b"
ax.plot(out[:, 0], out[:, 1], "-", color=color, **kwargs)
ax.plot(xf, yf, linestyle="", marker=marker, color=color)
def plot_theta_squared_table(table):
"""Plot the theta2 distribution of counts, excess and significance.
Take the table containing the ON counts, the OFF counts, the acceptance,
the off acceptance and the alpha (normalisation between ON and OFF)
for each theta2 bin.
Parameters
----------
table : `~astropy.table.Table`
Required columns: theta2_min, theta2_max, counts, counts_off and alpha
"""
from gammapy.maps import MapAxis
from gammapy.maps.axes import UNIT_STRING_FORMAT
from gammapy.maps.utils import edges_from_lo_hi
theta2_edges = edges_from_lo_hi(
table["theta2_min"].quantity, table["theta2_max"].quantity
)
theta2_axis = MapAxis.from_edges(theta2_edges, interp="lin", name="theta_squared")
ax0 = plt.subplot(2, 1, 1)
x = theta2_axis.center.value
x_edges = theta2_axis.edges.value
xerr = (x - x_edges[:-1], x_edges[1:] - x)
ax0.errorbar(
x,
table["counts"],
xerr=xerr,
yerr=np.sqrt(table["counts"]),
linestyle="None",
label="Counts",
)
ax0.errorbar(
x,
table["counts_off"],
xerr=xerr,
yerr=np.sqrt(table["counts_off"]),
linestyle="None",
label="Counts Off",
)
ax0.errorbar(
x,
table["excess"],
xerr=xerr,
yerr=(table["excess_errn"], table["excess_errp"]),
fmt="+",
linestyle="None",
label="Excess",
)
ax0.set_ylabel("Counts")
ax0.set_xticks([])
ax0.set_xlabel("")
ax0.legend()
ax1 = plt.subplot(2, 1, 2)
ax1.errorbar(x, table["sqrt_ts"], xerr=xerr, linestyle="None")
ax1.set_xlabel(f"Theta [{theta2_axis.unit.to_string(UNIT_STRING_FORMAT)}]")
ax1.set_ylabel("Significance")
def plot_distribution(
wcs_map,
ax=None,
ncols=3,
func=None,
kwargs_hist=None,
kwargs_axes=None,
kwargs_fit=None,
):
"""
Plot the 1D distribution of data inside a map as an histogram. If the dimension of the map is smaller than 2,
a unique plot will be displayed. Otherwise, if the dimension is 3 or greater, a grid of plot will be displayed.
Parameters
----------
wcs_map : `~gammapy.maps.WcsNDMap`
A map that contains data to be plotted.
ax : `~matplotlib.axes.Axes` or list of `~matplotlib.axes.Axes`
Axis object to plot on. If a list of Axis is provided it has to be the same length as the length of _map.data.
ncols : int
Number of columns to plot if a "plot grid" was to be done.
func : function object or str
The function used to fit a map data histogram or "norm". Default is None.
If None, no fit will be performed. If "norm" is given, `scipy.stats.norm.pdf`
will be passed to `scipy.optimize.curve_fit`.
kwargs_hist : dict
Keyword arguments to pass to `matplotlib.pyplot.hist`.
kwargs_axes : dict
Keyword arguments to pass to `matplotlib.axes.Axes`.
kwargs_fit : dict
Keyword arguments to pass to `scipy.optimize.curve_fit`
Returns
-------
axes : `~numpy.ndarray` of `~matplotlib.pyplot.Axes`
Array of Axes.
result_list : list of dict
List of dictionnary that contains the results of `scipy.optimize.curve_fit`. The number of elements in the list
correspond to the dimension of the non-spatial axis of the map.
The dictionnary contains:
* `axis_edges` : the edges of the non-spatial axis bin used
* `param` : the best-fit parameters of the input function `func`
* `covar` : the covariance matrix for the fitted parameters `param`
* `info_dict` : the `infodict` return of `scipy.optimize.curve_fit`
Examples
--------
>>> from gammapy.datasets import MapDataset
>>> from gammapy.estimators import TSMapEstimator
>>> from scipy.stats import norm
>>> from gammapy.visualization import plot_distribution
>>> dataset = MapDataset.read("$GAMMAPY_DATA/cta-1dc-gc/cta-1dc-gc.fits.gz")
>>> tsmap_est = TSMapEstimator().run(dataset)
>>> axs, res = plot_distribution(tsmap_est.sqrt_ts, func="norm", kwargs_hist={'bins': 75, 'range': (-10, 10), 'density': True})
>>> # Equivalently, one can do the following:
>>> func = lambda x, mu, sig : norm.pdf(x, loc=mu, scale=sig)
>>> axs, res = plot_distribution(tsmap_est.sqrt_ts, func=func, kwargs_hist={'bins': 75, 'range': (-10, 10), 'density': True})
"""
from gammapy.maps import WcsNDMap # import here to avoid circular import
if not isinstance(wcs_map, WcsNDMap):
raise TypeError(
f"map_ must be an instance of gammapy.maps.WcsNDMap, given {type(wcs_map)}"
)
kwargs_hist = kwargs_hist or {}
kwargs_axes = kwargs_axes or {}
kwargs_fit = kwargs_fit or {}
kwargs_hist.setdefault("density", True)
kwargs_fit.setdefault("full_output", True)
cutout, mask = wcs_map.cutout_and_mask_region()
idx_x, idx_y = np.where(mask)
data = cutout.data[..., idx_x, idx_y]
if ax is None:
n_plot = len(data)
cols = min(ncols, n_plot)
rows = 1 + (n_plot - 1) // cols
width = 12
figsize = (width, width * rows / cols)
fig, axes = plt.subplots(
nrows=rows,
ncols=cols,
figsize=figsize,
)
cells_in_grid = rows * cols
else:
axes = np.array([ax])
cells_in_grid = axes.size
if not isinstance(axes, np.ndarray):
axes = np.array([axes])
result_list = []
for idx in range(cells_in_grid):
axe = axes.flat[idx]
if idx > len(data) - 1:
axe.set_visible(False)
continue
d = data[idx][np.isfinite(data[idx])]
n, bins, _ = axe.hist(d, **kwargs_hist)
if func is not None:
kwargs_plot_fit = {"label": "Fit"}
centers = 0.5 * (bins[1:] + bins[:-1])
if func == "norm":
def func_to_fit(x, mu, sigma):
return norm.pdf(x, mu, sigma)
pars, cov, infodict, message, _ = curve_fit(
func_to_fit, centers, n, **kwargs_fit
)
mu, sig = pars[0], pars[1]
err_mu, err_sig = np.sqrt(cov[0][0]), np.sqrt(cov[1][1])
label_norm = (
r"$\mu$ = {:.2f} ± {:.2E}\n$\sigma$ = {:.2f} ± {:.2E}".format(
mu, err_mu, sig, err_sig
)
).replace(r"\n", "\n")
kwargs_plot_fit["label"] = label_norm
else:
func_to_fit = func
pars, cov, infodict, message, _ = curve_fit(
func_to_fit, centers, n, **kwargs_fit
)
axis_edges = (
wcs_map.geom.axes[-1].edges[idx],
wcs_map.geom.axes[-1].edges[idx + 1],
)
result_dict = {
"axis_edges": axis_edges,
"param": pars,
"covar": cov,
"info_dict": infodict,
}
result_list.append(result_dict)
log.info(message)
xmin, xmax = kwargs_hist.get("range", (np.min(d), np.max(d)))
x = np.linspace(xmin, xmax, 1000)
axe.plot(x, func_to_fit(x, *pars), lw=2, color="black", **kwargs_plot_fit)
axe.set(**kwargs_axes)
axe.legend()
return axes, result_list
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@gammapy@[email protected]@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "projectchrono/chrono",
"repo_path": "chrono_extracted/chrono-main/data/vehicle/M113_RS/README.md",
"type": "Markdown"
}
|
----------------------------------------------------
Test M113 variant with REAR sprocket and FRONT idler
----------------------------------------------------
The data for the M113 model implemented in these JSON files was obtained from the following publicly available sources:
- M. Wallin, A.K. Aboubakr, P. Jayakumar, M.D. Letherwood, D.J. Gorsich, A. Hamed, and A.A. Shabana, "A comparative study of joint formulations: Application to multibody system tracked vehicles," Nonlinear Dynamics 74(3), 2013.
- M. Wallin, A.K. Aboubakr, P. Jayakumar, M.D. Letherwood, A. Hamed, and A.A. Shabana, "Chain Dynamic Formulations for Multibody System Tracked Vehicles" (Preprint), University of Illinois at Chicago, 2013.
www.dtic.mil/cgi-bin/GetTRDoc?AD=ADA566761 ,
www.dtic.mil/dtic/tr/fulltext/u2/a566266.pdf
- Manuel F.R. Afonso, "Ride Dynamic Analysis of Tracked Vehicles," Master Thesis, Concordia University, Montreal, Quebec, Canada, 1989.
- R.H. Keays, "Analysis of Armoured-Vehicle Track Loads and Stresses, with Considerations on Alternative Track Materials," MRL General Document, MRL-GD-0022, 1988.
http://www.dtic.mil/dtic/tr/fulltext/u2/a219397.pdf
- Peter Blume and Clemens Niesner, "M113 in der Bundeswehr - Teil 1 (M113 in the Modern German Army - Part 1)," Publication No. 5032, Verlag Jochen Vollert - Tankograd Publishing, Erlangen, Germany, 2011.
https://www.tankograd.com/cms/website.php?id=/en/M-113-in-der-Bundeswehr-Teil-1.htm
|
projectchronoREPO_NAMEchronoPATH_START.@chrono_extracted@chrono-main@data@vehicle@[email protected]@.PATH_END.py
|
{
"filename": "mms_tai2unix.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/mms/mms_tai2unix.py",
"type": "Python"
}
|
import datetime
import numpy as np
from pyspedas.utilities.leap_seconds import load_leap_table
def mms_tai2unix(values):
"""
Converts MMS timestamps in TAI to unix timestamps
Based on Mitsuo Oka's IDL code with the same name.
Input
---------
values: float, list of floats or np.ndarray
Time values in TAI
Returns
---------
Array of time values as unix times
"""
if not isinstance(values, list) and not isinstance(values, np.ndarray):
values = [values]
table = load_leap_table()
tai_minus_unix = 378691200.0
juls = np.array(table['juls'])
values_juls = np.array(values)/86400.0 + datetime.date(1958, 1, 1).toordinal() + 1721424.5
out = np.zeros(len(values))
for idx, value in enumerate(values_juls):
loc_greater = np.argwhere(value > juls).flatten()
if len(loc_greater) == 0:
continue
last_loc = loc_greater[len(loc_greater)-1]
current_leap = float(table['leaps'][last_loc])
tinput_1970 = values[idx] - tai_minus_unix
out[idx] = tinput_1970 - current_leap
return out
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@mms@[email protected]_END.py
|
{
"filename": "lightcone.ipynb",
"repo_name": "sambit-giri/tools21cm",
"repo_path": "tools21cm_extracted/tools21cm-master/docs/examples/lightcone.ipynb",
"type": "Jupyter Notebook"
}
|
# Light-cone construction
```python
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import tools21cm as t2c
import warnings
warnings.filterwarnings("ignore")
```
Here we create a fake dataset of coeval cubes which consist of three growing spherical HII regions. The radius is defined using the following form:
$$r = 20~e^{-(z-7)/3}$$
```python
zs_set = np.arange(7,12.1,0.2)
r_hii = lambda x: 30*np.exp(-(x-7.)/3)
plt.plot(zs_set, r_hii(zs_set))
```
[<matplotlib.lines.Line2D at 0x7fb6143ee110>]

```python
def fake_simulator(ncells, z):
cube = np.zeros((ncells,ncells,ncells))
xx, yy, zz = np.meshgrid(np.arange(ncells), np.arange(ncells), np.arange(ncells), sparse=True)
# rr2 = xx**2+yy**2+zz**2
r = r_hii(z)
r2 = (xx-ncells/2)**2+(yy-ncells/2)**2+(zz-ncells/2)**2
xx0, yy0, zz0 = int(ncells/2), int(ncells/2), int(ncells/2)
cube0 = np.zeros((ncells,ncells,ncells))
cube0[r2<=r**2] = 1
cube0 = np.roll(np.roll(np.roll(cube0, -xx0, axis=0), -yy0, axis=1), -zz0, axis=2)
# Bubble 1
xx1, yy1, zz1 = int(ncells/2), int(ncells/2), int(ncells/2)
cube = cube+np.roll(np.roll(np.roll(cube0, xx1, axis=0), yy1, axis=1), zz1, axis=2)
# Bubble 2
xx2, yy2, zz2 = int(ncells/2), int(ncells/4), int(ncells/16)
cube = cube+np.roll(np.roll(np.roll(cube0, xx2, axis=0), yy2, axis=1), zz2, axis=2)
# Bubble 3
xx3, yy3, zz3 = int(ncells/2+10), int(-ncells/4), int(-ncells/32)
cube = cube+np.roll(np.roll(np.roll(cube0, xx3, axis=0), yy3, axis=1), zz3, axis=2)
return cube
```
Visualizing the fake coeval cubes of growing HII regions.
```python
z0 = 9; c0 = fake_simulator(200,z0)
z1 = 8; c1 = fake_simulator(200,z1)
z2 = 7; c2 = fake_simulator(200,z2)
fig, axs = plt.subplots(1,3, figsize=(14, 5))
axs[0].imshow(c0[100,:,:], origin='lower', cmap='jet')
axs[0].set_title('z={}'.format(z0))
axs[1].imshow(c1[100,:,:], origin='lower', cmap='jet')
axs[1].set_title('z={}'.format(z1))
axs[2].imshow(c2[100,:,:], origin='lower', cmap='jet')
axs[2].set_title('z={}'.format(z2))
```
Text(0.5, 1.0, 'z=7')

```python
zs_set = np.arange(7,12.1,0.2)
coeval_set = {}
for zi in tqdm(zs_set):
coeval_set['{:.2f}'.format(zi)] = fake_simulator(200,zi)
```
100%|██████████| 26/26 [00:07<00:00, 3.29it/s]
### Preparing for light-cone construction
To construct light-cones, one can use the `make_lightcone` function. The parameter `filenames` takes a list such that each element from this list can be provided to the `reading_function` to retrieve the coeval cubes for the corresponding redshift (specified in a list by the parameter `file_redshifts`).
```python
def reading_function(name):
return coeval_set[name]
```
```python
filenames = ['{:.2f}'.format(zi) for zi in zs_set]
file_redshifts = zs_set
xf_lc, zs_lc = t2c.make_lightcone(
filenames,
z_low=None,
z_high=None,
file_redshifts=file_redshifts,
cbin_bits=32,
cbin_order='c',
los_axis=2,
raw_density=False,
interpolation='linear',
reading_function=reading_function,
box_length_mpc=200,
)
```
Making lightcone between 7.000000 < z < 11.999359
100%|██████████| 1256/1256 [00:02<00:00, 605.08it/s]
...done
Visualizing the constructed light-cone.
```python
xi = np.array([zs_lc for i in range(xf_lc.shape[1])])
yi = np.array([np.linspace(0,200,xf_lc.shape[1]) for i in range(xi.shape[1])]).T
zj = (xf_lc[100,1:,1:]+xf_lc[100,1:,:-1]+xf_lc[100,:-1,1:]+xf_lc[100,:-1,:-1])/4
fig, axs = plt.subplots(1,1, figsize=(14, 5))
im = axs.pcolor(xi, yi, zj, cmap='jet')
axs.set_xlabel('z', fontsize=18)
axs.set_ylabel('L (cMpc)', fontsize=18)
# axs.set_xticks(np.arange(6.5,13,1))
# axs.set_yticks(np.arange(0,350,100))
fig.subplots_adjust(bottom=0.11, right=0.91, top=0.95, left=0.06)
cax = plt.axes([0.92, 0.15, 0.02, 0.75])
fig.colorbar(im,cax=cax)
#plt.tight_layout()
plt.show()
```

```python
```
|
sambit-giriREPO_NAMEtools21cmPATH_START.@tools21cm_extracted@tools21cm-master@docs@[email protected]@.PATH_END.py
|
{
"filename": "analysis.py",
"repo_name": "dynamics-of-stellar-systems/dynamite",
"repo_path": "dynamite_extracted/dynamite-master/dynamite/analysis.py",
"type": "Python"
}
|
import os
import logging
import numpy as np
import matplotlib.pyplot as plt
from plotbin.display_pixels import display_pixels
import cmasher as cmr
import astropy
import dynamite as dyn
class Decomposition:
"""
Class for decomposition.
Upon instatiating, the orbits are decomposed by the method
``decompose_orbits`` and the results stored in astropy table
``self.decomp``. The components' flux and moments (currently mean velocity
and velocity dispersion only) are plotted by calling ``self.plot_decomp``
which also writes the plotted data into the model directory.
The methodology in this class has been contributed by Ling Zhu and
Giulia Santucci. Please cite Zhu+18, MNRAS 473, 3000 and
Santucci+22, ApJ 930, 153 if used.
Parameters
----------
config : a ``dyn.config_reader.Configuration`` object, mandatory
model : a ``dyn.model.Model`` object, optional
Determines which model is used.
If model = None, the model corresponding to the minimum
chisquare (so far) is used; the setting in the configuration
file's parameter settings is used to determine which chisquare
to consider. The default is None.
kin_set : int, optional
Determines which kinematic set to use.
The value of this parameter is the index of the data
set (e.g. kin_set=0, kin_set=1). The default is 0.
Raises
------
ValueError
if no config object is given or the kin_set does not exist.
"""
def __init__(self, config=None, model=None, kin_set=0):
self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')
if config is None:
text = f'{__class__.__name__} needs configuration object, ' \
'None provided.'
self.logger.error(text)
raise ValueError(text)
self.config = config
if model is None:
best_model_idx = config.all_models.get_best_n_models_idx(n=1)[0]
self.model = config.all_models.get_model_from_row(best_model_idx)
stars = \
config.system.get_component_from_class(
dyn.physical_system.TriaxialVisibleComponent)
n_kin = len(stars.kinematic_data)
if kin_set >= n_kin:
text = f'kin_set must be < {n_kin}, but it is {kin_set}'
self.logger.error(text)
raise ValueError(text)
self.kin_set = kin_set
self.logger.info(f'Performing decomposition for kin_set no {kin_set}: '
f'{stars.kinematic_data[kin_set].name}')
# Get losvd_histograms and projected_masses
self.orblib = self.model.get_orblib()
self.orblib.read_losvd_histograms()
self.losvd_histograms = self.orblib.losvd_histograms[self.kin_set]
self.proj_mass = self.orblib.projected_masses[self.kin_set]
self.logger.debug(f'{self.losvd_histograms.y.shape=}, '
f'{self.proj_mass.shape=}.')
# Get orbit weights and store them in self.model.weights
_ = self.model.get_weights(self.orblib)
# Do the decomposition
self.decomp = self.decompose_orbits()
# self.losvd_histograms, self.proj_mass, self.decomp = self.run_dec()
self.logger.info('Orbits read and velocity histogram created.')
def plot_decomp(self, xlim, ylim, v_sigma_option='fit'):
""" Generate decomposition plots.
Parameters
----------
xlim : float
restricts plot x-coordinates to abs(x) <= xlim.
ylim : float
restricts plot y-coordinates to abs(y) <= ylim.
v_sigma_option : str, optional
If 'fit', v_mean and v_sigma are calculated based on fitting
Gaussians, if 'moments', v_mean and v_sigma are calculated
directly from the model's losvd histograms. The default is 'fit'.
Returns
-------
None.
"""
comp_kinem_moments = self.comps_aphist(v_sigma_option)
self.logger.info('Component data done.')
self.plot_comps(xlim=xlim, ylim=ylim,
comp_kinem_moments=comp_kinem_moments)
self.logger.info('Plots done.')
def comps_aphist(self, v_sigma_option='fit'):
"""Calculate components' flux, mean velocity, and velocity dispersion.
Parameters
----------
v_sigma_option : str, optional
If 'fit', v_mean and v_sigma are calculated based on fitting
Gaussians, if 'moments', v_mean and v_sigma are calculated
directly from the model's losvd histograms. The default is 'fit'.
Raises
------
ValueError
if v_sigma_option is neither 'moments' nor 'fit'.
Returns
-------
comp_flux_v_sigma : astropy table
The table columns are: aperture index (starting with 0), followed
by three columns per component holding the flux, mean velocity,
and velocity dispersion.
The chosen v_sigma_option is in the table meta data.
"""
v_sigma_options = ['moments', 'fit']
if v_sigma_option not in v_sigma_options:
text = f'Unknown v_sigma_option {v_sigma_option}, ' \
f'must be one of {v_sigma_options}.'
self.logger.error(text)
raise ValueError(text)
self.logger.info('Calculating flux, v, and sigma for components '
f'{self.decomp.meta["comps"]}, {v_sigma_option=}.')
comp_flux_v_sigma = astropy.table.Table(
{'ap_id':range(self.losvd_histograms.y.shape[-1])},
dtype=[int],
meta={'v_sigma_option':v_sigma_option})
for comp in self.decomp.meta['comps']:
self.logger.info(f'Component {comp}...')
# calculate flux and losvd histograms for component
orb_sel = np.array([comp in s for s in self.decomp['component']],
dtype=bool)
flux=np.dot(self.proj_mass[orb_sel].T, self.model.weights[orb_sel])
losvd = np.dot(self.losvd_histograms.y[orb_sel,:,:].T,
self.model.weights[orb_sel]).T
losvd = losvd[np.newaxis]
self.logger.debug(f'{comp}: {np.count_nonzero(orb_sel)} orbits, '
f'{flux.shape=}, {losvd.shape=}.')
losvd_hist = dyn.kinematics.Histogram(self.losvd_histograms.xedg,
y=losvd,
normalise=False)
if v_sigma_option == 'moments':
v_mean = np.squeeze(losvd_hist.get_mean())
v_sigma = np.squeeze(losvd_hist.get_sigma())
elif v_sigma_option == 'fit':
v_mean, v_sigma = losvd_hist.get_mean_sigma_gaussfit()
v_mean = np.squeeze(v_mean)
v_sigma = np.squeeze(v_sigma)
else:
pass
comp_flux_v_sigma.add_columns([flux, v_mean, v_sigma],
names=[f'{comp}_lsb',
f'{comp}_v',
f'{comp}_sig'])
return comp_flux_v_sigma
def decompose_orbits(self, ocut=None):
"""Decompose orbits based on lambda_z.
Parameters
----------
ocut : list of floats, optional
The cuts in lambda_z. The default is None, which translates to
ocut=[0.8, 0.25, -0.25], the selection in lambda_z
following Santucci+22.
Returns
-------
decomp : astropy table
The table has two columns, ``id`` and ``component``. The former is
the orbit id (starting with 0), ``component`` is a string
describing the component(s) an orbit belongs to. Note that an
orbit can belong to multiple components. In that case, the
component strings are concatenated. For easier parsing later, the
component descriptors are surrounded by pipe symbols ``|``.
The table's meta data ``comps`` holds a list of all components.
"""
if ocut is None:
ocut = [0.8, 0.25, -0.25]
self.logger.debug(f'Cut lines are: {ocut}.')
file2 = self.model.directory_noml + 'datfil/orblib.dat_orbclass.out'
file3 = self.model.directory_noml + 'datfil/orblibbox.dat_orbclass.out'
file3_test = os.path.isfile(file3)
if not file3_test:
file3 = '%s' % file2
n_orb = self.config.settings.orblib_settings['nE'] * \
self.config.settings.orblib_settings['nI2'] * \
self.config.settings.orblib_settings['nI3']
n_dither = self.config.settings.orblib_settings['dithering']
conversion_factor=self.config.all_models.system.distMPc*1.0e6*1.49598e8
ncol = n_dither ** 3
orbclass1=self.orblib.read_orbit_property_file_base(file2, ncol, n_orb)
orbclass2=self.orblib.read_orbit_property_file_base(file3, ncol, n_orb)
orbw = self.model.weights
n_orbs = len(orbw)
self.logger.debug(f'{n_orb=}, {n_orbs=}.')
orbclass = np.dstack((orbclass1, orbclass1, orbclass2))
self.logger.debug(f'{len(orbclass) = }.')
orbclass1a = np.copy(orbclass1)
orbclass1a[0:3, :, :] *= -1 # the reverse rotating orbits of orbclass
for i in range(n_orb):
orbclass[:, :, i * 2] = orbclass1[:, :, i]
orbclass[:, :, i * 2 + 1] = orbclass1a[:, :, i]
## define circularity of each orbit [nditcher^3, n_orb]
lz = (orbclass[2, :, :] / orbclass[3, :, :] / np.sqrt(orbclass[4, :, :]))
# Average values for the orbits in the same bundle (n_dither^3).
# Only include the orbits within Rmax_arcs
rm = np.sum(orbclass[3, :, :]/conversion_factor, axis=0) / n_dither**3
# flip the sign of lz to confirm total(lz) > 0
t = np.ravel(np.argsort(rm))
yy = np.max(np.ravel(np.where(np.cumsum(orbw[t]) <= 0.5)))
k = t[0:yy]
if np.sum(np.sum(lz[:, k], axis=0) / (n_dither ** 3) * orbw[k]) < 0:
lz *= -1.0
lzm_sign= np.sum(lz, axis=0) / n_dither ** 3
comps=['thin_d', 'warm_d', 'disk', 'bulge', 'all']
self.logger.info(f'Decomposing {n_orbs} orbits into {comps=}...')
decomp = astropy.table.Table({'id':range(n_orbs),
'component':['']*n_orbs},
dtype=[int, 'U256'],
meta={'comps':comps})
# map components
comp_map = np.zeros(n_orbs, dtype=int)
# cold component
comp_map[np.ravel(np.where(lzm_sign >= ocut[0]))] += \
2**comps.index('thin_d')
# warm component
comp_map[np.ravel(np.where((lzm_sign > ocut[1])
& (lzm_sign < ocut[0])))] += \
2**comps.index('warm_d')
# hot component
comp_map[np.ravel(np.where((lzm_sign > ocut[2])
& (lzm_sign <= ocut[1])))] += \
2**comps.index('bulge') # was lzm_sign<ocut[1]
# disk component
comp_map[np.ravel(np.where(lzm_sign > ocut[1]))] += \
2**comps.index('disk')
# whole component
comp_map += 2**comps.index('all')
for i in np.ravel(np.where(comp_map > 0)):
for k, comp in enumerate(comps):
if comp_map[i] & (1 << k):
decomp['component'][i] += f'|{comp}|'
return decomp
def plot_comps(self,
xlim,
ylim,
comp_kinem_moments,
figtype='.png'):
""" Generate decomposition plots.
Parameters
----------
xlim : float
restricts plot x-coordinates to abs(x) <= xlim.
ylim : float
restricts plot y-coordinates to abs(y) <= ylim.
comp_kinem_moments : astropy table
The table columns are: aperture index (starting with 0), followed
by three columns per component holding the flux, mean velocity,
and velocity dispersion.
The chosen v_sigma_option is in the table meta data.
figtype : str, optional
Determines the file format and extension to use when saving the
figure. The default is '.png'.
Returns
-------
None.
"""
v_sigma_option = comp_kinem_moments.meta['v_sigma_option'] \
if 'v_sigma_option' in comp_kinem_moments.meta.keys()\
else ''
self.logger.info(f'Plotting decomposition for {v_sigma_option=}.')
weights = self.model.weights
comps = self.decomp.meta["comps"]
stars = \
self.config.system.get_component_from_class(
dyn.physical_system.TriaxialVisibleComponent)
dp_args = stars.kinematic_data[self.kin_set].dp_args
xi = dp_args['x']
yi = dp_args['y']
dx = dp_args['dx']
grid = dp_args['idx_bin_to_pix']
# The angle that is saved in this file is measured counter clock-wise
# from the galaxy major axis to the X-axis of the input data.
angle_deg = dp_args['angle']
self.logger.debug(f'Pixel grid dimension is {dx=}, {len(xi)=}, '
f'{len(yi)=}, {grid.shape}, {angle_deg=}.')
s = np.ravel(np.where((grid >= 0) & (np.abs(xi) <= xlim)
& (np.abs(yi) <= ylim)))
s_wide = np.ravel(np.where(grid >= 0))
quant = ['_lsb', '_v', '_sig']
vel = []
sig = []
t = []
totalf = 0
for i in range(len(comps)):
labels = [comps[i] + qq for qq in quant]
flux = comp_kinem_moments[labels[0]]
w = weights[[comps[i] in s for s in self.decomp['component']]]
fhist, fbinedge = np.histogram(grid[s_wide], bins=len(flux))
flux = flux / fhist
tt = flux[grid]*1.
tt = tt * np.sum(w)/np.sum(tt)
t.append(tt.copy())
if comps[i] in ['thin_d', 'warm_d', 'bulge']:
totalf += np.sum(tt)
if comps[i] == 'thin_d':
fluxtot = tt
else:
fluxtot += tt
vel.append(comp_kinem_moments[labels[1]])
sig.append(comp_kinem_moments[labels[2]])
t = t/totalf
vmax = np.nanmax(vel)
sig_t = np.array(sig)
smax = np.nanmax(sig_t[sig_t > 0])
smin = np.nanmin(sig_t[sig_t > 0])
minf=np.nanmin(np.log10(fluxtot))
maxf=np.nanmax(np.log10(fluxtot[fluxtot !=0]))
xi_t=(xi[s])
yi_t=(yi[s])
table = {'x/arcs':xi_t,'y/arcs':yi_t}
for i in range(len(comps)):
labels = [comps[i] + qq for qq in quant]
table.update({labels[0]:t[i][s],
labels[1]:vel[i][grid[s]],
labels[2]:sig[i][grid[s]]})
comps_kin = astropy.table.Table(table)
kin_name = stars.kinematic_data[self.kin_set].name
file_name = f'comps_kin_{v_sigma_option}_{kin_name}'
table_file_name = self.model.directory + file_name + '.ecsv'
plot_file_name = self.config.settings.io_settings['plot_directory'] \
+ file_name \
+ figtype
comps_kin.write(f'{table_file_name}',
format='ascii.ecsv',
overwrite=True)
self.logger.info('Component grid kinematics written to '
f'{table_file_name}.')
self.logger.debug(f'{v_sigma_option}: {vmax=}, {smax=}, {smin=}.')
LL = len(comps)
map1 = cmr.get_sub_cmap('twilight_shifted', 0.05, 0.6)
map2 = cmr.get_sub_cmap('twilight_shifted', 0.05, 0.95)
titles = ['THIN DISK','THICK DISK','DISK','BULGE','ALL']
compon = np.array(['thin_d','warm_d','disk','bulge','all'])
kwtext = dict(size=20, ha='center', va='center', rotation=90.)
kw_display1 = dict(pixelsize=dx, colorbar=True,
nticks=7, cmap=map1)
kw_display2 = dict(pixelsize=dx, colorbar=True,
nticks=7, cmap=map2)
plt.figure(figsize=(16, int((LL+2)*3)*ylim/xlim))
plt.subplots_adjust(hspace=0.7, wspace=0.01, left=0.01,
bottom=0.05, top=0.99, right=0.99)
for ii in range(len(comps)):
ax = plt.subplot(LL, 3, 3*ii+1)
if ii == 0:
ax.set_title('surface brightness (log)',fontsize=20,pad=20)
display_pixels(xi_t, yi_t, np.log10(t[ii][s])-maxf,
vmin=minf-maxf, vmax=0, **kw_display1)
ax.text(-0.2, 0.5, titles[np.where(compon==comps[ii])[0][0]],
**kwtext, transform=ax.transAxes)
plt.subplot(LL, 3, 3*ii+2)
if ii == 0:
plt.title('velocity',fontsize=20,pad=20)
display_pixels(xi_t, yi_t, vel[ii][grid[s]],
vmin=-1.0*vmax, vmax=vmax, **kw_display2)
plt.subplot(LL, 3, 3*ii+3)
if ii == 0:
plt.title('velocity dispersion',fontsize=20,pad=20)
display_pixels(xi_t, yi_t, sig[ii][grid[s]],
vmin=smin, vmax=smax, **kw_display1)
plt.tight_layout()
plt.savefig(plot_file_name)
self.logger.info(f'Component plots written to {plot_file_name}.')
plt.close()
class Analysis:
"""Class to hold results' analysis methods.
This class contains methods that help analyzing DYANMITE results and can
be called, e.g. by plotting routines.
Parameters
----------
config : a ``dyn.config_reader.Configuration`` object
model : a ``dyn.model.Model`` object, optional, default: best model so far
kin_set : int, optional, default: 0
"""
def __init__(self, config, model=None, kin_set=0):
self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')
if config is None:
text = f'{__class__.__name__} needs configuration object, ' \
'None provided.'
self.logger.error(text)
raise ValueError(text)
if model is None:
model_index = config.all_models.get_best_n_models_idx(1)[0]
model = config.all_models.get_model_from_row(model_index)
self.config = config
self.model = model
self.kin_set = kin_set
def get_gh_model_kinematic_maps(self,
model=None,
kin_set=None,
v_sigma_option='fit',
kinematics_as='table',
weights=None):
"""
Generates an astropy table in the model directory that holds the
model's data for creating Gauss-Hermite kinematic maps:
v, sigma, h3 ... h<number_GH>.
v and sigma are either directly calculated from the model's losvd
histograms or from fitting a Gaussian in each aperture.
Parameters
----------
model : a ``dyn.model.Model`` object, optional
The default is the Analysis object's model.
kin_set : int, optional
Which kinematics set to use. The default is the
Analysis object's kin_set.
v_sigma_option : str, optional
If 'fit', v_mean and v_sigma are calculated based on fitting
Gaussians, if 'moments', v_mean and v_sigma are calculated
directly from the model's losvd histograms. The default is 'fit'.
kinematics_as : str, optional
If 'table', return ``gh_table``, the model's kinematics as an
astropy table, if 'file', write the table to disk in ascii.ecsv
format and return its full path ``f_name``, if 'both', write the
table to disk and return a tuple ``(gh_table, f_name)``.
The default is 'table'.
weights : ``numpy.array`` like, optional
Orbital weights to use. The default is ``None`` and will
determine the weights via ``model.get_weights(orblib)``.
Raises
------
ValueError
if v_sigma_option or kinematics_as are invalid.
Returns
-------
gh_table : astropy table (if kinematics_as='table')
The astropy table holding the model's gh kinematics.
f_name : str (if kinematics_as='file')
The file name (full path) of the astropy table holding the model's
gh kinematics.
(gh_table, f_name) : tuple (if kinematics_as='both')
"""
if model is None:
model = self.model
if kin_set is None:
kin_set = self.kin_set
if v_sigma_option not in ['moments', 'fit']:
txt = f"{v_sigma_option=} but must be either 'fit' or 'moments'."
self.logger.error(txt)
raise ValueError(txt)
if kinematics_as not in ['table', 'file', 'both']:
txt = f"{kinematics_as=} but must be either 'table', 'file', or " \
"'both'."
self.logger.error(txt)
raise ValueError(txt)
stars = self.config.system.get_component_from_class(
dyn.physical_system.TriaxialVisibleComponent)
kin_name = stars.kinematic_data[kin_set].name
self.logger.info('Getting model projected masses and losvds.')
orblib = model.get_orblib()
if weights is None:
_ = model.get_weights(orblib)
weights = model.weights
# get losvd_histograms and projected masses:
orblib.read_losvd_histograms()
# get all orbits' losvds; orbits_losvd.shape = n_orb,n_vbin,n_aperture
orbits_losvd = orblib.losvd_histograms[kin_set].y[:,:,]
# weighted sum of orbits_losvd; model_losvd.shape = 1,n_vbin,n_aperture
model_losvd = np.dot(orbits_losvd.T, weights).T[np.newaxis]
#model_losvd /= np.sum(model_losvd, 0) # normalisation not necessary
model_proj_masses = np.dot(orblib.projected_masses[kin_set].T,
weights) # .shape = n_aperture
# calculate v_mean and v_sigma values from the losvd histograms
model_losvd_hist = \
dyn.kinematics.Histogram(xedg=orblib.losvd_histograms[kin_set].xedg,
y=model_losvd,
normalise=False)
if v_sigma_option == 'moments':
v_mean = np.squeeze(model_losvd_hist.get_mean()) # from distr.
v_sigma = np.squeeze(model_losvd_hist.get_sigma()) # from distr.
v_sig_text = 'losvd moments'
elif v_sigma_option == 'fit':
v_mean, v_sigma = model_losvd_hist.get_mean_sigma_gaussfit()
v_mean = np.squeeze(v_mean)
v_sigma = np.squeeze(v_sigma)
v_sig_text = 'fitted Gaussians'
else:
pass
self.logger.debug(f'Calculated v_mean and v_sigma from {v_sig_text} '
f'for {len(v_mean)} apertures.')
gh_table = astropy.table.Table([model_proj_masses, v_mean, v_sigma],
names = ['flux', 'v', 'sigma'],
meta={'v_sigma_option': v_sigma_option,
'kin_set': kin_name})
weight_solver_settings = self.config.settings.weight_solver_settings
n_gh = weight_solver_settings['number_GH']
if n_gh > 2:
# calculate the model's gh expansion coefficients
gh = dyn.kinematics.GaussHermite()
gh.data = gh_table
model_gh_coefficients = gh.transform_orblib_to_observables(
losvd_histograms=model_losvd_hist,
weight_solver_settings=weight_solver_settings)
# unscale by projected masses (see weight solver)
model_gh_coefficients = \
(np.squeeze(model_gh_coefficients).T / model_proj_masses).T
# add the gh coefficients to the astropy table
col_names = [f'h{i}' for i in range(3,n_gh+1)]
tab_data = list(model_gh_coefficients[:,2:].T)
gh_table.add_columns(tab_data, names=col_names)
if kinematics_as == 'table':
return gh_table
f_name = f'{model.directory}model_gh_kins_' + \
f'{kin_name}_{v_sigma_option}.ecsv'
gh_table.write(f_name, format='ascii.ecsv', overwrite=True)
self.logger.info(f'Model gh kinematics {kin_name}, {n_gh=} '
f'written to {f_name}.')
if kinematics_as == 'file':
return f_name
return (gh_table, f_name)
|
dynamics-of-stellar-systemsREPO_NAMEdynamitePATH_START.@dynamite_extracted@dynamite-master@[email protected]@.PATH_END.py
|
{
"filename": "utils_spec.py",
"repo_name": "justyncw/STAR_MELT",
"repo_path": "STAR_MELT_extracted/STAR_MELT-main/utils_spec.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 19:05:15 2019
@author: jcampbellwhite001
"""
import time
import pandas as pd
import numpy as np
import astropy.constants
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation
import astropy.units as u
from scipy.interpolate import interp1d
import scipy.optimize as optimize
from scipy.optimize import curve_fit
from scipy.stats import spearmanr, chisquare
from ESO_fits_get_spectra import *
from ESP_fits_get_spectra import *
from utils_data import *
from PyAstronomy import pyasl
from matplotlib import *
from matplotlib.pyplot import *
from astropy.stats import sigma_clip
from astropy.timeseries import LombScargle
from scipy.signal import savgol_filter
from scipy.signal import argrelextrema
from lmfit.models import GaussianModel, LinearModel, PolynomialModel
import utils_shared_variables as USH
clight=astropy.constants.c.to('km/s').to_value()
timenow=time.strftime("%d_%b_%Y_%Hh_", time.gmtime())
line_table=USH.line_table
line_table=USH.line_table_prev_obs
class renamer():
#adds '_1' to duplicated strings
def __init__(self):
self.d = dict()
def __call__(self, x):
if x not in self.d:
self.d[x] = 0
return x
else:
self.d[x] += 1
return "%s_%d" % (x, self.d[x])
def NormalizeData(data):
return (data - np.min(data)) / (np.max(data) - np.min(data))
def NormalizeDataMedian(data):
return (data / np.median(data)) - np.median(data)
def get_av_spec_simple(df_spec,w0,columns=None,norm=False,output=False,plot_av=True):
'''
Create dataframe of observations for list of df_spec dataframes with 'wave' and 'f0' columns
file='/Users/jcampbel/Downloads/MPfit_example_tab.txt'
df_spec=pd.read_csv(file,delim_whitespace=True)
df_spec
w0=np.arange(min(df_spec.wave),max(df_spec.wave),0.1)
df_av=get_av_spec_simple([df_spec],w0,output=True)
Parameters
----------
df_spec : list of dataframes
list containing dataframes with wave and f0 coluumns.
w0 : array
wavelength array.
columns : list, optional
list of column names for the flux columns. The default is None.
norm : bool, optional
normalise the flux values by dividing by the median flux. The default is False.
output : bool, optional
plot flux data frame. The default is False.
plot_av : bool, optional
plot average spectra. The default is True.
Returns
-------
df_av : dataframe
dataframe of wave, flux1,...,fluxi,av_flux,med_flux,std_flux .
'''
'''create empty data frames for the values and col names'''
df_av=pd.DataFrame({'wave': w0})
df_av_col_names=['wave']
if output == True:
ioff()
fig=figure(figsize=USH.fig_size_s)
'''create average data framne of flux values'''
i=0
for f in df_spec:
data_flux=f.f0
data_wave=f.wave
resample=interp1d(data_wave,data_flux,fill_value='extrapolate')
f0_data=resample(w0)
if norm == True:
f0_data=(f0_data)/np.nanmedian(f0_data)
#if min(data_wave) < min(w0) < max(data_wave):
df_av=pd.concat([df_av,pd.Series(f0_data)],axis=1)#this makes the full data frame
df_av_col_names.append(i)#this names the flux columns
i+=1
#df_av_col_names.append((data_info[2]))#this names the flux columns by mjd
if output == True or savefig == True:
plot(w0,f0_data)#,label=data_info[0]+' '+data_info[1]+' '+data_info[3])
if columns != None:
df_av.columns=columns
else:
df_av.columns=df_av_col_names
df_av=df_av.rename(columns=renamer()) #if any duplicated column names, rename them to add _x
av_flux=df_av.iloc[:,1:len(df_av.columns)].mean(axis=1).rename('av_flux')
med_flux=df_av.iloc[:,1:len(df_av.columns)].median(axis=1).rename('med_flux')
std_flux=df_av.iloc[:,1:len(df_av.columns)].std(axis=1).rename('std_flux')
df_av=pd.concat([df_av,av_flux,med_flux,std_flux],axis=1)
if output == True:
if plot_av==True:
plot(w0,df_av.av_flux,'k',linewidth=2,label='average')
plot(w0,df_av.med_flux,'b--',linewidth=2,label='median')
#legend(loc='upper left', fontsize=7, numpoints=1)
ylabel('Flux')
xlabel('Wavelength [Angstroms]')
if output==True:
show()
ion()
return df_av
def get_av_spec(data_dates_range,w0,label='mjd',norm=False,output=False,plot_av=True,savefig=False):
'''
function to return data frame of all fluxes, av flux and std for given fits file list
and stanard wavelength range w0 from get_instrument_date_details()
Parameters
----------
data_dates_range : data frame
output of get_instrument_date_details() providing info and filenames to create
average dataframe from.
w0 : array
output of get_instrument_date_details() giving specified wavelength range to
interpolate flux values between.
label : str, optional
how to label the flux columns, either 'mjd' or 'utc_inst'. The default is 'mjd'.
norm : bool, optional
option to normalise the spectra using median values. The default is False.
output : bool, optional
option to plot results. The default is False.
plot_av : bool, optional
plot average spectra. The default is True.
savefig : bool, optional
option to save the plot. The default is False.
Returns
-------
df_av : data frame
data frame containing all flux values, mean flux, median flux and std flux
columns are wave, followed by mjd of each individual obs, mean, median, std.
'''
target=data_dates_range['target'].any() #added this any, check if filename saving still works
instr=data_dates_range['inst'].any() #added this any, check if filename saving still works
'''create empty data frames for the values and col names'''
df_av=pd.DataFrame({'wave': w0})
df_av_col_names=['wave']
if output == True or savefig == True:
#ioff()
fig=figure(figsize=USH.fig_size_l)
'''create average data framne of flux values'''
for f in data_dates_range.file:
data_info,data_wave,data_flux,data_err=read_fits_files(f,verbose=False)
#convert to Angstrom for fits files that use nm
if instr!='XMM':
if min(data_wave) < 1000:
data_wave=data_wave * 10
if data_info[3]=='UVES' or data_info[3]=='SHOOT' or data_info[3]=='XSHOOTER' or data_info[3]=='HRS':
bary_shift=(data_wave * data_info[6]) / clight #shift in the rest wl due to bary
data_wave=data_wave + bary_shift
resample=interp1d(data_wave,data_flux,fill_value='extrapolate')
f0_data=resample(w0)
if norm == True:
#f0_data=(f0_data-median(f0_data))/median(f0_data)
f0_data=(f0_data)/np.nanmedian(f0_data)
#f0_data=NormalizeData(f0_data)
#f0_data=f0_data-median(f0_data)
#if min(data_wave) < min(w0) < max(data_wave):
df_av=pd.concat([df_av,pd.Series(f0_data)],axis=1)#this makes the full data frame
#df_av_col_names.append(data_info[1].replace('-',''))#this names the flux columns by date
#df_av_col_names.append((data_info[2]))#this names the flux columns by mjd
if label=='utc_inst':
lab=data_info[1]+'_'+data_info[3]
df_av_col_names.append(lab)#this names the flux columns by utc data and instrument
elif label=='mjd':
lab=str(data_info[2])
df_av_col_names.append(lab)#this names the flux columns by mjd
if output == True or savefig == True:
plot(w0,f0_data,label=data_info[0]+' '+lab)
df_av.columns=df_av_col_names
df_av=df_av.rename(columns=renamer()) #if any duplicated column names, rename them to add _x
av_flux=df_av.iloc[:,1:len(df_av.columns)].mean(axis=1).rename('av_flux')
med_flux=df_av.iloc[:,1:len(df_av.columns)].median(axis=1).rename('med_flux')
std_flux=df_av.iloc[:,1:len(df_av.columns)].std(axis=1).rename('std_flux')
df_av=pd.concat([df_av,av_flux,med_flux,std_flux],axis=1)
if output == True or savefig == True:
if plot_av==True:
plot(w0,df_av.av_flux,'k--',linewidth=1,label='Mean Flux')
plot(w0,df_av.med_flux,'b--',linewidth=1,label='Median Flux')
#legend(bbox_to_anchor=(1.04,0.5), loc="center left", borderaxespad=0)
legend(loc='upper right',fontsize=8)
if norm==True:
ylabel('Noramlised Flux')
else:
ylabel('Flux')
xlabel('Wavelength [Angstroms]')
#if instr == 'FEROS':
# ylim(-0.01,0.1)
if output==True:
show()
tight_layout()
if savefig==True:
#output dir
dirname=os.path.join('av_spec_plots'+'_'+timenow)
if not os.path.exists(dirname):
os.makedirs(dirname)
fig.savefig(os.path.join(dirname,target)+'_'+instr+'.pdf')
print('saving file',os.path.join(dirname,target)+'_'+instr+'.pdf')
if output == False:
close()
#ion()
return df_av
def wl_plot(df_av,plot_av=True,fs=USH.fig_size_l,output=True,savefig=False,legend=True):
'''
plotter for df_av dataframe, for use after wl_exluder has been run
Parameters
----------
w0 : array
wavelength values to be plotted.
df_av : data frame
result from get_av_spec() or wl_excluder().
Returns
-------
None.
'''
ioff()
#fs=(5,5)
fig=figure(figsize=fs)
plot(df_av.wave,df_av.iloc[:,1:len(df_av.columns)-3],linewidth=3)
if plot_av==True:
plot(df_av.wave,df_av.av_flux,'k',linewidth=3,label='average')
plot(df_av.wave,df_av.med_flux,'b-',linewidth=6,label='median')
#plot(df_av.wave,df_av.std_flux/np.mean(df_av.av_flux),'k--',linewidth=2)
#fill_between(df_av.wave,0,df_av.std_flux/np.mean(df_av.av_flux),color='grey',label='sd')
#legend(df_av.columns[1:,], loc='upper left', fontsize=7, numpoints=1)
ylabel('Flux')
xlabel('Wavelength [$\AA$]')
#xlim(6290,6310)
#xlim(6555,6572)
#ylim(0.3,1.5)
#locator_params(nbins=10)
if legend==True:
fig.legend(df_av.columns[1:,], fontsize=10, loc='upper right')
#legend(df_av.columns[1:,], loc='center left', bbox_to_anchor=(1, 0.5), fontsize=7, numpoints=1)
tight_layout()
if output==True:
show()
if savefig==True:
dirname=os.path.join('wl_plots',timenow)
if not os.path.exists(dirname):
os.makedirs(dirname)
filename=os.path.join(dirname,USH.target)+'_'+USH.instrument+'_'+str(int(median(df_av.wave)))+'.png'
fig.savefig(filename)
print('saving figure: ',filename)
if output == False:
close()
ion()
def get_line_spec(df_av,line,w_range=0.6,vel_offset=0,vel=False,norm=False,cont_sub=False):
'''
get subset of df_av for given wavelength range around a line,
convert to vel if vel==True, with zero at specified line
Parameters
----------
df_av : data frame
average flux dataframe from get_ave_spec().
line : float
wavelength position of emission line to consider.
w_range : float, optional
wavelength range +/- around line to include. The default is 0.6.
vel : bool, optional
option to return velcity values in km/s around line. The default is False.
Returns
-------
df_line_av : data frame
subset of df_av around line, either as is or with wave replaced with vel if vel==True.
'''
df=df_av.copy()
if vel==False:
rv_shift=(df['wave'].values * vel_offset) / clight #shift in the rest wl due to rv
df.wave=df.wave - rv_shift #comparison wl to compare to observed, accounting for rv shift
df_line_av=df[df['wave'].between(line-w_range,line+w_range)]
else:
df_line_av=df[df['wave'].between(line-200,line+200)]
if w_range < 10:
w_range = 10
#convert wavelength to velocity around given line
if vel==True:
df_vel=pd.Series(((df_line_av.wave - line)*clight/line)-vel_offset,name='vel')
df_line_av=pd.concat([df_vel,df_line_av],axis=1).drop('wave',axis=1)
#df_line_av=df_line_av[df_line_av['vel'].between(0-w_range,0+w_range)]
df_line_av1=df_line_av[df_line_av['vel'].between(0-w_range,0+w_range)]
df_line_av=df_line_av1
if norm==True:
df_line_av=df_line_av.copy()
df_line_av.iloc[:,1:len(df_line_av.columns)]=df_line_av.iloc[:,1:len(df_line_av.columns)].apply(lambda x: (x - median(x) ) / (x.max() - median(x)))
if cont_sub==True:
df_line_av=df_line_av.copy()
df_line_av.iloc[:,1:len(df_line_av.columns)]=df_line_av.iloc[:,1:len(df_line_av.columns)].apply(lambda x: (x - median(x) +1 ))# / (x.max() - median(x)))
return df_line_av
def vel_plot(df_av_line,start_date='1900',end_date='2100',line=0,fs=USH.fig_size_s,output=True,plot_av=True,plot_sd=False,
savefig=False):
global target
'''plotting function for spectra,
option to convert to velocity given line and radvel'''
ioff()
fig=figure(figsize=fs)
plot(df_av_line.vel,df_av_line.iloc[:,1:len(df_av_line.columns)-3],linewidth=2)
if plot_av==True:
plot(df_av_line.vel,df_av_line.av_flux,'k',linewidth=1,label='mean')
plot(df_av_line.vel,df_av_line.med_flux,'b',linewidth=1,label='median')
fig.legend(df_av_line.columns[1:-1], fontsize=10, numpoints=1)
if plot_sd==True:
plot(df_av_line.vel,df_av_line.std_flux/df_av_line.med_flux,color='green',alpha=0.5, linestyle='dashed',linewidth=2,label='sd')
fill_between(df_av_line.vel,0,(df_av_line.std_flux),color='grey',alpha=0.5)
if plot_av==True:
fig.legend(np.append(df_av_line.columns[1:-1],['std_flux/med_flux','std_flux']), fontsize=10, numpoints=1)
else:
fig.legend(np.append(df_av_line.columns[1:-3],['std_flux/med_flux','std_flux']), fontsize=10, numpoints=1)
if plot_av==False and plot_sd==False:
i=1
fig.legend(df_av_line.columns[1:-3].values, fontsize=10, numpoints=1)#,bbox_to_anchor=(1.04,1))
axvline(x=0,color='k',linewidth=0.5,linestyle='--')
title('Plot of line at %s Angstroms'%(line))
ylabel('Flux')
xlabel('v [km/s]')
tight_layout()
print(USH.target)
print(USH.instrument)
if output==True:
show()
else:
close()
if savefig==True:
dirname=os.path.join('vel_plots')
if not os.path.exists(dirname):
os.makedirs(dirname)
filename=os.path.join(dirname,USH.target)+'_'+USH.instrument+'_'+str(line)+'.png'
fig.savefig(filename)
print('saving figure: ',filename)
if output == False:
close()
ion()
def quadrant_split2d(array):
"""Example function for identifiying the elements of quadrants in an array.
array:
A 2D NumPy array.
Returns:
The quadrants as 2D arrays.
"""
Ysize = array.shape[0]
Xsize = array.shape[1]
y, x = np.indices((Ysize,Xsize))
#if not (Xsize==Ysize)&(Xsize % 2 == 0): print ('There will be overlaps')
bl=(x<Xsize/2)&(y<Ysize/2)
br=(x>Xsize/2-1)&(y<Ysize/2)
tl=(x<Xsize/2)&(y>Ysize/2-1)
tr=(x>Xsize/2-1)&(y>Ysize/2-1)
sectors=(array[bl],array[br],array[tl],array[tr])
return sectors
def vel_xcorr(df1,df2=[],undersample=1,line1=0,line2=0,fs=(8,6),mask=0.6,c_lvls=[0.05],masked_plot=False):
flux1=df1.iloc[:,1:len(df1.columns)-3].values[::undersample]
vel1=df1['vel'].values[::undersample]
if len(df2)==0:
flux2=flux1
vel2=vel1
else:
flux2=df2.iloc[:,1:len(df2.columns)-3].values[::undersample]
vel2=df2['vel'].values[::undersample]
xcor = np.zeros([len(flux1),len(flux2)])
pcor = np.zeros([len(flux1),len(flux2)])
for i in range(0,len(flux1)):
for j in range(0,len(flux2)):
v=flux1[i]
w=flux2[j]
r,p=spearmanr(v,w)
xcor[i,j]=r
pcor[i,j]=p
print('vel plot of line(s), including undersample factor:')
vel_plot(df1[::undersample],line=line1,plot_av=False,plot_sd=False,fs=USH.fig_size_s)
if len(df2)>0:
vel_plot(df2[::undersample],line=line2,plot_av=False,plot_sd=False,fs=USH.fig_size_s)
print('xcorr plot:')
figure(figsize=fs)
ax2=[min(vel2), max(vel2), min(vel1), max(vel1)]
axis(ax2)
x=[min(vel1),max(vel1)]
y=[min(vel1),max(vel1)]
plot(x,y,color='k',linewidth=2, linestyle='--')
axvline(x=0,color='k',linewidth=0.5,linestyle='--')
axhline(y=0,color='k',linewidth=0.5,linestyle='--')
xcor_masked=np.select([xcor>mask , xcor <-mask],[xcor,xcor])
sectors = quadrant_split2d(xcor) #for percentage coverage in quadrants, use all +ve or -ve
sectors = quadrant_split2d(xcor_masked) #or at the above masked threshold
bl_pos_frac=len(np.where(sectors[0]>0)[0])/len(sectors[0])
bl_neg_frac=len(np.where(sectors[0]<0)[0])/len(sectors[0])
br_pos_frac=len(np.where(sectors[1]>0)[0])/len(sectors[1])
br_neg_frac=len(np.where(sectors[1]<0)[0])/len(sectors[1])
tl_pos_frac=len(np.where(sectors[2]>0)[0])/len(sectors[2])
tl_neg_frac=len(np.where(sectors[2]<0)[0])/len(sectors[2])
tr_pos_frac=len(np.where(sectors[3]>0)[0])/len(sectors[3])
tr_neg_frac=len(np.where(sectors[3]<0)[0])/len(sectors[3])
#print('BL% r > 0.6:',np.round(bl_pos_frac,2),', BL% r < -0.6:',np.round(bl_neg_frac,2))
#print('BR% r > 0.6:',np.round(br_pos_frac,2),', BR% r < -0.6:',np.round(br_neg_frac,2))
#print('TL% r > 0.6:',np.round(tl_pos_frac,2),', TL% r < -0.6:',np.round(tl_neg_frac,2))
#print('TR% r > 0.6:',np.round(tr_pos_frac,2),', TR% r < -0.6:',np.round(tr_neg_frac,2))
print(f'fraction of each quadrant with positive corr r > {mask}:')
quad_pos=pd.DataFrame([[bl_pos_frac, br_pos_frac], [tl_pos_frac, tr_pos_frac]], columns=['line2 blue','line2 red'], index=['line1 blue','line1 red'])
display(quad_pos.round(2))
print(f'fraction of each quadrant with negative corr r < -{mask}:')
quad_neg=pd.DataFrame([[bl_neg_frac, br_neg_frac], [tl_neg_frac, tr_neg_frac]], columns=['line2 blue','line2 red'], index=['line1 blue','line1 red'])
display(quad_neg.round(2))
if masked_plot==True:
corre=pcolor(vel2,vel1,xcor_masked,cmap=cm.PRGn, vmin=-1, vmax=1,shading='auto')#,norm=colors.PowerNorm(gamma=2))
#blank low values colour, try white lower end
else:
corre=pcolor(vel2,vel1,xcor,cmap=cm.PRGn, vmin=-1, vmax=1,shading='auto')#,norm=colors.PowerNorm(gamma=2))
#contour(vel2,vel1,pcor,levels=[1e-15,1e-10,1e-5,1e-4,1e-2],colors='k',linewidths=1)
contour(vel2,vel1,pcor,levels=c_lvls,colors='k',linewidths=1,alpha=0.8)
#contour(vel2,vel1,xcor,levels=c_lvls,colors='k',linewidths=1,alpha=0.8)
colorbar(corre, shrink=1, aspect=30)
if len(df2)>0:
xlabel(f'{line2} v (km/s)')
else:
xlabel(f'{line1} v (km/s)')
ylabel(f'{line1} v (km/s)')
text(max(vel2)+max(vel2)*0.45, 0, r'r')
gca().invert_yaxis() #for output plots,
return(xcor,pcor,quad_pos,quad_neg)
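# Illustrative usage sketch for vel_xcorr (assumes two velocity dataframes from get_line_spec,
# e.g. H-alpha vs He I; the wavelengths are placeholders and undersample=2 just thins the grid):
# xcor, pcor, quad_pos, quad_neg = vel_xcorr(df_ha, df2=df_he, undersample=2,
#                                            line1=6562.8, line2=5875.6, mask=0.6, c_lvls=[0.05])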
def get_RV(w0,df_av,st_wave,st_flux,st_rv,date='med_flux',w_min=5610,w_max=5710,multi=False,output=True):
'''
old version of RV, to be removed. calculate radial velocity of target star using template/standard star and cross correlation
Parameters
----------
w0 : array
wavelength array.
df_av : data frame
average dataframe containing all flux values.
st_wave : array
wavelength array of standard star.
st_flux : array
flux array of standard star.
st_rv : float
radial velocity of standard star.
date : float or str, colname of df_av, optional
the date or which average flux to use. The default is 'med_flux'.
w_min : float, optional
min wavelength for rv calculation. The default is 5610.
w_max : float, optional
max wavelength for rv calculation. The default is 5710.
multi : bool, optional
option to rerun rv calculation across shifting wavelength range to get average and std. The default is False.
Returns
-------
radvel : float
radial velocity of the target star.
'''
print('now calculating radial velocity of target star using template spectra')
rvmin=-50-st_rv
rvmax=50-st_rv
drv=0.1 #steps of rad velo
vt=st_rv #template radial velocity
df_av_rv=df_av[df_av['wave'].between(w_min,w_max)]
w0_rv=df_av_rv['wave'].values
f0_data_rv=df_av_rv[date].values
resample_st=interp1d(st_wave,st_flux,fill_value='extrapolate')
#f0_st_rv=resample_st(w0_rv)
w0_st=np.arange(w_min-200,w_max+200,0.1)
f0_st_rv=resample_st(w0_st)
xcor,ycor=pyasl.crosscorrRV(w0_rv, f0_data_rv, w0_st, f0_st_rv, rvmin, rvmax,
drv, mode='doppler', skipedge=70, edgeTapering=1.)
fol=(ycor==max(ycor))
radvel=float(xcor[fol])+vt
rvs=[radvel]
if output == True:
wl_plot(df_av_rv,plot_av=False,fs=USH.fig_size_n)
#plot(w0_st,f0_st_rv)
figure()
plot(xcor+vt,ycor/max(ycor), ':', label='Temp')
if multi == True:
for i in range(1,6):
w_min += 50
w_max += 50
df_av_rv=df_av[df_av['wave'].between(w_min,w_max)]
w0_rv=df_av_rv['wave'].values
f0_data_rv=df_av_rv[date].values
f0_st_rv=resample_st(w0_rv)
xcor,ycor=pyasl.crosscorrRV(w0_rv, f0_data_rv, w0_rv, f0_st_rv, rvmin, rvmax, drv, mode='doppler', skipedge=200, edgeTapering=1.)
#plot(xcor+vt,ycor/max(ycor), ':', label='Temp')
fol=(ycor==max(ycor))
radvel_i=float(xcor[fol])+vt
rvs.append(radvel_i)
if output == True:
plot(xcor+vt,ycor/max(ycor))
radvel=np.round(np.mean(rvs),2)
print ('av rad vel = %.2f km/s, sd = %.2f' %(np.mean(rvs),np.std(rvs)))
else:
print ('rad vel = %.2f km/s' %(radvel))
if output == True:
axvline(x=radvel,color='k',linewidth=0.5,linestyle='--')
xlabel('Radial Velocity [km/s]')
ylabel('Normalised Xcor')
return radvel
def get_vsini(w0,df_av,st_wave,st_flux,st_rv,date='med_flux',w_min=5610,w_max=5710,output=True):
'''
old version of vsini, to be removed.
calculate the projected rotational velocity, vsini, of target star using template/standard
spectra, cross correlation and broadening
Parameters
----------
w0 : array
wavelength array of target star.
df_av : data frame
data frame containing all flux observations for each wavelength and averages.
st_wave : array
standard star wavelength array.
st_flux : array
standard star flux array.
st_rv : float
standard star radial velocity.
date : float or str, colname of df_av, optional
the date or which average flux to use. The default is 'med_flux'.
w_min : float, optional
min wavelength for rv calculation. The default is 5610.
w_max : float, optional
max wavelength for rv calculation. The default is 5710.
output : bool, optional
option to plot results of vsini calculation. The default is True.
Returns
-------
vsini : float
vsini of target star.
'''
print('now calculating vsini of target star using template spectra')
start_time = time.time()
df_av_rv=df_av[df_av['wave'].between(w_min,w_max)]
w0_rv=df_av_rv['wave'].values
f0_data_rv=df_av_rv[date].values
resample_st=interp1d(st_wave,st_flux,fill_value='extrapolate')
f0_st_rv=resample_st(w0_rv)
#radvel=get_RV(w0_rv,df_av,st_wave,st_flux,st_rv,date,w_min,w_max,multi=True,output=False)
radvel=0
if output == True:
fig, ax = subplots(1, 2,figsize=(10,5))#,gridspec_kw={'wspace':0})
fig.suptitle('V sin i calculation')
ax[0].set(xlabel='Rad. Vel. (km/s)',ylabel='Normalized Xcor')
ax[1].set(ylabel='Vsini (km/s)',xlabel='Xcorr width (km/s)')
#Parameters for the xcor and broadening:
rvmin=-50.
rvmax=50.
drv=0.1 #steps of rad velo
epsilon=0 #0.6 #limb darkening for models, good for young stars, Dahm+12, Hartmann+86
vm=-50.
vmm=50. #max and min vel for the fit.
cutparam=0.1 #0.2
#Now do some broadened spectra:
kms_list=[3,5,7,9,12,15,20,25,30,35]#,50]
'''create empty data frames for the values and col names'''
broad_flux=pd.DataFrame({'wave': w0_rv})
col_names=['wave']
for kms in kms_list:
#f=pyasl.rotBroad(w0_rv,f0_st_rv, epsilon, kms, edgeHandling='firstlast')
f=pyasl.fastRotBroad(w0_rv, f0_st_rv, epsilon, kms)
broad_flux=pd.concat([broad_flux,pd.Series(f)],axis=1)
col_names.append('%s km/s'%(kms))
#plot(w0,f)
broad_flux.columns=col_names
#Get xcors for all
x_xcor=pd.DataFrame()
y_xcor=pd.DataFrame()
widths=[]
widthplace=0.997
for kms in broad_flux.columns[1:len(broad_flux.columns)]:
x1,y1=pyasl.crosscorrRV(w0_rv, numpy.array(broad_flux.loc[:,kms]), w0_rv, f0_st_rv, rvmin, rvmax, drv, mode='doppler', skipedge=200, edgeTapering=1.)
#plot(x1,y1)
filter1=(y1>min(y1)+cutparam*(max(y1)-min(y1))) & (x1>vm) & (x1<vmm)
#filter1=(x1>vm) & (x1<vmm)
x1=x1[filter1]
y1=y1[filter1]/max(y1)
#gaussian fit of the xcorr
gfit=fit_gauss(x1,y1)
#y1=gfit.best_fit
#width=gfit.best_values['g1_sigma']
#width=gfit.values['g1_fwhm']
if output == True:
ax[0].plot(x1,y1,label=kms)
x_xcor=pd.concat([x_xcor,pd.Series(x1,name=kms)],axis=1)
y_xcor=pd.concat([y_xcor,pd.Series(y1,name=kms)],axis=1)
foli=(x1<x1[np.argmax(y1)]) & (y1<widthplace)
folo=(x1>x1[np.argmax(y1)]) & (y1<widthplace)
width=abs(min(x1[folo])-max(x1[foli]))
widths.append(width)
p_vsini=polyfit(widths,kms_list,deg=2)
xx=arange(min(widths), max(widths), 0.1)
yy=polyval(p_vsini,xx)
if output == True:
ax[1].plot(widths,kms_list,'bo')
ax[1].plot(xx,yy,'k:')
w0_st=np.arange(w_min-200,w_max+200,0.1)
f0_st_rv=resample_st(w0_st)
xcor,ycor=pyasl.crosscorrRV(w0_rv, f0_data_rv, w0_st, f0_st_rv, rvmin-st_rv, rvmax-st_rv, drv, mode='doppler', skipedge=200, edgeTapering=1.)
#Get the vsini:
filter2=(ycor>min(ycor)+cutparam*(max(ycor)-min(ycor)))
#filter2=((xcor+st_rv)-radvel>vm) & ((xcor+st_rv)-radvel<vmm)
ycorf=ycor[filter2]/max(ycor)
xcorf=xcor[filter2]
#gaussian fit of the xcorr
gfit=fit_gauss(xcorf,ycorf)
#ycorf=gfit.best_fit
#width=gfit.best_values['g1_sigma']
#width=gfit.values['g1_fwhm']
if output == True:
ax[0].plot((xcorf+st_rv)-radvel,ycorf,'k--', linewidth=3, label='target star')
comps = gfit.eval_components(x=xcorf)
#ax[0].plot((xcorf+st_rv)-radvel, comps['g1_'], 'g--', label='Gauss 1')
#ax[0].plot((xcorf+st_rv)-radvel, comps['line_'], 'k--', label='linear')
ax[0].legend(fontsize=8)
#Just measuring the width
foli=(xcorf<xcorf[argmax(ycorf)]) & (ycorf<widthplace)
folo=(xcorf>xcorf[argmax(ycorf)]) & (ycorf<widthplace)
width=abs(min(xcorf[folo])-max(xcorf[foli]))
vsini=polyval(p_vsini, width)
if output == True:
ax[1].hlines(vsini, 0, width)
ax[1].vlines(width, 0, vsini)
print('width = %f' %(width))
print('vsini = %f km/s' %(vsini))
elapsed_time = time.time() - start_time
#print('duration of vsini calculation:',time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
print('rv=%.1f, vsini=%.2f'%(radvel,vsini))
return np.round(vsini,2),gfit
def get_rv_vsini(df_av,st_wave,st_flux,st_rv,date='med_flux',vsini_max=50,w_min=5000,w_max=5500,output=True,rtn_err=False):
'''
Parameters
----------
df_av : dataframe
input observation dataframe.
st_wave : array
standard star wavelength values.
st_flux : array
standard star flux values.
st_rv : float
radial velocity of standard star.
date : str, optional
observation date to use from observation dataframe. The default is 'med_flux'.
vsini_max : float, optional
maximum vsini (km/s) used for the grid of rotationally broadened templates. The default is 50.
w_min : float, optional
min wavelength to consider for observation data. The default is 5000.
w_max : float, optional
max wavelength to consider for observation data. The default is 5500.
output : bool, optional
plot outputs. The default is True.
rtn_err : bool, optional
also return the standard errors of radvel and vsini. The default is False.
Returns
-------
radvel : float
radial velocity of target star.
vsini : float
vsini of target star.
'''
print('now calculating radial velocity of target star using template spectra')
vt=st_rv #template radial velocity
rvmin=-100-vt #adjust range so that zero is centred
rvmax=100-vt
drv=0.1 #steps of rad velo
df_av_rv=df_av[df_av['wave'].between(w_min,w_max)] #range to take for RV
w0_rv=df_av_rv['wave'].values
f0_data_rv=df_av_rv[date].values
f0_data_rv=NormalizeData(f0_data_rv)
# w_smooth=np.arange(min(w0_rv),max(w0_rv),5) #set a larger step interval to undersample input data
# smooth=interp1d(w0_rv,f0_data_rv,fill_value='extrapolate')
# f0_data_rv=smooth(w_smooth)
# w0_rv=w_smooth
if max(st_wave) < 1000:
st_wave=st_wave*10
resample_st=interp1d(st_wave,st_flux,fill_value='extrapolate')
w0_st=np.arange(w_min-100,w_max+100,0.05) #larger range for template
f0_st_rv=resample_st(w0_st)
f0_st_rv=NormalizeData(f0_st_rv)
xcor,ycor=pyasl.crosscorrRV(w0_rv, f0_data_rv, w0_st, f0_st_rv, rvmin, rvmax,
drv, mode='doppler', skipedge=70, edgeTapering=1.)
#gaussian fit of the xcorr
gfit=fit_gauss(xcor,ycor)
g1_stderr=gfit.params['g1_center'].stderr
ycor=gfit.best_fit
ycor=ycor/median(ycor)
t_width=gfit.best_values['g1_sigma']
t_chi=gfit.redchi
fol=(ycor==max(ycor))
radvel=float(xcor[fol])+vt
rvs=[radvel]
t_widths=[t_width]
t_chis=[t_chi]
t_ycors=[ycor]
g1_stderrs=[g1_stderr]
if output == True:
#wl_plot(df_av_rv,plot_av=False,fs=USH.fig_size_n)
figure(figsize=USH.fig_size_n)
plot(w0_rv,(f0_data_rv),label='target')
plot(w0_st,(f0_st_rv),alpha=0.5,label='template')
legend(loc='upper left', fontsize=8, numpoints=1)
figure()
plot(xcor+vt,ycor)
#axvline(x=radvel,color='k',linewidth=0.5,linestyle='--')
for i in range(1,6):
w_min += 10
w_max += 10
df_av_rv=df_av[df_av['wave'].between(w_min,w_max)]
w0_rv=df_av_rv['wave'].values
f0_data_rv=df_av_rv[date].values
f0_data_rv=NormalizeData(f0_data_rv)
xcor,ycor=pyasl.crosscorrRV(w0_rv, f0_data_rv, w0_st, f0_st_rv, rvmin, rvmax, drv, mode='doppler', skipedge=70, edgeTapering=1.)
gfit=fit_gauss(xcor,ycor)
ycor=gfit.best_fit
ycor=ycor/median(ycor)
t_width=gfit.best_values['g1_sigma']
t_chi=gfit.redchi
fol=(ycor==max(ycor))
radvel_i=float(xcor[fol])+vt
rvs.append(radvel_i)
g1_stderrs.append(gfit.params['g1_center'].stderr)
t_widths.append(t_width)
t_chis.append(t_chi)
t_ycors.append(ycor)
if output == True:
plot(xcor+vt,ycor)
xlabel('Radial Velocity [km/s]')
ylabel('Normalised Xcor')
radvel=np.round(np.median(rvs),2)
if output == True:
axvline(x=radvel,color='k',linewidth=0.5,linestyle='--')
print ('av rad vel = %.2f km/s, sd = %.2f, std err = %.2f' %(radvel,np.std(rvs),(np.std(rvs)/np.sqrt(len(rvs)))))
#print ('rv cen mean std err= %.4f' %(np.mean(g1_stderr)))
best_t_width=t_widths[argmin(t_chis)]
best_t_ycors=t_ycors[argmin(t_chis)]
#now have the width of the xcor and values for the best fit, with lowest chisq
print('now calculating vsini of target star using template spectra')
if output == True:
fig, ax = subplots(1, 2,figsize=(10,5))#,gridspec_kw={'wspace':0})
fig.suptitle('V sin i calculation')
ax[0].set(xlabel='Rad. Vel. (km/s)',ylabel='Normalized Xcor')
ax[1].set(ylabel='Vsini (km/s)',xlabel='Xcorr width (km/s)')
#Parameters for the xcor and broadening:
rvmin=-100
rvmax=100
drv=0.1 #steps of rad velo
epsilon=0 #0.6 #limb darkening for models, good for young stars, Dahm+12, Hartmann+86
vm=-100.
vmm=100. #max and min vel for the fit.
#Now do some broadened spectra:
kms_list=[2,3,5,7,9,12,15,20,25,30,35,50,100]
kms_list=np.arange(2,vsini_max,np.round((vsini_max-2)/10,0))
'''create empty data frames for the values and col names'''
broad_flux=pd.DataFrame({'wave': w0_st})
col_names=['wave']
for kms in kms_list:
#f=pyasl.rotBroad(w0_rv,f0_st_rv, epsilon, kms, edgeHandling='firstlast')
f=pyasl.fastRotBroad(w0_st, f0_st_rv, epsilon, kms)
broad_flux=pd.concat([broad_flux,pd.Series(f)],axis=1)
col_names.append('%s km/s'%(kms))
#plot(w0,f)
broad_flux.columns=col_names
#Get xcors for all
x_xcor=pd.DataFrame()
y_xcor=pd.DataFrame()
s_widths=[]
for kms in broad_flux.columns[1:len(broad_flux.columns)]:
x1,y1=pyasl.crosscorrRV(w0_st, numpy.array(broad_flux.loc[:,kms]), w0_st, f0_st_rv, rvmin, rvmax, drv, mode='doppler', skipedge=200, edgeTapering=1.)
filter1=(x1>vm) & (x1<vmm)
x1=x1[filter1]
y1=y1[filter1]/median(y1)
#y1=
#gaussian fit of the xcorr
gfit=fit_gauss(x1,y1)
y1=gfit.best_fit
y1=NormalizeData(y1)
s_width=gfit.best_values['g1_sigma']
if output == True:
ax[0].plot(x1,y1,label=kms)
x_xcor=pd.concat([x_xcor,pd.Series(x1,name=kms)],axis=1)
y_xcor=pd.concat([y_xcor,pd.Series(y1,name=kms)],axis=1)
s_widths.append(s_width)
width=np.mean(t_widths)
width_err=np.std(t_widths)
p_vsini=polyfit(s_widths,kms_list,deg=2)
xx=arange(min(s_widths), max(s_widths), 0.1)
yy=polyval(p_vsini,xx)
av_vsini=[]
for i in t_widths:
vsini=np.round(polyval(p_vsini, i),2)
av_vsini.append(vsini)
vsini=np.round(np.mean(av_vsini),2)
if output == True:
ax[1].plot(s_widths,kms_list,'bo')
ax[1].plot(xx,yy,'k:')
#ax[1].hlines(vsini, 0, width)
#ax[1].vlines(width, 0, vsini)
for i in range(len(t_widths)):
ax[1].hlines(av_vsini[i], 0, t_widths[i])
ax[1].vlines(t_widths[i], 0, av_vsini[i])
ax[1].set_xlim(min(xx)-2,max(xx)+2)
#ax[0].plot(xcor+vt-radvel,NormalizeData(best_t_ycors),'b--', linewidth=3, label='b target star')
#ax[0].plot((xcorf+st_rv)-radvel,ycorf,'k--', linewidth=3, label='target star')
for i in t_ycors:
ax[0].plot(xcor+vt-radvel,NormalizeData(i),'b--', linewidth=1)
ax[0].legend(fontsize=8)
#print('best width = %.2f' %(best_t_width))
print('av width = %.2f , sd = %.2f' %(width,width_err))
print('av vsini = %.2f km/s , sd = %.2f, std err = %.2f' %(vsini,np.std(av_vsini),(np.std(av_vsini)/np.sqrt(len(av_vsini)))))
if vsini<2:
print('ERROR with vsini calculation, value %.2f < template vel.'%(vsini))
print('Try a different wavelength range, different instrument, or different template')
print('Setting vsini to 5 km/s')
vsini=5.0
USH.radvel=radvel
USH.vsini=vsini
if rtn_err==True:
return radvel, (np.std(rvs)/np.sqrt(len(rvs))), vsini,(np.std(av_vsini)/np.sqrt(len(av_vsini)))
else:
return radvel,vsini#,gfit
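# Illustrative usage sketch for get_rv_vsini (assumes df_av plus a template spectrum
# st_wave/st_flux with known radial velocity; st_rv and the wavelength window are placeholders):
# radvel, vsini = get_rv_vsini(df_av, st_wave, st_flux, st_rv=15.0, w_min=5000, w_max=5500)
# radvel, rv_err, vsini, vsini_err = get_rv_vsini(df_av, st_wave, st_flux, 15.0, rtn_err=True)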
def subtract_cont(df_av,av='med',poly=3,wl_win=0.5,coeff=31,output=False,return_cont=False,plot_x=[],plot_y=[]):
'''
For a given flux and wavelength, apply sigma clipping, undersampling and
savgol-golay filter to approximately remove the continuum.
This is only used for finding the emission lines and is not an accurate model of the continuum.
Parameters
----------
df_av : dataframe
wave and flux dataframe.
av : str, optional
obs to use. The default is 'med'.
poly : int, optional
polynomial order to use in SG filter. The default is 3.
wl_win : float, optional
wavelength window to use in the SG filter. The default is 0.5.
coeff : int, optional
coeff of SG filter. The default is 31.
output : bool, optional
plot outputs. The default is False.
return_cont : bool, optional
also return the smoothed continuum estimate. The default is False.
plot_x : list, optional
[min, max] wavelength limits for plotting. The default is the full range.
plot_y : list, optional
[min, max] flux limits for plotting. The default is the full range.
Returns
-------
f_flat : array
flux array of roughly continuum subtracted spectra.
f_sf_full : array, optional
smoothed continuum estimate, only returned if return_cont is True.
'''
w0=df_av.wave
if plot_x==[]:
plot_x=[min(w0),max(w0)]
if av=='med':
f0_data=df_av.med_flux
else:
f0_data=df_av.av_flux
if plot_y==[]:
plot_y=[min(f0_data),max(f0_data)]
'''clip the data then resample back to original w0'''
f0_mask=sigma_clip(f0_data,sigma_lower=2.5,sigma_upper=5)
w0_clip=w0[~f0_mask.mask]
f0_clip=f0_data[~f0_mask.mask]
clip_resample=interp1d(w0_clip,f0_clip,fill_value='extrapolate')
f0_sc=clip_resample(w0)
'''undersample the data to get rough continuum'''
w_smooth=np.arange(min(w0),max(w0),wl_win) #set a larger step interval to undersample input data
smooth=interp1d(w0,f0_sc,fill_value='extrapolate')
f_smooth=smooth(w_smooth)
#print('smooth std:',np.std(f_smooth))
'''apply Savitzky-Golay filter to get continuum
expand values back out to original wavelength intervals
subtract continuum from raw flux values'''
f_sf=savgol_filter(f_smooth,coeff,poly)
expand=interp1d(w_smooth,f_sf,fill_value='extrapolate')
f_sf_full=expand(w0)
f_flat=f0_data-f_sf_full
print('savgol std:',np.std(f_sf_full))
if output == True:
#ioff()
figure(figsize=USH.fig_size_l)
cla()
plot(w0,f0_data,linewidth=0.75,label='Input Average Flux')
#plot(w0,f0_sc,label='sigma clipped')
plot(w_smooth,f_smooth,linewidth=0.75,label='Undersampled')
plot(w_smooth,f_sf,linewidth=1,label='S-G Flter, order=%s'%(poly))
plot(w0,f_flat,linewidth=0.75,label='Continuum Subtracted')
ylabel('Flux')
xlabel('Wavelength [Angstroms]')
xlim(plot_x)
ylim(plot_y)
legend(loc='upper left', fontsize=8, numpoints=1)
tight_layout()
show()
#ion()
if return_cont==True:
return f_flat,f_sf_full
else:
return f_flat
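# Illustrative usage sketch for subtract_cont (assumes df_av holds 'wave' and 'med_flux' columns):
# f_flat = subtract_cont(df_av, av='med', poly=3, wl_win=0.5, output=True)
# f_flat, f_cont = subtract_cont(df_av, return_cont=True)  # also return the continuum estimate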
def subtract_templ(df_line_target,obs,df_line_templ,rv_shift=0,vsini=0,r=0,factor=1,shift=0,
fs=USH.fig_size_l,plot_subtracted=True,plot_divided=False):
'''subtract a rotationally broadened, veiled and rv-shifted Class III template from a target
line profile; returns the template-subtracted wavelength and flux arrays'''
#target spectrum normalised to 0 continuum
w0_target=df_line_target.wave.values
#f0_target=NormalizeDataMedian(df_line_target[obs]).values
f0_target=NormalizeData(df_line_target[obs]).values
#template spectrum normalised to 0 continuum
w0_templ=df_line_templ.wave.values
#f0_templ=NormalizeDataMedian(df_line_templ.med_flux).values
f0_templ=NormalizeData(df_line_templ.med_flux).values
#rv shift template spectrum
rvmin=-40.
rvmax=40.
drv=0.1
xcor,ycor=pyasl.crosscorrRV(w0_target, f0_target, w0_templ, f0_templ, rvmin, rvmax,
drv, mode='doppler', skipedge=100)
fol=(ycor==max(ycor))
radvel_diff=float(xcor[fol])
rv_corr=w0_templ * (radvel_diff + rv_shift) / clight
w0_templ=w0_templ+rv_corr
#rescale to lowest res bins
if len(df_line_target.wave.values) < len(df_line_templ.wave.values):
expand=interp1d(w0_templ,f0_templ,fill_value='extrapolate')
f0_templ=expand(w0_target)
w0_templ=w0_target
else:# len(df_line_target.wave.values) > len(df_line_templ.wave.values):
expand=interp1d(w0_target,f0_target,fill_value='extrapolate')
f0_target=expand(w0_templ)
w0_target=w0_templ
#vsini broaden template
f0_templ=pyasl.rotBroad(w0_templ, f0_templ,epsilon=0, vsini=vsini)
#veiling
f0_templ=(f0_templ + r ) / (1 + r)
#manual scaling factor and set continuum to 1
f0_templ=(f0_templ*factor)-median(f0_templ*factor)+1
f0_target=f0_target - median (f0_target)+1
f0_templ=f0_templ+shift
f0_subtracted=f0_target-f0_templ
f0_divided=f0_target/f0_templ
#chisq=chisquare(f0_templ,f0_target)
chisq=np.sum(((f0_target-f0_templ)/f0_target)**2)
print('chi sq = ',np.round(chisq,3))
figure(figsize=fs)
cla()
plot(w0_target,(f0_target),'k-',lw=2,label='Target')
plot(w0_templ,(f0_templ),color='darkorange',ls='--',lw=2,label='Class III Template')
if plot_subtracted==True:
plot(w0_target,f0_subtracted+1, 'b',lw=3,label='Residual')
if plot_divided==True:
plot(w0_target,f0_divided,label='divided',linewidth=3)
ylabel('Flux')
xlabel('Wavelength [$\AA$]')
legend(ncol=2,loc='upper center')
tight_layout()
#xlim(mean(w0_target)-5,mean(w0_target)+5)
show()
#xlim(6295,6305)
#ylim(0.3,1.5)
print(radvel_diff)
#w0_target=w0_target-rv_corr
return w0_target,f0_subtracted+1
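# Illustrative usage sketch for subtract_templ (assumes line dataframes for target and a
# Class III template around the same line; '58000.5' stands in for an observation column name
# and the vsini/veiling values are placeholders):
# w_res, f_res = subtract_templ(df_line_target, '58000.5', df_line_templ, vsini=10, r=0.5)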
def find_em_lines(df_av,f_flat,radvel,vsini,sigma=2.5,av='med',atol=0.5,wl_win=1,
output=False,line_id=False,prev_lines_only=False,xrange=[],xlim_min='min',xlim_max='max'):
'''
function to find EM lines for flat spectra
req. database of lines to compare with 'obs_wl_air' as the wavelength col
Parameters
----------
df_av : dataframe
average dataframe of wave and flux values, used to position em points on plots.
f_flat : array
output of subtract_cont(), flux values with continuum subtracted.
radvel : float
radial velocity of target star.
vsini : float
vsini of target star.
sigma : float, optional
sigma level for thresholding of emission lines. The default is 2.5.
av : str, optional
which average flux to use, 'med' or 'av'. The default is 'med'.
atol : float, optional
wavelength tolerance for matching observed peaks to reference lines. The default is 0.5.
wl_win : float, optional
wavelength step used to undersample the input spectra. The default is 1.
output : bool, optional
option to plot results. The default is False.
line_id : bool, optional
option to plot labels on matched emission lines. The default is False.
prev_lines_only : bool, optional
only match against previously observed lines. The default is False.
xrange : list, optional
[min, max] wavelength limits for matching and plotting; supersedes xlim_min and xlim_max. The default is the full wavelength range.
Returns
-------
em_matches : data frame
dataframe containing list of matched emission lines and properties such as centre, fwhm, snr.
em_match_common_Ek : data_frame
subset of em_matches of lines originating from common upper energy levels.
'''
tar_inst=USH.target + '_' + str(USH.instrument)
w0_ini=df_av.wave
if av=='med':
f0_data=df_av.med_flux
else:
f0_data=df_av.av_flux
'''undersample the data to get rough continuum'''
w0=np.arange(min(w0_ini),max(w0_ini),wl_win) #set a larger step interval to undersample input data
smooth=interp1d(w0_ini,f_flat,fill_value='extrapolate')
smooth2=interp1d(w0_ini,f0_data,fill_value='extrapolate')
f_flat=smooth(w0)
f0_data=smooth2(w0)
#print('smooth std:',np.std(f_smooth))
if xrange==[]:
xlim_min=min(w0)
xlim_max=max(w0)
else:
xlim_min=xrange[0]
xlim_max=xrange[1]
f_flat_clip=sigma_clip(f_flat,sigma_lower=20,sigma_upper=0.9*sigma)
#f_flat_clip=sigma_clip(f_flat,sigma_lower=1,sigma_upper=0.9*sigma)
w0_flat_clip=w0[f_flat_clip.mask]#take the corresponding wavelength values from the mask creating the clip
a=np.array(f_flat[f_flat_clip.mask])#take just the clipped flux values from flat spectra
a_f0=np.array(f0_data[f_flat_clip.mask])#clipped flux values from input data
em1=np.r_[True, a[1:] > a[:-1]] & np.r_[a[:-1] > a[1:], True] #find max points from nn 1d
em2=argrelextrema(a, np.greater,order=1)[0] #find max points using argrelextrema signal filter
w0_em=np.array(w0_flat_clip[em2]) #take index values of max points to get w0 of em lines
f0_em=a[em2]#flux value from flat spectra
f0_data_em=a_f0[em2]#flux values from input spectra
''' list of lines'''
if prev_lines_only==True:
line_table=USH.JCW_lines_NIST
wave='obs_wl_air'
print('Using previously observed lines only and',wave)
elif USH.instrument[0]=='XMM':
line_table=USH.xr_line_table
line_table=USH.xrs_line_table
wave='ritz_wl_vac'
print('Using x-ray line file and ',wave)
elif USH.instrument[0]=='Sol':
line_table=USH.sol_line_table
wave='ritz_wl_vac'
print('Using Sol line file and ',wave)
elif USH.instrument[0]=='COS' or USH.instrument[0]=='STIS':
line_table=USH.sol_line_table
wave='ritz_wl_vac'
print('Using Sol line file and ',wave)
else:
line_table=USH.line_table
line_table=USH.line_table_prev_obs
wave='obs_wl_air'
print('Using NIST list with previous observations indicated and',wave)
rv_shift=(line_table[wave].values * radvel) / clight #shift in the rest wl due to rv
vsini_shift=(line_table[wave].values * vsini) / clight #shift in the rest vl due to vsini (broadening)
rv_wl=line_table[wave].values + rv_shift #comparison wl to compare to observed, accounting for rv shift
line_table=pd.concat([pd.Series(np.around(rv_wl,decimals=2),name='rv_wl'),
line_table,
pd.Series(np.around(rv_shift,decimals=2),name='rv_shift'),
pd.Series(np.around(vsini_shift,decimals=2),name='vsini_shift')],axis=1)
#find lines that are close to obs wl and ref wl, filter out all lines that are not close
em_lines_mask=np.isclose(line_table['rv_wl'].values[:,None],w0_em, atol=atol).any(axis=1)
em_matches=line_table[em_lines_mask].reset_index(drop=True)
w0_matches=[]
f0_flat_matches=[]
f0_data_matches=[]
for wl in range(len(em_matches)):
w0_line_pos=np.isclose(em_matches.rv_wl.values[wl],w0_em,atol=atol)
w0_matches.append(w0_em[w0_line_pos][0])
f0_flat_matches.append(f0_em[w0_line_pos][0])
f0_data_matches.append(f0_data_em[w0_line_pos][0])
w0_matches=[round(x,2) for x in w0_matches]
em_matches=pd.concat([pd.Series(w0_matches,name='w0'),
pd.Series(((w0_matches - em_matches.rv_wl)*clight/em_matches.rv_wl),name='vel_diff'),
em_matches,
pd.Series(f0_flat_matches,name='f0_flat'),
pd.Series(f0_data_matches,name='f0_data'),
pd.Series((f0_flat_matches/std(f_flat_clip)),name='SNR')],axis=1)
if USH.instrument[0]!='XMM' and USH.instrument[0]!='COS':
#remove lines that are further away from the reference line than the shift induced by the vsini
em_matches=em_matches.drop(em_matches[abs(em_matches.w0 - em_matches.rv_wl) > 2* abs(em_matches.vsini_shift)].index)
#only keep values between x limits
em_matches=em_matches[em_matches['w0'].between(xlim_min,xlim_max)].sort_values(by=['w0','Acc'],ascending=[True, True])
#pd.set_option('mode.chained_assignment',None)
#check for lines with >1 match to databse
#for wl in em_matches.w0:
# if len(em_matches[em_matches.w0 == wl]) > 1:
# em_matches.element.loc[em_matches.w0 == wl] = em_matches[em_matches.w0 == wl].element.values[0]+'*'
em_matches['multi']=np.where([len(em_matches[em_matches.w0 == wl])>1 for wl in em_matches.w0],'yes','no')
em_matches['tar_inst_line_id']=tar_inst
em_matches['abs_vel_diff']=abs(em_matches['vel_diff'])
em_matches.sort_values(['w0','sp_num','prev','Aki'],ascending=[True, True,True, False],inplace=True)
#find em lines that are from the same upper energy level
check_Ek=np.column_stack(np.unique(em_matches.Ek,return_counts=True))
common_Ek=check_Ek[check_Ek[:,1]>1][:,0]
em_match_common_Ek=em_matches[em_matches.Ek.isin(common_Ek)]
if output == True:
figure(figsize=USH.fig_size_n)
cla()
#plot(w0,f0_data+1,'b',label='Input Spectra')
plot(w0,f_flat,'r',label='Continuum Sub.')
xlim(xlim_min,xlim_max)
#ylim(-2,10.0)
plot(w0,f_flat_clip,label='Threshold')
plot(w0_em,f0_em,'b.',label='Potential Em. Line')
plot(w0_matches,f0_flat_matches,'go',label='NIST Matched Em. Line')
if line_id==True:
[axvline(x=_x,color='k',linewidth=0.5,linestyle='--') for _x in w0_matches]
line=0.0 #this stops multiple labels being plotted for lines matched to more than one emission
for index,row in em_matches.iterrows():
if row.w0 != line:
line = row.w0
flux = row.f0_flat
name = '%s%s %.2f'%(row.element,row.sp_num,row.w0)
annotate(name,(line,flux),rotation=90,size=14,
xytext=(10, 10), # 3 points vertical offset
textcoords="offset pixels",
horizontalalignment='left', verticalalignment='bottom')
legend(loc='upper left', fontsize=8, numpoints=1)
ylabel('Normalised Flux')
xlabel('Wavelength [Angstroms]')
locator_params(axis='x', nbins=4)
#tight_layout()
show()
if output == True:
figure(figsize=USH.fig_size_n)
cla()
#plot(w0,f0_data+1,'b',label='Input Spectra')
plot(w0,f0_data,'r',label='Input spectra')
xlim(xlim_min,xlim_max)
#ylim(-2,10.0)
#plot(w0,f_flat_clip,label='Threshold')
plot(w0_em,f0_data_em,'b.',label='Potential Em. Line')
plot(w0_matches,f0_data_matches,'gx',label='NIST Matched Em. Line')
if line_id==True:
[axvline(x=_x,color='k',linewidth=0.5,linestyle='--') for _x in w0_matches]
line=0.0 #this stops multiple labels being plotted for lines matched to more than one emission
for index,row in em_matches.iterrows():
if row.w0 != line:
line = row.w0
flux = row.f0_data
name = '%s%s %.2f'%(row.element,row.sp_num,row.w0)
annotate(name,(line,flux),rotation=90,size=14,
xytext=(10, 10), # 3 points vertical offset
textcoords="offset pixels",
horizontalalignment='left', verticalalignment='bottom')
legend(loc='upper left', fontsize=8, numpoints=1)
ylabel('Flux')
xlabel('Wavelength [Angstroms]')
locator_params(axis='x', nbins=4)
#tight_layout()
show()
return em_matches,em_match_common_Ek
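# Illustrative usage sketch for find_em_lines (assumes f_flat from subtract_cont and
# radvel/vsini from get_rv_vsini; sigma=3 is a placeholder threshold):
# em_matches, em_match_common_Ek = find_em_lines(df_av, f_flat, radvel, vsini,
#                                                sigma=3, output=True, line_id=True)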
def plot_em_lines(df_av,em_matches,plot_av=False,fs=USH.fig_size_l):
'''
function to plot the average data frame with a list of identified lines
Parameters
----------
df_av : dataframe
dataframe of wave and flux values.
em_matches : dataframe
dataframe of emission line matches.
plot_av : bool, optional
plot average spectra. The default is False.
fs : tuple, optional
figure size. The default is USH.fig_size_l.
Returns
-------
plot.
'''
em_matches_cut=em_matches[em_matches.w0.between(min(df_av.wave),max(df_av.wave))]
f0_data_matches=em_matches_cut.f0_data
w0_matches=em_matches_cut.w0
figure(figsize=fs)
cla()
#plot(w0,f0_data+1,'b',label='Input Spectra')
plot(df_av.wave,df_av.med_flux,linewidth=1,label='median')
plot(w0_matches,f0_data_matches,'gx',label='NIST Em. Line from ref table')
[axvline(x=_x,color='k',linewidth=0.5,linestyle='--') for _x in w0_matches]
line=0.0 #this stops multiple labels being plotted for lines matched to more than one emission
for index,row in em_matches.iterrows():
if row.w0 != line:
line = row.w0
flux = row.f0_data
name = '%s%s %.2f'%(row.element,row.sp_num,row.w0)
annotate(name,(line,flux),rotation=90,size=14,
xytext=(10, 10), # 3 points vertical offset
textcoords="offset pixels",
horizontalalignment='left', verticalalignment='top')
legend(loc='upper left', fontsize=8, numpoints=1)
ylabel('Flux')
xlabel('Wavelength [Angstroms]')
locator_params(axis='x', nbins=4)
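# Illustrative usage sketch for plot_em_lines (assumes em_matches from find_em_lines):
# plot_em_lines(df_av, em_matches, fs=USH.fig_size_l)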
def fit_gauss(x,y,ngauss=1,neg=False,g1_cen=None,g2_cen=None,g3_cen=None,neg_cen=None,
g1_sig=None,g2_sig=None,g3_sig=None,neg_sig=None):
'''
Parameters
----------
x : array or list
wave.
y : array or list
flux.
ngauss : int, optional
number of positive Gaussians to fit, 1 to 3. The default is 1.
neg : bool, optional
Whether to include a negative Gaussian. The default is False.
g1_cen : list, optional
[min, max] bounds for the centre of Gaussian 1. The default is None.
g2_cen : list, optional
[min, max] bounds for the centre of Gaussian 2. The default is None.
g3_cen : list, optional
[min, max] bounds for the centre of Gaussian 3. The default is None.
neg_cen : list, optional
[min, max] bounds for the centre of the negative Gaussian. The default is None.
g1_sig : list, optional
[min, max] bounds for the sigma of Gaussian 1. The default is None.
g2_sig : list, optional
[min, max] bounds for the sigma of Gaussian 2. The default is None.
g3_sig : list, optional
[min, max] bounds for the sigma of Gaussian 3. The default is None.
neg_sig : list, optional
[min, max] bounds for the sigma of the negative Gaussian. The default is None.
Returns
-------
out : lmfit ModelResult
result of the composite Gaussian + linear fit.
'''
gauss1 = GaussianModel(prefix='g1_')
gauss2 = GaussianModel(prefix='g2_')
gauss3 = GaussianModel(prefix='g3_')
gauss4 = GaussianModel(prefix='g4_')
line1=LinearModel(prefix='line_')
pars_g1 = gauss1.guess(y, x=x)
pars_line = line1.guess(y, x=x)
pars_g2 = gauss2.guess(y, x=x)
pars_g3 = gauss3.guess(y, x=x)
pars_g4 = gauss4.guess(y, x=x ,negative=True)
if ngauss==1:
mod = gauss1 + line1
pars=pars_g1 + pars_line
#pars['g1_amplitude'].set(min=0)
#pars['g1_sigma'].set(max=100)
elif ngauss==2:
mod = gauss1 + gauss2 + line1
pars=pars_g1 + pars_g2 + pars_line
#pars['g1_amplitude'].set(min=0)
#pars['g2_amplitude'].set(min=0)
elif ngauss==3:
mod = gauss1 + gauss2 + gauss3 + line1
pars=pars_g1 + pars_g2 + pars_g3 +pars_line
#pars['g1_amplitude'].set(min=0)
#pars['g2_amplitude'].set(min=0)
#pars['g3_amplitude'].set(min=0)
#pars['line_slope'].set(max=0)
if neg==True:
mod += gauss4
pars += pars_g4
pars['g4_amplitude'].set(max=0)
if g1_cen != None:
pars['g1_center'].set(value=(g1_cen[0]+g1_cen[1])/2, min=g1_cen[0], max=g1_cen[1])
if g2_cen != None and ngauss==2:
pars['g2_center'].set(value=(g2_cen[0]+g2_cen[1])/2, min=g2_cen[0], max=g2_cen[1])
if g3_cen != None and ngauss==3:
pars['g3_center'].set(value=(g3_cen[0]+g3_cen[1])/2, min=g3_cen[0], max=g3_cen[1])
if neg_cen != None and neg==True:
pars['g4_center'].set(value=(neg_cen[0]+neg_cen[1])/2, min=neg_cen[0], max=neg_cen[1])
if g1_sig != None:
pars['g1_sigma'].set(value=(g1_sig[0]+g1_sig[1])/2, min=g1_sig[0], max=g1_sig[1])
if g2_sig != None and ngauss==2:
pars['g2_sigma'].set(value=(g2_sig[0]+g2_sig[1])/2, min=g2_sig[0], max=g2_sig[1])
if g3_sig != None and ngauss==3:
pars['g3_sigma'].set(value=(g3_sig[0]+g3_sig[1])/2, min=g3_sig[0], max=g3_sig[1])
if neg_sig != None and neg==True:
pars['g4_sigma'].set(value=(neg_sig[0]+neg_sig[1])/2, min=neg_sig[0], max=neg_sig[1])
out = mod.fit(y, pars, x=x, weights = 1/np.std(y),nan_policy='propagate') #use weights to obtain red. chi sq
return out
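# Illustrative usage sketch for fit_gauss (x and y stand for velocity and flux arrays, e.g.
# taken from a get_line_spec dataframe; the bound values are placeholders, not recommended defaults):
# out = fit_gauss(x, y, ngauss=2, neg=True, g1_cen=[-30, 30], neg_cen=[30, 200])
# print(out.fit_report())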
def gauss_stats(df_av_line,obs,ngauss=1,neg=False,em_row=999,target='temp',
gof_min=0.2,printout=False,output=False,savefig=False,subplot=False,plot_comps=True,legend=True,
reject_low_gof=False,reject_line_close=True,g1_cen=None,g2_cen=None,g3_cen=None,
vred=False,neg_cen=None,title='full',g1_sig=None,g2_sig=None,g3_sig=None,neg_sig=None,
sub_cont_fit=False):
'''
Parameters
----------
df_av_line : data frame
subset of average data frame around given emission line, result of get_line_spec().
obs : str
observation to use from df_av_line, one of the column names, e.g. av_flux or med_flux.
ngauss : int, optional
number of gauss to fit, 1 to 3. The default is 1.
neg : bool, optional
whether to force one of the gauss to be negative, for ngauss > 1. The default is False.
em_row : pd series, optional
row from em_matches containing matched info for outputs. The default is 999.
target : str, optional
target star name, for file saving. The default is 'temp'.
gof_min : float, optional
minimum goodness of fit value to keep lines. The default is 0.2.
printout : bool, optional
option to print details of fit to screen. The default is False.
output : bool, optional
option to plot results of fitting. The default is False.
savefig : bool, optional
option to save output plot. The default is False.
reject_low_gof : bool, optional
whether to reject fits that do not meet gof_min. The default is False.
Returns
-------
g_fit : lmfit output
details of the fit.
x : array
x values of the fit.
g_fit.best_fit : array
y values of best fit.
line_info : pd.Series
series of parameters of the fit.
'''
clight=astropy.constants.c.to('km/s').to_value()
#if a row from the emission line matching results table is passed, take needed values from it; if not, assign values and unknowns from the w0 position
try:
line=em_row.obs_wl_air #rest wavelength used for plot titles, not for 0 vel point
ele=em_row.element
sp_num=em_row.sp_num
J_i=em_row.J_i
J_k=em_row.J_k
#w0_vel=rv
# w0_vel=((em_row.w0 - line)*clight/line)-rv
SNR=em_row.SNR
except:
try:
line=em_row.ritz_wl_vac #rest wavelength used for plot titles, not for 0 vel point
ele=em_row.element
sp_num=em_row.sp_num
J_i=em_row.J_i
J_k=em_row.J_k
#w0_vel=rv
# w0_vel=((em_row.w0 - line)*clight/line)-rv
SNR=em_row.SNR
except:
line=em_row
ele='unk'
sp_num=0
J_i='0'
J_k='0'
#w0_vel=0
SNR=0
x=df_av_line['vel'].values
#y=df_av_line.iloc[:,2].values #change iloc to user input or average
y=df_av_line[obs].values #take observation date from function specified input
flux_scaled=False
scale_factor=1
if abs(mean(y)) < 1e-5: #for absolute flux units, remove the small order of mag for error calculations in the fitting
scale_factor=10**floor(log10(mean(y)))
y=y/scale_factor
flux_scaled=True
try:
#y -= min(y) #shift all the lines to be min 0 flux
#y-= 1
g_fit=fit_gauss(x,y,ngauss,neg,g1_cen=g1_cen,g2_cen=g2_cen,g3_cen=g3_cen,neg_cen=neg_cen,
g1_sig=g1_sig,g2_sig=g2_sig,g3_sig=g3_sig,neg_sig=neg_sig) #fit the linear model using above function
except:
#this is the exception for the fit failing, will just pass
print(line, obs,'has no data within specified range')
return None,x,y,None
gof=g_fit.redchi # / np.std(y)**2 #do not need to divide here as it is included in the weights in the fit_gauss() fn
#note on gof, as window of line increased, gof is larger because more either side of gauss is included, 0.1 is good for 0.6 range in wl, 0.3 good for 1.0 range in wl
y_base=g_fit.best_fit - min(g_fit.best_fit) # determine y values starting from 0 min
line_values= (g_fit.best_values['line_slope'] * x) + g_fit.best_values['line_intercept']
#y_sub_line=g_fit.best_fit - line_values # remove line component from final fit,
y_sub_line=y - line_values # remove line component from final fit,
if sub_cont_fit==True:
y=y_sub_line
g_fit.best_fit= g_fit.best_fit - line_values
#calculate intergrated flux just from flux above continuum, i.e. subtract line compnent before integrating
#int_flux=np.round(np.trapz(y_sub_line,x),4)
int_flux=np.trapz(y_sub_line,x) # check units here, need to do line * int_flux / clight for absolute fluxs
EW=line * (int_flux/median(line_values))/clight #in angstroms
int_flux=(line/10) * int_flux / clight #for XS flux units of erg/s/cm2/nm
#calculate asym from the intergrated flux above the zero baseline, comparing each side of peak
#centre_x=closest(x,g_fit.best_values['g1_center'])
centre_x=closest(x,0) #calculate wrt to 0 velocity rather than g1 centre
centre_x_idx=np.where(x==centre_x)[0][0]
centre_x1=closest(x,g_fit.best_values['g1_center'])
peak_y=float(g_fit.best_fit[centre_x_idx])
peak_y_base=y_base[centre_x_idx]
lhs_int_flux=np.trapz(y_sub_line[0:centre_x_idx],x[0:centre_x_idx])
rhs_int_flux=np.trapz(y_sub_line[centre_x_idx:-1],x[centre_x_idx:-1])
asym=lhs_int_flux/(lhs_int_flux + rhs_int_flux)
#asym=lhs_int_flux/(int_flux)
g1_stderr=g_fit.params['g1_center'].stderr
if (g1_stderr) is None:
g1_stderr=999#np.nan
g1_amp_stderr=g_fit.params['g1_amplitude'].stderr
if g1_amp_stderr is None:
g1_amp_stderr=999
try:
dely=g_fit.eval_uncertainty(sigma=3)
except:
dely=0
if ngauss==2:
centre_x2=closest(x,g_fit.best_values['g2_center'])
#centre_fit=closest(x,min(g_fit.best_fit))
g2_stderr=g_fit.params['g2_center'].stderr
if (g2_stderr) is None:
g2_stderr=999
if ngauss==3:
centre_x2=closest(x,g_fit.best_values['g2_center'])
g2_stderr=g_fit.params['g2_center'].stderr
if (g2_stderr) is None:
g2_stderr=999
centre_x3=closest(x,g_fit.best_values['g3_center'])
g3_stderr=g_fit.params['g3_center'].stderr
if (g3_stderr) is None:
g3_stderr=999
if neg==True:
centre_x4=closest(x,g_fit.best_values['g4_center'])
g4_stderr=g_fit.params['g4_center'].stderr
if (g4_stderr) is None:
g4_stderr=999
depth10_x=0
if vred==True:
try:
y_min=min(g_fit.best_fit[x>0]) #find min of redshifted absorption
y_min_idx=np.where(g_fit.best_fit==y_min)[0][0] #index of this value
x_min=float(x[y_min_idx]) #vel of min flux
line_y_min=line_values[y_min_idx]
depth=line_y_min-y_min #depth from continuum fit
depth10=depth*0.1
vred_max=closest(g_fit.best_fit[x>x_min],line_y_min+depth) #find where the absorption meets the continuum
vred_idx=np.where(g_fit.best_fit==vred_max)
vred_max_x=x[vred_idx]
depth10_y=closest(g_fit.best_fit[(x>x_min) & (x<vred_max_x)],line_y_min-depth10) #find 10% depth that is greater than min flux and less than where the absorption meets continuum
depth10_y_idx=np.where(g_fit.best_fit==depth10_y)[0][0]
depth10_x=x[depth10_y_idx]
except:
depth10_y=0
depth10_x=0
if vred==True: #this is actually for vblue... need to add a switch for vred==blue rather than adding everything in again
try:
y_min=min(g_fit.best_fit[x<0]) #find min of redshifted absorption
y_min_idx=np.where(g_fit.best_fit==y_min)[0][0] #index of this value
x_min=float(x[y_min_idx]) #vel of min flux
line_y_min=line_values[y_min_idx]
depth=line_y_min-y_min #depth from continuum fit
depth10=depth*0.1
vred_max=closest(g_fit.best_fit[x<x_min],line_y_min+depth) #find where the absorption meets the continuum
vred_idx=np.where(g_fit.best_fit==vred_max)
vred_max_x=x[vred_idx]
depth10_y=closest(g_fit.best_fit[(x<x_min) & (x>vred_max_x)],line_y_min-depth10) #find 10% depth that is greater than min flux and less than where the absorption meets continuum
depth10_y_idx=np.where(g_fit.best_fit==depth10_y)[0][0]
depth10_x=x[depth10_y_idx]
except:
depth10_y=0
depth10_x=0
#for reject_low_gof==True, also reject lines whose gauss centre are far from ref centre
#also reject lines where peak value is negative (may have to change this in future for abs lines)
#if g_fit.values['g1_center'] > w0_vel-10 and g_fit.values['g1_center'] < w0_vel+10 and g_fit.values['g1_fwhm'] < 30 and peak_y > 0:
if g_fit.values['g1_center'] > min(x) and g_fit.values['g1_center'] < max(x):# and g1_stderr < 900:# and int_flux > 0:# and abs(g_fit.best_values['line_slope']/peak_y)<0.02: #and g_fit.values['g1_fwhm'] < 50
line_close=True
elif reject_line_close==False:
line_close=True
else:
line_close=False
if reject_low_gof==True and gof < gof_min and line_close==True or reject_low_gof==False:
line_info=pd.Series(({'target':target,'mjd':obs,'gof':gof,'g1_cen':g_fit.values['g1_center'],'g1_stderr':g1_stderr, 'g1_sigma':g_fit.values['g1_sigma'],
'g1_fwhm':g_fit.values['g1_fwhm'],'g1_fwhm_stderr':g_fit.params['g1_fwhm'].stderr,'g1_amp':g_fit.values['g1_amplitude']*scale_factor,'g1_amp_stderr':g1_amp_stderr*scale_factor,
'peak':peak_y, 'asym':asym, 'int_flux':int_flux*scale_factor,'EW':EW,'med_cont':median(line_values)*scale_factor,'Vred':depth10_x}))
try:
line_info=pd.concat([line_info,em_row],axis=0)
except:
pass
if ngauss==2 or ngauss==3:
line_info2=pd.Series(({'g2_cen':g_fit.values['g2_center'],'g2_stderr':g2_stderr,
'g2_fwhm':g_fit.values['g2_fwhm'],'g2_fwhm_stderr':g_fit.params['g2_fwhm'].stderr,
'g2_amp':g_fit.values['g2_amplitude']*scale_factor,'g2_amp_stderr':g_fit.params['g2_amplitude'].stderr}))
line_info=pd.concat([line_info,line_info2],axis=0)
if ngauss==3:
line_info3=pd.Series(({'g3_cen':g_fit.values['g3_center'],'g3_stderr':g3_stderr,
'g3_fwhm':g_fit.values['g3_fwhm'],'g3_fwhm_stderr':g_fit.params['g3_fwhm'].stderr,
'g3_amp':g_fit.values['g3_amplitude']*scale_factor,'g3_amp_stderr':g_fit.params['g3_amplitude'].stderr}))
line_info=pd.concat([line_info,line_info3],axis=0)
if neg==True:
line_info4=pd.Series(({'g4_cen':g_fit.values['g4_center'],'g4_stderr':g4_stderr,
'g4_fwhm':g_fit.values['g4_fwhm'],'g4_fwhm_stderr':g_fit.params['g4_fwhm'].stderr,
'g4_amp':g_fit.values['g4_amplitude']*scale_factor,'g4_amp_stderr':g_fit.params['g4_amplitude'].stderr}))
line_info=pd.concat([line_info,line_info4],axis=0)
else:
line_info=None
pass_gof='N'
if gof < gof_min and line_close==True:
pass_gof='Y'
if printout==True:
print(g_fit.fit_report(min_correl=0.25))
#print('corrected chi^2: %.5f' %(g_fit.redchi / np.std(y)**2))
#print(np.sum(((y - g_fit.best_fit)**2) / g_fit.best_fit) / (g_fit.nfree))
#print(np.sum(((y - g_fit.best_fit)**2)/ np.std(y)**2) / (g_fit.nfree))
#print(np.sum(((y - g_fit.best_fit)**2)/ np.sqrt(np.mean(y**2))**2) / (g_fit.nfree))
if reject_low_gof==True and gof > gof_min:
print('GoF too low to produce output / save file')
if reject_low_gof==True and gof < gof_min and line_close==True or reject_low_gof==False:
if output==True or savefig==True:
ioff()
if subplot==True:
fig, ax = subplots(1, 2)#,figsize=(9,6))#,gridspec_kw={'wspace':0})
fig.suptitle('%s Fit of line at %.2f Angstroms Pass GoF:%s \n' %(obs,line,pass_gof),fontsize=8)
xlabel("common X")
ylabel("common Y")
for x1 in ax:
x1.set(xlabel='Velocity (km/s)', ylabel='Flux')
for x2 in ax:
x2.label_outer()
ax[0].set_title('GoF: %.5f, FWHM: %.2f \n Int.Flux: %.4f, Asym: %.4f' %(gof ,g_fit.values['g1_fwhm'], int_flux, asym),fontsize=6)
ax[0].plot(x,y, 'b--',lw=2,label='Input')
#ax[0].plot(x, g_fit.init_fit, 'k--', label='initial fit')
ax[0].plot(x, g_fit.best_fit, 'm-',lw=3, label='Best fit')
ax[0].axvline(x=centre_x,color='k',linewidth=0.5,linestyle='--')#centre
#try:
# ax[0].axvline(x=x[centre_x_idx-50],color='k',linewidth=0.5,linestyle='--')#asym window
# ax[0].axvline(x=x[centre_x_idx+50],color='k',linewidth=0.5,linestyle='--')#asym window
#except:
# pass
ax[0].legend(loc='upper right')
#figure(6,figsize=(5,5))
comps = g_fit.eval_components(x=x)
ax[1].plot(x, y, 'b')
ax[1].plot(x, comps['g1_'], 'g--', label='Gauss 1')
ax[1].plot(x, comps['line_'], 'k--', label='Cont.')
ax[1].axvline(x=centre_x,color='k',linewidth=0.5,linestyle='--')
if ngauss==1:
ax[1].set_title('Line: %s%s %s-%s, g1_cen= %.1f \n SNR: %.2f, line slope: %.2f ' %(
ele,sp_num,J_i,J_k,g_fit.best_values['g1_center'],SNR,abs(g_fit.best_values['line_slope']/peak_y)),fontsize=6)
if ngauss==2:
ax[1].set_title('Line: %s%s %s-%s, g1_cen= %.1f g2_cen=%.1f \n SNR: %.2f, line slope: %.2f ' %(
ele,sp_num,J_i,J_k,g_fit.best_values['g1_center'],g_fit.best_values['g2_center'],SNR,abs(g_fit.best_values['line_slope']/peak_y)),fontsize=6)
ax[1].plot(x, comps['g2_'], 'm--', label='Gauss comp. 2')
ax[1].axvline(x=centre_x2,color='k',linewidth=0.5,linestyle='--')
if neg==True:
ax[1].plot(x, comps['g4_'], 'c--', label='Neg Gauss comp')
ax[1].legend(loc='upper right')
elif subplot==False:
fig, ax = subplots(1,1,figsize=USH.fig_size_s)#,gridspec_kw={'wspace':0})
if title=='full':
fig.suptitle('%s fit of line at %.2f Angstroms, Pass GoF:%s \n' %(obs,line,pass_gof),fontsize=10)
if flux_scaled==False:
ax.set(xlabel='Velocity (km/s)', ylabel='Flux')
else:
ax.set(xlabel='Velocity (km/s)', ylabel='Flux x10^(%.0f)'%(log10(scale_factor)))
ax.plot(x,y, 'k-',lw=2,alpha=1,label='Input')
#ax[0].plot(x, g_fit.init_fit, 'k--', label='initial fit')
ax.plot(x, g_fit.best_fit, 'r-',lw=3, label='Best fit')
if plot_comps==True:
ax.axvline(x=centre_x1,color='k',linewidth=0.5,linestyle='--')#centre
ax.fill_between(x,g_fit.best_fit-dely,g_fit.best_fit+dely,color='#ABABAB',alpha=0.75,label='3-$\sigma$ uncertainty')
#ax.set_xlim(-250,250)
#ax.set_ylim(-0.05,0.25)
#locator_params(nbins=5)
ax.axvline(x=0,color='k',linewidth=0.75,linestyle='--')
#ax.axhline(y=0,color='k',linewidth=0.75,linestyle='--')
comps = g_fit.eval_components(x=x)
#ax[1].plot(x, y, 'b')
if plot_comps==True:
ax.plot(x, comps['g1_'], 'g--', label='Gauss 1')
ax.axvline(x=centre_x1,color='k',linewidth=0.5,linestyle='--')
if ngauss==1:
if title=='full':
ax.set_title('GoF: %.2e, FWHM: %.2f Int.Flux: %.2E, Asym: %.4f \n Line: %s%s %s-%s, g1_cen= %.1f$\pm$%.2f SNR: %.2f' %(
gof ,g_fit.values['g1_fwhm'], int_flux, asym,ele,sp_num,J_i,J_k,g_fit.best_values['g1_center'],g1_stderr,SNR),fontsize=8)
elif title=='simple':
ax.set_title('%s %s %.2f' %(obs,ele,line),fontsize=12)
if ngauss==2:
if title=='full':
ax.set_title('GoF: %.2e, g1_FWHM: %.2f, Int.Flux: %.2E, Asym: %.2f \n Line: %s%s %s-%s, G1_cen= %.1f$\pm$%.2f, G2_cen= %.1f$\pm$%.2f, SNR: %.2f' %(
gof ,g_fit.values['g1_fwhm'], int_flux, asym,ele,sp_num,J_i,J_k,g_fit.best_values['g1_center'],g1_stderr,g_fit.best_values['g2_center'],g2_stderr,SNR),fontsize=8)
elif title=='simple':
ax.set_title('%s %s %.2f' %(obs,ele,line),fontsize=12)
if plot_comps==True:
ax.plot(x, comps['g2_'], 'm--', label='Gauss comp. 2')
ax.axvline(x=centre_x2,color='k',linewidth=0.5,linestyle='--')
#ax.axvline(x=centre_fit,color='r',linewidth=0.5,linestyle='--')
if ngauss==3:
if title=='full':
ax.set_title('GoF: %.2e, g1_FWHM: %.2f, Int.Flux: %.2E, Asym: %.2f \n Line: %s%s %s-%s, G1_cen= %.1f$\pm$%.2f, G2_cen= %.1f$\pm$%.2f, G3_cen= %.1f$\pm$%.2f, SNR: %.2f' %(
gof ,g_fit.values['g1_fwhm'], int_flux, asym,ele,sp_num,J_i,J_k,g_fit.best_values['g1_center'],g1_stderr,g_fit.best_values['g2_center'],g2_stderr,g_fit.best_values['g3_center'],g3_stderr,SNR),fontsize=8)
elif title=='simple':
ax.set_title('%s %s %.2f' %(obs,ele,line),fontsize=12)
if plot_comps==True:
ax.plot(x, comps['g2_'], 'm--', label='Gauss comp. 2')
ax.axvline(x=centre_x2,color='k',linewidth=0.5,linestyle='--')
ax.plot(x, comps['g3_'], 'y--', label='Gauss comp. 3')
ax.axvline(x=centre_x3,color='k',linewidth=0.5,linestyle='--')
if neg==True:
if plot_comps==True:
ax.plot(x, comps['g4_'], 'c--', label='Gauss abs.')
ax.axvline(x=centre_x4,color='k',linewidth=0.5,linestyle='--')
if vred==True:
ax.plot(x_min,y_min,'yo',markersize=12)
ax.plot(depth10_x,depth10_y,'bo',markersize=12)
if ngauss==1:
if title=='full':
ax.set_title('GoF: %.2e, FWHM: %.2f Int.Flux: %.2E, Asym: %.4f \n Line: %s%s %s-%s, g1_cen= %.1f, neg_cen= %.1f, SNR: %.2f' %(
gof ,g_fit.values['g1_fwhm'], int_flux, asym,ele,sp_num,J_i,J_k,g_fit.best_values['g1_center'],g_fit.best_values['g4_center'],SNR),fontsize=8)
elif title=='simple':
ax.set_title('%s %s %.2f' %(obs,ele,line),fontsize=12)
if ngauss==2:
if title=='full':
ax.set_title('GoF: %.2e, g1_FWHM: %.2f, Int.Flux: %.2E, Asym: %.2f \n Line: %s%s %s-%s, G1_cen= %.1f, G2_cen= %.1f, neg_cen= %.1f, SNR: %.2f' %(
gof ,g_fit.values['g1_fwhm'], int_flux, asym,ele,sp_num,J_i,J_k,g_fit.best_values['g1_center'],g_fit.best_values['g2_center'],g_fit.best_values['g4_center'],SNR),fontsize=8)
elif title=='simple':
ax.set_title('%s %s %.2f' %(obs,ele,line),fontsize=12)
if plot_comps==True and sub_cont_fit==False:
ax.plot(x, comps['line_'], 'k--', label='Continuum')
if legend==True:
ax.legend(fontsize=12)
if output==True:
tight_layout()
show()
else:
close()
if savefig==True:
#output dir
#dirname=os.path.join('output_plots', target+'_'+timenow)
dirname=os.path.join('output_plots',timenow)
if not os.path.exists(dirname):
os.makedirs(dirname)
#fig.savefig(os.path.join(dirname,ele+'_'+str(np.round(line,2))+'_'+str(obs)+'.pdf'))#,bbox_inches="tight")
fig.savefig(os.path.join(dirname,target+'_'+ele+'_'+str(np.round(line,2))+'_'+str(obs)+'.png'),dpi=300)#,bbox_inches="tight")
#print('saving file',os.path.join(dirname,ele+str(np.round(line,2))+'.pdf'))
if output==False:
close()
ion()
return g_fit,x,g_fit.best_fit, line_info
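# Illustrative usage sketch for gauss_stats (assumes df_av_line from get_line_spec and an
# em_matches row for the same line; 'med_flux' is used here as the observation column):
# g_fit, x, best_fit, line_info = gauss_stats(df_av_line, 'med_flux', ngauss=2, neg=True,
#                                             em_row=em_matches.iloc[0], output=True)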
def get_line_results(em_matches,df_av_norm,line_date_list,target,w_range=0.6,title='full',radvel=USH.radvel,
ngauss=1,neg=False,vred=False,gof_min=0.2,reject_low_gof=True,reject_line_close=True,
g1_cen=None,g2_cen=None,g3_cen=None,neg_cen=None,g1_sig=None,g2_sig=None,g3_sig=None,neg_sig=None,
printout=False,output=False,savefig=False,plot_comps=True):
'''
Parameters
----------
em_matches : data frame
results from get_em_lines().
df_av_norm : data frame
normalised dataframe of flux values.
line_date_list : list
list of observation dates for available data, can use one obs but still within a list
eg ['med_flux'].
target : str
target star name for saving plots.
w_range : float, optional
wavelength range passed to get_line_spec(). The default is 0.6.
ngauss : int, optional
number of gauss to fit, 1 to 3. The default is 1.
neg : bool, optional
whether to force one of the gauss to be negative, for ngauss > 1. The default is False.
vred : bool, optional
option to also estimate the velocity at 10% depth of the absorption component. The default is False.
radvel : float, optional
radial velocity used to shift the rest wavelengths to velocity space. The default is USH.radvel.
gof_min : float, optional
min value for keeping fits. The default is 0.2.
reject_low_gof : bool, optional
option to only include lines below gof_min. The default is True.
reject_line_close : bool, optional
option to also reject fits whose g1 centre falls outside the fitted velocity window. The default is True.
printout : bool, optional
print out details of fits to screen. The default is False.
output : bool, optional
plot results of fitting on screen. The default is False.
savefig : bool, optional
save plot results. The default is False.
Returns
-------
em_line_date_results : data frame
data frame of gauss line results for all lines and all dates.
em_line_date_results_common_Ek : data frame
subset of above for lines originating from common upper energy levels.
'''
#check that list of lines within range of data, good for loaded in lists of lines covering larger ranges
em_matches=em_matches[em_matches.w0.between(min(df_av_norm.wave),max(df_av_norm.wave))]
if USH.instrument[0]=='XMM' or USH.instrument[0]=='Sol' or USH.instrument[0]=='COS' or USH.instrument[0]=='STIS':
wave='ritz_wl_vac'
else:
wave='obs_wl_air'
print('Fitting lines using',wave,' and radvel=',radvel)
line_results=pd.DataFrame()
for index,row in em_matches.iterrows():
line = row[wave]
df_av_line=get_line_spec(df_av_norm,line,vel_offset=radvel,w_range=w_range,vel=True)
for date in line_date_list:# ['med_flux']:#[df_av_line.columns[2]]:# df_av_line.columns[1:-3]:
out,x,y,line_info=gauss_stats(df_av_line,date,ngauss=ngauss,neg=neg,em_row=row,target=target,vred=vred,
gof_min=gof_min,printout=printout,output=output,savefig=savefig,title=title,
reject_low_gof=reject_low_gof,reject_line_close=reject_line_close,plot_comps=plot_comps,
g1_cen=g1_cen,g2_cen=g2_cen,g3_cen=g3_cen,neg_cen=neg_cen,
g1_sig=g1_sig,g2_sig=g2_sig,g3_sig=g3_sig,neg_sig=neg_sig)
line_results=pd.concat([line_results,line_info],axis=1,ignore_index=True)
#line_results=line_results.append(line_info,ignore_index=True)
line_results=line_results.T
#display(line_results)
try:
if neg==False:
if ngauss==3:
cols_to_move = ['mjd','w0','rv_wl',wave,'element','sp_num','int_flux','asym', 'g1_cen', 'g1_stderr','g2_cen','g2_stderr','g1_fwhm','g2_fwhm','g3_cen', 'g3_stderr','g3_fwhm', 'gof']
em_line_date_results= line_results[ cols_to_move + [ col for col in line_results.columns if col not in cols_to_move ] ]
elif ngauss==2:
cols_to_move = ['mjd','w0','rv_wl',wave,'element','sp_num','int_flux','asym', 'g1_cen', 'g1_stderr','g2_cen','g2_stderr','g1_fwhm','g2_fwhm', 'gof']
em_line_date_results= line_results[ cols_to_move + [ col for col in line_results.columns if col not in cols_to_move ] ]
else:
cols_to_move = ['mjd','w0','rv_wl',wave,'element','sp_num','int_flux','asym', 'g1_cen', 'g1_stderr','g1_fwhm', 'gof']
em_line_date_results= line_results[ cols_to_move + [ col for col in line_results.columns if col not in cols_to_move ] ]
else:
if ngauss==3:
cols_to_move = ['mjd','w0','rv_wl',wave,'element','sp_num','int_flux','asym', 'g1_cen', 'g1_stderr','g2_cen','g2_stderr','g1_fwhm','g2_fwhm','g3_cen', 'g3_stderr','g3_fwhm','g4_cen', 'g4_stderr','g4_fwhm', 'Vred', 'gof']
em_line_date_results= line_results[ cols_to_move + [ col for col in line_results.columns if col not in cols_to_move ] ]
elif ngauss==2:
cols_to_move = ['mjd','w0','rv_wl',wave,'element','sp_num','int_flux','asym', 'g1_cen', 'g1_stderr','g2_cen','g2_stderr','g1_fwhm','g2_fwhm','g4_cen', 'g4_stderr','g4_fwhm', 'Vred', 'gof']
em_line_date_results= line_results[ cols_to_move + [ col for col in line_results.columns if col not in cols_to_move ] ]
else:
cols_to_move = ['mjd','w0','rv_wl',wave,'element','sp_num','int_flux','asym', 'g1_cen', 'g1_stderr','g1_fwhm','g4_cen', 'g4_stderr','g4_fwhm', 'Vred','gof']
em_line_date_results= line_results[ cols_to_move + [ col for col in line_results.columns if col not in cols_to_move ] ]
print('total number of em lines fit:',len(em_line_date_results))
print('number of observed em lines fit:',len(unique(em_line_date_results.w0)))
except:
print('---no lines fit---')
em_line_date_results=0
#em_line_date_results=line_results
# # find em lines that are from the same upper energy level that were fitted
try:
em_line_date_results_no_dup=em_line_date_results.drop_duplicates(subset='w0')
check_Ek=np.column_stack(np.unique(em_line_date_results_no_dup.Ek,return_counts=True))
common_Ek=check_Ek[check_Ek[:,1]>1][:,0]
em_line_date_results_common_Ek=em_line_date_results_no_dup[em_line_date_results_no_dup.Ek.isin(common_Ek)]
print('number of em lines fit from same upper energy level:',len(em_line_date_results_common_Ek))
except:
em_line_date_results_common_Ek=0
print('no lines fit from same upper energy level')
if savefig==True:
print('saving files to output dir')
return em_line_date_results,em_line_date_results_common_Ek
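# Illustrative usage sketch for get_line_results (assumes em_matches from find_em_lines and a
# normalised flux dataframe; a single-entry date list with 'med_flux' is used as an example):
# em_line_date_results, em_line_common_Ek = get_line_results(em_matches, df_av_norm,
#     ['med_flux'], target=USH.target, w_range=0.6, ngauss=1, output=False, savefig=False)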
def periodogram_indiv(line_results,method='auto',cen_cor=False,plot_title=False):
'''Lomb-Scargle periodogram of the fitted g1 line centres for a single line,
using the per-observation results from get_line_results'''
#mjd=Time(em_line_dates.mjd,format='mjd')
date_range=max(line_results.mjd)-min(line_results.mjd)
mjd=line_results.mjd
fig, ax = subplots(2, 1)#,figsize=(9,6))#,gridspec_kw={'wspace':0})
if plot_title==True:
fig.suptitle('%s %.0f line at %.2f Angstroms'%(line_results.element.any(),line_results.sp_num.values[0],line_results.obs_wl_air.values[0]),fontsize=14)
centre=line_results.g1_cen
ax[0].plot(mjd,centre,'b.',label='Observed')
ax[0].set_xlabel('MJD')
ax[0].set_ylabel('Line centre [km/s]')
ls=LombScargle(mjd,centre)
frequency,power=ls.autopower(method=method)
ax[1].plot(1/frequency, power)
ax[1].set_xlabel('Period [day]')
ax[1].set_xlim([0,date_range])
ax[1].set_ylabel('Power')
tight_layout()
print('max power:',power.max())
print('F.A.P at max power:',ls.false_alarm_probability(power.max()))
print('power req. for F.A.P of 50%,10%:',ls.false_alarm_level([0.5,0.1]))
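# Example usage (illustrative; `em_line_date_results` is assumed to be the output of the
# line-fitting routine above, with mjd, g1_cen, element, sp_num and obs_wl_air columns):
#   ha = em_line_date_results[em_line_date_results.obs_wl_air == 6562.79]
#   periodogram_indiv(ha, method='auto', plot_title=True)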
def phase_period(em_line_date_results,linewav,mjd0,period=17,gofmin=0.2,filmin=-20,filmax=20,
maxper=100,minper=0,errmin=100,mjmin=0,mjmax=600000):
#The phase wrapping is done with the period you give by hand; the function also searches
#for a period itself and prints some info on it, but the plots and the sinusoid fit
#always use the period you provide. This allows a line to be wrapped and fitted at a
#chosen period even if that line does not show a significant period in terms of FAP,
#and, once the period is known, the lines can be fitted between two MJDs to check
#whether the fit (phase, amplitude, offset) changes in time.
period=period #period to check
gofmin=gofmin #filter the velo
filmin=filmin #only velos between those limits
filmax=filmax
maxper=maxper #if you want to find a period below a given limit
minper=minper #lower limit for the period search, useful to avoid the 1-day alias if it causes problems
mjd0=mjd0 #initial date, use the same for all lines from one source or phases will be odd!
errmin=errmin #maximum error to use value
mjmin=float(mjmin) #minimum mjd for selection
mjmax=float(mjmax) #maximum mjd for selection
mjd=em_line_date_results.mjd[em_line_date_results.obs_wl_air==linewav].astype(float)
velo=em_line_date_results.g1_cen[em_line_date_results.obs_wl_air==linewav]
veloerr=em_line_date_results.g1_stderr[em_line_date_results.obs_wl_air==linewav]
gof=em_line_date_results.gof[em_line_date_results.obs_wl_air==linewav]
ele=em_line_date_results.element[em_line_date_results.obs_wl_air==linewav].any()
linename=str(int(linewav)) #ele+' '+str(int(linewav)) # line name to use it for the figure labels
# -----------------------------------------------------------------------------------------------
#Function to fit
#RV=V0+A*sin(phi-phi0)
#where V0 is the velo offset, A is the amplitude (Vsini*sin(theta_spot))
#phi0 is the phase origin, phi the phase
def rv(x,v0,a0,phi0):
return v0+abs(a0)*sin(2.*np.pi*(x-phi0))
#initial params
pini=[0.,2.,0.]
# -----------------------------------------------------------------------------------------------
phaseu=numpy.mod(mjd/period,1.0)
phasetime=floor((mjd-mjd0)/period) #color with respect to first period
approxer=gof
#filter data according to gof, velo limits (use to exclude crazy values), max error, and mjd
fil=(gof<gofmin) & (velo>filmin) & (velo<filmax) & (veloerr<errmin) & (mjd>=mjmin) & (mjd<=mjmax)
phaseu=phaseu[fil]
velo=velo[fil]
veloerr=veloerr[fil]
phasetime=phasetime[fil]
mjdu=mjd[fil]
fig = figure()
#fig.set_size_inches(12, 8, forward=True)
subplots_adjust(left=0.15, bottom=0.12, right=0.98, top=0.97, wspace=0.25, hspace=0.25)
#ax=[-0.1, 2.1,-2, 2]
x=[-0.5,2.5]
y=[0,0]
# -----------------------------------------------------------------------------------------------
errorbar(phaseu,velo,yerr=veloerr, fmt='',ecolor='k', alpha=0.5, elinewidth=1,linewidth=0)
errorbar(phaseu+1,velo,yerr=veloerr, fmt='',ecolor='k', alpha=0.5, elinewidth=1,linewidth=0)
#scatter(phaseu, velo,s=250,c=phasetime, marker = 'o', edgecolor='none', alpha=0.8, cmap= cm.terrain,vmin=min(phasetime),vmax=max(phasetime))
#scatter(phaseu+1, velo,s=250,c=phasetime, marker = 'o', edgecolor='none', alpha=0.8, cmap= cm.terrain,vmin=min(phasetime),vmax=max(phasetime))
#phasetime=np.log(abs(phasetime))
scatter(phaseu, velo,s=150,c=phasetime, marker = 'o', edgecolor='none', alpha=0.7, cmap= cm.prism,vmin=min(phasetime),vmax=max(phasetime))
scatter(phaseu+1, velo,s=150,c=phasetime, marker = 'o', edgecolor='none', alpha=0.7, cmap= cm.prism,vmin=min(phasetime),vmax=max(phasetime))
# -----------------------------------------------------------------------------------------------
#plot and fit the curve
pout,cova=curve_fit(rv,phaseu,velo,pini)
#plot fit
xx=arange(-0.5,2.5,0.01)
yy=rv(xx,pout[0],pout[1],pout[2])
plot(xx,yy,'k:', linewidth=3, alpha=0.5)
#Because this gives a complete turn when x goes from 0-2pi, units of phi0 are "phase" and not degrees.
#Therefore, the total angle offset is 2*pi*phi0 in radians.
#Thus to convert the phase into degrees I will need to do 2*pi*phi0*180/pi = 360*phi0
#Get the uncertainty from the covariance matrix, I assume correlated part negligible
print('Fit; for the period given in argument',period,'d')
print('Offset',pout[0],'+-',sqrt(cova[0,0]), 'km/s')
print('Amplitude',pout[1],'+-',sqrt(cova[1,1]), 'km/s')
print('Phase',pout[2]*360.,'+-',sqrt(cova[2,2])*360, 'degrees')
# -----------------------------------------------------------------------------------------------
#Do Lomb Scargle
perio = np.linspace(0.1,700, 100000)
freq= 1 / perio
#use median errors to avoid issues with LSP
veloerr0=np.ones([np.size(velo)])*np.median(veloerr)
ls=LombScargle(mjdu,velo,veloerr0) #.power(freq)
f,ls=LombScargle(mjdu,velo,veloerr0).autopower(minimum_frequency=min(freq),maximum_frequency=max(freq),samples_per_peak=50)
autoperio=1/f
#plot(1/freq, ls)
ls0=LombScargle(mjdu,velo,veloerr0)
f0,p0=ls0.autopower(minimum_frequency=min(freq),maximum_frequency=max(freq),samples_per_peak=50)
#plot(1/f0,p0, alpha=0.2)
fap= ls0.false_alarm_probability(max(ls),method='baluev')
print('Line velocity periodicity:\n Estimated fap=', fap)
print(' For period:', autoperio[np.argmax(ls)],'d \n')
print(' Nr of datapoints:', np.size(velo),'\n')
level99=ls0.false_alarm_level(0.001)
#a=[min(perio),max(perio)]
#b=[level99,level99]
#plot(a,b,'k-',alpha=0.2)
#For period limit too, that tells me the significance of any other point I see by eye
#or to help getting rid of annoying features like the 1d period or the long period bump.
#fli=(perio<maxper)
fli=(autoperio<maxper) & (autoperio>minper)
lslim=ls[fli]
autoperiolim=autoperio[fli]
faplim= ls0.false_alarm_probability(max(lslim),method='baluev')
print('Best period within limits', minper, '-', maxper, 'd')
print('Line velocity: Estimated fap=', faplim)
print('For period:', autoperiolim[np.argmax(lslim)],'d \n')
# -----------------------------------------------------------------------------------------------
#legend(loc=4, fontsize=20)
#legend(loc='upper left', fontsize=15)
ax=[-0.1,2.1,min(velo)-0.5,max(velo)+0.5]
ytext='V (km/s) for '+ linename
xtext='Phase (for a ' + str(round(period,3)) + 'd period)'
#xlabel ('Phase (for a 7.41d period)')
xlabel(xtext)
ylabel (ytext)
axis(ax)
show()
peri=re.sub(r'\.','p',str(round(period,3)))
linename2=re.sub(' ','',linename) #use the line name without spaces for the filename
perithing='_p'+ peri + '_' + linename2 + '_gof_'+ str(round(gofmin,1))+'_mjd_' + str(round(average(mjdu))) + '_wrapped.png'
#namefig=re.sub('.csv',perithing,filename)
#savefig(namefig)
####
fig = figure()
#fig.set_size_inches(12, 8, forward=True)
#matplotlib.rc('font', family='serif',size=20)
subplots_adjust(left=0.13, bottom=0.12, right=0.98, top=0.97, wspace=0.25, hspace=0.25)
perithing2='_p'+ peri + '_' + linename + '_gof_'+ str(round(gofmin,1)) + '_mjd_' + str(round(average(mjdu))) + '_GLS.png'
#linename2 to avoid gap in filename
#namefig2=re.sub('.csv',perithing2,filename)
#mark the rotational period being checked
xx=autoperio[np.argmax(ls)]
x=[xx,xx]
y=[0,1]
plot(x,y,'k-',linewidth=5,alpha=0.2)
#plot(1/freq, ls)
plot(1/f,ls,linewidth=2)
#plot(1/f0,p0, 'r-',alpha=0.2)
#Limit to plot this is a bit arbitrary but zoomed for the features we see so far in stars
ax=[min(perio),maxper,0,max(ls)+0.1]
#ax=[min(perio),45,0,1]#max(ls)+0.1]
axis(ax)
powertext='Power for ' + linename
ylabel(powertext)
xlabel('Period (d)')
#savefig(namefig2)
show()
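# Example usage (illustrative values): wrap the velocities of the line at 6562.79 A to a
# 7.4 d period, keeping only fits with gof < 0.2 and velocities between -20 and 20 km/s:
#   phase_period(em_line_date_results, linewav=6562.79, mjd0=em_line_date_results.mjd.min(),
#                period=7.4, gofmin=0.2, filmin=-20, filmax=20)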
def bary_corr(mjd_insts,simbad_table,observatory='lasilla'):
coords=SkyCoord(simbad_table['RA'][0] +' '+ simbad_table['DEC'][0],unit=(u.hourangle, u.deg))
location=EarthLocation.of_site(observatory)
mjd_table=mjd_insts[mjd_insts.inst=='FEROS']
mjd_table.reset_index(drop=True,inplace=True)
bary_corr_list=[]
bary_corr_list1=[]
for mjd in mjd_table['mjd']:
obs_time=Time(mjd,format='mjd')
bary_corr,HJD=pyasl.helcorr(location.lon.deg,location.lat.deg,location.height.value,
coords.ra.deg,coords.dec.deg,obs_time.jd)
heli_corr,bary_corr1=pyasl.baryCorr(obs_time.jd,coords.ra.deg,coords.dec.deg)
bary_corr_list.append(bary_corr)
bary_corr_list1.append(bary_corr1)
mjd_bary=pd.DataFrame({'mjd':mjd_table['mjd'],
'DRS':mjd_table['bary'],
'bary_cor':bary_corr_list,
'diff':mjd_table['bary']-bary_corr_list})
return mjd_bary
def apply_bary_cor_FEROS(mjd_insts,simbad_table,em_line_date_results):
if 'av_flux' in str(em_line_date_results.mjd.values) or 'med_flux' in str(em_line_date_results.mjd.values):
print('ERROR: cannot apply barycentric correction to average flux')
return None
bary=bary_corr(mjd_insts,simbad_table)
em_line_date_results_bary=em_line_date_results.copy()
for i in range(len(bary)):
em_line_date_results_bary.loc[isclose(bary['mjd'][i],em_line_date_results_bary['mjd']),'g1_cen']=em_line_date_results_bary[isclose(bary['mjd'][i],em_line_date_results_bary['mjd'])].g1_cen+bary['diff'][i]
try:
em_line_date_results_bary.loc[isclose(bary['mjd'][i],em_line_date_results_bary['mjd']),'g2_cen']=em_line_date_results_bary[isclose(bary['mjd'][i],em_line_date_results_bary['mjd'])].g2_cen+bary['diff'][i]
except:
pass
try:
em_line_date_results_bary.loc[isclose(bary['mjd'][i],em_line_date_results_bary['mjd']),'Vred']=em_line_date_results_bary[isclose(bary['mjd'][i],em_line_date_results_bary['mjd'])].Vred+bary['diff'][i]
except:
pass
return em_line_date_results_bary
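# Example usage (illustrative; `mjd_insts` and `simbad_table` are assumed to come from the
# earlier data-loading steps of this module):
#   em_line_date_results_bary = apply_bary_cor_FEROS(mjd_insts, simbad_table, em_line_date_results)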
def fit_gauss_ab19(x,y,fit_nc=True,fit_bc=False,fit_hvc1=False,fit_abs=False,nc_cen=None,bc_cen=None,hvc1_cen=None,abs_cen=None,
nc_sig=None,bc_sig=None,hvc1_sig=None,abs_sig=None):
gauss1 = GaussianModel(prefix='nc_')
gauss2 = GaussianModel(prefix='bc_')
gauss3 = GaussianModel(prefix='hvc1_')
gauss4 = GaussianModel(prefix='abs_')
line1=LinearModel(prefix='cont_')
pars_g1 = gauss1.guess(y, x=x)
pars_line = line1.guess(y, x=x)
pars_g2 = gauss2.guess(y, x=x)
pars_g3 = gauss3.guess(y, x=x)
pars_g4 = gauss4.guess(y, x=x ,negative=True)
mod = line1
pars = pars_line
if fit_nc == True:
mod+= gauss1
pars+= pars_g1
pars['nc_amplitude'].set(min=0)
if fit_bc==True:
mod += gauss2
pars+= pars_g2
pars['bc_amplitude'].set(min=0)
if fit_hvc1==True:
mod += gauss3
pars += pars_g3
pars['hvc1_amplitude'].set(min=0)
if fit_abs==True:
mod += gauss4
pars += pars_g4
pars['abs_amplitude'].set(max=0)
###
if nc_cen != None and fit_nc==True:
pars['nc_center'].set(value=(nc_cen[0]+nc_cen[1])/2, min=nc_cen[0], max=nc_cen[1])
if bc_cen != None and fit_bc==True:
pars['bc_center'].set(value=(bc_cen[0]+bc_cen[1])/2, min=bc_cen[0], max=bc_cen[1])
if hvc1_cen != None and fit_hvc1==True:
pars['hvc1_center'].set(value=(hvc1_cen[0]+hvc1_cen[1])/2, min=hvc1_cen[0], max=hvc1_cen[1])
if abs_cen != None and fit_abs==True:
pars['abs_center'].set(value=(abs_cen[0]+abs_cen[1])/2, min=abs_cen[0], max=abs_cen[1])
if nc_sig != None and fit_nc==True:
pars['nc_sigma'].set(value=(nc_sig[0]+nc_sig[1])/2, min=nc_sig[0], max=nc_sig[1])
if bc_sig != None and fit_bc==True:
pars['bc_sigma'].set(value=(bc_sig[0]+bc_sig[1])/2, min=bc_sig[0], max=bc_sig[1])
if hvc1_sig != None and fit_hvc1==True:
pars['hvc1_sigma'].set(value=(hvc1_sig[0]+hvc1_sig[1])/2, min=hvc1_sig[0], max=hvc1_sig[1])
if abs_sig != None and fit_abs==True:
pars['abs_sigma'].set(value=(abs_sig[0]+abs_sig[1])/2, min=abs_sig[0], max=abs_sig[1])
out = mod.fit(y, pars, x=x, weights = 1/np.std(y)) #use weights to obtain red. chi sq
return out
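# Example usage (illustrative): fit a narrow plus broad component with bounded centres and
# widths, where x is velocity in km/s and y the corresponding flux array:
#   out = fit_gauss_ab19(x, y, fit_nc=True, fit_bc=True,
#                        nc_cen=[-20, 20], bc_cen=[-100, 100],
#                        nc_sig=[2, 20], bc_sig=[20, 150])
#   print(out.fit_report())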
def gauss_stats_ab19(df_av_line,obs,em_row=999,target='temp',
gof_min=0.2,printout=False,output=False,savefig=False,plot_comps=True,legend=True,
reject_low_gof=False,reject_line_close=False,title='full',
fit_nc=True,fit_bc=False,fit_hvc1=False,fit_abs=False,nc_cen=None,bc_cen=None,hvc1_cen=None,abs_cen=None,
nc_sig=None,bc_sig=None,hvc1_sig=None,abs_sig=None):
'''
Fit one observation of a single emission line in velocity space using the Gaussian
components defined in fit_gauss_ab19(), and return the lmfit result, the x array,
the best-fit model and a pandas Series of derived line parameters.
'''
clight=astropy.constants.c.to('km/s').to_value()
#if a row from the emission line matching results table is passed, take the needed values from it; if not, assign values and unknowns from the w0 position
try:
line=em_row.obs_wl_air #rest wavelength used for plot titles, not for 0 vel point
ele=em_row.element
sp_num=em_row.sp_num
J_i=em_row.J_i
J_k=em_row.J_k
#w0_vel=rv
# w0_vel=((em_row.w0 - line)*clight/line)-rv
SNR=em_row.SNR
except:
try:
line=em_row.ritz_wl_vac #rest wavelength used for plot titles, not for 0 vel point
ele=em_row.element
sp_num=em_row.sp_num
J_i=em_row.J_i
J_k=em_row.J_k
#w0_vel=rv
# w0_vel=((em_row.w0 - line)*clight/line)-rv
SNR=em_row.SNR
except:
line=em_row
ele='unk'
sp_num=0
J_i='0'
J_k='0'
#w0_vel=0
SNR=0
x=df_av_line['vel'].values
#y=df_av_line.iloc[:,2].values #change iloc to user input or average
y=df_av_line[obs].values #take observation date from function specified input
flux_scaled=False
scale_factor=1
if abs(mean(y)) < 1e-5: #for absolute flux units, remove the small order of mag for error calculations in the fitting
scale_factor=10**floor(log10(mean(y)))
y=y/scale_factor
flux_scaled=True
try:
#y -= min(y) #shift all the lines to be min 0 flux
g_fit=fit_gauss_ab19(x,y,fit_nc=fit_nc,fit_bc=fit_bc,fit_hvc1=fit_hvc1,fit_abs=fit_abs,nc_cen=nc_cen,bc_cen=bc_cen,hvc1_cen=hvc1_cen,abs_cen=abs_cen,
nc_sig=nc_sig,bc_sig=bc_sig,hvc1_sig=hvc1_sig,abs_sig=abs_sig) #fit the linear model using above function
except:
#this is the exception for the fit failing, will just pass
print(line, obs,'has no data within specified range')
return None,None,None,None
gof=g_fit.redchi # / np.std(y)**2 #do not need to divide here as it is included in the weights in the fit_gauss() fn
comps = g_fit.eval_components(x=x)
y_base=g_fit.best_fit - min(g_fit.best_fit) # determine y values starting from 0 min
line_values= (g_fit.best_values['cont_slope'] * x) + g_fit.best_values['cont_intercept']
y_sub_line=g_fit.best_fit - line_values # remove line component from final fit, for int flux
#y-=min(comps['cont_'])
#calculate integrated flux just from flux above continuum, i.e. subtract line component before integrating
#int_flux=np.round(np.trapz(y_sub_line,x),4)
int_flux=np.trapz(y_sub_line,x)# check units here, need to do line * int_flux / clight for absolute fluxes
EW=line * (int_flux/median(line_values))/clight #in angstroms
int_flux=(line*10) * int_flux / clight #for XS flux units of erg/s/cm2/nm
#calculate asym from the integrated flux above the zero baseline, comparing each side of peak
#centre_x=closest(x,g_fit.best_values['g1_center'])
centre_x=closest(x,0) #calculate wrt to 0 velocity rather than g1 centre
centre_x_idx=np.where(x==centre_x)[0][0]
#centre_x1=closest(x,g_fit.best_values['nc_center'])
peak_y=float(g_fit.best_fit[centre_x_idx])
peak_y_base=y_base[centre_x_idx]
lhs_int_flux=np.trapz(y_sub_line[0:centre_x_idx],x[0:centre_x_idx])
rhs_int_flux=np.trapz(y_sub_line[centre_x_idx:-1],x[centre_x_idx:-1])
if (lhs_int_flux + rhs_int_flux)!=0:
asym=lhs_int_flux/(lhs_int_flux + rhs_int_flux)
else:
asym=999
#asym=lhs_int_flux/(int_flux)
if fit_nc!=False:
centre_x1=closest(x,g_fit.best_values['nc_center'])
nc_stderr=g_fit.params['nc_center'].stderr
if (nc_stderr) is None:
nc_stderr=999#np.nan
nc_amp_stderr=g_fit.params['nc_amplitude'].stderr
if nc_amp_stderr is None:
nc_amp_stderr=999
try:
dely=g_fit.eval_uncertainty(sigma=3)
except:
dely=0
if fit_bc!=False:
centre_x2=closest(x,g_fit.best_values['bc_center'])
bc_stderr=g_fit.params['bc_center'].stderr
if (bc_stderr) is None:
bc_stderr=999
if fit_hvc1!=False:
centre_x3=closest(x,g_fit.best_values['hvc1_center'])
hvc1_stderr=g_fit.params['hvc1_center'].stderr
if (hvc1_stderr) is None:
hvc1_stderr=999
if fit_abs!=False:
centre_x4=closest(x,g_fit.best_values['abs_center'])
abs_stderr=g_fit.params['abs_center'].stderr
if (abs_stderr) is None:
abs_stderr=999
#for reject_low_gof==True, also reject lines whose gauss centre are far from ref centre
#also reject lines where peak value is negative (may have to change this in future for abs lines)
#if g_fit.values['nc_center'] > w0_vel-10 and g_fit.values['nc_center'] < w0_vel+10 and g_fit.values['nc_fwhm'] < 30 and peak_y > 0:
#if g_fit.values['nc_center'] > min(x) and g_fit.values['nc_center'] < max(x):# and nc_stderr < 900:# and int_flux > 0:# and abs(g_fit.best_values['line_slope']/peak_y)<0.02: #and g_fit.values['nc_fwhm'] < 50
if centre_x > min(x) and centre_x < max(x):# and nc_stderr < 900:# and int_flux > 0:# and abs(g_fit.best_values['line_slope']/peak_y)<0.02: #and g_fit.values['nc_fwhm'] < 50
line_close=True
elif reject_line_close==False:
line_close=True
else:
line_close=False
if reject_low_gof==True and gof < gof_min and line_close==True or reject_low_gof==False:
line_info=pd.Series(({'star':target,'int_flux':int_flux*scale_factor,'EW':EW, 'med_cont':median(line_values)*scale_factor,'mjd':obs,'gof':gof,'peak':peak_y, 'asym':asym,
'fit_nc':fit_nc,'fit_bc':fit_bc,'fit_hvc1':fit_hvc1}),dtype='object')
try:
line_info=pd.concat([em_row,line_info],axis=0)
except:
pass
if fit_nc==True:
line_info1=pd.Series(({'nc_cen':g_fit.values['nc_center'],'nc_stderr':nc_stderr,
'nc_fwhm':g_fit.values['nc_fwhm'],'nc_fwhm_stderr':g_fit.params['nc_fwhm'].stderr,
'nc_amp':g_fit.values['nc_amplitude']*scale_factor,'nc_amp_stderr':nc_amp_stderr*scale_factor}))
else:
line_info1=pd.Series(({'nc_cen':nan,'nc_stderr':nan,
'nc_fwhm':nan,'nc_fwhm_stderr':nan,
'nc_amp':nan,'nc_amp_stderr':nan}))
line_info=pd.concat([line_info,line_info1],axis=0)
if fit_bc==True:
line_info2=pd.Series(({'bc_cen':g_fit.values['bc_center'],'bc_stderr':bc_stderr,
'bc_fwhm':g_fit.values['bc_fwhm'],'bc_fwhm_stderr':g_fit.params['bc_fwhm'].stderr,
'bc_amp':g_fit.values['bc_amplitude']*scale_factor,'bc_amp_stderr':g_fit.params['bc_amplitude'].stderr}))
else:
line_info2=pd.Series(({'bc_cen':nan,'bc_stderr':nan,
'bc_fwhm':nan,'bc_fwhm_stderr':nan,
'bc_amp':nan,'bc_amp_stderr':nan}))
line_info=pd.concat([line_info,line_info2],axis=0)
if fit_hvc1==True:
line_info3=pd.Series(({'hvc1_cen':g_fit.values['hvc1_center'],'hvc1_stderr':hvc1_stderr,
'hvc1_fwhm':g_fit.values['hvc1_fwhm'],'hvc1_fwhm_stderr':g_fit.params['hvc1_fwhm'].stderr,
'hvc1_amp':g_fit.values['hvc1_amplitude']*scale_factor,'hvc1_amp_stderr':g_fit.params['hvc1_amplitude'].stderr}))
else:
line_info3=pd.Series(({'hvc1_cen':nan,'hvc1_stderr':nan,
'hvc1_fwhm':nan,'hvc1_fwhm_stderr':nan,
'hvc1_amp':nan,'hvc1_amp_stderr':nan}))
line_info=pd.concat([line_info,line_info3],axis=0)
if fit_abs==True:
line_info4=pd.Series(({'abs_cen':g_fit.values['abs_center'],'abs_stderr':abs_stderr,
'abs_fwhm':g_fit.values['abs_fwhm'],'abs_fwhm_stderr':g_fit.params['abs_fwhm'].stderr,
'abs_amp':g_fit.values['abs_amplitude']*scale_factor,'abs_amp_stderr':g_fit.params['abs_amplitude'].stderr}))
line_info=pd.concat([line_info,line_info4],axis=0)
else:
line_info=None
pass_gof='N'
if gof < gof_min and line_close==True:
pass_gof='Y'
if printout==True:
print(g_fit.fit_report(min_correl=0.25))
#print('corrected chi^2: %.5f' %(g_fit.redchi / np.std(y)**2))
#print(np.sum(((y - g_fit.best_fit)**2) / g_fit.best_fit) / (g_fit.nfree))
#print(np.sum(((y - g_fit.best_fit)**2)/ np.std(y)**2) / (g_fit.nfree))
#print(np.sum(((y - g_fit.best_fit)**2)/ np.sqrt(np.mean(y**2))**2) / (g_fit.nfree))
if reject_low_gof==True and gof > gof_min:
print('fit GoF did not pass the gof_min threshold; no output / saved file produced')
if reject_low_gof==True and gof < gof_min and line_close==True or reject_low_gof==False:
if output==True or savefig==True:
ioff()
fig, ax = subplots(1,1,figsize=USH.fig_size_s)#,gridspec_kw={'wspace':0})
if title=='full':
fig.suptitle('%s fit of line at %.2f Angstroms, Pass GoF:%s \n' %(obs,line,pass_gof),fontsize=10)
if flux_scaled==False:
ax.set(xlabel='Velocity (km/s)', ylabel='Flux')
else:
ax.set(xlabel='Velocity (km/s)', ylabel='Flux x10^(%.0f)'%(log10(scale_factor)))
ax.plot(x,y, c='black',ls='-',lw=2,label='Input')
#ax[0].plot(x, g_fit.init_fit, 'k--', label='initial fit')
ax.plot(x, g_fit.best_fit, c='fuchsia',ls='--',lw=2, label='Best fit')
ax.fill_between(x,g_fit.best_fit-dely,g_fit.best_fit+dely,color='#ABABAB',label='3-$\sigma$ uncertainty')
#ax[1].plot(x, y, 'b')
plot_title=('GoF: %.2e, Int.Flux: %.2E, Asym: %.4f, Line: %s%s %s-%s \n ' %(
gof, int_flux, asym,ele,sp_num,J_i,J_k))
if fit_bc==True:
if title=='full':
plot_title+=(' bc_cen= %.1f$\pm$%.2f, bc_fwhm= %.2f' %(
g_fit.best_values['bc_center'],bc_stderr, g_fit.values['bc_fwhm']))
elif title=='simple':
plot_title=('%s %s %.2f' %(obs,ele,line))
if plot_comps==True:
ax.plot(x, comps['bc_'], c='red',ls='--', label='BC')
ax.fill_between(x,comps['bc_'],color='red')
ax.axvline(x=centre_x2,color='k',linewidth=0.5,linestyle='--')
if fit_nc==True:
if title=='full':
plot_title+=('nc_cen= %.1f$\pm$%.2f, ' %(
g_fit.best_values['nc_center'],nc_stderr))
elif title=='simple':
plot_title=('%s %s %.2f' %(obs,ele,line))
if plot_comps==True:
if fit_bc==True:
ax.plot(x, comps['nc_'], c='deepskyblue',ls='--', label='NC')
ax.fill_between(x,comps['nc_'],color='deepskyblue')
else:
ax.plot(x, comps['nc_'], c='blue',ls='--', label='NC')
ax.fill_between(x,comps['nc_'],color='blue')
ax.axvline(x=centre_x1,color='k',linewidth=0.5,linestyle='--')
if fit_hvc1==True:
if title=='full':
plot_title+=('hvc1_cen= %.1f$\pm$%.2f, ' %(
g_fit.best_values['hvc1_center'],hvc1_stderr))
elif title=='simple':
plot_title=('%s %s %.2f' %(obs,ele,line))
if plot_comps==True:
ax.plot(x, comps['hvc1_'], c='lime',ls='--', label='HVC1')
ax.fill_between(x,comps['hvc1_'],color='lime')
ax.axvline(x=centre_x3,color='k',linewidth=0.5,linestyle='--')
if fit_abs==True:
if plot_comps==True:
ax.plot(x, comps['abs_'], 'c--', label='Abs.')
ax.axvline(x=centre_x4,color='k',linewidth=0.5,linestyle='--')
plot_title+=('abs_cen= %.1f$\pm$%.2f' %(
g_fit.best_values['abs_center'],abs_stderr))
ax.set_title(str(plot_title))
if plot_comps==True:
ax.plot(x, comps['cont_'], c='orange',ls='--', label='Continuum')
if legend==True:
ax.legend(fontsize=12)
if output==True:
#tight_layout()
show()
else:
close()
if savefig==True:
#output dir
#dirname=os.path.join('output_plots', target+'_'+timenow)
timenow_plot=time.strftime("%d_%b_%Y_%Hh", time.gmtime())
dirname=os.path.join('output_plots',timenow_plot)
if not os.path.exists(dirname):
os.makedirs(dirname)
plot_name=target+'_'+ele+'_'+str(np.round(line,2))+'_'+str(obs)
plot_name=plot_name+'_nc' if fit_nc == True else plot_name
plot_name=plot_name+'_bc' if fit_bc == True else plot_name
plot_name=plot_name+'_hvc1' if fit_hvc1 == True else plot_name
fig.savefig(os.path.join(dirname,plot_name+'.png'),dpi=300)#,bbox_inches="tight")
#print('saving file',os.path.join(dirname,ele+str(np.round(line,2))+'.pdf'))
if output==False:
close()
ion()
return g_fit,x,g_fit.best_fit, line_info
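# Example usage (illustrative; `df_av_line` is the velocity-space cut-out of one line, as
# returned by get_line_spec(), and `em_matches` a table of matched emission lines):
#   g_fit, x, best_fit, line_info = gauss_stats_ab19(df_av_line, 'med_flux',
#                                                    em_row=em_matches.iloc[0],
#                                                    fit_nc=True, fit_bc=True, output=True)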
def get_line_results_ab19(em_matches,df_av,line_date_list,target,w_range=0.6,radvel=USH.radvel,legend=True,title='full',
gof_min=0.2,reject_low_gof=True,reject_line_close=True,printout=False,output=False,savefig=False,plot_comps=True,
fit_nc=True,fit_bc=False,fit_hvc1=False,fit_abs=False,nc_cen=None,bc_cen=None,hvc1_cen=None,abs_cen=None,
nc_sig=None,bc_sig=None,hvc1_sig=None,abs_sig=None,savefile=False,filename='em_line_results_temp.csv'):
'''
Loop over all matched emission lines and the requested observation dates, fit each
with gauss_stats_ab19(), and return a DataFrame of the fit results together with the
subset of fitted lines that share a common upper energy level.
'''
#check that list of lines within range of data, good for loaded in lists of lines covering larger ranges
em_matches=em_matches[em_matches.w0.between(min(df_av.wave),max(df_av.wave))]
if USH.instrument[0]=='XMM' or USH.instrument[0]=='Sol' or USH.instrument[0]=='COS' or USH.instrument[0]=='STIS':
wave='ritz_wl_vac'
else:
wave='obs_wl_air'
print('Fitting lines using',wave,' and radvel=',radvel)
line_results=pd.DataFrame()
for index,row in em_matches.iterrows():
line = row[wave]
df_av_line=get_line_spec(df_av,line,vel_offset=radvel,w_range=w_range,vel=True)
df_av_line=df_av_line.query('vel < 233 or vel > 300')
for date in line_date_list:# ['med_flux']:#[df_av_line.columns[2]]:# df_av_line.columns[1:-3]:
out,x,y,line_info=gauss_stats_ab19(df_av_line,date,em_row=row,target=target,
gof_min=gof_min,printout=printout,output=output,savefig=savefig,title=title,
reject_low_gof=reject_low_gof,reject_line_close=reject_line_close,plot_comps=plot_comps,
fit_nc=fit_nc,fit_bc=fit_bc,fit_hvc1=fit_hvc1,fit_abs=fit_abs,nc_cen=nc_cen,bc_cen=bc_cen,hvc1_cen=hvc1_cen,
abs_cen=abs_cen,nc_sig=nc_sig,bc_sig=bc_sig,hvc1_sig=hvc1_sig,abs_sig=abs_sig)
line_results=pd.concat([line_results,line_info],axis=1,ignore_index=True)
#line_results=line_results.append(line_info,ignore_index=True)
line_results=line_results.T
#display(line_results)
try:
cols_to_move = ['star','mjd','w0',wave,'element','sp_num','int_flux','EW','asym', 'gof','fit_nc', 'fit_bc', 'fit_hvc1', 'nc_cen', 'nc_stderr',
'nc_fwhm', 'nc_fwhm_stderr', 'nc_amp', 'nc_amp_stderr', 'bc_cen',
'bc_stderr', 'bc_fwhm', 'bc_fwhm_stderr', 'bc_amp', 'bc_amp_stderr',
'hvc1_cen', 'hvc1_stderr', 'hvc1_fwhm', 'hvc1_fwhm_stderr', 'hvc1_amp',
'hvc1_amp_stderr']
em_line_date_results= line_results[ cols_to_move + [ col for col in line_results.columns if col not in cols_to_move ] ]
em_line_date_results=em_line_date_results.drop(columns=['mjd','rv_wl','vel_diff', 'prev_obs', 'intens',
'prev', 'rv_shift', 'vsini_shift', 'f0_flat',
'f0_data', 'SNR', 'multi', 'abs_vel_diff'])
print('total number of em lines fit:',len(em_line_date_results))
print('number of observed em lines fit:',len(unique(em_line_date_results.w0)))
except:
print('---no lines fit---')
em_line_date_results=0
return line_results,0
#em_line_date_results=line_results
# # find em lines that are from the same upper energy level that were fitted
try:
em_line_date_results_no_dup=em_line_date_results.drop_duplicates(subset='w0')
check_Ek=np.column_stack(np.unique(em_line_date_results_no_dup.Ek,return_counts=True))
common_Ek=check_Ek[check_Ek[:,1]>1][:,0]
em_line_date_results_common_Ek=em_line_date_results_no_dup[em_line_date_results_no_dup.Ek.isin(common_Ek)]
print('number of em lines fit from same upper energy level:',len(em_line_date_results_common_Ek))
except:
em_line_date_results_common_Ek=0
print('no lines fit from same upper energy level')
if savefig==True:
print('saving plots to output dir')
try:
if savefile==True:
timenow_bu=time.strftime("%d_%b_%Y_%H_%M", time.gmtime())
if os.path.exists(filename):
os.system('cp %s backups/%s'%(filename,timenow_bu+filename))
em_line_date_results.to_csv(filename, mode='a', header=not os.path.exists(filename),index=False)
print('saving results to output file: ',filename)
except:
pass
return em_line_date_results,em_line_date_results_common_Ek
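# Example usage (illustrative): fit narrow + broad components to every matched line for
# each observation date and collect the results:
#   em_ab19, em_ab19_common_Ek = get_line_results_ab19(em_matches, df_av, line_date_list,
#                                                      target='MyStar', fit_nc=True, fit_bc=True)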
|
justyncwREPO_NAMESTAR_MELTPATH_START.@STAR_MELT_extracted@STAR_MELT-main@[email protected]_END.py
|
{
"filename": "test_lasso.py",
"repo_name": "hjens/lasso_spectra",
"repo_path": "lasso_spectra_extracted/lasso_spectra-master/tests/test_lasso.py",
"type": "Python"
}
|
import pylab as pl
import numpy as np
import sys
sys.path.append('..')
import generalized_lasso as gl
import lasso
# Data generation
def get_test_model(n_features=100):
'''
Return a linear function with n_features random
coefficients plus noise
'''
b = np.random.random()*4.
coeffs = np.random.normal(size=(n_features, 1))*4.
for i in range(len(coeffs)): #Make some coefficients zero
if np.random.random() < 0.2:
coeffs[i] = 0.
def func(x):
return np.dot(x, coeffs) + b
func.coeffs = coeffs
return func
def get_random_dataset(func, n_features=100, n_datapoints=1e4,
noise_level=1.e-10):
'''
Generate a test set with the given dimensions,
using a test model.
Returns:
input_data - n_datapoints x n_features
output_data - n_datapoints
'''
input_data = np.random.random((int(n_datapoints), n_features))*10.
output_data = func(input_data) + np.random.normal(size=input_data.shape,
scale=noise_level)
return input_data, output_data
def sigmoid(x):
return 1./(1 + np.exp(-x))
# Tests ------------------
def test_cross_validation():
np.random.seed(1)
n_features = 3
func = get_test_model(n_features=n_features)
dataset_train, labels_train = get_random_dataset(func,
n_datapoints=5e3, n_features=n_features, noise_level=1.)
alphas = 10**np.linspace(-3, 2, 10)
# Fit scikit lasso
lasso_scikit = lasso.SKLasso(alpha=0.001, max_iter=1000)
lasso_scikit.fit_CV(dataset_train, labels_train[:,0], alphas=alphas,
n_folds=5)
# Fit tf lasso
gen_lasso = gl.GeneralizedLasso(alpha=0.001, max_iter=1000,
link_function=None)
gen_lasso.fit_CV(dataset_train, labels_train[:,0], alphas=alphas,
n_folds=5)
pl.figure()
pl.title('CV test. TF will have higher errors, since it is not exact')
pl.semilogx(alphas, gen_lasso.alpha_mse, 'o-', label='tf')
pl.semilogx(alphas, lasso_scikit.alpha_mse, '*-', label='scikit')
pl.legend(loc='best')
pl.xlabel('alpha')
pl.ylabel('cost')
pl.show()
def test_linear_regression():
np.random.seed(1)
n_features = 5
func = get_test_model(n_features=n_features)
dataset_train, labels_train = get_random_dataset(func,
n_datapoints=5e2, n_features=n_features, noise_level=1.e-10)
# Fit tf lasso
gen_lasso = gl.GeneralizedLasso(alpha=1.e-10, max_iter=5000,
link_function=None)
gen_lasso.fit(dataset_train, labels_train[:,0])
# Fit scikit lasso
lasso_scikit = lasso.SKLasso(alpha=1.e-10, max_iter=5000)
lasso_scikit.fit(dataset_train, labels_train[:,0])
# Print mse. This will be worse for TF, since it is not exact
print('Scikit mse', lasso_scikit.mse(dataset_train, labels_train[:,0]))
print('TF mse', gen_lasso.mse(dataset_train, labels_train[:,0]))
# Plot results
pl.plot(gen_lasso.coeffs, 'o-', label='tf fit')
pl.plot(func.coeffs, 'x-', label='true')
pl.plot(lasso_scikit.coeffs, '^', label='scikit')
pl.legend(loc='best')
pl.title('Test linear regression')
pl.ylabel('Coeff value')
pl.show()
def test_link_function():
np.random.seed(3) #This seed gives both positive and negative coefficients
n_features = 4
func = get_test_model(n_features=n_features)
dataset_train, labels_train = get_random_dataset(func,
n_datapoints=5e3, n_features=n_features, noise_level=1.e-10)
labels_train = sigmoid(labels_train)
# Fit tf lasso
gen_lasso = gl.GeneralizedLasso(alpha=1.e-10, max_iter=2000,
link_function='sigmoid', learning_rate=0.1)
gen_lasso.fit(dataset_train, labels_train[:,0])
# Predict values
predicted = gen_lasso.predict(dataset_train)
# Plot results
pl.subplot(131)
pl.plot(gen_lasso.coeffs, 'o-', label='tf fit')
pl.plot(func.coeffs, 'x-', label='true')
pl.legend(loc='best')
pl.title('Test sigmoid link function')
pl.ylabel('Coeff value')
pl.subplot(132)
pl.semilogy(gen_lasso.cost_history)
pl.ylabel('cost')
pl.xlabel('iterations')
pl.subplot(133)
pl.plot(predicted, labels_train, 'o')
pl.xlabel('Predicted')
pl.ylabel('True')
pl.show()
def test_regularization():
np.random.seed(1)
n_features = 5
func = get_test_model(n_features=n_features)
dataset_train, labels_train = get_random_dataset(func, n_datapoints=1e3,
n_features=n_features,
noise_level=1.e-10)
alphas = 10**np.linspace(-1, 3, 10)
alpha_coeffs = np.zeros([n_features, len(alphas)])
for i, alpha in enumerate(alphas):
gen_lasso = gl.GeneralizedLasso(alpha=alpha, max_iter=2000,
link_function=None)
gen_lasso.fit(dataset_train, labels_train[:,0])
alpha_coeffs[:,i] = gen_lasso.coeffs[:,0]
# Plot results
for i in range(n_features):
pl.semilogx(alphas, alpha_coeffs[i,:], label='coeff no %d' % i)
pl.semilogx(alphas, np.ones_like(alphas)*func.coeffs[i], ':')
pl.legend(loc='best')
pl.title('Test regularization')
pl.ylabel('Coeff value')
pl.xlabel('alpha')
pl.show()
def test_normalization():
'''
Not implemented yet!
'''
np.random.seed(1)
n_features = 5
func = get_test_model(n_features=n_features)
dataset_train, labels_train = get_random_dataset(func,
n_datapoints=5e2,
n_features=n_features,
noise_level=1.e-10)
# Fit tf lasso without normalization
gen_lasso = gl.GeneralizedLasso(alpha=1.e-10, max_iter=5000,
link_function=None, normalize=False)
gen_lasso.fit(dataset_train, labels_train[:, 0])
# Fit tf lasso with normalization
gen_lasso_norm = gl.GeneralizedLasso(alpha=1.e-10, max_iter=5000,
link_function=None, normalize=True)
gen_lasso_norm.fit(dataset_train, labels_train[:, 0])
# Plot results
pl.plot(gen_lasso.coeffs, 'o-', label='without norm')
pl.plot(gen_lasso_norm.coeffs, 'o-', label='with norm')
pl.plot(func.coeffs, 'x-', label='true')
pl.legend(loc='best')
pl.title('Test normalization')
pl.ylabel('Coeff value')
pl.show()
if __name__ == '__main__':
test_cross_validation()
test_linear_regression()
test_regularization()
test_link_function()
|
hjensREPO_NAMElasso_spectraPATH_START.@lasso_spectra_extracted@lasso_spectra-master@tests@[email protected]_END.py
|
{
"filename": "performance.ipynb",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/docs_nnx/guides/performance.ipynb",
"type": "Jupyter Notebook"
}
|
# Performance considerations
Currently, Flax [`nnx.jit`](https://flax.readthedocs.io/en/latest/api_reference/flax.nnx/transforms.html#flax.nnx.jit) traverses the object graph in pure Python, which is slow and adds overhead. To solve this, the Flax team will be developing a Rust extension called `flaxlib` to speed up some of the traversal logic in [`graph.py`](https://github.com/google/flax/blob/main/flax/nnx/graph.py), similar to how the JAX team resolved an analogous issue by introducing [`jaxlib`](https://jax.readthedocs.io/en/latest/installation.html#installation) for standard [JAX pytrees](https://jax.readthedocs.io/en/latest/key-concepts.html#pytrees) (refer to the first steps in [Flax PR #4196](https://github.com/google/flax/pull/4196)).
However, there are two things to consider:
* The overhead is only relevant for small models (refer to [Asynchronous dispatch](#asynchronous-dispatch)).
* You can remove the overhead by using [`jax.jit`](https://jax.readthedocs.io/en/latest/_autosummary/jax.jit.html) + [`flax.nnx.split`](https://flax.readthedocs.io/en/latest/api_reference/flax.nnx/graph.html#flax.nnx.split) / [`flax.nnx.merge`](https://flax.readthedocs.io/en/latest/api_reference/flax.nnx/graph.html#flax.nnx.merge) to stage out the traversal logic (refer to [Lowering the Python overhead](#lowering-the-python-overhead)).
## Asynchronous dispatch
In [benchmarks/nnx_simple_training.py](https://github.com/google/flax/blob/main/benchmarks/nnx_simple_training.py) we are increasing the layer width (features per layer) and measuring the total training time for the same model trained both with [`nnx.jit`](https://flax.readthedocs.io/en/latest/api_reference/flax.nnx/transforms.html#flax.nnx.jit) and [`jax.jit`](https://jax.readthedocs.io/en/latest/_autosummary/jax.jit.html).
As demonstrated in the graph below, after a certain model size the time spent in the traversal is completely absorbed by async dispatch. This happens when Python is able to finish the current for loop step, and reach the next `train_step` and JAX is still not done with the previous `train_step`.

This means that you only need to worry about the `nnx.jit` overhead for small models. If you are working with a small model, check out the next section to see how you can remove the overhead.
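A quick sanity check for your own model is to time a training step while blocking on the result, so that asynchronous dispatch cannot hide the work. The sketch below is illustrative: `train_step`, `model`, `optimizer`, `metrics`, `x` and `y` refer to the objects defined in the next section, while `time_per_step` and `n_steps` are names introduced here. If the measured time barely changes when you switch between the `nnx.jit` version and the `jax.jit` + `nnx.split`/`nnx.merge` version of `train_step`, your model is large enough that the traversal overhead is already hidden.
```python
import time

def time_per_step(step_fn, *args, n_steps=100):
  out = step_fn(*args)  # warm-up call so compilation time is not measured
  jax.block_until_ready(out)
  start = time.perf_counter()
  for _ in range(n_steps):
    out = step_fn(*args)
  jax.block_until_ready(out)  # wait for the last step so we time real work, not just dispatch
  return (time.perf_counter() - start) / n_steps

# e.g. time_per_step(train_step, model, optimizer, metrics, x, y)
```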
## Lowering the Python overhead
To remove the Python overhead, you can use regular `jax.jit` in combination with `nnx.split` and `nnx.merge` to stage out the traversal logic.
To learn how to do this, let’s first create the following simple `Model`:
```python
from flax import nnx
import jax
import jax.numpy as jnp
import optax
class Model(nnx.Module):
def __init__(self, din, dmid, dout, rngs: nnx.Rngs):
self.linear = nnx.Linear(din, dmid, rngs=rngs)
self.bn = nnx.BatchNorm(dmid, rngs=rngs)
self.dropout = nnx.Dropout(0.2, rngs=rngs)
self.linear_out = nnx.Linear(dmid, dout, rngs=rngs)
def __call__(self, x):
x = nnx.relu(self.dropout(self.bn(self.linear(x))))
return self.linear_out(x)
```
Next, let’s create a `train_step()` function that uses [`nnx.jit`](https://flax.readthedocs.io/en/latest/api_reference/flax.nnx/transforms.html#flax.nnx.jit), taking in the `model`, `optimizer`, and `metrics`, all of which are Flax NNX objects:
```python
model = Model(2, 64, 3, rngs=nnx.Rngs(0)) # eager initialization
optimizer = nnx.Optimizer(model, optax.adam(1e-3)) # reference sharing
metrics = nnx.MultiMetric(
loss=nnx.metrics.Average('loss'),
)
@nnx.jit # <== currently slow
def train_step(model, optimizer, metrics, x, y):
def loss_fn(model):
y_pred = model(x) # call methods directly
return ((y_pred - y) ** 2).mean()
loss, grads = nnx.value_and_grad(loss_fn)(model)
optimizer.update(grads) # in-place updates
metrics.update(loss=loss)
return loss
for _ in range(10):
x, y = jnp.ones((32, 2)), jnp.zeros((32, 3))
loss = train_step(model, optimizer, metrics, x, y)
```
To speed this up, before starting the training loop we can use [`nnx.split`](https://flax.readthedocs.io/en/latest/api_reference/flax.nnx/graph.html#flax.nnx.split) over all the Flax NNX objects that are inputs to `train_step()` to create `graphdef` and `state` pytrees that are faster to traverse.
Next, we change `train_step()` to accept `graphdef` and `state`, and use [`nnx.merge`](https://flax.readthedocs.io/en/latest/api_reference/flax.nnx/graph.html#flax.nnx.merge) and [`nnx.split`](https://flax.readthedocs.io/en/latest/api_reference/flax.nnx/graph.html#flax.nnx.split) at the beginning and the end of `train_step()` to switch back and forth between the objects and their pytree representations. And even though `nnx.split` and `nnx.merge` are slow, it doesn't matter because they will run only once during tracing.
With this in place, we can change the `train_step()` function to use `jax.jit` instead of `nnx.jit`:
```python
model = Model(2, 64, 3, rngs=nnx.Rngs(0)) # eager initialization
optimizer = nnx.Optimizer(model, optax.adamw(1e-3)) # reference sharing
metrics = nnx.MultiMetric(
loss=nnx.metrics.Average('loss'),
)
# split before training loop
graphdef, state = nnx.split((model, optimizer, metrics))
@jax.jit # regular JAX
def train_step(graphdef, state, x, y):
# merge at the beginning of the function
model, optimizer, metrics = nnx.merge(graphdef, state)
def loss_fn(model):
y_pred = model(x) # call methods directly
return ((y_pred - y) ** 2).mean()
loss, grads = nnx.value_and_grad(loss_fn)(model)
optimizer.update(grads)
metrics.update(loss=loss)
# split at the end of the function
_, state = nnx.split((model, optimizer, metrics))
# return new state
return state, loss
for _ in range(10):
x, y = jnp.ones((32, 2)), jnp.zeros((32, 3))
state, loss = train_step(graphdef, state, x, y)
# update objects after training
nnx.update((model, optimizer, metrics), state)
```
Notice that we only do this for `jit`. You can still use other [Flax transforms](https://flax.readthedocs.io/en/latest/guides/transforms.html#transformations) like [`nnx.value_and_grad`](https://flax.readthedocs.io/en/latest/api_reference/flax.nnx/transforms.html#flax.nnx.value_and_grad) shown in the above example since their overhead is already absorbed by the outer `jit`.
And after the training loop is done (or whenever it is needed), we can use Flax [`nnx.update`](https://flax.readthedocs.io/en/latest/api_reference/flax.nnx/graph.html#flax.nnx.update) to update Flax NNX objects like `model`, `optimizer`, and `metrics` to a new `state`.
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@docs_nnx@[email protected]@.PATH_END.py
|
{
"filename": "gaussian_mixture.py",
"repo_name": "CobayaSampler/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/cobaya/likelihoods/gaussian_mixture/gaussian_mixture.py",
"type": "Python"
}
|
"""
.. module:: likelihoods.gaussian_mixture
:Synopsis: Gaussian mixture likelihood
:Author: Jesus Torrado
"""
# Global
from typing import Any
import numpy as np
from scipy.stats import multivariate_normal, uniform, random_correlation
from scipy.special import logsumexp
# Local
from cobaya.likelihood import Likelihood
from cobaya.log import LoggedError
from cobaya.mpi import share_mpi, is_main_process
from cobaya.typing import InputDict, Union, Sequence
from cobaya.functions import inverse_cholesky
derived_suffix = "_derived"
class GaussianMixture(Likelihood):
"""
Gaussian likelihood.
"""
file_base_name = 'gaussian_mixture'
# yaml variables
means: Union[Sequence, np.ndarray]
covs: Union[Sequence, np.ndarray]
weights: Union[np.ndarray, float]
derived: bool
input_params_prefix: str
output_params_prefix: str
def d(self):
"""
Dimension of the input vector.
"""
return len(self.input_params)
def initialize_with_params(self):
"""
Initializes the gaussian distributions.
"""
self.log.debug("Initializing")
# Load mean and cov, and check consistency of n_modes and dimensionality
if self.means is not None and self.covs is not None:
# Wrap them in the right arrays (n_mode, param) and check consistency
self.means = np.atleast_1d(self.means)
while len(np.array(self.means).shape) < 2:
self.means = np.array([self.means])
mean_n_modes, mean_dim = self.means.shape
self.covs = np.atleast_1d(self.covs)
while len(np.array(self.covs).shape) < 3:
self.covs = np.array([self.covs])
cov_n_modes, cov_dim, cov_dim_2 = self.covs.shape
if cov_dim != cov_dim_2:
raise LoggedError(
self.log, "The covariance matrix(/ces) do not appear to be square!\n"
"Got %r", self.covs)
if mean_dim != cov_dim:
raise LoggedError(
self.log,
"The dimensionalities guessed from mean(s) and cov(s) do not match!")
if mean_n_modes != cov_n_modes:
raise LoggedError(
self.log,
"The numbers of modes guessed from mean(s) and cov(s) do not match!")
if mean_dim != self.d():
raise LoggedError(
self.log,
"The dimensionality is %d (guessed from given means and covmats) "
"but was passed %d parameters instead. " +
("Maybe you forgot to specify the prefix by which to identify them?"
if self.input_params_prefix else ""), mean_dim,
len(self.input_params))
self.n_modes = mean_n_modes
if self.derived and len(self.output_params) != self.d() * self.n_modes:
raise LoggedError(
self.log,
"The number of derived parameters must be equal to the dimensionality"
" times the number of modes, i.e. %d x %d = %d, but was given %d "
"derived parameters.", self.d(), self.n_modes,
self.d() * self.n_modes,
len(self.output_params))
elif not self.derived and self.output_params:
raise LoggedError(
self.log,
"Derived parameters were requested, but 'derived' option is False. "
"Set to True and define as many derived parameters as the "
"dimensionality times the number of modes, i.e. %d x %d = %d.",
self.d(), self.n_modes, self.d() * self.n_modes)
else:
raise LoggedError(
self.log, "You must specify both a mean (or a list of them) and a "
"covariance matrix, or a list of them.")
self.gaussians = [multivariate_normal(mean=mean, cov=cov)
for mean, cov in zip(self.means, self.covs)]
if self.weights:
self.weights = np.asarray(self.weights)
if not len(self.weights) == len(self.gaussians):
raise LoggedError(self.log,
"There must be as many weights as components.")
if not np.isclose(sum(self.weights), 1):
self.weights = self.weights / sum(self.weights)
self.log.warning(
"Weights of components renormalized to %r", self.weights)
else:
self.weights = 1 / len(self.gaussians)
# Prepare the transformation(s) for the derived parameters
self.inv_choleskyL = [inverse_cholesky(cov) for cov in self.covs]
def logp(self, **params_values):
"""
Computes the log-likelihood for a given set of parameters.
"""
self.wait()
# Prepare the vector of sampled parameter values
x = np.array([params_values[p] for p in self.input_params])
# Fill the derived parameters
derived = params_values.get("_derived")
if derived is not None:
n = self.d()
for i in range(self.n_modes):
standard = self.inv_choleskyL[i].dot(x - self.means[i])
derived.update(
(p, v) for p, v in
zip(list(self.output_params)[i * n:(i + 1) * n], standard))
# Compute the likelihood and return
if len(self.gaussians) == 1:
return self.gaussians[0].logpdf(x)
else:
return logsumexp([gauss.logpdf(x) for gauss in self.gaussians],
b=self.weights)
# Scripts to generate random means and covariances #######################################
def random_mean(ranges, n_modes=1, mpi_warn=True, random_state=None):
"""
Returns a uniformly sampled point (as an array) within a list of bounds ``ranges``.
The output of this function can be used directly as the value of the option ``mean``
of the :class:`likelihoods.gaussian`.
If ``n_modes>1``, returns an array of such points.
"""
if not is_main_process() and mpi_warn:
print("WARNING! "
"Using with MPI: different process will produce different random results.")
mean = np.array([uniform.rvs(loc=r[0], scale=r[1] - r[0], size=n_modes,
random_state=random_state) for r in ranges])
mean = mean.T
if n_modes == 1:
mean = mean[0]
return mean
def random_cov(ranges, O_std_min=1e-2, O_std_max=1, n_modes=1,
mpi_warn=True, random_state=None):
"""
Returns a random covariance matrix, with standard deviations sampled log-uniformly
between ``O_std_min`` and ``O_std_max`` times the length of each parameter range, and
a random correlation matrix drawn with ``scipy.stats.random_correlation``.
The output of this function can be used directly as the value of the option ``cov`` of
the :class:`likelihoods.gaussian`.
If ``n_modes>1``, returns a list of such matrices.
"""
if not is_main_process() and mpi_warn:
print("WARNING! "
"Using with MPI: different process will produce different random results.")
dim = len(ranges)
scales = np.array([r[1] - r[0] for r in ranges])
cov = []
for _ in range(n_modes):
stds = scales * 10 ** (uniform.rvs(size=dim, loc=np.log10(O_std_min),
scale=np.log10(O_std_max / O_std_min),
random_state=random_state))
this_cov = np.diag(stds).dot(
(random_correlation.rvs(dim * stds / sum(stds), random_state=random_state)
if dim > 1 else np.eye(1)).dot(np.diag(stds)))
# Symmetrize (numerical noise is usually introduced in the last step)
cov += [(this_cov + this_cov.T) / 2]
if n_modes == 1:
cov = cov[0]
return cov
def info_random_gaussian_mixture(ranges, n_modes=1, input_params_prefix="",
output_params_prefix="", O_std_min=1e-2, O_std_max=1,
derived=False, mpi_aware=True,
random_state=None, add_ref=False):
"""
Wrapper around ``random_mean`` and ``random_cov`` to generate the likelihood and
parameter info for a random Gaussian.
If ``mpi_aware=True``, it draws the random stuff only once, and communicates it to
the rest of the MPI processes.
If ``add_ref=True`` (default: False) adds a reference pdf for the input parameters,
provided that the gaussian mixture is unimodal (otherwise raises ``ValueError``).
"""
cov: Any
mean: Any
if is_main_process() or not mpi_aware:
cov = random_cov(ranges, n_modes=n_modes, O_std_min=O_std_min,
O_std_max=O_std_max, mpi_warn=False, random_state=random_state)
if n_modes == 1:
cov = [cov]
# Make sure it stays away from the edges
mean = [[]] * n_modes
for i in range(n_modes):
std = np.sqrt(cov[i].diagonal())
factor = 3
ranges_mean = [[r[0] + factor * s, r[1] - factor * s] for r, s in
zip(ranges, std)]
# If this implies min>max, take the centre
ranges_mean = [
(r if r[0] <= r[1] else 2 * [(r[0] + r[1]) / 2]) for r in ranges_mean]
mean[i] = random_mean(ranges_mean, n_modes=1, mpi_warn=False,
random_state=random_state)
else:
mean, cov = None, None
if mpi_aware:
mean, cov = share_mpi((mean, cov))
dimension = len(ranges)
info: InputDict = {"likelihood": {"gaussian_mixture": {
"means": mean, "covs": cov, "input_params_prefix": input_params_prefix,
"output_params_prefix": output_params_prefix, "derived": derived}},
"params": dict(
# sampled
tuple((input_params_prefix + "_%d" % i,
{"prior": {"min": ranges[i][0], "max": ranges[i][1]},
"latex": r"\alpha_{%i}" % i})
for i in range(dimension)) +
# derived
(tuple((output_params_prefix + "_%d" % i,
{"latex": r"\beta_{%i}" % i})
for i in range(dimension * n_modes)) if derived else ()))}
if add_ref:
if n_modes > 1:
raise ValueError("Cannot add a good reference pdf ('add_ref=True') for "
"multimodal distributions")
for i, (p, v) in enumerate(info["params"].items()):
v["ref"] = {"dist": "norm", "loc": mean[0][i], "scale": np.sqrt(cov[0][i, i])}
return info
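# Example usage (illustrative): generate a random 2D unimodal Gaussian likelihood and run
# it with Cobaya's 'evaluate' sampler:
#   from cobaya.run import run
#   info = info_random_gaussian_mixture(ranges=[[0, 1], [0, 1]], n_modes=1, add_ref=True)
#   info["sampler"] = {"evaluate": None}
#   updated_info, sampler = run(info)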
|
CobayaSamplerREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@cobaya@likelihoods@gaussian_mixture@[email protected]_END.py
|
{
"filename": "test_sanitize_center.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/geometry/coordinates/tests/test_sanitize_center.py",
"type": "Python"
}
|
import re
import pytest
from unyt import unyt_array
from unyt.exceptions import UnitConversionError
from yt.testing import fake_amr_ds
@pytest.fixture(scope="module")
def reusable_fake_dataset():
ds = fake_amr_ds(
fields=[("gas", "density")],
units=["g/cm**3"],
)
return ds
valid_single_str_values = ("center",)
valid_field_loc_str_values = ("min", "max")
DEFAUT_ERROR_MESSAGE = (
"Expected any of the following\n"
"- 'c', 'center', 'l', 'left', 'r', 'right', 'm', 'max', or 'min'\n"
"- a 2 element tuple with 'min' or 'max' as the first element, followed by a field identifier\n"
"- a 3 element array-like: for a unyt_array, expects length dimensions, otherwise code_lenght is assumed"
)
@pytest.mark.parametrize(
"user_input",
(
# second element can be a single str or a field tuple (2 str), but not three
(("max", ("not", "a", "field"))),
# a 1-tuple is also not a valid field key
(("max", ("notafield",))),
# both elements need to be str
(("max", (0, "invalid_field_type"))),
(("max", ("invalid_field_type", 1))),
),
)
def test_invalid_center_type_default_error(reusable_fake_dataset, user_input):
ds = reusable_fake_dataset
with pytest.raises(
TypeError,
match=re.escape(f"Received {user_input!r}, ")
+ r"but failed to transform to a unyt_array \(obtained .+\)\.",
):
# at the time of writing `axis` is an unused parameter of the base
# sanitize center method, which is used directly for cartesian coordinate handlers
# this probably hints that a refactor would make sense to separate center sanitizing
# and display_center calculation
ds.coordinates.sanitize_center(user_input, axis=None)
@pytest.mark.parametrize(
"user_input, error_type, error_message",
(
(
"bad_str",
ValueError,
re.escape(
"Received unknown center single string value 'bad_str'. "
+ DEFAUT_ERROR_MESSAGE
),
),
(
("bad_str", ("gas", "density")),
ValueError,
re.escape(
"Received unknown string value 'bad_str'. "
f"Expected one of {valid_field_loc_str_values} (case insensitive)"
),
),
(
("bad_str", "density"),
ValueError,
re.escape(
"Received unknown string value 'bad_str'. "
"Expected one of ('min', 'max') (case insensitive)"
),
),
# even with exactly three elements, the dimension should be length
(
unyt_array([0.5] * 3, "kg"),
UnitConversionError,
"...", # don't match the exact error message since it's unyt's responsibility
),
# only validate 3 elements unyt_arrays
(
unyt_array([0.5] * 2, "cm"),
TypeError,
re.escape("Received unyt_array([0.5, 0.5], 'cm')"),
),
(
unyt_array([0.5] * 4, "cm"),
TypeError,
# don't attempt to match error message as details of how
# a unyt array with more than a couple elements is displayed are out of our control
"...",
),
(
# check that the whole shape is used in validation, not just the length (number of rows)
unyt_array([0.5] * 6, "cm").reshape(3, 2),
TypeError,
# don't attempt to match error message as details of how
# a unyt array with more than a couple elements is displayed are out of our control
"...",
),
),
)
def test_invalid_center_special_cases(
reusable_fake_dataset, user_input, error_type, error_message
):
ds = reusable_fake_dataset
with pytest.raises(error_type, match=error_message):
ds.coordinates.sanitize_center(user_input, axis=None)
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@geometry@coordinates@tests@[email protected]_END.py
|
{
"filename": "svhn.py",
"repo_name": "pytorch/vision",
"repo_path": "vision_extracted/vision-main/torchvision/datasets/svhn.py",
"type": "Python"
}
|
import os.path
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url, verify_str_arg
from .vision import VisionDataset
class SVHN(VisionDataset):
"""`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Dataset.
Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset,
we assign the label `0` to the digit `0` to be compatible with PyTorch loss functions which
expect the class labels to be in the range `[0, C-1]`
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load data from `.mat` format.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset where the data is stored.
split (string): One of {'train', 'test', 'extra'}.
Selects which dataset split is loaded; 'extra' is the extra training set.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
split_list = {
"train": [
"http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat",
"e26dedcc434d2e4c54c9b2d4a06d8373",
],
"test": [
"http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat",
"eb5a983be6a315427106f1b164d9cef3",
],
"extra": [
"http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat",
"a93ce644f1a588dc4d68dda5feec44a7",
],
}
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
# reading(loading) mat file as array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat["X"]
# loading from the .mat file gives an np.ndarray of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.labels = loaded_mat["y"].astype(np.int64).squeeze()
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.labels, self.labels == 10, 0)
self.data = np.transpose(self.data, (3, 2, 0, 1))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.labels[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self) -> None:
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
def extra_repr(self) -> str:
return "Split: {split}".format(**self.__dict__)
|
pytorchREPO_NAMEvisionPATH_START.@vision_extracted@vision-main@torchvision@[email protected]@.PATH_END.py
|
{
"filename": "skip_deprecated.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/doc/sphinxext/skip_deprecated.py",
"type": "Python"
}
|
# Skip deprecated members
def skip_deprecated(app, what, name, obj, skip, options):
if skip:
return skip
skipped = {"matplotlib.colors": ["ColorConverter", "hex2color", "rgb2hex"]}
skip_list = skipped.get(getattr(obj, "__module__", None))
if skip_list is not None:
return getattr(obj, "__name__", None) in skip_list
def setup(app):
app.connect('autodoc-skip-member', skip_deprecated)
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@doc@sphinxext@[email protected]_END.py
|
{
"filename": "dsi2j2000.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/erg/satellite/erg/common/cotrans/dsi2j2000.py",
"type": "Python"
}
|
import numpy as np
from pyspedas import tinterpol
from pytplot import tcrossp
from pytplot import tnormalize
from pyspedas.cotrans_tools.cotrans import cotrans
from pytplot import time_string
from pytplot import get_data, get_timespan, options, store_data, tplot_names
from ...orb.orb import orb
from .cart_trans_matrix_make import cart_trans_matrix_make
from .erg_interpolate_att import erg_interpolate_att
def dsi2j2000(name_in=None,
name_out=None,
no_orb=False,
J20002DSI=False,
noload=False):
"""
This function transforms time series data between the DSI and J2000 coordinate systems.
Parameters:
name_in : str
input tplot variable to be transformed
name_out : str
Name of the tplot variable in which the transformed data is stored
J20002DSI : bool
Set to transform data from J2000 to DSI. If not set, it transforms data from DSI to J2000.
Returns:
None
"""
if (name_in is None) or (name_in not in tplot_names(quiet=True)):
print('Input Tplot name is undefined')
return
if name_out is None:
print('Tplot name for output is undefined')
name_out = 'result_of_dsi2j2000'
# prepare for transformed Tplot Variable
reload = not noload
dl_in = get_data(name_in, metadata=True)
get_data_array = get_data(name_in)
time_array = get_data_array[0]
time_length = time_array.shape[0]
dat = get_data_array[1]
# Get the SGI axis by interpolating the attitude data
dsiz_j2000 = erg_interpolate_att(name_in, noload=noload)['sgiz_j2000']
# Sun direction, first built in GSE coordinates (converted to J2000 below)
sundir = np.array([[1., 0., 0.]]*time_length)
if no_orb:
store_data('sundir_gse', data={'x': time_array, 'y': sundir})
else: # Calculate the sun directions from the instantaneous satellite locations
if reload:
tr = get_timespan(name_in)
orb(trange=time_string([tr[0] - 60., tr[1] + 60.]))
tinterpol('erg_orb_l2_pos_gse', time_array)
scpos = get_data('erg_orb_l2_pos_gse-itrp')[1]
sunpos = np.array([[1.496e+08, 0., 0.]]*time_length)
sundir = sunpos - scpos
store_data('sundir_gse', data={'x': time_array, 'y': sundir})
tnormalize('sundir_gse', newname='sundir_gse')
# Derive DSI-X and DSI-Y axis vectors in J2000.
# The elementary vectors below are the definition of DSI. The detailed relationship
# between the spin phase, sun pulse timing, sun direction, and the actual subsolar point
# on the spinning s/c body should be incorporated into the calculation below.
if reload:
cotrans(name_in='sundir_gse', name_out='sundir_j2000',
coord_in='gse', coord_out='j2000')
sun_j2000 = get_data('sundir_j2000')
dsiy = tcrossp(dsiz_j2000['y'], sun_j2000[1], return_data=True)
dsix = tcrossp(dsiy, dsiz_j2000['y'], return_data=True)
dsix_j2000 = {'x': time_array, 'y': dsix}
dsiy_j2000 = {'x': time_array, 'y': dsiy}
if not J20002DSI:
print('DSI --> J2000')
mat = cart_trans_matrix_make(
dsix_j2000['y'], dsiy_j2000['y'], dsiz_j2000['y'])
j2000x_in_dsi = np.dot(mat, np.array([1., 0., 0.]))
j2000y_in_dsi = np.dot(mat, np.array([0., 1., 0.]))
j2000z_in_dsi = np.dot(mat, np.array([0., 0., 1.]))
mat = cart_trans_matrix_make(
j2000x_in_dsi, j2000y_in_dsi, j2000z_in_dsi)
dat_new = np.einsum("ijk,ik->ij", mat, dat)
else:
print('J2000 --> DSI')
mat = cart_trans_matrix_make(
dsix_j2000['y'], dsiy_j2000['y'], dsiz_j2000['y'])
dat_new = np.einsum("ijk,ik->ij", mat, dat)
store_data(name_out, data={'x': time_array, 'y': dat_new}, attr_dict=dl_in)
options(name_out, 'ytitle', '\n'.join(name_out.split('_')))
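# --- Illustrative usage sketch (not part of the original module) ---
# Assuming a tplot variable in DSI coordinates already exists (e.g. loaded by an
# ERG load routine; the variable names below are hypothetical):
#
#   dsi2j2000(name_in='erg_mgf_l2_mag_8sec_dsi',
#             name_out='erg_mgf_l2_mag_8sec_j2000')
#   # and the inverse transform:
#   dsi2j2000(name_in='erg_mgf_l2_mag_8sec_j2000',
#             name_out='erg_mgf_l2_mag_8sec_dsi', J20002DSI=True)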
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@erg@satellite@erg@common@[email protected]@.PATH_END.py
|
{
"filename": "node.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/crossbar/edge/node/node.py",
"type": "Python"
}
|
##############################################################################
#
# Crossbar.io
# Copyright (C) Crossbar.io Technologies GmbH. All rights reserved.
#
##############################################################################
import sys
import os
import json
import pkg_resources
from collections import OrderedDict
import click
import psutil
import txaio
from txaio import make_logger
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet.ssl import optionsForClientTLS
from twisted.internet.task import LoopingCall
from autobahn import wamp
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationRunner
from autobahn.wamp.types import ComponentConfig, CallOptions
from autobahn.wamp.exception import ApplicationError
from autobahn.websocket.util import parse_url
from crossbar._util import hl, hltype, hlid
from crossbar.common import checkconfig
from crossbar.node import node, controller
from crossbar.edge.node.management import NodeManagementBridgeSession, NodeManagementSession
try:
from crossbar.edge.node.docker import DockerClient
_HAS_DOCKER = True
except ImportError:
_HAS_DOCKER = False
_ALLOWED_ACTIVATION_TAGS = [
'created-at', 'management-url', 'management-realm', 'management-realm-oid', 'node-oid', 'node-authid',
'node-cluster-ip', 'activation-code', 'public-key-ed25519'
]
def _parse_activation_file(path):
"""
Internal helper. This parses a ``key.activation`` file and returns a dict mapping tags -> values.
.. code-block:: console
Crossbar.io node activation
created-at: 2020-07-05T11:49:59.125Z
management-url: ws://localhost:9000/ws
management-realm: default
management-realm-oid: 6e8117fb-5bd8-4e83-860c-decefa1e95ac
node-oid: 664e99a6-6a65-4f64-a95e-46ac9c28c80e
node-authid: node-664e99
activation-code: P33W-GS4H-5L4Q
public-key-ed25519: 22c6e16005dfb0824466e35ae4b4f71746230628c2dec233f3b8cba22c4acce8
"""
if not os.path.exists(path):
raise Exception('activation file path "{}" does not exist'.format(path))
if not os.path.isfile(path):
raise Exception('activation file path "{}" exists, but is not a file'.format(path))
tags = OrderedDict()
with open(path, 'r') as key_file:
got_blankline = False
for line in key_file.readlines():
if line.strip() == '':
got_blankline = True
elif got_blankline:
tag, value = line.split(':', 1)
tag = tag.strip().lower()
value = value.strip()
if tag not in _ALLOWED_ACTIVATION_TAGS:
raise Exception('invalid tag "{}" in activation file "{}"'.format(tag, path))
if tag in tags:
raise Exception('duplicate tag "{}" in activation file "{}"'.format(tag, path))
tags[tag] = value
return tags
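# --- Illustrative sketch (not part of the original module) ---
# Given an activation file in the format shown in the docstring above, the helper
# returns an OrderedDict keyed by the allowed tags (the path is hypothetical):
#
#   tags = _parse_activation_file('/path/to/.crossbar/key.activate')
#   tags['management-url']   # -> 'ws://localhost:9000/ws'
#   tags['activation-code']  # -> 'P33W-GS4H-5L4Q'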
class FabricNodeControllerSession(controller.NodeController):
"""
This is the central node controller for CF nodes.
It derives from the node controller base class in CB and adds
the following functionality exposed to CFC:
- can manage a host Docker daemon
"""
# yapf: disable
log = make_logger()
def onUserError(self, fail, msg):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.onUserError`
"""
if isinstance(fail.value, ApplicationError):
self.log.debug('{klass}.onUserError(): "{msg}"',
klass=self.__class__.__name__,
msg=fail.value.error_message())
else:
self.log.error(
'{klass}.onUserError(): "{msg}"\n{traceback}',
klass=self.__class__.__name__,
msg=msg,
traceback=txaio.failure_format_traceback(fail),
)
def __init__(self, node):
controller.NodeController.__init__(self, node)
# Docker client used for exposing the host Docker
# daemon via the node management API.
self._docker_client = None
def onConnect(self):
# attach to host docker daemon
if _HAS_DOCKER:
if self._node._enable_docker:
if os.path.exists('/run/docker.sock'):
self._docker_client = DockerClient(self._node._reactor, self)
self._docker_client.startup()
else:
self.log.warn('Docker daemon integration enabled, but Docker Unix domain socket path cannot be accessed!')
else:
self.log.info('Docker daemon integration disabled!')
else:
self.log.info('Docker unavailable or unsupported!')
controller.NodeController.onConnect(self)
@inlineCallbacks
def _shutdown(self, restart=False, mode=None):
# override base class method (without calling the base method) ..
self.log.info('{klass}._shutdown(restart={restart}, mode={mode})',
klass=self.__class__.__name__, restart=restart, mode=mode)
if self._node._manager_runner:
self.log.warn('Stopping management uplink ..')
yield self._node._manager_runner.stop()
self._node._manager = None
self._node._manager_runner = None
if self._docker_client:
yield self._docker_client.shutdown()
self._docker_client = None
@wamp.register(None)
def get_status(self, details=None):
"""
Return basic information about this node.
:returns: Information on the Crossbar.io node.
:rtype: dict
"""
status = super(FabricNodeControllerSession, self).get_status(details=details)
status.update({
# the following come from CFC (and are only filled
# when the node personality is FABRIC!)
'management_realm':
self._node._management_realm,
'management_node_id':
self._node._node_id,
'management_session_id':
self._node._manager._session_id if self._node._manager else None,
'management_node_extra':
self._node._node_extra,
# True if remote Docker management is available
'has_docker':
self._docker_client is not None
})
return status
#
# Docker support
# https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#docker-control
#
def _ensure_docker(self):
if not self._docker_client:
raise ApplicationError("crossbar.error.feature_unavailable",
"Docker not available or Docker daemon integration not enabled")
@wamp.register(None)
@inlineCallbacks
def get_docker_info(self, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerget_info
"""
self._ensure_docker()
return (yield self._docker_client.get_info())
@wamp.register(None)
@inlineCallbacks
def get_docker_ping(self, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerget_ping
"""
self._ensure_docker()
return (yield self._docker_client.ping())
@wamp.register(None)
@inlineCallbacks
def get_docker_version(self, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerget_version
"""
self._ensure_docker()
return (yield self._docker_client.version())
@wamp.register(None)
@inlineCallbacks
def get_docker_df(self, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerget_df
"""
self._ensure_docker()
return (yield self._docker_client.df())
@wamp.register(None)
@inlineCallbacks
def get_docker_containers(self, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerget_containers
"""
self._ensure_docker()
return (yield self._docker_client.get_containers())
@wamp.register(None)
@inlineCallbacks
def get_docker_container(self, container_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerget_container
"""
self._ensure_docker()
return (yield self._docker_client.get_container(container_id))
@wamp.register(None)
@inlineCallbacks
def start_docker_container(self, container_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerstart_container
"""
self._ensure_docker()
return (yield self._docker_client.start(container_id))
@wamp.register(None)
@inlineCallbacks
def create_docker_container(self, image, config={}, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockercreate_container
"""
self._ensure_docker()
return (yield self._docker_client.create(image, config))
@wamp.register(None)
@inlineCallbacks
def stop_docker_container(self, container_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerstop_container
"""
self._ensure_docker()
return (yield self._docker_client.container(container_id, 'stop'))
@wamp.register(None)
@inlineCallbacks
def restart_docker_container(self, container_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerrestart_container
"""
self._ensure_docker()
return (yield self._docker_client.restart(container_id))
@wamp.register(None)
@inlineCallbacks
def destroy_docker_container(self, container_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerremove_container
"""
self._ensure_docker()
return (yield self._docker_client.container(container_id, 'remove'))
@wamp.register(None)
@inlineCallbacks
def pause_docker_container(self, container_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerpause_container
"""
self._ensure_docker()
return (yield self._docker_client.container(container_id, 'pause'))
@wamp.register(None)
@inlineCallbacks
def unpause_docker_container(self, container_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerunpause_container
"""
self._ensure_docker()
return (yield self._docker_client.container(container_id, 'unpause'))
@wamp.register(None)
def request_docker_tty(self, id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerrequest_tty`
"""
self._ensure_docker()
return self._docker_client.request_tty(id)
@wamp.register(None)
@inlineCallbacks
def watch_docker_container(self, container_id, tty_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerwatch_container
"""
self._ensure_docker()
return (yield self._docker_client.watch(container_id, tty_id))
@wamp.register(None)
@inlineCallbacks
def shell_docker_container(self, container_id, tty_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockershell_container
"""
self._ensure_docker()
return (yield self._docker_client.shell(container_id, tty_id))
@wamp.register(None)
@inlineCallbacks
def backlog_docker_container(self, container_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerbacklog_container
"""
self._ensure_docker()
return (yield self._docker_client.backlog(container_id))
@wamp.register(None)
@inlineCallbacks
def keystroke_docker_container(self, container_id, data, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerkeystroke_container
"""
self._ensure_docker()
return (yield self._docker_client.keystroke(container_id, data))
@wamp.register(None)
@inlineCallbacks
def get_docker_images(self, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerget_images
"""
self._ensure_docker()
return (yield self._docker_client.get_images())
@wamp.register(None)
@inlineCallbacks
def delete_docker_image(self, image_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerdelete_image
"""
self._ensure_docker()
return (yield self._docker_client.delete_image(image_id))
@wamp.register(None)
@inlineCallbacks
def get_docker_image(self, image_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerget_image
"""
self._ensure_docker()
return (yield self._docker_client.get_image(image_id))
@wamp.register(None)
@inlineCallbacks
def remove_docker_image(self, image_id, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerremove_image
"""
self._ensure_docker()
return (yield self._docker_client.image(image_id, 'remove'))
@wamp.register(None)
@inlineCallbacks
def prune_docker_images(self, filter, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerprune_image
"""
self._ensure_docker()
return (yield self._docker_client.prune(filter))
@wamp.register(None)
@inlineCallbacks
def fs_docker_open(self, id, path, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerfsopen
"""
self._ensure_docker()
return (yield self._docker_client.fs_open(id, path))
@wamp.register(None)
@inlineCallbacks
def fs_docker_get(self, id, path, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerfsget
"""
self._ensure_docker()
return (yield self._docker_client.fs_get(id, path))
@wamp.register(None)
@inlineCallbacks
def fs_docker_put(self, id, path, data, details=None):
"""
https://github.com/crossbario/crossbar-fabric-public/blob/master/docs/api/Management-API.md#crossbarfabriccenterremotedockerfsput
"""
self._ensure_docker()
return (yield self._docker_client.fs_put(id, path, data))
def compute_mgmt_uplink_config(log, cbdir, config, fn_reboot=None, use_activation_file=True, use_default_fabric=False):
"""
Determine the transport configuration of the management uplink for this node
in the following order (using the first one that succeeds):
* node activation file
* management URL environment variable
* node configuration file
* built-in default ("master.xbr.network")
:param cbdir:
:param config:
:param fn_reboot:
:param use_activation_file:
:param use_default_fabric:
:return:
"""
fabric_transport_config = None
# [1] pick up auto-activation files dropped by a master node (`${CBDIR}/key.activate`)
if not fabric_transport_config and use_activation_file:
def do_check_for_activation_file(activation_file, reboot_on_discover):
if os.path.isfile(activation_file):
tags = _parse_activation_file(activation_file)
is_secure, hostname, port, _, _, _ = parse_url(tags['management-url'])
config = {
'type': 'websocket',
'url': tags['management-url'],
'endpoint': {
'type': 'tcp',
'host': hostname,
'port': port,
'timeout': 5
}
}
if is_secure:
config['endpoint']['tls'] = {
'hostname': hostname
}
_msg = 'Found auto-activation file "{}", using management URL "{}" - [1]'.format(activation_file,
config['url'])
log.info(click.style(_msg, fg='red', bold=True))
if reboot_on_discover and fn_reboot:
# stop the node and enforce complete reboot - which will then pick up the new configuration
fn_reboot()
# return the management uplink transport configuration, as derived from the activation file
return config
# an activation file must be placed next to the node key pair (key.pub, key.priv)
activation_file = os.path.join(cbdir, 'key.activate')
# check and maybe load config from activation file
fabric_transport_config = do_check_for_activation_file(activation_file, reboot_on_discover=False)
# if there wasn't an activation file, periodically check for ..
if not fabric_transport_config:
lc = LoopingCall(do_check_for_activation_file, activation_file, reboot_on_discover=True)
lc.start(1)
log.info('Looping call to check for node activation file started! - [1]')
# [2] management uplink configured via env var
if not fabric_transport_config:
url = os.environ['CROSSBAR_FABRIC_URL'].strip() if 'CROSSBAR_FABRIC_URL' in os.environ else ''
if url != '':
secure, host, tcp_port, _, _, _ = parse_url(url)
fabric_transport_config = {
'type': 'websocket',
'url': url,
'endpoint': {
'type': 'tcp',
'host': host,
'port': tcp_port,
'timeout': 5
}
}
if secure:
fabric_transport_config['endpoint']['tls'] = {
'hostname': host
}
log.info(
click.style('Using management uplink at "{}" (from envvar) - [2]'.format(url),
fg='red', bold=True))
# [3] user has configured a custom management uplink in the node configuration
if not fabric_transport_config:
if 'controller' in config and 'fabric' in config['controller'] and config['controller']['fabric']:
fabric_config = config['controller']['fabric']
# allow to deactivate the management uplink connecting transport by setting "transport" to null
fabric_transport_config = fabric_config.get('transport', None)
if fabric_transport_config:
url = fabric_transport_config.get('url', None)
log.info(
click.style('Using management uplink at "{}" (from node configuration) - [3]'.format(url),
fg='red', bold=True))
else:
log.info(
click.style('Management uplink deactivated - [3]',
fg='red', bold=True))
# [4] use hard-coded default management uplink
if not fabric_transport_config and use_default_fabric:
# default CFC (= our cloud hosted CFC service)
fabric_transport_config = {
'type': 'websocket',
'url': 'wss://master.xbr.network/ws',
'endpoint': {
'type': 'tcp',
'host': 'master.xbr.network',
'port': 443,
'timeout': 5,
'tls': {
'hostname': 'master.xbr.network'
}
}
}
log.info(
click.style(
'Using default fabric controller at URL "{}" (from envvar) - [4]'.format(fabric_transport_config['url']),
fg='red', bold=True))
return fabric_transport_config
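# --- Illustrative sketch (not part of the original module) ---
# With no activation file and no "fabric" section in the node configuration, the
# management uplink transport can be derived purely from the CROSSBAR_FABRIC_URL
# environment variable (case [2] above); the URL and cbdir values are assumptions:
#
#   import os
#   from txaio import make_logger
#   os.environ['CROSSBAR_FABRIC_URL'] = 'wss://master.example.com/ws'
#   transport = compute_mgmt_uplink_config(make_logger(), '/tmp/.crossbar', {},
#                                          use_activation_file=False)
#   # transport == {'type': 'websocket', 'url': 'wss://master.example.com/ws',
#   #               'endpoint': {'type': 'tcp', 'host': 'master.example.com', 'port': 443,
#   #                            'timeout': 5, 'tls': {'hostname': 'master.example.com'}}}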
class FabricNode(node.Node):
"""
Crossbar.io node personality.
"""
DEFAULT_CONFIG_PATH = 'edge/node/config/bare.json'
NODE_CONTROLLER = FabricNodeControllerSession
def __init__(self, personality, cbdir=None, reactor=None, native_workers=None, options=None):
node.Node.__init__(self, personality, cbdir, reactor, native_workers, options)
# looping call that runs the node controller watchdog
self._watchdog_looper = None
# the node controller realm (where eg worker controller live). we explicitly use the
# same realm as Crossbar.io OSS
self._realm = 'crossbar'
# enable docker daemon integration
self._enable_docker = None
# when running in managed mode, this will hold the bridge session
# attached to the local management router
self._bridge_session = None
# if this node has a proper management uplink configured to connect to
self._has_management_config = False
# if this node was connected to its configured management uplink successfully at least once
# during run-time (since last reboot) of this node
self._was_management_connected = False
# when we periodically check for a node activation file, the looping call for doing
# the check - and automatically shutdown when an activation file was found (after boot)
self._check_for_activation_file = None
# when self._was_management_connected, the URL we've been connected to
self._management_url = None
# when running in managed mode, this will hold the management uplink session to
# the crossbar master node
self._manager = None
self._manager_runner = None
# the node's management realm when running in managed mode (this comes from CFC!)
self._management_realm = None
# the node's ID when running in managed mode (this comes from CFC!)
self._node_id = None
# node extra when running in managed mode (this comes from CFC!)
self._node_extra = None
# when the node starts up, it will connect to CFC, and then apply the
# local node configuration, and this flag will be set. when the CFC connection
# is lost, and then reestablished, the local node configuration should NOT
# be applied a second time though - hence this flag
self._local_config_applied = False
# We really only need to see this once (?)
self._displayed_pairing_message = False
# for automatic ID assignment of "makers" within workers of type "xbrmm"
self._maker_no = 1
def load_config(self, configfile=None):
"""
Check and load the node configuration from:
* from ``.crossbar/config.json`` or
* from built-in (empty) default configuration
This is the _second_ function being called after the Node has been instantiated.
IMPORTANT: this function is run _before_ start of Twisted reactor!
"""
config_source = None
config_path = None
# if the node hasn't been configured from XBR network, fallback to loading config from local config file
if not self._config:
default_filename = pkg_resources.resource_filename('crossbar', self.DEFAULT_CONFIG_PATH)
with open(default_filename) as f:
default_config = json.load(f)
config_source, config_path = node.Node.load_config(self, configfile, default_config)
self.log.info('Node configuration loaded from {config_source} ({config_path})',
config_source=hlid(config_source),
config_path=hlid(config_path))
# Docker host integration
if _HAS_DOCKER and self._config and 'controller' in self._config:
self._enable_docker = self._config['controller'].get('enable_docker', False)
return config_source, config_path
def _watchdog(self):
# on Linux, check that we start with sufficient system entropy
entropy_avail = None
if sys.platform.startswith('linux'):
try:
with open('/proc/sys/kernel/random/entropy_avail', 'r') as ent:
entropy_avail = int(ent.read())
# FIXME: my machine never has more than ~ 300 units available, 1000 seems a little optimistic!
if entropy_avail < 64:
self.log.warn('WARNING: insufficient entropy ({} bytes) available - try installing rng-tools'.format(entropy_avail))
except PermissionError:
# this happens when packaged as a snap: the code is prevented from reading a location
# that is not allowed to a confined snap package
entropy_avail = -1
# check for at least 100MB free memory
mem_avail = psutil.virtual_memory().available // 2 ** 20
if mem_avail < 100:
self.log.warn('WARNING: available memory dropped to {mem_avail} MB', mem_avail=mem_avail)
self.log.trace('WATCHDOG: entropy_avail {entropy_avail} bytes, mem_avail {mem_avail} MB',
entropy_avail=entropy_avail, mem_avail=mem_avail)
@inlineCallbacks
def start(self, node_id=None):
self.log.info('{note} [{method}]',
note=hl('Starting node (initialize edge-node personality) ..', color='green', bold=True),
method=hltype(FabricNode.start))
# run watchdog at 5Hz
self._watchdog_looper = LoopingCall(self._watchdog)
self._watchdog_looper.start(.2)
res = yield node.Node.start(self, node_id or self._node_id)
return res
@inlineCallbacks
def boot(self, use_activation_file=True, use_default_fabric=False):
self.log.info('Booting node {method}', method=hltype(FabricNode.boot))
def reboot():
self.stop(restart=True)
# determine the transport configuration of the management uplink for this node
fabric_transport_config = compute_mgmt_uplink_config(self.log, self._cbdir, self._config, reboot,
use_activation_file=use_activation_file,
use_default_fabric=use_default_fabric)
# now finally, if we do have a transport configuration for the management uplink at this point,
# then start the management uplink ..
if fabric_transport_config:
self._has_management_config = True
url = fabric_transport_config['url']
hostname = None
if 'tls' in fabric_transport_config.get('endpoint', {}):
hostname = fabric_transport_config['endpoint']['tls']['hostname']
self._manager_runner = ApplicationRunner(
url=url,
realm=None,
extra=None,
ssl=optionsForClientTLS(hostname) if hostname else None,
)
def make(config):
# extra info forwarded to CFC client session
extra = {
'node': self,
'on_ready': Deferred(),
'on_exit': Deferred(),
}
@inlineCallbacks
def on_ready_success(res):
try:
self._manager, self._management_realm, self._management_session_id, self._node_id, self._node_extra = res
if self._bridge_session:
try:
yield self._bridge_session.attach_manager(
self._manager, self._management_realm, self._node_id)
except:
self.log.failure()
else:
while True:
try:
# we actually test the management uplink by calling a procedure on the master
yield self._manager.call('crossbarfabriccenter.mrealm.get_status')
except ApplicationError as e:
if e.error == 'wamp.error.no_such_procedure':
self.log.info('Could not get master status ("wamp.error.no_such_procedure") - retrying in 5 secs ..')
else:
self.log.failure()
yield sleep(5)
except:
self.log.failure()
self.log.info('Could not get master status - retrying in 5 secs ..')
yield sleep(5)
else:
self.log.info(
click.style(
'Connected to Crossbar.io Master at management realm "{realm}", set node ID "{node_id}" (extra={node_extra}, session_id={session_id})',
fg='green',
bold=True),
realm=self._management_realm,
node_id=self._node_id,
node_extra=self._node_extra,
session_id=self._manager._session_id)
# if the management uplink was successfully established and tested once, mark it so
if not self._was_management_connected:
self._was_management_connected = True
self._management_url = url
try:
worker_ids = yield self._bridge_session.call(
'crossbar.get_workers')
for worker_id in worker_ids:
yield self._bridge_session.call(
'crossbar.worker.{}.set_node_id'.format(worker_id),
self._node_id)
except:
self.log.warn(
'INTERNAL ERROR: could not set node_id "{node_id}" after CFC connection was established',
node_id=self._node_id)
self.log.failure()
break
else:
self.log.warn(
'Uplink Crossbar.io Master session established, but no bridge session setup!'
)
except Exception as e:
self.log.warn('error in on_ready: {}'.format(e))
# ok, we are connected to CFC and normally will be configured programmatically from there.
# however, it is still possible to apply any local node configuration by setting
#
# node_extra:
# {
# "on_start_apply_config", true
# }
#
# node_extra comes from CFC and has to be configured there (when the node is paired)
#
if self._node_extra:
# by default, apply local config (from a node configuration file, if there is one)
on_start_apply_config = self._node_extra.get('on_start_apply_config', True)
if on_start_apply_config:
if not self._local_config_applied:
self.log.info('Applying local node configuration (on_start_apply_config is enabled)')
yield self.boot_from_config(self._config)
self._local_config_applied = True
else:
self.log.info('Local node configuration was already applied - skipping')
else:
self.log.info('Skipping any local node configuration (no local config or on_start_apply_config is "off")')
def on_ready_error(err):
if isinstance(
err.value,
ApplicationError) and err.value.error in ['fabric.auth-failed.node-unpaired', 'fabric.auth-failed.node-already-connected']:
if not self._displayed_pairing_message:
self._displayed_pairing_message = True
self.log.error(
click.style(err.value.error_message().upper(), fg='red', bold=True))
self.stop()
else:
self.log.error(click.style(
'Could not connect to CFC: {error}', fg='red', bold=True), error=err.value
)
@inlineCallbacks
def on_exit_success(reason):
if self._bridge_session:
try:
yield self._bridge_session.detach_manager()
except:
self.log.failure()
else:
self.log.debug(
'Disconnected from Crossbar.io Master for management realm "{realm}"',
realm=self._management_realm)
else:
self.log.warn(
'Uplink Crossbar.io Master session lost, but no bridge session setup!')
self._manager, self._management_realm, self._management_session_id, self._node_id, self._node_extra = None, None, None, None, None
def on_exit_error(err):
print(err)
extra['on_ready'].addCallbacks(on_ready_success, on_ready_error)
extra['on_exit'].addCallbacks(on_exit_success, on_exit_error)
config = ComponentConfig(extra=extra)
session = NodeManagementSession(self._manager_runner, config)
return session
self.log.info('Connecting to Crossbar.io Master at {url} ..', url=url)
yield self._manager_runner.run(make, start_reactor=False, auto_reconnect=True)
else:
# here, we don't have a management uplink :(
self.log.info(
hl('No management uplink configured (running unmanaged/single-node)',
color='red',
bold=True))
self._has_management_config = False
# nevertheless, now boot from local node config!
yield self.boot_from_config(self._config)
self._local_config_applied = True
def _add_extra_controller_components(self, controller_config):
extra = {
'node': self,
'controller_config': controller_config,
}
cfg = ComponentConfig(self._realm, extra=extra)
self._bridge_session = NodeManagementBridgeSession(cfg)
router = self._router_factory.get(self._realm)
self._router_session_factory.add(self._bridge_session, router, authrole='trusted')
def _set_shutdown_triggers(self, controller_options):
if 'shutdown' in controller_options:
self._node_shutdown_triggers = controller_options['shutdown']
self.log.info("Using node shutdown triggers {triggers} from configuration", triggers=self._node_shutdown_triggers)
else:
# NODE_SHUTDOWN_ON_SHUTDOWN_REQUESTED
# NODE_SHUTDOWN_ON_WORKER_EXIT
# NODE_SHUTDOWN_ON_WORKER_EXIT_WITH_ERROR
# NODE_SHUTDOWN_ON_LAST_WORKER_EXIT
# in managed mode, a node - by default - only shuts down when explicitly asked to,
# or upon a fatal error in the node controller
self._node_shutdown_triggers = [checkconfig.NODE_SHUTDOWN_ON_SHUTDOWN_REQUESTED]
self.log.info("Using default node shutdown triggers {triggers}", triggers=self._node_shutdown_triggers)
def _add_worker_role(self, worker_auth_role, options):
worker_role_config = {
# each (native) worker is authenticated under a worker-specific authrole
"name":
worker_auth_role,
"permissions": [
# the worker requires these permissions to work:
{
# management API provided by the worker. note that the worker management API is provided under
# the URI prefix "crossbar.worker.<worker_id>". note that the worker is also authenticated
# under authrole <worker_auth_role> on realm "crossbar"
"uri": worker_auth_role,
"match": "prefix",
"allow": {
"call": True,
"register": True,
"publish": True,
"subscribe": True
},
"disclose": {
"caller": True,
"publisher": True
},
"cache": True
},
{
# controller procedure called by the worker (to check for controller status)
"uri": "crossbar.get_status",
"match": "exact",
"allow": {
"call": True,
"register": False,
"publish": False,
"subscribe": False
},
"disclose": {
"caller": True,
"publisher": True
},
"cache": True
}
]
}
# if configured to expose the controller connection within the worker (to make it available
# in user code such as dynamic authenticators and router/container components), also add
# permissions to actually use the (local) node management API
if options.get('expose_controller', True):
vendor_permissions = {
"uri": "crossbar.",
"match": "prefix",
"allow": {
"call": True,
"register": False,
"publish": False,
"subscribe": True
},
"disclose": {
"caller": True,
"publisher": True
},
"cache": True
}
worker_role_config["permissions"].append(vendor_permissions)
vendor_permissions = {
"uri": "crossbarfabriccenter.",
"match": "prefix",
"allow": {
"call": True,
"register": True,
"publish": True,
"subscribe": True
},
"disclose": {
"caller": True,
"publisher": True
},
"cache": True
}
worker_role_config["permissions"].append(vendor_permissions)
self._router_factory.add_role(self._realm, worker_role_config)
self.log.info(
'worker-specific role "{authrole}" added on node management router realm "{realm}" {func}',
func=hltype(self._add_worker_role),
authrole=hlid(worker_role_config['name']),
realm=hlid(self._realm))
def _extend_worker_args(self, args, options):
if 'expose_shared' in options and options['expose_shared']:
args.extend(['--expose_shared=true'])
if 'expose_controller' in options and options['expose_controller']:
args.extend(['--expose_controller=true'])
@inlineCallbacks
def _configure_native_worker_connections(self, worker_logname, worker_id, worker):
# start connections (such as PostgreSQL database connection pools)
# to run embedded in the router
for connection in worker.get('connections', []):
if 'id' in connection:
connection_id = connection.pop('id')
else:
connection_id = 'connection{:03d}'.format(self._connection_no)
self._connection_no += 1
yield self._controller.call('crossbar.worker.{}.start_connection'.format(worker_id), connection_id, connection, options=CallOptions())
self.log.info(
"{logname}: connection '{connection}' started",
logname=worker_logname,
connection=connection_id,
)
@inlineCallbacks
def _configure_native_worker_router(self, worker_logname, worker_id, worker):
# setup db connection pool
yield self._configure_native_worker_connections(worker_logname, worker_id, worker)
# in this case, call the base class method _after_ above - because we want db connections
# to be available when router/container components might start ..
yield node.Node._configure_native_worker_router(self, worker_logname, worker_id, worker)
@inlineCallbacks
def _configure_native_worker_container(self, worker_logname, worker_id, worker):
# setup db connection pool
yield self._configure_native_worker_connections(worker_logname, worker_id, worker)
# in this case, call the base class method _after_ above - because we want db connections
# to be available when router/container components might start ..
yield node.Node._configure_native_worker_container(self, worker_logname, worker_id, worker)
@inlineCallbacks
def _configure_native_worker_hostmonitor(self, worker_logname, worker_id, worker):
# after the native worker has started, and the HostMonitor controller session
# has attached to the local node router, we need to do the following, if we
# want it to work _also_ from a local node config.json. Driving from CFC
# at run-time always works (also without the bits here)
# 1. the native workers' common options are configured by calling into the worker
yield self._configure_native_worker_common(worker_logname, worker_id, worker)
# 2. the host monitor specific actions need to be done, namely, starting the monitoring
monitoring_config = worker.get('monitor', None)
yield self._controller.call('crossbar.worker.{}.start_monitoring'.format(worker_id), monitoring_config, options=CallOptions())
@inlineCallbacks
def _configure_native_worker_xbrmm(self, worker_logname, worker_id, worker):
# 1. configure native workers' common options
yield self._configure_native_worker_common(worker_logname, worker_id, worker)
# 2. configure market makers (defined within the xbrmm worker)
for maker in worker.get('makers', []):
if 'id' in maker:
maker_id = maker.pop('id')
else:
maker_id = 'maker{:03d}'.format(self._maker_no)
self._maker_no += 1
maker['id'] = maker_id
yield self._controller.call('crossbar.worker.{}.start_market_maker'.format(worker_id), maker_id, maker, options=CallOptions())
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@crossbar@edge@[email protected]@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "spicy-oil/hfs_fit",
"repo_path": "hfs_fit_extracted/hfs_fit-master/README.md",
"type": "Markdown"
}
|
# hfs_fit
**A python fitting program for atomic emission lines with hyperfine structure (HFS).**
**It provides sliders for the parameters so the fit can be adjusted visually, which is useful when HFS constants are unknown.**
<img src="data/z3P2---a3D3 (spectrum.txt).png">
# Quickstart
* requires python 3.6 or higher
* run `pip install -r requirements.txt` to install dependencies
* run `example.py` in an interactive python environment
# Files and Explanations
1) example.py - basic usage.
2) fitLog.xlsx - parameters saved here when desired.
3) hfs_fit.py - main script that makes use of the others; contains the spectrum class and the fitting and plotting algorithms.
4) interpolation.py - used for cubic spline interpolation when specified in hfs_fit.py.
5) LU.py - LU decomposition for interpolation.py.
6) matrixmult.py - matrix multiplication for LU.py.
7) relInt.py - routine to calculate relative intensities of HFS components, used by hfs_fit.py.
8) spectrum.txt - a small portion of a UV Co II spectrum with 4 Co II lines.
9) fits - folder containing saved plots.
# Useful Functions and Notes
- Plot a transition diagram with HFS components using the LineFig() method of the hfs class. Its nInterp argument is the number of points added artificially to make the lines smooth; 1 (the default) means no interpolation. The spacing between labels may not be perfect and a level label will often touch a level line; this can be adjusted by changing the text locations at lines 678-681.
- Plot the spectrum using the PlotSpec() method of the hfs class; pass a wavenumber and it will plot around that wavenumber.
- Plot the residual using Residual() of the hfs class, e.g. class.Residual(class.paramsGuess, plot=True).
- Use hjw() of the hfs class to halve all jump widths before Optimise(); this is convenient for the final optimisation of the parameters, or if the initial guess is very good.
- The sliders plot can always be re-opened with the PlotGuess() method of the hfs class. If the sliders don't respond, try closing the window and opening it again (this happens sometimes in iPython).
- Points can also be added for smoothing during fitting; to do this, change the nInterp value in the WNRange() method of hfs and re-import hfs.
- HFS components are plotted by default; this can be turned off with PlotGuess(components=False).
- The reset button of PlotGuess() doesn't seem to work in iPython.
- If the instrumental profile (Fourier transform spectroscopy only) is negligible, set icut to its maximum value.
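A rough interactive session, using only the methods mentioned above, might look like the sketch below; the import and constructor call are illustrative assumptions (see `example.py` for the exact invocation):

```python
from hfs_fit import hfs   # hypothetical import; check example.py for the real one

fit = hfs()                # hypothetical constructor call
fit.PlotSpec(38000)        # plot the spectrum around a wavenumber (illustrative value)
fit.PlotGuess()            # open the slider window and adjust parameters visually
fit.hjw()                  # halve all jump widths before the final optimisation
fit.Optimise()             # optimise the parameters
fit.Residual(fit.paramsGuess, plot=True)   # inspect the residual of the current guess
```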
|
spicy-oilREPO_NAMEhfs_fitPATH_START.@hfs_fit_extracted@[email protected]@.PATH_END.py
|
{
"filename": "markdown-notebooks.ipynb",
"repo_name": "exo-cesm/CESM2.1.3",
"repo_path": "CESM2.1.3_extracted/CESM2.1.3-main/_build/html/_sources/markdown-notebooks.ipynb",
"type": "Jupyter Notebook"
}
|
# Notebooks with MyST Markdown
Jupyter Book also lets you write text-based notebooks using MyST Markdown.
See [the Notebooks with MyST Markdown documentation](https://jupyterbook.org/file-types/myst-notebooks.html) for more detailed instructions.
This page shows off a notebook written in MyST Markdown.
## An example cell
With MyST Markdown, you can define code cells with a directive like so:
```python
print(2 + 2)
```
4
When your book is built, the contents of any `{code-cell}` blocks will be
executed with your default Jupyter kernel, and their outputs will be displayed
in-line with the rest of your content.
```{seealso}
Jupyter Book uses [Jupytext](https://jupytext.readthedocs.io/en/latest/) to convert text-based files to notebooks, and can support [many other text-based notebook files](https://jupyterbook.org/file-types/jupytext.html).
```
## Create a notebook with MyST Markdown
MyST Markdown notebooks are defined by two things:
1. YAML metadata that is needed to understand if / how it should convert text files to notebooks (including information about the kernel needed).
See the YAML at the top of this page for example.
2. The presence of `{code-cell}` directives, which will be executed with your book.
That's all that is needed to get started!
## Quickly add YAML metadata for MyST Notebooks
If you have a markdown file and you'd like to quickly add YAML metadata to it, so that Jupyter Book will treat it as a MyST Markdown Notebook, run the following command:
```
jupyter-book myst init path/to/markdownfile.md
```
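For reference, a text-based notebook created this way starts with YAML metadata and then uses `{code-cell}` directives. A minimal sketch (the exact metadata keys written by `jupyter-book myst init` may vary by version):

````md
---
jupytext:
  text_representation:
    format_name: myst
kernelspec:
  display_name: Python 3
  language: python
  name: python3
---

# My MyST notebook

```{code-cell} python
print(2 + 2)
```
````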
|
[email protected][email protected]@_build@html@[email protected]@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnel/legendgrouptitle/font/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="funnel.legendgrouptitle.font", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@funnel@legendgrouptitle@font@[email protected]_END.py
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/hoverlabel/grouptitlefont/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="size",
parent_name="layout.hoverlabel.grouptitlefont",
**kwargs,
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
min=kwargs.pop("min", 1),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@hoverlabel@grouptitlefont@[email protected]_END.py
|
{
"filename": "FS.py",
"repo_name": "rat-pac/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/SCons/Node/FS.py",
"type": "Python"
}
|
"""scons.Node.FS
File system nodes.
These Nodes represent the canonical external objects that people think
of when they think of building software: files and directories.
This holds a "default_fs" variable that should be initialized with an FS
that can be used by scripts or modules looking for the canonical default.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Node/FS.py 4043 2009/02/23 09:06:45 scons"
from itertools import izip
import cStringIO
import fnmatch
import os
import os.path
import re
import shutil
import stat
import string
import sys
import time
try:
import codecs
except ImportError:
pass
else:
# TODO(2.2): Remove when 2.3 becomes the minimal supported version.
try:
codecs.BOM_UTF8
except AttributeError:
codecs.BOM_UTF8 = '\xef\xbb\xbf'
try:
codecs.BOM_UTF16
except AttributeError:
if sys.byteorder == 'little':
codecs.BOM_UTF16 = '\xff\xfe'
else:
codecs.BOM_UTF16 = '\xfe\xff'
import SCons.Action
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Memoize
import SCons.Node
import SCons.Node.Alias
import SCons.Subst
import SCons.Util
import SCons.Warnings
from SCons.Debug import Trace
do_store_info = True
class EntryProxyAttributeError(AttributeError):
"""
An AttributeError subclass for recording and displaying the name
of the underlying Entry involved in an AttributeError exception.
"""
def __init__(self, entry_proxy, attribute):
AttributeError.__init__(self)
self.entry_proxy = entry_proxy
self.attribute = attribute
def __str__(self):
entry = self.entry_proxy.get()
fmt = "%s instance %s has no attribute %s"
return fmt % (entry.__class__.__name__,
repr(entry.name),
repr(self.attribute))
# The max_drift value: by default, use a cached signature value for
# any file that's been untouched for more than two days.
default_max_drift = 2*24*60*60
#
# We stringify these file system Nodes a lot. Turning a file system Node
# into a string is non-trivial, because the final string representation
# can depend on a lot of factors: whether it's a derived target or not,
# whether it's linked to a repository or source directory, and whether
# there's duplication going on. The normal technique for optimizing
# calculations like this is to memoize (cache) the string value, so you
# only have to do the calculation once.
#
# A number of the above factors, however, can be set after we've already
# been asked to return a string for a Node, because a Repository() or
# VariantDir() call or the like may not occur until later in SConscript
# files. So this variable controls whether we bother trying to save
# string values for Nodes. The wrapper interface can set this whenever
# they're done mucking with Repository and VariantDir and the other stuff,
# to let this module know it can start returning saved string values
# for Nodes.
#
Save_Strings = None
def save_strings(val):
global Save_Strings
Save_Strings = val
#
# Avoid unnecessary function calls by recording a Boolean value that
# tells us whether or not os.path.splitdrive() actually does anything
# on this system, and therefore whether we need to bother calling it
# when looking up path names in various methods below.
#
do_splitdrive = None
def initialize_do_splitdrive():
global do_splitdrive
drive, path = os.path.splitdrive('X:/foo')
do_splitdrive = not not drive
initialize_do_splitdrive()
#
needs_normpath_check = None
def initialize_normpath_check():
"""
Initialize the normpath_check regular expression.
This function is used by the unit tests to re-initialize the pattern
when testing for behavior with different values of os.sep.
"""
global needs_normpath_check
if os.sep == '/':
pattern = r'.*/|\.$|\.\.$'
else:
pattern = r'.*[/%s]|\.$|\.\.$' % re.escape(os.sep)
needs_normpath_check = re.compile(pattern)
initialize_normpath_check()
#
# SCons.Action objects for interacting with the outside world.
#
# The Node.FS methods in this module should use these actions to
# create and/or remove files and directories; they should *not* use
# os.{link,symlink,unlink,mkdir}(), etc., directly.
#
# Using these SCons.Action objects ensures that descriptions of these
# external activities are properly displayed, that the displays are
# suppressed when the -s (silent) option is used, and (most importantly)
# the actions are disabled when the -n option is used, in which case
# there should be *no* changes to the external file system(s)...
#
if hasattr(os, 'link'):
def _hardlink_func(fs, src, dst):
# If the source is a symlink, we can't just hard-link to it
# because a relative symlink may point somewhere completely
# different. We must disambiguate the symlink and then
# hard-link the final destination file.
while fs.islink(src):
link = fs.readlink(src)
if not os.path.isabs(link):
src = link
else:
src = os.path.join(os.path.dirname(src), link)
fs.link(src, dst)
else:
_hardlink_func = None
if hasattr(os, 'symlink'):
def _softlink_func(fs, src, dst):
fs.symlink(src, dst)
else:
_softlink_func = None
def _copy_func(fs, src, dest):
shutil.copy2(src, dest)
st = fs.stat(src)
fs.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
Valid_Duplicates = ['hard-soft-copy', 'soft-hard-copy',
'hard-copy', 'soft-copy', 'copy']
Link_Funcs = [] # contains the callables of the specified duplication style
def set_duplicate(duplicate):
# Fill in the Link_Funcs list according to the argument
# (discarding those not available on the platform).
# Set up the dictionary that maps the argument names to the
# underlying implementations. We do this inside this function,
# not in the top-level module code, so that we can remap os.link
# and os.symlink for testing purposes.
link_dict = {
'hard' : _hardlink_func,
'soft' : _softlink_func,
'copy' : _copy_func
}
if not duplicate in Valid_Duplicates:
raise SCons.Errors.InternalError, ("The argument of set_duplicate "
"should be in Valid_Duplicates")
global Link_Funcs
Link_Funcs = []
for func in string.split(duplicate,'-'):
if link_dict[func]:
Link_Funcs.append(link_dict[func])
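# --- Illustrative sketch (not part of the original SCons source) ---
# set_duplicate() turns one of the Valid_Duplicates strings into the ordered list
# of link functions that LinkFunc() below tries in turn, e.g.:
#
#   set_duplicate('soft-copy')
#   # Link_Funcs == [_softlink_func, _copy_func]   (on platforms with os.symlink)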
def LinkFunc(target, source, env):
# Relative paths cause problems with symbolic links, so
# we use absolute paths, which may be a problem for people
# who want to move their soft-linked src-trees around. Those
# people should use the 'hard-copy' mode, softlinks cannot be
# used for that; at least I have no idea how ...
src = source[0].abspath
dest = target[0].abspath
dir, file = os.path.split(dest)
if dir and not target[0].fs.isdir(dir):
os.makedirs(dir)
if not Link_Funcs:
# Set a default order of link functions.
set_duplicate('hard-soft-copy')
fs = source[0].fs
# Now link the files with the previously specified order.
for func in Link_Funcs:
try:
func(fs, src, dest)
break
except (IOError, OSError):
# An OSError indicates something happened like a permissions
# problem or an attempt to symlink across file-system
# boundaries. An IOError indicates something like the file
# not existing. In either case, keeping trying additional
# functions in the list and only raise an error if the last
# one failed.
if func == Link_Funcs[-1]:
# exception of the last link method (copy) are fatal
raise
return 0
Link = SCons.Action.Action(LinkFunc, None)
def LocalString(target, source, env):
return 'Local copy of %s from %s' % (target[0], source[0])
LocalCopy = SCons.Action.Action(LinkFunc, LocalString)
def UnlinkFunc(target, source, env):
t = target[0]
t.fs.unlink(t.abspath)
return 0
Unlink = SCons.Action.Action(UnlinkFunc, None)
def MkdirFunc(target, source, env):
t = target[0]
if not t.exists():
t.fs.mkdir(t.abspath)
return 0
Mkdir = SCons.Action.Action(MkdirFunc, None, presub=None)
MkdirBuilder = None
def get_MkdirBuilder():
global MkdirBuilder
if MkdirBuilder is None:
import SCons.Builder
import SCons.Defaults
# "env" will get filled in by Executor.get_build_env()
# calling SCons.Defaults.DefaultEnvironment() when necessary.
MkdirBuilder = SCons.Builder.Builder(action = Mkdir,
env = None,
explain = None,
is_explicit = None,
target_scanner = SCons.Defaults.DirEntryScanner,
name = "MkdirBuilder")
return MkdirBuilder
class _Null:
pass
_null = _Null()
DefaultSCCSBuilder = None
DefaultRCSBuilder = None
def get_DefaultSCCSBuilder():
global DefaultSCCSBuilder
if DefaultSCCSBuilder is None:
import SCons.Builder
# "env" will get filled in by Executor.get_build_env()
# calling SCons.Defaults.DefaultEnvironment() when necessary.
act = SCons.Action.Action('$SCCSCOM', '$SCCSCOMSTR')
DefaultSCCSBuilder = SCons.Builder.Builder(action = act,
env = None,
name = "DefaultSCCSBuilder")
return DefaultSCCSBuilder
def get_DefaultRCSBuilder():
global DefaultRCSBuilder
if DefaultRCSBuilder is None:
import SCons.Builder
# "env" will get filled in by Executor.get_build_env()
# calling SCons.Defaults.DefaultEnvironment() when necessary.
act = SCons.Action.Action('$RCS_COCOM', '$RCS_COCOMSTR')
DefaultRCSBuilder = SCons.Builder.Builder(action = act,
env = None,
name = "DefaultRCSBuilder")
return DefaultRCSBuilder
# Cygwin's os.path.normcase pretends it's on a case-sensitive filesystem.
_is_cygwin = sys.platform == "cygwin"
if os.path.normcase("TeSt") == os.path.normpath("TeSt") and not _is_cygwin:
def _my_normcase(x):
return x
else:
def _my_normcase(x):
return string.upper(x)
class DiskChecker:
def __init__(self, type, do, ignore):
self.type = type
self.do = do
self.ignore = ignore
self.set_do()
def set_do(self):
self.__call__ = self.do
def set_ignore(self):
self.__call__ = self.ignore
def set(self, list):
if self.type in list:
self.set_do()
else:
self.set_ignore()
def do_diskcheck_match(node, predicate, errorfmt):
result = predicate()
try:
# If calling the predicate() cached a None value from stat(),
# remove it so it doesn't interfere with later attempts to
# build this Node as we walk the DAG. (This isn't a great way
# to do this, we're reaching into an interface that doesn't
# really belong to us, but it's all about performance, so
# for now we'll just document the dependency...)
if node._memo['stat'] is None:
del node._memo['stat']
except (AttributeError, KeyError):
pass
if result:
raise TypeError, errorfmt % node.abspath
def ignore_diskcheck_match(node, predicate, errorfmt):
pass
def do_diskcheck_rcs(node, name):
try:
rcs_dir = node.rcs_dir
except AttributeError:
if node.entry_exists_on_disk('RCS'):
rcs_dir = node.Dir('RCS')
else:
rcs_dir = None
node.rcs_dir = rcs_dir
if rcs_dir:
return rcs_dir.entry_exists_on_disk(name+',v')
return None
def ignore_diskcheck_rcs(node, name):
return None
def do_diskcheck_sccs(node, name):
try:
sccs_dir = node.sccs_dir
except AttributeError:
if node.entry_exists_on_disk('SCCS'):
sccs_dir = node.Dir('SCCS')
else:
sccs_dir = None
node.sccs_dir = sccs_dir
if sccs_dir:
return sccs_dir.entry_exists_on_disk('s.'+name)
return None
def ignore_diskcheck_sccs(node, name):
return None
diskcheck_match = DiskChecker('match', do_diskcheck_match, ignore_diskcheck_match)
diskcheck_rcs = DiskChecker('rcs', do_diskcheck_rcs, ignore_diskcheck_rcs)
diskcheck_sccs = DiskChecker('sccs', do_diskcheck_sccs, ignore_diskcheck_sccs)
diskcheckers = [
diskcheck_match,
diskcheck_rcs,
diskcheck_sccs,
]
def set_diskcheck(list):
for dc in diskcheckers:
dc.set(list)
def diskcheck_types():
return map(lambda dc: dc.type, diskcheckers)
class EntryProxy(SCons.Util.Proxy):
def __get_abspath(self):
entry = self.get()
return SCons.Subst.SpecialAttrWrapper(entry.get_abspath(),
entry.name + "_abspath")
def __get_filebase(self):
name = self.get().name
return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[0],
name + "_filebase")
def __get_suffix(self):
name = self.get().name
return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[1],
name + "_suffix")
def __get_file(self):
name = self.get().name
return SCons.Subst.SpecialAttrWrapper(name, name + "_file")
def __get_base_path(self):
"""Return the file's directory and file name, with the
suffix stripped."""
entry = self.get()
return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(entry.get_path())[0],
entry.name + "_base")
def __get_posix_path(self):
"""Return the path with / as the path separator,
regardless of platform."""
if os.sep == '/':
return self
else:
entry = self.get()
r = string.replace(entry.get_path(), os.sep, '/')
return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_posix")
def __get_windows_path(self):
"""Return the path with \ as the path separator,
regardless of platform."""
if os.sep == '\\':
return self
else:
entry = self.get()
r = string.replace(entry.get_path(), os.sep, '\\')
return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_windows")
def __get_srcnode(self):
return EntryProxy(self.get().srcnode())
def __get_srcdir(self):
"""Returns the directory containing the source node linked to this
node via VariantDir(), or the directory of this node if not linked."""
return EntryProxy(self.get().srcnode().dir)
def __get_rsrcnode(self):
return EntryProxy(self.get().srcnode().rfile())
def __get_rsrcdir(self):
"""Returns the directory containing the source node linked to this
node via VariantDir(), or the directory of this node if not linked."""
return EntryProxy(self.get().srcnode().rfile().dir)
def __get_dir(self):
return EntryProxy(self.get().dir)
dictSpecialAttrs = { "base" : __get_base_path,
"posix" : __get_posix_path,
"windows" : __get_windows_path,
"win32" : __get_windows_path,
"srcpath" : __get_srcnode,
"srcdir" : __get_srcdir,
"dir" : __get_dir,
"abspath" : __get_abspath,
"filebase" : __get_filebase,
"suffix" : __get_suffix,
"file" : __get_file,
"rsrcpath" : __get_rsrcnode,
"rsrcdir" : __get_rsrcdir,
}
def __getattr__(self, name):
# This is how we implement the "special" attributes
# such as base, posix, srcdir, etc.
try:
attr_function = self.dictSpecialAttrs[name]
except KeyError:
try:
attr = SCons.Util.Proxy.__getattr__(self, name)
except AttributeError, e:
# Raise our own AttributeError subclass with an
# overridden __str__() method that identifies the
# name of the entry that caused the exception.
raise EntryProxyAttributeError(self, name)
return attr
else:
return attr_function(self)
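# EntryProxy is what backs construction-variable expansions such as
# "${TARGET.posix}" or "${SOURCE.filebase}": __getattr__() consults
# dictSpecialAttrs first and only falls through to the wrapped Node when
# the name is not one of the special attributes.  Illustrative sketch
# (assumed usage, not part of this module):
#     proxy = EntryProxy(env.File('src/foo.c'))
#     proxy.filebase      # SpecialAttrWrapper around 'foo'
#     proxy.posix         # path with '/' separators on any platform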
class Base(SCons.Node.Node):
"""A generic class for file system entries. This class is for
when we don't know yet whether the entry being looked up is a file
or a directory. Instances of this class can morph into either
Dir or File objects by a later, more precise lookup.
Note: this class does not define __cmp__ and __hash__ for
efficiency reasons. SCons does a lot of comparing of
Node.FS.{Base,Entry,File,Dir} objects, so those operations must be
as fast as possible, which means we want to use Python's built-in
object identity comparisons.
"""
memoizer_counters = []
def __init__(self, name, directory, fs):
"""Initialize a generic Node.FS.Base object.
Call the superclass initialization, take care of setting up
our relative and absolute paths, identify our parent
directory, and indicate that this node should use
signatures."""
if __debug__: logInstanceCreation(self, 'Node.FS.Base')
SCons.Node.Node.__init__(self)
# Filenames and paths are probably reused and are intern'ed to
# save some memory.
self.name = intern(name)
self.suffix = intern(SCons.Util.splitext(name)[1])
self.fs = fs
assert directory, "A directory must be provided"
self.abspath = intern(directory.entry_abspath(name))
self.labspath = intern(directory.entry_labspath(name))
if directory.path == '.':
self.path = intern(name)
else:
self.path = intern(directory.entry_path(name))
if directory.tpath == '.':
self.tpath = intern(name)
else:
self.tpath = intern(directory.entry_tpath(name))
self.path_elements = directory.path_elements + [self]
self.dir = directory
self.cwd = None # will hold the SConscript directory for target nodes
self.duplicate = directory.duplicate
def str_for_display(self):
return '"' + self.__str__() + '"'
def must_be_same(self, klass):
"""
This node, which already existed, is being looked up as the
specified klass. Raise an exception if it isn't.
"""
if isinstance(self, klass) or klass is Entry:
return
raise TypeError, "Tried to lookup %s '%s' as a %s." %\
(self.__class__.__name__, self.path, klass.__name__)
def get_dir(self):
return self.dir
def get_suffix(self):
return self.suffix
def rfile(self):
return self
def __str__(self):
"""A Node.FS.Base object's string representation is its path
name."""
global Save_Strings
if Save_Strings:
return self._save_str()
return self._get_str()
memoizer_counters.append(SCons.Memoize.CountValue('_save_str'))
def _save_str(self):
try:
return self._memo['_save_str']
except KeyError:
pass
result = intern(self._get_str())
self._memo['_save_str'] = result
return result
def _get_str(self):
global Save_Strings
if self.duplicate or self.is_derived():
return self.get_path()
srcnode = self.srcnode()
if srcnode.stat() is None and self.stat() is not None:
result = self.get_path()
else:
result = srcnode.get_path()
if not Save_Strings:
            # We're not at the point where we're saving the string
# representations of FS Nodes (because we haven't finished
# reading the SConscript files and need to have str() return
# things relative to them). That also means we can't yet
# cache values returned (or not returned) by stat(), since
# Python code in the SConscript files might still create
# or otherwise affect the on-disk file. So get rid of the
# values that the underlying stat() method saved.
try: del self._memo['stat']
except KeyError: pass
if self is not srcnode:
try: del srcnode._memo['stat']
except KeyError: pass
return result
rstr = __str__
memoizer_counters.append(SCons.Memoize.CountValue('stat'))
def stat(self):
try: return self._memo['stat']
except KeyError: pass
try: result = self.fs.stat(self.abspath)
except os.error: result = None
self._memo['stat'] = result
return result
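    # The _memo dictionary used above caches the results of expensive calls
    # (stat(), the saved string representation, rentry(), ...) on a per-Node
    # basis; memoizer_counters only feeds the statistics that SCons.Memoize
    # reports for --debug=memoizer runs.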
def exists(self):
return self.stat() is not None
def rexists(self):
return self.rfile().exists()
def getmtime(self):
st = self.stat()
if st: return st[stat.ST_MTIME]
else: return None
def getsize(self):
st = self.stat()
if st: return st[stat.ST_SIZE]
else: return None
def isdir(self):
st = self.stat()
return st is not None and stat.S_ISDIR(st[stat.ST_MODE])
def isfile(self):
st = self.stat()
return st is not None and stat.S_ISREG(st[stat.ST_MODE])
if hasattr(os, 'symlink'):
def islink(self):
try: st = self.fs.lstat(self.abspath)
except os.error: return 0
return stat.S_ISLNK(st[stat.ST_MODE])
else:
def islink(self):
return 0 # no symlinks
def is_under(self, dir):
if self is dir:
return 1
else:
return self.dir.is_under(dir)
def set_local(self):
self._local = 1
def srcnode(self):
"""If this node is in a build path, return the node
corresponding to its source file. Otherwise, return
ourself.
"""
srcdir_list = self.dir.srcdir_list()
if srcdir_list:
srcnode = srcdir_list[0].Entry(self.name)
srcnode.must_be_same(self.__class__)
return srcnode
return self
def get_path(self, dir=None):
"""Return path relative to the current working directory of the
Node.FS.Base object that owns us."""
if not dir:
dir = self.fs.getcwd()
if self == dir:
return '.'
path_elems = self.path_elements
try: i = path_elems.index(dir)
except ValueError: pass
else: path_elems = path_elems[i+1:]
path_elems = map(lambda n: n.name, path_elems)
return string.join(path_elems, os.sep)
def set_src_builder(self, builder):
"""Set the source code builder for this node."""
self.sbuilder = builder
if not self.has_builder():
self.builder_set(builder)
def src_builder(self):
"""Fetch the source code builder for this node.
If there isn't one, we cache the source code builder specified
for the directory (which in turn will cache the value from its
parent directory, and so on up to the file system root).
"""
try:
scb = self.sbuilder
except AttributeError:
scb = self.dir.src_builder()
self.sbuilder = scb
return scb
def get_abspath(self):
"""Get the absolute path of the file."""
return self.abspath
def for_signature(self):
# Return just our name. Even an absolute path would not work,
# because that can change thanks to symlinks or remapped network
# paths.
return self.name
def get_subst_proxy(self):
try:
return self._proxy
except AttributeError:
ret = EntryProxy(self)
self._proxy = ret
return ret
def target_from_source(self, prefix, suffix, splitext=SCons.Util.splitext):
"""
Generates a target entry that corresponds to this entry (usually
a source file) with the specified prefix and suffix.
Note that this method can be overridden dynamically for generated
files that need different behavior. See Tool/swig.py for
an example.
"""
return self.dir.Entry(prefix + splitext(self.name)[0] + suffix)
def _Rfindalldirs_key(self, pathlist):
return pathlist
memoizer_counters.append(SCons.Memoize.CountDict('Rfindalldirs', _Rfindalldirs_key))
def Rfindalldirs(self, pathlist):
"""
Return all of the directories for a given path list, including
corresponding "backing" directories in any repositories.
The Node lookups are relative to this Node (typically a
        directory), so memoizing the result saves cycles from looking
up the same path for each target in a given directory.
"""
try:
memo_dict = self._memo['Rfindalldirs']
except KeyError:
memo_dict = {}
self._memo['Rfindalldirs'] = memo_dict
else:
try:
return memo_dict[pathlist]
except KeyError:
pass
create_dir_relative_to_self = self.Dir
result = []
for path in pathlist:
if isinstance(path, SCons.Node.Node):
result.append(path)
else:
dir = create_dir_relative_to_self(path)
result.extend(dir.get_all_rdirs())
memo_dict[pathlist] = result
return result
def RDirs(self, pathlist):
"""Search for a list of directories in the Repository list."""
cwd = self.cwd or self.fs._cwd
return cwd.Rfindalldirs(pathlist)
memoizer_counters.append(SCons.Memoize.CountValue('rentry'))
def rentry(self):
try:
return self._memo['rentry']
except KeyError:
pass
result = self
if not self.exists():
norm_name = _my_normcase(self.name)
for dir in self.dir.get_all_rdirs():
try:
node = dir.entries[norm_name]
except KeyError:
if dir.entry_exists_on_disk(self.name):
result = dir.Entry(self.name)
break
self._memo['rentry'] = result
return result
def _glob1(self, pattern, ondisk=True, source=False, strings=False):
return []
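    # The base-class _glob1() above intentionally matches nothing; Dir
    # overrides it below with the real single-pattern implementation, and
    # Entry first resolves itself via disambiguate().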
class Entry(Base):
"""This is the class for generic Node.FS entries--that is, things
that could be a File or a Dir, but we're just not sure yet.
Consequently, the methods in this class really exist just to
transform their associated object into the right class when the
time comes, and then call the same-named method in the transformed
class."""
def diskcheck_match(self):
pass
def disambiguate(self, must_exist=None):
"""
"""
if self.isdir():
self.__class__ = Dir
self._morph()
elif self.isfile():
self.__class__ = File
self._morph()
self.clear()
else:
# There was nothing on-disk at this location, so look in
# the src directory.
#
# We can't just use self.srcnode() straight away because
# that would create an actual Node for this file in the src
# directory, and there might not be one. Instead, use the
# dir_on_disk() method to see if there's something on-disk
# with that name, in which case we can go ahead and call
# self.srcnode() to create the right type of entry.
srcdir = self.dir.srcnode()
if srcdir != self.dir and \
srcdir.entry_exists_on_disk(self.name) and \
self.srcnode().isdir():
self.__class__ = Dir
self._morph()
elif must_exist:
msg = "No such file or directory: '%s'" % self.abspath
raise SCons.Errors.UserError, msg
else:
self.__class__ = File
self._morph()
self.clear()
return self
def rfile(self):
"""We're a generic Entry, but the caller is actually looking for
a File at this point, so morph into one."""
self.__class__ = File
self._morph()
self.clear()
return File.rfile(self)
def scanner_key(self):
return self.get_suffix()
def get_contents(self):
"""Fetch the contents of the entry. Returns the exact binary
contents of the file."""
try:
self = self.disambiguate(must_exist=1)
except SCons.Errors.UserError:
# There was nothing on disk with which to disambiguate
# this entry. Leave it as an Entry, but return a null
# string so calls to get_contents() in emitters and the
# like (e.g. in qt.py) don't have to disambiguate by hand
# or catch the exception.
return ''
else:
return self.get_contents()
def get_text_contents(self):
"""Fetch the decoded text contents of a Unicode encoded Entry.
Since this should return the text contents from the file
system, we check to see into what sort of subclass we should
morph this Entry."""
try:
self = self.disambiguate(must_exist=1)
except SCons.Errors.UserError:
# There was nothing on disk with which to disambiguate
# this entry. Leave it as an Entry, but return a null
# string so calls to get_text_contents() in emitters and
# the like (e.g. in qt.py) don't have to disambiguate by
# hand or catch the exception.
return ''
else:
return self.get_text_contents()
def must_be_same(self, klass):
"""Called to make sure a Node is a Dir. Since we're an
Entry, we can morph into one."""
if self.__class__ is not klass:
self.__class__ = klass
self._morph()
self.clear()
# The following methods can get called before the Taskmaster has
# had a chance to call disambiguate() directly to see if this Entry
# should really be a Dir or a File. We therefore use these to call
# disambiguate() transparently (from our caller's point of view).
#
# Right now, this minimal set of methods has been derived by just
# looking at some of the methods that will obviously be called early
# in any of the various Taskmasters' calling sequences, and then
# empirically figuring out which additional methods are necessary
# to make various tests pass.
def exists(self):
"""Return if the Entry exists. Check the file system to see
what we should turn into first. Assume a file if there's no
directory."""
return self.disambiguate().exists()
def rel_path(self, other):
d = self.disambiguate()
if d.__class__ is Entry:
raise "rel_path() could not disambiguate File/Dir"
return d.rel_path(other)
def new_ninfo(self):
return self.disambiguate().new_ninfo()
def changed_since_last_build(self, target, prev_ni):
return self.disambiguate().changed_since_last_build(target, prev_ni)
def _glob1(self, pattern, ondisk=True, source=False, strings=False):
return self.disambiguate()._glob1(pattern, ondisk, source, strings)
def get_subst_proxy(self):
return self.disambiguate().get_subst_proxy()
# This is for later so we can differentiate between Entry the class and Entry
# the method of the FS class.
_classEntry = Entry
class LocalFS:
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
# This class implements an abstraction layer for operations involving
# a local file system. Essentially, this wraps any function in
# the os, os.path or shutil modules that we use to actually go do
# anything with or to the local file system.
#
# Note that there's a very good chance we'll refactor this part of
# the architecture in some way as we really implement the interface(s)
# for remote file system Nodes. For example, the right architecture
# might be to have this be a subclass instead of a base class.
# Nevertheless, we're using this as a first step in that direction.
#
# We're not using chdir() yet because the calling subclass method
# needs to use os.chdir() directly to avoid recursion. Will we
# really need this one?
#def chdir(self, path):
# return os.chdir(path)
def chmod(self, path, mode):
return os.chmod(path, mode)
def copy(self, src, dst):
return shutil.copy(src, dst)
def copy2(self, src, dst):
return shutil.copy2(src, dst)
def exists(self, path):
return os.path.exists(path)
def getmtime(self, path):
return os.path.getmtime(path)
def getsize(self, path):
return os.path.getsize(path)
def isdir(self, path):
return os.path.isdir(path)
def isfile(self, path):
return os.path.isfile(path)
def link(self, src, dst):
return os.link(src, dst)
def lstat(self, path):
return os.lstat(path)
def listdir(self, path):
return os.listdir(path)
def makedirs(self, path):
return os.makedirs(path)
def mkdir(self, path):
return os.mkdir(path)
def rename(self, old, new):
return os.rename(old, new)
def stat(self, path):
return os.stat(path)
def symlink(self, src, dst):
return os.symlink(src, dst)
def open(self, path):
return open(path)
def unlink(self, path):
return os.unlink(path)
if hasattr(os, 'symlink'):
def islink(self, path):
return os.path.islink(path)
else:
def islink(self, path):
return 0 # no symlinks
if hasattr(os, 'readlink'):
def readlink(self, file):
return os.readlink(file)
else:
def readlink(self, file):
return ''
#class RemoteFS:
# # Skeleton for the obvious methods we might need from the
# # abstraction layer for a remote filesystem.
# def upload(self, local_src, remote_dst):
# pass
# def download(self, remote_src, local_dst):
# pass
class FS(LocalFS):
memoizer_counters = []
def __init__(self, path = None):
"""Initialize the Node.FS subsystem.
The supplied path is the top of the source tree, where we
expect to find the top-level build file. If no path is
supplied, the current directory is the default.
The path argument must be a valid absolute path.
"""
if __debug__: logInstanceCreation(self, 'Node.FS')
self._memo = {}
self.Root = {}
self.SConstruct_dir = None
self.max_drift = default_max_drift
self.Top = None
if path is None:
self.pathTop = os.getcwd()
else:
self.pathTop = path
self.defaultDrive = _my_normcase(os.path.splitdrive(self.pathTop)[0])
self.Top = self.Dir(self.pathTop)
self.Top.path = '.'
self.Top.tpath = '.'
self._cwd = self.Top
DirNodeInfo.fs = self
FileNodeInfo.fs = self
def set_SConstruct_dir(self, dir):
self.SConstruct_dir = dir
def get_max_drift(self):
return self.max_drift
def set_max_drift(self, max_drift):
self.max_drift = max_drift
def getcwd(self):
return self._cwd
def chdir(self, dir, change_os_dir=0):
"""Change the current working directory for lookups.
If change_os_dir is true, we will also change the "real" cwd
to match.
"""
curr=self._cwd
try:
if dir is not None:
self._cwd = dir
if change_os_dir:
os.chdir(dir.abspath)
except OSError:
self._cwd = curr
raise
def get_root(self, drive):
"""
Returns the root directory for the specified drive, creating
it if necessary.
"""
drive = _my_normcase(drive)
try:
return self.Root[drive]
except KeyError:
root = RootDir(drive, self)
self.Root[drive] = root
if not drive:
self.Root[self.defaultDrive] = root
elif drive == self.defaultDrive:
self.Root[''] = root
return root
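    # get_root() memoizes one RootDir per drive string; on POSIX systems
    # os.path.splitdrive() never yields a drive, so every path hangs off a
    # single shared root.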
def _lookup(self, p, directory, fsclass, create=1):
"""
The generic entry point for Node lookup with user-supplied data.
This translates arbitrary input into a canonical Node.FS object
of the specified fsclass. The general approach for strings is
to turn it into a fully normalized absolute path and then call
the root directory's lookup_abs() method for the heavy lifting.
If the path name begins with '#', it is unconditionally
interpreted relative to the top-level directory of this FS. '#'
is treated as a synonym for the top-level SConstruct directory,
much like '~' is treated as a synonym for the user's home
directory in a UNIX shell. So both '#foo' and '#/foo' refer
to the 'foo' subdirectory underneath the top-level SConstruct
directory.
If the path name is relative, then the path is looked up relative
to the specified directory, or the current directory (self._cwd,
typically the SConscript directory) if the specified directory
is None.
"""
if isinstance(p, Base):
# It's already a Node.FS object. Make sure it's the right
# class and return.
p.must_be_same(fsclass)
return p
# str(p) in case it's something like a proxy object
p = str(p)
initial_hash = (p[0:1] == '#')
if initial_hash:
# There was an initial '#', so we strip it and override
# whatever directory they may have specified with the
# top-level SConstruct directory.
p = p[1:]
directory = self.Top
if directory and not isinstance(directory, Dir):
directory = self.Dir(directory)
if do_splitdrive:
drive, p = os.path.splitdrive(p)
else:
drive = ''
if drive and not p:
# This causes a naked drive letter to be treated as a synonym
# for the root directory on that drive.
p = os.sep
absolute = os.path.isabs(p)
needs_normpath = needs_normpath_check.match(p)
if initial_hash or not absolute:
# This is a relative lookup, either to the top-level
# SConstruct directory (because of the initial '#') or to
# the current directory (the path name is not absolute).
# Add the string to the appropriate directory lookup path,
# after which the whole thing gets normalized.
if not directory:
directory = self._cwd
if p:
p = directory.labspath + '/' + p
else:
p = directory.labspath
if needs_normpath:
p = os.path.normpath(p)
if drive or absolute:
root = self.get_root(drive)
else:
if not directory:
directory = self._cwd
root = directory.root
if os.sep != '/':
p = string.replace(p, os.sep, '/')
return root._lookup_abs(p, fsclass, create)
def Entry(self, name, directory = None, create = 1):
"""Look up or create a generic Entry node with the specified name.
If the name is a relative path (begins with ./, ../, or a file
name), then it is looked up relative to the supplied directory
node, or to the top level directory of the FS (supplied at
construction time) if no directory is supplied.
"""
return self._lookup(name, directory, Entry, create)
def File(self, name, directory = None, create = 1):
"""Look up or create a File node with the specified name. If
the name is a relative path (begins with ./, ../, or a file name),
then it is looked up relative to the supplied directory node,
or to the top level directory of the FS (supplied at construction
time) if no directory is supplied.
This method will raise TypeError if a directory is found at the
specified path.
"""
return self._lookup(name, directory, File, create)
def Dir(self, name, directory = None, create = True):
"""Look up or create a Dir node with the specified name. If
the name is a relative path (begins with ./, ../, or a file name),
then it is looked up relative to the supplied directory node,
or to the top level directory of the FS (supplied at construction
time) if no directory is supplied.
This method will raise TypeError if a normal file is found at the
specified path.
"""
return self._lookup(name, directory, Dir, create)
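    # Illustrative lookups (assumed paths, not part of this module), given
    # fs = FS('/home/user/project'):
    #     fs.File('#src/main.c')      # -> /home/user/project/src/main.c
    #     fs.Dir('build', some_dir)   # -> 'build' relative to some_dir
    #     fs.Entry('../lib')          # -> relative to the current SConscript dir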
def VariantDir(self, variant_dir, src_dir, duplicate=1):
"""Link the supplied variant directory to the source directory
for purposes of building files."""
if not isinstance(src_dir, SCons.Node.Node):
src_dir = self.Dir(src_dir)
if not isinstance(variant_dir, SCons.Node.Node):
variant_dir = self.Dir(variant_dir)
if src_dir.is_under(variant_dir):
raise SCons.Errors.UserError, "Source directory cannot be under variant directory."
if variant_dir.srcdir:
if variant_dir.srcdir == src_dir:
return # We already did this.
raise SCons.Errors.UserError, "'%s' already has a source directory: '%s'."%(variant_dir, variant_dir.srcdir)
variant_dir.link(src_dir, duplicate)
def Repository(self, *dirs):
"""Specify Repository directories to search."""
for d in dirs:
if not isinstance(d, SCons.Node.Node):
d = self.Dir(d)
self.Top.addRepository(d)
def variant_dir_target_climb(self, orig, dir, tail):
"""Create targets in corresponding variant directories
Climb the directory tree, and look up path names
relative to any linked variant directories we find.
Even though this loops and walks up the tree, we don't memoize
the return value because this is really only used to process
the command-line targets.
"""
targets = []
message = None
fmt = "building associated VariantDir targets: %s"
start_dir = dir
while dir:
for bd in dir.variant_dirs:
if start_dir.is_under(bd):
# If already in the build-dir location, don't reflect
return [orig], fmt % str(orig)
p = apply(os.path.join, [bd.path] + tail)
targets.append(self.Entry(p))
tail = [dir.name] + tail
dir = dir.up()
if targets:
message = fmt % string.join(map(str, targets))
return targets, message
def Glob(self, pathname, ondisk=True, source=True, strings=False, cwd=None):
"""
        Globs for and returns a list of Nodes (or strings) matching a
        pathname pattern.  This is mainly a shim layer that finds the
        right directory (cwd by default) and delegates to its glob()
        method below.
"""
if cwd is None:
cwd = self.getcwd()
return cwd.glob(pathname, ondisk, source, strings)
class DirNodeInfo(SCons.Node.NodeInfoBase):
# This should get reset by the FS initialization.
current_version_id = 1
fs = None
def str_to_node(self, s):
top = self.fs.Top
root = top.root
if do_splitdrive:
drive, s = os.path.splitdrive(s)
if drive:
root = self.fs.get_root(drive)
if not os.path.isabs(s):
s = top.labspath + '/' + s
return root._lookup_abs(s, Entry)
class DirBuildInfo(SCons.Node.BuildInfoBase):
current_version_id = 1
glob_magic_check = re.compile('[*?[]')
def has_glob_magic(s):
return glob_magic_check.search(s) is not None
class Dir(Base):
"""A class for directories in a file system.
"""
memoizer_counters = []
NodeInfo = DirNodeInfo
BuildInfo = DirBuildInfo
def __init__(self, name, directory, fs):
if __debug__: logInstanceCreation(self, 'Node.FS.Dir')
Base.__init__(self, name, directory, fs)
self._morph()
def _morph(self):
"""Turn a file system Node (either a freshly initialized directory
object or a separate Entry object) into a proper directory object.
Set up this directory's entries and hook it into the file
system tree. Specify that directories (this Node) don't use
signatures for calculating whether they're current.
"""
self.repositories = []
self.srcdir = None
self.entries = {}
self.entries['.'] = self
self.entries['..'] = self.dir
self.cwd = self
self.searched = 0
self._sconsign = None
self.variant_dirs = []
self.root = self.dir.root
# Don't just reset the executor, replace its action list,
# because it might have some pre-or post-actions that need to
# be preserved.
self.builder = get_MkdirBuilder()
self.get_executor().set_action_list(self.builder.action)
def diskcheck_match(self):
diskcheck_match(self, self.isfile,
"File %s found where directory expected.")
def __clearRepositoryCache(self, duplicate=None):
"""Called when we change the repository(ies) for a directory.
This clears any cached information that is invalidated by changing
the repository."""
for node in self.entries.values():
if node != self.dir:
if node != self and isinstance(node, Dir):
node.__clearRepositoryCache(duplicate)
else:
node.clear()
try:
del node._srcreps
except AttributeError:
pass
if duplicate is not None:
node.duplicate=duplicate
def __resetDuplicate(self, node):
if node != self:
node.duplicate = node.get_dir().duplicate
def Entry(self, name):
"""
Looks up or creates an entry node named 'name' relative to
this directory.
"""
return self.fs.Entry(name, self)
def Dir(self, name, create=True):
"""
Looks up or creates a directory node named 'name' relative to
this directory.
"""
return self.fs.Dir(name, self, create)
def File(self, name):
"""
Looks up or creates a file node named 'name' relative to
this directory.
"""
return self.fs.File(name, self)
def _lookup_rel(self, name, klass, create=1):
"""
Looks up a *normalized* relative path name, relative to this
directory.
This method is intended for use by internal lookups with
already-normalized path data. For general-purpose lookups,
use the Entry(), Dir() and File() methods above.
This method does *no* input checking and will die or give
incorrect results if it's passed a non-normalized path name (e.g.,
a path containing '..'), an absolute path name, a top-relative
('#foo') path name, or any kind of object.
"""
name = self.entry_labspath(name)
return self.root._lookup_abs(name, klass, create)
def link(self, srcdir, duplicate):
"""Set this directory as the variant directory for the
supplied source directory."""
self.srcdir = srcdir
self.duplicate = duplicate
self.__clearRepositoryCache(duplicate)
srcdir.variant_dirs.append(self)
def getRepositories(self):
"""Returns a list of repositories for this directory.
"""
if self.srcdir and not self.duplicate:
return self.srcdir.get_all_rdirs() + self.repositories
return self.repositories
memoizer_counters.append(SCons.Memoize.CountValue('get_all_rdirs'))
def get_all_rdirs(self):
try:
return list(self._memo['get_all_rdirs'])
except KeyError:
pass
result = [self]
fname = '.'
dir = self
while dir:
for rep in dir.getRepositories():
result.append(rep.Dir(fname))
if fname == '.':
fname = dir.name
else:
fname = dir.name + os.sep + fname
dir = dir.up()
self._memo['get_all_rdirs'] = list(result)
return result
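    # get_all_rdirs() therefore yields this directory first, followed by its
    # counterparts inside each Repository, walking up the tree so repository
    # lookups also work for nested subdirectories.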
def addRepository(self, dir):
if dir != self and not dir in self.repositories:
self.repositories.append(dir)
dir.tpath = '.'
self.__clearRepositoryCache()
def up(self):
return self.entries['..']
def _rel_path_key(self, other):
return str(other)
memoizer_counters.append(SCons.Memoize.CountDict('rel_path', _rel_path_key))
def rel_path(self, other):
"""Return a path to "other" relative to this directory.
"""
# This complicated and expensive method, which constructs relative
# paths between arbitrary Node.FS objects, is no longer used
# by SCons itself. It was introduced to store dependency paths
# in .sconsign files relative to the target, but that ended up
# being significantly inefficient.
#
# We're continuing to support the method because some SConstruct
# files out there started using it when it was available, and
        # we're all about backwards compatibility.
try:
memo_dict = self._memo['rel_path']
except KeyError:
memo_dict = {}
self._memo['rel_path'] = memo_dict
else:
try:
return memo_dict[other]
except KeyError:
pass
if self is other:
result = '.'
elif not other in self.path_elements:
try:
other_dir = other.get_dir()
except AttributeError:
result = str(other)
else:
if other_dir is None:
result = other.name
else:
dir_rel_path = self.rel_path(other_dir)
if dir_rel_path == '.':
result = other.name
else:
result = dir_rel_path + os.sep + other.name
else:
i = self.path_elements.index(other) + 1
path_elems = ['..'] * (len(self.path_elements) - i) \
+ map(lambda n: n.name, other.path_elements[i:])
result = string.join(path_elems, os.sep)
memo_dict[other] = result
return result
def get_env_scanner(self, env, kw={}):
import SCons.Defaults
return SCons.Defaults.DirEntryScanner
def get_target_scanner(self):
import SCons.Defaults
return SCons.Defaults.DirEntryScanner
def get_found_includes(self, env, scanner, path):
"""Return this directory's implicit dependencies.
We don't bother caching the results because the scan typically
shouldn't be requested more than once (as opposed to scanning
.h file contents, which can be requested as many times as the
        file is #included by other files).
"""
if not scanner:
return []
# Clear cached info for this Dir. If we already visited this
# directory on our walk down the tree (because we didn't know at
# that point it was being used as the source for another Node)
# then we may have calculated build signature before realizing
# we had to scan the disk. Now that we have to, though, we need
# to invalidate the old calculated signature so that any node
# dependent on our directory structure gets one that includes
# info about everything on disk.
self.clear()
return scanner(self, env, path)
#
# Taskmaster interface subsystem
#
def prepare(self):
pass
def build(self, **kw):
"""A null "builder" for directories."""
global MkdirBuilder
if self.builder is not MkdirBuilder:
apply(SCons.Node.Node.build, [self,], kw)
#
#
#
def _create(self):
"""Create this directory, silently and without worrying about
whether the builder is the default or not."""
listDirs = []
parent = self
while parent:
if parent.exists():
break
listDirs.append(parent)
p = parent.up()
if p is None:
                # Don't use a while/else clause for this condition: by the
                # time the else branch ran, parent would be None and would
                # have no .path attribute.
raise SCons.Errors.StopError, parent.path
parent = p
listDirs.reverse()
for dirnode in listDirs:
try:
# Don't call dirnode.build(), call the base Node method
# directly because we definitely *must* create this
# directory. The dirnode.build() method will suppress
# the build if it's the default builder.
SCons.Node.Node.build(dirnode)
dirnode.get_executor().nullify()
# The build() action may or may not have actually
# created the directory, depending on whether the -n
# option was used or not. Delete the _exists and
# _rexists attributes so they can be reevaluated.
dirnode.clear()
except OSError:
pass
def multiple_side_effect_has_builder(self):
global MkdirBuilder
return self.builder is not MkdirBuilder and self.has_builder()
def alter_targets(self):
"""Return any corresponding targets in a variant directory.
"""
return self.fs.variant_dir_target_climb(self, self, [])
def scanner_key(self):
"""A directory does not get scanned."""
return None
def get_text_contents(self):
"""We already emit things in text, so just return the binary
version."""
return self.get_contents()
def get_contents(self):
"""Return content signatures and names of all our children
separated by new-lines. Ensure that the nodes are sorted."""
contents = []
name_cmp = lambda a, b: cmp(a.name, b.name)
sorted_children = self.children()[:]
sorted_children.sort(name_cmp)
for node in sorted_children:
contents.append('%s %s\n' % (node.get_csig(), node.name))
return string.join(contents, '')
def get_csig(self):
"""Compute the content signature for Directory nodes. In
general, this is not needed and the content signature is not
stored in the DirNodeInfo. However, if get_contents on a Dir
node is called which has a child directory, the child
directory should return the hash of its contents."""
contents = self.get_contents()
return SCons.Util.MD5signature(contents)
def do_duplicate(self, src):
pass
changed_since_last_build = SCons.Node.Node.state_has_changed
def is_up_to_date(self):
"""If any child is not up-to-date, then this directory isn't,
either."""
if self.builder is not MkdirBuilder and not self.exists():
return 0
up_to_date = SCons.Node.up_to_date
for kid in self.children():
if kid.get_state() > up_to_date:
return 0
return 1
def rdir(self):
if not self.exists():
norm_name = _my_normcase(self.name)
for dir in self.dir.get_all_rdirs():
try: node = dir.entries[norm_name]
except KeyError: node = dir.dir_on_disk(self.name)
if node and node.exists() and \
(isinstance(dir, Dir) or isinstance(dir, Entry)):
return node
return self
def sconsign(self):
"""Return the .sconsign file info for this directory,
creating it first if necessary."""
if not self._sconsign:
import SCons.SConsign
self._sconsign = SCons.SConsign.ForDirectory(self)
return self._sconsign
def srcnode(self):
"""Dir has a special need for srcnode()...if we
have a srcdir attribute set, then that *is* our srcnode."""
if self.srcdir:
return self.srcdir
return Base.srcnode(self)
def get_timestamp(self):
"""Return the latest timestamp from among our children"""
stamp = 0
for kid in self.children():
if kid.get_timestamp() > stamp:
stamp = kid.get_timestamp()
return stamp
def entry_abspath(self, name):
return self.abspath + os.sep + name
def entry_labspath(self, name):
return self.labspath + '/' + name
def entry_path(self, name):
return self.path + os.sep + name
def entry_tpath(self, name):
return self.tpath + os.sep + name
def entry_exists_on_disk(self, name):
try:
d = self.on_disk_entries
except AttributeError:
d = {}
try:
entries = os.listdir(self.abspath)
except OSError:
pass
else:
for entry in map(_my_normcase, entries):
d[entry] = True
self.on_disk_entries = d
if sys.platform == 'win32':
name = _my_normcase(name)
result = d.get(name)
if result is None:
# Belt-and-suspenders for Windows: check directly for
# 8.3 file names that don't show up in os.listdir().
result = os.path.exists(self.abspath + os.sep + name)
d[name] = result
return result
else:
return d.has_key(name)
memoizer_counters.append(SCons.Memoize.CountValue('srcdir_list'))
def srcdir_list(self):
try:
return self._memo['srcdir_list']
except KeyError:
pass
result = []
dirname = '.'
dir = self
while dir:
if dir.srcdir:
result.append(dir.srcdir.Dir(dirname))
dirname = dir.name + os.sep + dirname
dir = dir.up()
self._memo['srcdir_list'] = result
return result
def srcdir_duplicate(self, name):
for dir in self.srcdir_list():
if self.is_under(dir):
# We shouldn't source from something in the build path;
# variant_dir is probably under src_dir, in which case
# we are reflecting.
break
if dir.entry_exists_on_disk(name):
srcnode = dir.Entry(name).disambiguate()
if self.duplicate:
node = self.Entry(name).disambiguate()
node.do_duplicate(srcnode)
return node
else:
return srcnode
return None
def _srcdir_find_file_key(self, filename):
return filename
memoizer_counters.append(SCons.Memoize.CountDict('srcdir_find_file', _srcdir_find_file_key))
def srcdir_find_file(self, filename):
try:
memo_dict = self._memo['srcdir_find_file']
except KeyError:
memo_dict = {}
self._memo['srcdir_find_file'] = memo_dict
else:
try:
return memo_dict[filename]
except KeyError:
pass
def func(node):
if (isinstance(node, File) or isinstance(node, Entry)) and \
(node.is_derived() or node.exists()):
return node
return None
norm_name = _my_normcase(filename)
for rdir in self.get_all_rdirs():
try: node = rdir.entries[norm_name]
except KeyError: node = rdir.file_on_disk(filename)
else: node = func(node)
if node:
result = (node, self)
memo_dict[filename] = result
return result
for srcdir in self.srcdir_list():
for rdir in srcdir.get_all_rdirs():
try: node = rdir.entries[norm_name]
except KeyError: node = rdir.file_on_disk(filename)
else: node = func(node)
if node:
result = (File(filename, self, self.fs), srcdir)
memo_dict[filename] = result
return result
result = (None, None)
memo_dict[filename] = result
return result
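    # srcdir_find_file() looks for the file first in this directory (and its
    # repository counterparts) and then in any linked source directories,
    # returning a (node, directory) tuple; (None, None) means not found.
    # Results are memoized per file name.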
def dir_on_disk(self, name):
if self.entry_exists_on_disk(name):
try: return self.Dir(name)
except TypeError: pass
node = self.srcdir_duplicate(name)
if isinstance(node, File):
return None
return node
def file_on_disk(self, name):
if self.entry_exists_on_disk(name) or \
diskcheck_rcs(self, name) or \
diskcheck_sccs(self, name):
try: return self.File(name)
except TypeError: pass
node = self.srcdir_duplicate(name)
if isinstance(node, Dir):
return None
return node
def walk(self, func, arg):
"""
Walk this directory tree by calling the specified function
for each directory in the tree.
This behaves like the os.path.walk() function, but for in-memory
Node.FS.Dir objects. The function takes the same arguments as
the functions passed to os.path.walk():
func(arg, dirname, fnames)
Except that "dirname" will actually be the directory *Node*,
not the string. The '.' and '..' entries are excluded from
fnames. The fnames list may be modified in-place to filter the
subdirectories visited or otherwise impose a specific order.
The "arg" argument is always passed to func() and may be used
in any way (or ignored, passing None is common).
"""
entries = self.entries
names = entries.keys()
names.remove('.')
names.remove('..')
func(arg, self, names)
select_dirs = lambda n, e=entries: isinstance(e[n], Dir)
for dirname in filter(select_dirs, names):
entries[dirname].walk(func, arg)
def glob(self, pathname, ondisk=True, source=False, strings=False):
"""
Returns a list of Nodes (or strings) matching a specified
pathname pattern.
Pathname patterns follow UNIX shell semantics: * matches
any-length strings of any characters, ? matches any character,
and [] can enclose lists or ranges of characters. Matches do
not span directory separators.
The matches take into account Repositories, returning local
Nodes if a corresponding entry exists in a Repository (either
an in-memory Node or something on disk).
        By default, the glob() function matches entries that exist
on-disk, in addition to in-memory Nodes. Setting the "ondisk"
argument to False (or some other non-true value) causes the glob()
function to only match in-memory Nodes. The default behavior is
to return both the on-disk and in-memory Nodes.
The "source" argument, when true, specifies that corresponding
source Nodes must be returned if you're globbing in a build
directory (initialized with VariantDir()). The default behavior
is to return Nodes local to the VariantDir().
The "strings" argument, when true, returns the matches as strings,
not Nodes. The strings are path names relative to this directory.
The underlying algorithm is adapted from the glob.glob() function
in the Python library (but heavily modified), and uses fnmatch()
under the covers.
"""
dirname, basename = os.path.split(pathname)
if not dirname:
return self._glob1(basename, ondisk, source, strings)
if has_glob_magic(dirname):
list = self.glob(dirname, ondisk, source, strings=False)
else:
list = [self.Dir(dirname, create=True)]
result = []
for dir in list:
r = dir._glob1(basename, ondisk, source, strings)
if strings:
r = map(lambda x, d=str(dir): os.path.join(d, x), r)
result.extend(r)
result.sort(lambda a, b: cmp(str(a), str(b)))
return result
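    # Illustrative use (assumed file layout, not part of this module);
    # Glob() in an SConscript reaches this method through FS.Glob() above:
    #     sources = Glob('src/*.c')              # Nodes, repositories included
    #     headers = Glob('*.h', strings=True)    # plain path strings instead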
def _glob1(self, pattern, ondisk=True, source=False, strings=False):
"""
Globs for and returns a list of entry names matching a single
pattern in this directory.
This searches any repositories and source directories for
corresponding entries and returns a Node (or string) relative
to the current directory if an entry is found anywhere.
TODO: handle pattern with no wildcard
"""
search_dir_list = self.get_all_rdirs()
for srcdir in self.srcdir_list():
search_dir_list.extend(srcdir.get_all_rdirs())
selfEntry = self.Entry
names = []
for dir in search_dir_list:
# We use the .name attribute from the Node because the keys of
# the dir.entries dictionary are normalized (that is, all upper
# case) on case-insensitive systems like Windows.
#node_names = [ v.name for k, v in dir.entries.items() if k not in ('.', '..') ]
entry_names = filter(lambda n: n not in ('.', '..'), dir.entries.keys())
node_names = map(lambda n, e=dir.entries: e[n].name, entry_names)
names.extend(node_names)
if not strings:
# Make sure the working directory (self) actually has
# entries for all Nodes in repositories or variant dirs.
for name in node_names: selfEntry(name)
if ondisk:
try:
disk_names = os.listdir(dir.abspath)
except os.error:
continue
names.extend(disk_names)
if not strings:
# We're going to return corresponding Nodes in
# the local directory, so we need to make sure
# those Nodes exist. We only want to create
# Nodes for the entries that will match the
# specified pattern, though, which means we
# need to filter the list here, even though
# the overall list will also be filtered later,
# after we exit this loop.
if pattern[0] != '.':
#disk_names = [ d for d in disk_names if d[0] != '.' ]
disk_names = filter(lambda x: x[0] != '.', disk_names)
disk_names = fnmatch.filter(disk_names, pattern)
dirEntry = dir.Entry
for name in disk_names:
# Add './' before disk filename so that '#' at
# beginning of filename isn't interpreted.
name = './' + name
node = dirEntry(name).disambiguate()
n = selfEntry(name)
if n.__class__ != node.__class__:
n.__class__ = node.__class__
n._morph()
names = set(names)
if pattern[0] != '.':
#names = [ n for n in names if n[0] != '.' ]
names = filter(lambda x: x[0] != '.', names)
names = fnmatch.filter(names, pattern)
if strings:
return names
#return [ self.entries[_my_normcase(n)] for n in names ]
return map(lambda n, e=self.entries: e[_my_normcase(n)], names)
class RootDir(Dir):
"""A class for the root directory of a file system.
This is the same as a Dir class, except that the path separator
('/' or '\\') is actually part of the name, so we don't need to
add a separator when creating the path names of entries within
this directory.
"""
def __init__(self, name, fs):
if __debug__: logInstanceCreation(self, 'Node.FS.RootDir')
# We're going to be our own parent directory (".." entry and .dir
# attribute) so we have to set up some values so Base.__init__()
        # won't gag when it calls some of our methods.
self.abspath = ''
self.labspath = ''
self.path = ''
self.tpath = ''
self.path_elements = []
self.duplicate = 0
self.root = self
Base.__init__(self, name, self, fs)
# Now set our paths to what we really want them to be: the
# initial drive letter (the name) plus the directory separator,
# except for the "lookup abspath," which does not have the
# drive letter.
self.abspath = name + os.sep
self.labspath = ''
self.path = name + os.sep
self.tpath = name + os.sep
self._morph()
self._lookupDict = {}
# The // and os.sep + os.sep entries are necessary because
# os.path.normpath() seems to preserve double slashes at the
# beginning of a path (presumably for UNC path names), but
# collapses triple slashes to a single slash.
self._lookupDict[''] = self
self._lookupDict['/'] = self
self._lookupDict['//'] = self
self._lookupDict[os.sep] = self
self._lookupDict[os.sep + os.sep] = self
def must_be_same(self, klass):
if klass is Dir:
return
Base.must_be_same(self, klass)
def _lookup_abs(self, p, klass, create=1):
"""
Fast (?) lookup of a *normalized* absolute path.
This method is intended for use by internal lookups with
already-normalized path data. For general-purpose lookups,
use the FS.Entry(), FS.Dir() or FS.File() methods.
The caller is responsible for making sure we're passed a
normalized absolute path; we merely let Python's dictionary look
up and return the One True Node.FS object for the path.
        If a Node for the specified "p" doesn't already exist, and
"create" is specified, the Node may be created after recursive
invocation to find or create the parent directory or directories.
"""
k = _my_normcase(p)
try:
result = self._lookupDict[k]
except KeyError:
if not create:
raise SCons.Errors.UserError
# There is no Node for this path name, and we're allowed
# to create it.
dir_name, file_name = os.path.split(p)
dir_node = self._lookup_abs(dir_name, Dir)
result = klass(file_name, dir_node, self.fs)
# Double-check on disk (as configured) that the Node we
# created matches whatever is out there in the real world.
result.diskcheck_match()
self._lookupDict[k] = result
dir_node.entries[_my_normcase(file_name)] = result
dir_node.implicit = None
else:
# There is already a Node for this path name. Allow it to
# complain if we were looking for an inappropriate type.
result.must_be_same(klass)
return result
def __str__(self):
return self.abspath
def entry_abspath(self, name):
return self.abspath + name
def entry_labspath(self, name):
return '/' + name
def entry_path(self, name):
return self.path + name
def entry_tpath(self, name):
return self.tpath + name
def is_under(self, dir):
if self is dir:
return 1
else:
return 0
def up(self):
return None
def get_dir(self):
return None
def src_builder(self):
return _null
class FileNodeInfo(SCons.Node.NodeInfoBase):
current_version_id = 1
field_list = ['csig', 'timestamp', 'size']
# This should get reset by the FS initialization.
fs = None
def str_to_node(self, s):
top = self.fs.Top
root = top.root
if do_splitdrive:
drive, s = os.path.splitdrive(s)
if drive:
root = self.fs.get_root(drive)
if not os.path.isabs(s):
s = top.labspath + '/' + s
return root._lookup_abs(s, Entry)
class FileBuildInfo(SCons.Node.BuildInfoBase):
current_version_id = 1
def convert_to_sconsign(self):
"""
Converts this FileBuildInfo object for writing to a .sconsign file
This replaces each Node in our various dependency lists with its
usual string representation: relative to the top-level SConstruct
directory, or an absolute path if it's outside.
"""
if os.sep == '/':
node_to_str = str
else:
def node_to_str(n):
try:
s = n.path
except AttributeError:
s = str(n)
else:
s = string.replace(s, os.sep, '/')
return s
for attr in ['bsources', 'bdepends', 'bimplicit']:
try:
val = getattr(self, attr)
except AttributeError:
pass
else:
setattr(self, attr, map(node_to_str, val))
def convert_from_sconsign(self, dir, name):
"""
Converts a newly-read FileBuildInfo object for in-SCons use
For normal up-to-date checking, we don't have any conversion to
perform--but we're leaving this method here to make that clear.
"""
pass
def prepare_dependencies(self):
"""
Prepares a FileBuildInfo object for explaining what changed
The bsources, bdepends and bimplicit lists have all been
stored on disk as paths relative to the top-level SConstruct
directory. Convert the strings to actual Nodes (for use by the
--debug=explain code and --implicit-cache).
"""
attrs = [
('bsources', 'bsourcesigs'),
('bdepends', 'bdependsigs'),
('bimplicit', 'bimplicitsigs'),
]
for (nattr, sattr) in attrs:
try:
strings = getattr(self, nattr)
nodeinfos = getattr(self, sattr)
except AttributeError:
continue
nodes = []
for s, ni in izip(strings, nodeinfos):
if not isinstance(s, SCons.Node.Node):
s = ni.str_to_node(s)
nodes.append(s)
setattr(self, nattr, nodes)
def format(self, names=0):
result = []
bkids = self.bsources + self.bdepends + self.bimplicit
bkidsigs = self.bsourcesigs + self.bdependsigs + self.bimplicitsigs
for bkid, bkidsig in izip(bkids, bkidsigs):
result.append(str(bkid) + ': ' +
string.join(bkidsig.format(names=names), ' '))
result.append('%s [%s]' % (self.bactsig, self.bact))
return string.join(result, '\n')
class File(Base):
"""A class for files in a file system.
"""
memoizer_counters = []
NodeInfo = FileNodeInfo
BuildInfo = FileBuildInfo
md5_chunksize = 64
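    # md5_chunksize is expressed in kilobytes; get_content_hash() below
    # multiplies it by 1024 before passing it to MD5filesignature().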
def diskcheck_match(self):
diskcheck_match(self, self.isdir,
"Directory %s found where file expected.")
def __init__(self, name, directory, fs):
if __debug__: logInstanceCreation(self, 'Node.FS.File')
Base.__init__(self, name, directory, fs)
self._morph()
def Entry(self, name):
"""Create an entry node named 'name' relative to
the directory of this file."""
return self.dir.Entry(name)
def Dir(self, name, create=True):
"""Create a directory node named 'name' relative to
the directory of this file."""
return self.dir.Dir(name, create=create)
def Dirs(self, pathlist):
"""Create a list of directories relative to the SConscript
directory of this file."""
# TODO(1.5)
# return [self.Dir(p) for p in pathlist]
return map(lambda p, s=self: s.Dir(p), pathlist)
def File(self, name):
"""Create a file node named 'name' relative to
the directory of this file."""
return self.dir.File(name)
#def generate_build_dict(self):
# """Return an appropriate dictionary of values for building
# this File."""
# return {'Dir' : self.Dir,
# 'File' : self.File,
# 'RDirs' : self.RDirs}
def _morph(self):
"""Turn a file system node into a File object."""
self.scanner_paths = {}
if not hasattr(self, '_local'):
self._local = 0
# If there was already a Builder set on this entry, then
# we need to make sure we call the target-decider function,
# not the source-decider. Reaching in and doing this by hand
# is a little bogus. We'd prefer to handle this by adding
# an Entry.builder_set() method that disambiguates like the
# other methods, but that starts running into problems with the
# fragile way we initialize Dir Nodes with their Mkdir builders,
# yet still allow them to be overridden by the user. Since it's
# not clear right now how to fix that, stick with what works
# until it becomes clear...
if self.has_builder():
self.changed_since_last_build = self.decide_target
def scanner_key(self):
return self.get_suffix()
def get_contents(self):
if not self.rexists():
return ''
fname = self.rfile().abspath
try:
contents = open(fname, "rb").read()
except EnvironmentError, e:
if not e.filename:
e.filename = fname
raise
return contents
try:
import codecs
except ImportError:
get_text_contents = get_contents
else:
# This attempts to figure out what the encoding of the text is
# based upon the BOM bytes, and then decodes the contents so that
# it's a valid python string.
def get_text_contents(self):
contents = self.get_contents()
if contents.startswith(codecs.BOM_UTF8):
contents = contents.decode('utf-8')
elif contents.startswith(codecs.BOM_UTF16):
contents = contents.decode('utf-16')
return contents
def get_content_hash(self):
"""
Compute and return the MD5 hash for this file.
"""
if not self.rexists():
return SCons.Util.MD5signature('')
fname = self.rfile().abspath
try:
cs = SCons.Util.MD5filesignature(fname,
chunksize=SCons.Node.FS.File.md5_chunksize*1024)
except EnvironmentError, e:
if not e.filename:
e.filename = fname
raise
return cs
memoizer_counters.append(SCons.Memoize.CountValue('get_size'))
def get_size(self):
try:
return self._memo['get_size']
except KeyError:
pass
if self.rexists():
size = self.rfile().getsize()
else:
size = 0
self._memo['get_size'] = size
return size
memoizer_counters.append(SCons.Memoize.CountValue('get_timestamp'))
def get_timestamp(self):
try:
return self._memo['get_timestamp']
except KeyError:
pass
if self.rexists():
timestamp = self.rfile().getmtime()
else:
timestamp = 0
self._memo['get_timestamp'] = timestamp
return timestamp
def store_info(self):
# Merge our build information into the already-stored entry.
        # This accommodates "chained builds" where a file that's a target
# in one build (SConstruct file) is a source in a different build.
# See test/chained-build.py for the use case.
if do_store_info:
self.dir.sconsign().store_info(self.name, self)
convert_copy_attrs = [
'bsources',
'bimplicit',
'bdepends',
'bact',
'bactsig',
'ninfo',
]
convert_sig_attrs = [
'bsourcesigs',
'bimplicitsigs',
'bdependsigs',
]
def convert_old_entry(self, old_entry):
# Convert a .sconsign entry from before the Big Signature
# Refactoring, doing what we can to convert its information
# to the new .sconsign entry format.
#
# The old format looked essentially like this:
#
# BuildInfo
# .ninfo (NodeInfo)
# .bsig
# .csig
# .timestamp
# .size
# .bsources
# .bsourcesigs ("signature" list)
# .bdepends
# .bdependsigs ("signature" list)
# .bimplicit
# .bimplicitsigs ("signature" list)
# .bact
# .bactsig
#
# The new format looks like this:
#
# .ninfo (NodeInfo)
# .bsig
# .csig
# .timestamp
# .size
# .binfo (BuildInfo)
# .bsources
# .bsourcesigs (NodeInfo list)
# .bsig
# .csig
# .timestamp
# .size
# .bdepends
# .bdependsigs (NodeInfo list)
# .bsig
# .csig
# .timestamp
# .size
# .bimplicit
# .bimplicitsigs (NodeInfo list)
# .bsig
# .csig
# .timestamp
# .size
# .bact
# .bactsig
#
# The basic idea of the new structure is that a NodeInfo always
# holds all available information about the state of a given Node
# at a certain point in time. The various .b*sigs lists can just
# be a list of pointers to the .ninfo attributes of the different
# dependent nodes, without any copying of information until it's
# time to pickle it for writing out to a .sconsign file.
#
# The complicating issue is that the *old* format only stored one
# "signature" per dependency, based on however the *last* build
# was configured. We don't know from just looking at it whether
# it was a build signature, a content signature, or a timestamp
# "signature". Since we no longer use build signatures, the
# best we can do is look at the length and if it's thirty two,
# assume that it was (or might have been) a content signature.
# If it was actually a build signature, then it will cause a
# rebuild anyway when it doesn't match the new content signature,
# but that's probably the best we can do.
import SCons.SConsign
new_entry = SCons.SConsign.SConsignEntry()
new_entry.binfo = self.new_binfo()
binfo = new_entry.binfo
for attr in self.convert_copy_attrs:
try:
value = getattr(old_entry, attr)
except AttributeError:
continue
setattr(binfo, attr, value)
delattr(old_entry, attr)
for attr in self.convert_sig_attrs:
try:
sig_list = getattr(old_entry, attr)
except AttributeError:
continue
value = []
for sig in sig_list:
ninfo = self.new_ninfo()
if len(sig) == 32:
ninfo.csig = sig
else:
ninfo.timestamp = sig
value.append(ninfo)
setattr(binfo, attr, value)
delattr(old_entry, attr)
return new_entry
memoizer_counters.append(SCons.Memoize.CountValue('get_stored_info'))
def get_stored_info(self):
try:
return self._memo['get_stored_info']
except KeyError:
pass
try:
sconsign_entry = self.dir.sconsign().get_entry(self.name)
except (KeyError, EnvironmentError):
import SCons.SConsign
sconsign_entry = SCons.SConsign.SConsignEntry()
sconsign_entry.binfo = self.new_binfo()
sconsign_entry.ninfo = self.new_ninfo()
else:
if isinstance(sconsign_entry, FileBuildInfo):
# This is a .sconsign file from before the Big Signature
# Refactoring; convert it as best we can.
sconsign_entry = self.convert_old_entry(sconsign_entry)
try:
delattr(sconsign_entry.ninfo, 'bsig')
except AttributeError:
pass
self._memo['get_stored_info'] = sconsign_entry
return sconsign_entry
def get_stored_implicit(self):
binfo = self.get_stored_info().binfo
binfo.prepare_dependencies()
try: return binfo.bimplicit
except AttributeError: return None
def rel_path(self, other):
return self.dir.rel_path(other)
def _get_found_includes_key(self, env, scanner, path):
return (id(env), id(scanner), path)
memoizer_counters.append(SCons.Memoize.CountDict('get_found_includes', _get_found_includes_key))
def get_found_includes(self, env, scanner, path):
"""Return the included implicit dependencies in this file.
Cache results so we only scan the file once per path
regardless of how many times this information is requested.
"""
memo_key = (id(env), id(scanner), path)
try:
memo_dict = self._memo['get_found_includes']
except KeyError:
memo_dict = {}
self._memo['get_found_includes'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
if scanner:
# result = [n.disambiguate() for n in scanner(self, env, path)]
result = scanner(self, env, path)
result = map(lambda N: N.disambiguate(), result)
else:
result = []
memo_dict[memo_key] = result
return result
def _createDir(self):
# ensure that the directories for this node are
# created.
self.dir._create()
def push_to_cache(self):
"""Try to push the node into a cache
"""
# This should get called before the Nodes' .built() method is
# called, which would clear the build signature if the file has
# a source scanner.
#
# We have to clear the local memoized values *before* we push
# the node to cache so that the memoization of the self.exists()
# return value doesn't interfere.
if self.nocache:
return
self.clear_memoized_values()
if self.exists():
self.get_build_env().get_CacheDir().push(self)
def retrieve_from_cache(self):
"""Try to retrieve the node's content from a cache
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
built().
Returns true iff the node was successfully retrieved.
"""
if self.nocache:
return None
if not self.is_derived():
return None
return self.get_build_env().get_CacheDir().retrieve(self)
def visited(self):
if self.exists():
self.get_build_env().get_CacheDir().push_if_forced(self)
ninfo = self.get_ninfo()
csig = self.get_max_drift_csig()
if csig:
ninfo.csig = csig
ninfo.timestamp = self.get_timestamp()
ninfo.size = self.get_size()
if not self.has_builder():
# This is a source file, but it might have been a target file
# in another build that included more of the DAG. Copy
# any build information that's stored in the .sconsign file
# into our binfo object so it doesn't get lost.
old = self.get_stored_info()
self.get_binfo().__dict__.update(old.binfo.__dict__)
self.store_info()
def find_src_builder(self):
if self.rexists():
return None
scb = self.dir.src_builder()
if scb is _null:
if diskcheck_sccs(self.dir, self.name):
scb = get_DefaultSCCSBuilder()
elif diskcheck_rcs(self.dir, self.name):
scb = get_DefaultRCSBuilder()
else:
scb = None
if scb is not None:
try:
b = self.builder
except AttributeError:
b = None
if b is None:
self.builder_set(scb)
return scb
def has_src_builder(self):
"""Return whether this Node has a source builder or not.
If this Node doesn't have an explicit source code builder, this
is where we figure out, on the fly, if there's a transparent
source code builder for it.
Note that if we found a source builder, we also set the
self.builder attribute, so that all of the methods that actually
*build* this file don't have to do anything different.
"""
try:
scb = self.sbuilder
except AttributeError:
scb = self.sbuilder = self.find_src_builder()
return scb is not None
def alter_targets(self):
"""Return any corresponding targets in a variant directory.
"""
if self.is_derived():
return [], None
return self.fs.variant_dir_target_climb(self, self.dir, [self.name])
def _rmv_existing(self):
self.clear_memoized_values()
e = Unlink(self, [], None)
if isinstance(e, SCons.Errors.BuildError):
raise e
#
# Taskmaster interface subsystem
#
def make_ready(self):
self.has_src_builder()
self.get_binfo()
def prepare(self):
"""Prepare for this file to be created."""
SCons.Node.Node.prepare(self)
if self.get_state() != SCons.Node.up_to_date:
if self.exists():
if self.is_derived() and not self.precious:
self._rmv_existing()
else:
try:
self._createDir()
except SCons.Errors.StopError, drive:
desc = "No drive `%s' for target `%s'." % (drive, self)
raise SCons.Errors.StopError, desc
#
#
#
def remove(self):
"""Remove this file."""
if self.exists() or self.islink():
self.fs.unlink(self.path)
return 1
return None
def do_duplicate(self, src):
self._createDir()
Unlink(self, None, None)
e = Link(self, src, None)
if isinstance(e, SCons.Errors.BuildError):
desc = "Cannot duplicate `%s' in `%s': %s." % (src.path, self.dir.path, e.errstr)
raise SCons.Errors.StopError, desc
self.linked = 1
# The Link() action may or may not have actually
# created the file, depending on whether the -n
# option was used or not. Delete the _exists and
# _rexists attributes so they can be reevaluated.
self.clear()
memoizer_counters.append(SCons.Memoize.CountValue('exists'))
def exists(self):
try:
return self._memo['exists']
except KeyError:
pass
# Duplicate from source path if we are set up to do this.
if self.duplicate and not self.is_derived() and not self.linked:
src = self.srcnode()
if src is not self:
# At this point, src is meant to be copied in a variant directory.
src = src.rfile()
if src.abspath != self.abspath:
if src.exists():
self.do_duplicate(src)
# Can't return 1 here because the duplication might
# not actually occur if the -n option is being used.
else:
# The source file does not exist. Make sure no old
# copy remains in the variant directory.
if Base.exists(self) or self.islink():
self.fs.unlink(self.path)
# Return None explicitly because the Base.exists() call
# above will have cached its value if the file existed.
self._memo['exists'] = None
return None
result = Base.exists(self)
self._memo['exists'] = result
return result
#
# SIGNATURE SUBSYSTEM
#
def get_max_drift_csig(self):
"""
Returns the content signature currently stored for this node
if it's been unmodified longer than the max_drift value, or the
max_drift value is 0. Returns None otherwise.
"""
old = self.get_stored_info()
mtime = self.get_timestamp()
max_drift = self.fs.max_drift
if max_drift > 0:
if (time.time() - mtime) > max_drift:
try:
n = old.ninfo
if n.timestamp and n.csig and n.timestamp == mtime:
return n.csig
except AttributeError:
pass
elif max_drift == 0:
try:
return old.ninfo.csig
except AttributeError:
pass
return None
def get_csig(self):
"""
Generate a node's content signature, the digested signature
of its content.
node - the node
cache - alternate node to use for the signature cache
returns - the content signature
"""
ninfo = self.get_ninfo()
try:
return ninfo.csig
except AttributeError:
pass
csig = self.get_max_drift_csig()
if csig is None:
try:
if self.get_size() < SCons.Node.FS.File.md5_chunksize:
contents = self.get_contents()
else:
csig = self.get_content_hash()
except IOError:
# This can happen if there's actually a directory on-disk,
# which can be the case if they've disabled disk checks,
# or if an action with a File target actually happens to
# create a same-named directory by mistake.
csig = ''
else:
if not csig:
csig = SCons.Util.MD5signature(contents)
ninfo.csig = csig
return csig
#
# DECISION SUBSYSTEM
#
def builder_set(self, builder):
SCons.Node.Node.builder_set(self, builder)
self.changed_since_last_build = self.decide_target
def changed_content(self, target, prev_ni):
cur_csig = self.get_csig()
try:
return cur_csig != prev_ni.csig
except AttributeError:
return 1
def changed_state(self, target, prev_ni):
return self.state != SCons.Node.up_to_date
def changed_timestamp_then_content(self, target, prev_ni):
if not self.changed_timestamp_match(target, prev_ni):
try:
self.get_ninfo().csig = prev_ni.csig
except AttributeError:
pass
return False
return self.changed_content(target, prev_ni)
def changed_timestamp_newer(self, target, prev_ni):
try:
return self.get_timestamp() > target.get_timestamp()
except AttributeError:
return 1
def changed_timestamp_match(self, target, prev_ni):
try:
return self.get_timestamp() != prev_ni.timestamp
except AttributeError:
return 1
def decide_source(self, target, prev_ni):
return target.get_build_env().decide_source(self, target, prev_ni)
def decide_target(self, target, prev_ni):
return target.get_build_env().decide_target(self, target, prev_ni)
# Initialize this Node's decider function to decide_source() because
# every file is a source file until it has a Builder attached...
changed_since_last_build = decide_source
def is_up_to_date(self):
T = 0
if T: Trace('is_up_to_date(%s):' % self)
if not self.exists():
if T: Trace(' not self.exists():')
# The file doesn't exist locally...
r = self.rfile()
if r != self:
# ...but there is one in a Repository...
if not self.changed(r):
if T: Trace(' changed(%s):' % r)
# ...and it's even up-to-date...
if self._local:
# ...and they'd like a local copy.
e = LocalCopy(self, r, None)
if isinstance(e, SCons.Errors.BuildError):
raise
self.store_info()
if T: Trace(' 1\n')
return 1
self.changed()
if T: Trace(' None\n')
return None
else:
r = self.changed()
if T: Trace(' self.exists(): %s\n' % r)
return not r
memoizer_counters.append(SCons.Memoize.CountValue('rfile'))
def rfile(self):
try:
return self._memo['rfile']
except KeyError:
pass
result = self
if not self.exists():
norm_name = _my_normcase(self.name)
for dir in self.dir.get_all_rdirs():
try: node = dir.entries[norm_name]
except KeyError: node = dir.file_on_disk(self.name)
if node and node.exists() and \
(isinstance(node, File) or isinstance(node, Entry) \
or not node.is_derived()):
result = node
# Copy over our local attributes to the repository
# Node so we identify shared object files in the
# repository and don't assume they're static.
#
# This isn't perfect; the attribute would ideally
# be attached to the object in the repository in
# case it was built statically in the repository
# and we changed it to shared locally, but that's
# rarely the case and would only occur if you
# intentionally used the same suffix for both
# shared and static objects anyway. So this
# should work well in practice.
result.attributes = self.attributes
break
self._memo['rfile'] = result
return result
def rstr(self):
return str(self.rfile())
def get_cachedir_csig(self):
"""
Fetch a Node's content signature for purposes of computing
another Node's cachesig.
This is a wrapper around the normal get_csig() method that handles
the somewhat obscure case of using CacheDir with the -n option.
Any files that don't exist would normally be "built" by fetching
them from the cache, but the normal get_csig() method will try
to open up the local file, which doesn't exist because the -n
option meant we didn't actually pull the file from cachedir.
But since the file *does* actually exist in the cachedir, we
can use its contents for the csig.
"""
try:
return self.cachedir_csig
except AttributeError:
pass
cachedir, cachefile = self.get_build_env().get_CacheDir().cachepath(self)
if not self.exists() and cachefile and os.path.exists(cachefile):
self.cachedir_csig = SCons.Util.MD5filesignature(cachefile, \
SCons.Node.FS.File.md5_chunksize * 1024)
else:
self.cachedir_csig = self.get_csig()
return self.cachedir_csig
def get_cachedir_bsig(self):
try:
return self.cachesig
except AttributeError:
pass
# Add the path to the cache signature, because multiple
# targets built by the same action will all have the same
# build signature, and we have to differentiate them somehow.
children = self.children()
executor = self.get_executor()
# sigs = [n.get_cachedir_csig() for n in children]
sigs = map(lambda n: n.get_cachedir_csig(), children)
sigs.append(SCons.Util.MD5signature(executor.get_contents()))
sigs.append(self.path)
result = self.cachesig = SCons.Util.MD5collect(sigs)
return result
default_fs = None
def get_default_fs():
global default_fs
if not default_fs:
default_fs = FS()
return default_fs
class FileFinder:
"""
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
memoizer_counters = []
def __init__(self):
self._memo = {}
def filedir_lookup(self, p, fd=None):
"""
A helper method for find_file() that looks up a directory for
a file we're trying to find. This only creates the Dir Node if
it exists on-disk, since if the directory doesn't exist we know
we won't find any files in it... :-)
It would be more compact to just use this as a nested function
with a default keyword argument (see the commented-out version
below), but that doesn't work unless you have nested scopes,
so we define it here just so this works under Python 1.5.2.
"""
if fd is None:
fd = self.default_filedir
dir, name = os.path.split(fd)
drive, d = os.path.splitdrive(dir)
if not name and d[:1] in ('/', os.sep):
#return p.fs.get_root(drive).dir_on_disk(name)
return p.fs.get_root(drive)
if dir:
p = self.filedir_lookup(p, dir)
if not p:
return None
norm_name = _my_normcase(name)
try:
node = p.entries[norm_name]
except KeyError:
return p.dir_on_disk(name)
if isinstance(node, Dir):
return node
if isinstance(node, Entry):
node.must_be_same(Dir)
return node
return None
def _find_file_key(self, filename, paths, verbose=None):
return (filename, paths)
memoizer_counters.append(SCons.Memoize.CountDict('find_file', _find_file_key))
def find_file(self, filename, paths, verbose=None):
"""
find_file(str, [Dir()]) -> [nodes]
filename - a filename to find
paths - a list of directory path *nodes* to search in. Can be
represented as a list, a tuple, or a callable that is
called with no arguments and returns the list or tuple.
returns - the node created from the found file.
Find a node corresponding to either a derived file or a file
that exists already.
Only the first file found is returned, and none is returned
if no file is found.
"""
memo_key = self._find_file_key(filename, paths)
try:
memo_dict = self._memo['find_file']
except KeyError:
memo_dict = {}
self._memo['find_file'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
if verbose and not callable(verbose):
if not SCons.Util.is_String(verbose):
verbose = "find_file"
verbose = ' %s: ' % verbose
verbose = lambda s, v=verbose: sys.stdout.write(v + s)
filedir, filename = os.path.split(filename)
if filedir:
# More compact code that we can't use until we drop
# support for Python 1.5.2:
#
#def filedir_lookup(p, fd=filedir):
# """
# A helper function that looks up a directory for a file
# we're trying to find. This only creates the Dir Node
# if it exists on-disk, since if the directory doesn't
# exist we know we won't find any files in it... :-)
# """
# dir, name = os.path.split(fd)
# if dir:
# p = filedir_lookup(p, dir)
# if not p:
# return None
# norm_name = _my_normcase(name)
# try:
# node = p.entries[norm_name]
# except KeyError:
# return p.dir_on_disk(name)
# if isinstance(node, Dir):
# return node
# if isinstance(node, Entry):
# node.must_be_same(Dir)
# return node
# if isinstance(node, Dir) or isinstance(node, Entry):
# return node
# return None
#paths = filter(None, map(filedir_lookup, paths))
self.default_filedir = filedir
paths = filter(None, map(self.filedir_lookup, paths))
result = None
for dir in paths:
if verbose:
verbose("looking for '%s' in '%s' ...\n" % (filename, dir))
node, d = dir.srcdir_find_file(filename)
if node:
if verbose:
verbose("... FOUND '%s' in '%s'\n" % (filename, d))
result = node
break
memo_dict[memo_key] = result
return result
find_file = FileFinder().find_file
def invalidate_node_memos(targets):
"""
Invalidate the memoized values of all Nodes (files or directories)
that are associated with the given entries. Has been added to
clear the cache of nodes affected by a direct execution of an
action (e.g. Delete/Copy/Chmod). Existing Node caches become
inconsistent if the action is run through Execute(). The argument
`targets` can be a single Node object or filename, or a sequence
of Nodes/filenames.
"""
from traceback import extract_stack
# First check if the cache really needs to be flushed. Only
# actions run in the SConscript with Execute() seem to be
# affected. XXX The way to check if Execute() is in the stacktrace
# is a very dirty hack and should be replaced by a more sensible
# solution.
for f in extract_stack():
if f[2] == 'Execute' and f[0][-14:] == 'Environment.py':
break
else:
# Don't have to invalidate, so return
return
if not SCons.Util.is_List(targets):
targets = [targets]
for entry in targets:
# If the target is a Node object, clear the cache. If it is a
# filename, look up potentially existing Node object first.
try:
entry.clear_memoized_values()
except AttributeError:
# Not a Node object, try to look up Node by filename. XXX
# This creates Node objects even for those filenames which
# do not correspond to an existing Node object.
node = get_default_fs().Entry(entry)
if node:
node.clear_memoized_values()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
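# --- Editor-added illustrative sketch (not part of SCons) ---
# convert_old_entry() above distinguishes old-format dependency "signatures"
# purely by string length: a 32-character value is assumed to be an MD5
# content signature, anything else a timestamp. A minimal standalone model of
# that heuristic, with hypothetical example values:
def _classify_old_signature(sig):
    """Return 'csig' for 32-character MD5-like strings, 'timestamp' otherwise."""
    if isinstance(sig, str) and len(sig) == 32:
        return 'csig'
    return 'timestamp'

assert _classify_old_signature('d41d8cd98f00b204e9800998ecf8427e') == 'csig'
assert _classify_old_signature(1199145600) == 'timestamp'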
|
rat-pacREPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@SCons@[email protected]@.PATH_END.py
|
{
"filename": "graph.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scikit-learn/py2/sklearn/neighbors/graph.py",
"type": "Python"
}
|
"""Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self):
"""Return the query based on include_self param"""
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default=False.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
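# --- Editor-added illustrative sketch (not part of scikit-learn) ---
# kneighbors_graph() also accepts an already-fitted estimator; in that branch
# _check_params() above only verifies that metric/p/metric_params agree with
# the estimator's own settings. A minimal demonstration using the defaults:
def _example_prefit_graph():
    nn = NearestNeighbors(n_neighbors=2).fit([[0], [3], [1]])
    return kneighbors_graph(nn, 2, mode='connectivity', include_self=True)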
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=False, n_jobs=1):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity', include_self=True)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self)
return X.radius_neighbors_graph(query, radius, mode)
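# --- Editor-added illustrative sketch (not part of scikit-learn) ---
# Quick comparison of the two `mode` options on the same toy data; only run
# when this module is executed directly:
if __name__ == '__main__':
    X = [[0], [3], [1]]
    conn = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
    dist = kneighbors_graph(X, 2, mode='distance', include_self=False)
    print(conn.toarray())  # 0/1 adjacency matrix
    print(dist.toarray())  # Euclidean distances to the 2 nearest neighbors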
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scikit-learn@py2@sklearn@[email protected]@.PATH_END.py
|
{
"filename": "fit_begin_and_end.py",
"repo_name": "cjekel/piecewise_linear_fit_py",
"repo_path": "piecewise_linear_fit_py_extracted/piecewise_linear_fit_py-master/examples/tf/fit_begin_and_end.py",
"type": "Python"
}
|
# fit and predict between a known beginning and a known ending
# import our libraries
import numpy as np
import matplotlib.pyplot as plt
import pwlf
from scipy.optimize import differential_evolution
# your data
y = np.array([0.00000000e+00, 9.69801700e-03, 2.94350340e-02,
4.39052750e-02, 5.45343950e-02, 6.74104940e-02,
8.34831790e-02, 1.02580042e-01, 1.22767939e-01,
1.42172312e-01, 0.00000000e+00, 8.58600000e-06,
8.31543400e-03, 2.34184100e-02, 3.39709150e-02,
4.03581990e-02, 4.53545600e-02, 5.02345260e-02,
5.55253360e-02, 6.14750770e-02, 6.82125120e-02,
7.55892510e-02, 8.38356810e-02, 9.26413070e-02,
1.02039790e-01, 1.11688258e-01, 1.21390666e-01,
1.31196948e-01, 0.00000000e+00, 1.56706510e-02,
3.54628780e-02, 4.63739040e-02, 5.61442590e-02,
6.78542550e-02, 8.16388310e-02, 9.77756110e-02,
1.16531753e-01, 1.37038283e-01, 0.00000000e+00,
1.16951050e-02, 3.12089850e-02, 4.41776550e-02,
5.42877590e-02, 6.63321350e-02, 8.07655920e-02,
9.70363280e-02, 1.15706975e-01, 1.36687642e-01,
0.00000000e+00, 1.50144640e-02, 3.44519970e-02,
4.55907760e-02, 5.59556700e-02, 6.88450940e-02,
8.41374060e-02, 1.01254006e-01, 1.20605073e-01,
1.41881288e-01, 1.62618058e-01])
x = np.array([0.00000000e+00, 8.82678000e-03, 3.25615100e-02,
5.66106800e-02, 7.95549800e-02, 1.00936330e-01,
1.20351520e-01, 1.37442010e-01, 1.51858250e-01,
1.64433570e-01, 0.00000000e+00, -2.12600000e-05,
7.03872000e-03, 1.85494500e-02, 3.00926700e-02,
4.17617000e-02, 5.37279600e-02, 6.54941000e-02,
7.68092100e-02, 8.76596300e-02, 9.80525800e-02,
1.07961810e-01, 1.17305210e-01, 1.26063930e-01,
1.34180360e-01, 1.41725010e-01, 1.48629710e-01,
1.55374770e-01, 0.00000000e+00, 1.65610200e-02,
3.91016100e-02, 6.18679400e-02, 8.30997400e-02,
1.02132890e-01, 1.19011260e-01, 1.34620080e-01,
1.49429370e-01, 1.63539960e-01, -0.00000000e+00,
1.01980300e-02, 3.28642800e-02, 5.59461900e-02,
7.81388400e-02, 9.84458400e-02, 1.16270210e-01,
1.31279040e-01, 1.45437090e-01, 1.59627540e-01,
0.00000000e+00, 1.63404300e-02, 4.00086000e-02,
6.34390200e-02, 8.51085900e-02, 1.04787860e-01,
1.22120350e-01, 1.36931660e-01, 1.50958760e-01,
1.65299640e-01, 1.79942720e-01])
# initialize piecewise linear fit with your x and y data
my_pwlf = pwlf.PiecewiseLinFitTF(x, y, disp_res=True)
# fit the function with four line segments
# force the function to go through the data points
# (0.0, 0.0) and (0.19, 0.16)
# where the data points are of the form (x, y)
x_c = [0.0, 0.19]
y_c = [0.0, 0.2]
breaks = [0.00711605, 0.12014667, 0.1799223]
L = my_pwlf.fit_with_breaks_force_points(breaks, x_c, y_c)
# predict for the determined points
xHat = np.linspace(min(x), 0.19, num=10000)
yHat = my_pwlf.predict(xHat)
# plot the results
plt.figure()
plt.plot(x, y, 'o')
plt.plot(xHat, yHat, '-')
plt.show()
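# --- Editor-added illustrative sketch (not part of the original example) ---
# Sanity check on the constrained fit above: predictions at the two forced
# x-values should reproduce the requested y_c constraints almost exactly.
y_at_constraints = my_pwlf.predict(np.array(x_c))
print('constraint residuals:', y_at_constraints - np.array(y_c))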
|
cjekelREPO_NAMEpiecewise_linear_fit_pyPATH_START.@piecewise_linear_fit_py_extracted@piecewise_linear_fit_py-master@examples@tf@[email protected]_END.py
|
{
"filename": "_maxpoints.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/volume/stream/_maxpoints.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MaxpointsValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="maxpoints", parent_name="volume.stream", **kwargs):
super(MaxpointsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 10000),
min=kwargs.pop("min", 0),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@volume@stream@[email protected]_END.py
|
{
"filename": "mms_curl.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/mms/fgm/mms_curl.py",
"type": "Python"
}
|
import logging
import math
import numpy as np
from pytplot import get_data, store_data, options
from pyspedas import tinterpol
def mms_curl(fields=None, positions=None, suffix=''):
"""
This function applies the curlometer technique to MMS FGM data
Parameters
----------
fields : list of str
List of tplot variables containing the B-field for each spacecraft
(in GSE coordinates)
positions : list of str
List of tplot variables containing the S/C position vectors for
each spacecraft (also GSE coordinates)
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
Notes
----------
The input B-field data and position data are required to be in
GSE coordinates
Based on the original mms_curl, written in IDL, by Jonathan Eastwood
For more info on this method, see:
Chanteur, G., Spatial Interpolation for Four Spacecraft: Theory,
Chapter 14 of Analysis methods for multi-spacecraft data, G.
Paschmann and P. W. Daly (Eds.) ISSI Scientific Report SR-001.
Returns
----------
List of tplot variables created
"""
if fields is None or positions is None:
logging.error('Error: B-field and spacecraft position keywords required.')
return
if len(fields) != 4 or len(positions) != 4:
logging.error('Error, fields and positions keywords should be specified as 4-element arrays containing the tplot variable name for the field and position variables')
return
# *********************************************************
# Magnetic Field
# *********************************************************
# interpolate the magnetic field data all onto the same timeline (MMS1):
# should be in GSE coordinates
tinterpol(fields[1], fields[0], newname=fields[1] + '_i')
tinterpol(fields[2], fields[0], newname=fields[2] + '_i')
tinterpol(fields[3], fields[0], newname=fields[3] + '_i')
# interpolate the definitive ephemeris onto the magnetic field timeseries
# should be in GSE coordinates
tinterpol(positions[0], fields[0], newname=positions[0] + '_i')
tinterpol(positions[1], fields[0], newname=positions[1] + '_i')
tinterpol(positions[2], fields[0], newname=positions[2] + '_i')
tinterpol(positions[3], fields[0], newname=positions[3] + '_i')
m0 = 4.0*math.pi*1e-7
mms1_bfield = get_data(fields[0])
mms2_bfield = get_data(fields[1] + '_i')
mms3_bfield = get_data(fields[2] + '_i')
mms4_bfield = get_data(fields[3] + '_i')
if mms1_bfield is None:
logging.error('Error, B-field variable is missing: ' + fields[0])
return
elif mms2_bfield is None:
logging.error('Error, B-field variable is missing: ' + fields[1] + '_i')
return
elif mms3_bfield is None:
logging.error('Error, B-field variable is missing: ' + fields[2] + '_i')
return
elif mms4_bfield is None:
logging.error('Error, B-field variable is missing: ' + fields[3] + '_i')
return
timesb1, datab1 = mms1_bfield
timesb2, datab2 = mms2_bfield
timesb3, datab3 = mms3_bfield
timesb4, datab4 = mms4_bfield
# extract the vector
b1 = datab1[:, 0:3]
b2 = datab2[:, 0:3]
b3 = datab3[:, 0:3]
b4 = datab4[:, 0:3]
mms1_pos = get_data(positions[0] + '_i')
mms2_pos = get_data(positions[1] + '_i')
mms3_pos = get_data(positions[2] + '_i')
mms4_pos = get_data(positions[3] + '_i')
if mms1_pos is None:
logging.error('Error, S/C position variable is missing: ' + positions[0] + '_i')
return
elif mms2_pos is None:
logging.error('Error, S/C position variable is missing: ' + positions[1] + '_i')
return
elif mms3_pos is None:
logging.error('Error, S/C position variable is missing: ' + positions[2] + '_i')
return
elif mms4_pos is None:
logging.error('Error, S/C position variable is missing: ' + positions[3] + '_i')
return
timesp1, p1 = mms1_pos
timesp2, p2 = mms2_pos
timesp3, p3 = mms3_pos
timesp4, p4 = mms4_pos
divb = np.zeros([len(timesb1), 5])
baryb = np.zeros([len(timesb1), 3])
baryb2 = np.zeros([len(timesb1), 3])
baryb3 = np.zeros([len(timesb1), 3])
baryb4 = np.zeros([len(timesb1), 3])
jtotal = np.zeros([len(timesb1), 4])
btotal = np.zeros([len(timesb1), 1])
jparallel = np.zeros([len(timesb1), 1])
jperpvec = np.zeros([len(timesb1), 4])
alphaparallel = np.zeros([len(timesb1), 1])
alpha = np.zeros([len(timesb1), 1])
# leave as a loop for now because you have to construct and manipulate a matrix for each time step.
for i, time in enumerate(timesb1):
p12 = p2[i, 0:3]-p1[i, 0:3]
p13 = p3[i, 0:3]-p1[i, 0:3]
p14 = p4[i, 0:3]-p1[i, 0:3]
k2 = np.cross(p13, p14)*(1/(np.matmul(p12, np.transpose(np.cross(p13, p14)))))
k3 = np.cross(p12, p14)*(1/(np.matmul(p13, np.transpose(np.cross(p12, p14)))))
k4 = np.cross(p12, p13)*(1/(np.matmul(p14, np.transpose(np.cross(p12, p13)))))
k1 = 0-k4-k3-k2
curlmag = np.cross(k1, b1[i, :])+np.cross(k2, b2[i, :])+np.cross(k3, b3[i, :])+np.cross(k4, b4[i, :])
divergence = np.matmul(b1[i, :], k1) + np.matmul(b2[i, :], k2) + np.matmul(b3[i, :], k3) + np.matmul(b4[i, :], k4)
gradbx = b1[i, 0]*k1 + b2[i, 0]*k2 + b3[i, 0]*k3 + b4[i, 0]*k4
gradby = b1[i, 1]*k1 + b2[i, 1]*k2 + b3[i, 1]*k3 + b4[i, 1]*k4
gradbz = b1[i, 2]*k1 + b2[i, 2]*k2 + b3[i, 2]*k3 + b4[i, 2]*k4
barycentre = (p1[i, 0:3] + p2[i, 0:3] + p3[i, 0:3] + p4[i, 0:3])/4.0
# and here is the field at the barycentre (calculate 4 ways)
baryb[i, 0] = b1[i, 0] + np.sum(gradbx*(barycentre-p1[i, 0:3]))
baryb[i, 1] = b1[i, 1] + np.sum(gradby*(barycentre-p1[i, 0:3]))
baryb[i, 2] = b1[i, 2] + np.sum(gradbz*(barycentre-p1[i, 0:3]))
baryb2[i, 0] = b2[i, 0] + np.sum(gradbx*(barycentre-p2[i, 0:3]))
baryb2[i, 1] = b2[i, 1] + np.sum(gradby*(barycentre-p2[i, 0:3]))
baryb2[i, 2] = b2[i, 2] + np.sum(gradbz*(barycentre-p2[i, 0:3]))
baryb3[i, 0] = b3[i, 0] + np.sum(gradbx*(barycentre-p3[i, 0:3]))
baryb3[i, 1] = b3[i, 1] + np.sum(gradby*(barycentre-p3[i, 0:3]))
baryb3[i, 2] = b3[i, 2] + np.sum(gradbz*(barycentre-p3[i, 0:3]))
baryb4[i, 0] = b4[i, 0] + np.sum(gradbx*(barycentre-p4[i, 0:3]))
baryb4[i, 1] = b4[i, 1] + np.sum(gradby*(barycentre-p4[i, 0:3]))
baryb4[i, 2] = b4[i, 2] + np.sum(gradbz*(barycentre-p4[i, 0:3]))
# (these above all agree so this is the magnetic field at the barycentre)
divb[i, 0] = time
divb[i, 1] = divergence
divb[i, 2] = curlmag[0]
divb[i, 3] = curlmag[1]
divb[i, 4] = curlmag[2]
# the cross product of the calculated curl and the sample field times 1e-21 (SI), divided by m0
# curl is in nT/km, nT/km*1e-12 = T/m
# field is in nT, nT*1e-9 = T
# j is curl B / m0 (curl B = m0*j)
# use the magnetic field at the barycentre
# compute the current components and total specifically
jtotal[i, 0:3] = 1e-12*divb[i, 2:5]/m0
jtotal[i, 3] = np.sqrt(jtotal[i, 0]**2+jtotal[i, 1]**2+jtotal[i, 2]**2)
# compute the parallel and perpendicular components of the current
btotal[i] = np.sqrt(np.dot(baryb[i, 0:3], baryb[i, 0:3]))
# parallel is J.B/|B|
jparallel[i] = np.dot(jtotal[i, 0:3], baryb[i, 0:3])/btotal[i]
jparallel[i] = jparallel[i][0]
# perp is J - J// B/|B| (components and total perpendicular current)
jperpvec[i, 0:3] = jtotal[i, 0:3] - (jparallel[i]*baryb[i, 0:3])/btotal[i]
jperpvec[i, 3] = np.sqrt(jperpvec[i, 0]**2 + jperpvec[i, 1]**2 + jperpvec[i, 2]**2)
# alpha parameter
alphaparallel[i] = np.abs(jparallel[i])/(1e-9*btotal[i])
alpha[i] = np.abs(jtotal[i, 3])/(1e-9*btotal[i])
# create the output variables
store_data('baryb' + suffix, data={'x': timesb1, 'y': baryb})
store_data('curlB' + suffix, data={'x': timesb1, 'y': divb[:, 2:5]})
store_data('divB' + suffix, data={'x': timesb1, 'y': divb[:, 1]})
store_data('jtotal' + suffix, data={'x': timesb1, 'y': jtotal[:, 0:3]})
store_data('jpar' + suffix, data={'x': timesb1, 'y': jparallel})
store_data('jperp' + suffix, data={'x': timesb1, 'y': jperpvec[:, 0:3]})
store_data('alpha' + suffix, data={'x': timesb1, 'y': alpha})
store_data('alphaparallel' + suffix, data={'x': timesb1, 'y': alphaparallel})
# set some options
options('baryb' + suffix, 'ytitle', 'baryb')
options('baryb' + suffix, 'ysubtitle', '[nT]')
options('divB' + suffix, 'ytitle', 'div(B)')
options('divB' + suffix, 'ysubtitle', '[nT/km]')
options('curlB' + suffix, 'ytitle', 'curl(B)')
options('curlB' + suffix, 'ysubtitle', '[nT/km]')
options('curlB' + suffix, 'legend_names', ['delBx', 'delBy', 'delBz'])
options('jtotal' + suffix, 'ytitle', 'J')
options('jtotal' + suffix, 'ysubtitle', '[A/m^2]')
options('jtotal' + suffix, 'legend_names', ['Jx', 'Jy', 'Jz'])
options('jperp' + suffix, 'ytitle', 'Jperp')
options('jperp' + suffix, 'ysubtitle', '[A/m^2]')
options('jperp' + suffix, 'legend_names', ['Jperpx', 'Jperpy', 'Jperpz'])
options('jpar' + suffix, 'ytitle', 'Jparallel')
options('jpar' + suffix, 'ysubtitle', '[A/m^2]')
return ['baryb', 'curlB', 'divB', 'jtotal', 'jpar', 'jperp', 'alpha', 'alphaparallel']
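# --- Editor-added illustrative sketch (not part of pyspedas) ---
# Typical call pattern. The tplot variable names below are assumptions about
# what the standard MMS FGM/MEC load routines produce and may need adjusting
# for your data rate and level; both fields and positions must be in GSE.
def _example_mms_curl_call(data_rate='srvy', suffix='_curl'):
    probes = range(1, 5)
    fields = ['mms%d_fgm_b_gse_%s_l2_bvec' % (p, data_rate) for p in probes]
    positions = ['mms%d_mec_r_gse' % p for p in probes]
    return mms_curl(fields=fields, positions=positions, suffix=suffix)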
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@mms@fgm@[email protected]_END.py
|
{
"filename": "test_thomson.py",
"repo_name": "PlasmaPy/PlasmaPy",
"repo_path": "PlasmaPy_extracted/PlasmaPy-main/tests/diagnostics/test_thomson.py",
"type": "Python"
}
|
"""
Tests for Thomson scattering analysis functions
"""
import copy
import astropy.constants as const
import astropy.units as u
import numpy as np
import pytest
from lmfit import Parameter, Parameters
from plasmapy.diagnostics import thomson
from plasmapy.particles import Particle, particle_mass
from plasmapy.particles.particle_collections import ParticleList
def example_instr_func(w):
"""
Example instrument function for use in testing
"""
sigma = 0.5 * u.nm
arg = (w / sigma).to(u.dimensionless_unscaled).value
inst = np.exp(-(arg**2))
inst *= 1 / np.sum(inst)
return inst
def example_invalid_instr_func_bad_type(w):
"""
Example instrument function for use in testing
This instrument function is invalid because it does not return a plain
np.ndarray.
"""
sigma = 0.5 * u.nm
arg = (w / sigma).to(u.dimensionless_unscaled)
inst = np.exp(-(arg**2))
inst *= 1 / np.sum(inst)
return inst * u.m
def example_invalid_instr_func_bad_shape(w):
"""
Example instrument function for use in testing
This instrument function is invalid because it returns an array of a
different shape than the provided wavelength array
"""
sigma = 0.5 * u.nm
arg = (w / sigma).to(u.dimensionless_unscaled).value
inst = np.exp(-(arg**2))
inst *= 1 / np.sum(inst)
return inst[2:]
# A list of invalid instrument functions
invalid_instr_func_list = [
(example_invalid_instr_func_bad_type),
(example_invalid_instr_func_bad_shape),
]
def width_at_value(x, y, val):
"""
Calculates the width of a curve at a given value.
"""
above = np.where(y > val, x, np.nan)
return np.abs(np.nanmax(above) - np.nanmin(above))
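# --- Editor-added illustrative sketch (not part of the PlasmaPy test suite) ---
# What width_at_value() measures: for a unit-amplitude Gaussian, the width at
# half maximum is 2*sqrt(2*ln(2)) ~= 2.355. Helper defined for illustration only.
def _example_width_at_half_maximum():
    x = np.linspace(-5, 5, 10001)
    y = np.exp(-(x**2) / 2)
    return width_at_value(x, y, 0.5)  # ~2.355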
def spectral_density_args_kwargs(kwargs):
"""
Separate positional arguments and keyword arguments
for the spectral_density function from a dictionary of both that is
easy to use in parametrized tests.
"""
# Pull out the non-keyword arguments
args = (
kwargs["wavelengths"],
kwargs["probe_wavelength"],
kwargs["n"],
)
del kwargs["wavelengths"]
del kwargs["probe_wavelength"]
del kwargs["n"]
return args, kwargs
def args_to_lite_args(kwargs): # noqa: C901
"""
Converts a dict of args for the spectral density function and converts
them to input for the lite function.
Used to facilitate testing the two functions against each other.
"""
keys = list(kwargs.keys())
if "wavelengths" in keys:
kwargs["wavelengths"] = kwargs["wavelengths"].to(u.m).value
if "probe_wavelength" in keys:
kwargs["probe_wavelength"] = kwargs["probe_wavelength"].to(u.m).value
if "n" in keys:
kwargs["n"] = kwargs["n"].to(u.m**-3).value
if "T_e" in keys:
kwargs["T_e"] = (kwargs["T_e"] / const.k_B).to(u.K).value
if "T_i" in keys:
kwargs["T_i"] = (kwargs["T_i"] / const.k_B).to(u.K).value
if "electron_vel" in keys:
kwargs["electron_vel"] = kwargs["electron_vel"].to(u.m / u.s).value
if "ion_vel" in keys:
kwargs["ion_vel"] = kwargs["ion_vel"].to(u.m / u.s).value
if kwargs["T_e"].size == 1:
kwargs["T_e"] = np.array(
[
kwargs["T_e"],
]
)
if kwargs["T_i"].size == 1:
kwargs["T_i"] = np.array(
[
kwargs["T_i"],
]
)
if not isinstance(kwargs["ions"], list):
kwargs["ions"] = [
kwargs["ions"],
]
ion_z = np.zeros(len(kwargs["ions"]))
ion_mass = np.zeros(len(kwargs["ions"]))
for i, particle in enumerate(kwargs["ions"]):
if not isinstance(particle, Particle):
particle = Particle(particle) # noqa: PLW2901
ion_z[i] = particle.charge_number
ion_mass[i] = particle_mass(particle).to(u.kg).value
kwargs["ion_z"] = ion_z
kwargs["ion_mass"] = ion_mass
del kwargs["ions"]
return kwargs
@pytest.fixture
def single_species_collective_args():
"""
Standard args
Includes both kwargs and args: separated by the function
spectral_density_args_kwargs
"""
return {
"wavelengths": np.arange(520, 545, 0.01) * u.nm,
"probe_wavelength": 532 * u.nm,
"n": 5e17 * u.cm**-3,
"T_e": 10 * u.eV,
"T_i": 10 * u.eV,
"efract": np.array([1.0]),
"ifract": np.array([1.0]),
"ions": "C-12 5+",
"electron_vel": np.array([[0, 0, 0]]) * u.km / u.s,
"ion_vel": np.array([[0, 0, 0]]) * u.km / u.s,
"probe_vec": np.array([1, 0, 0]),
"scatter_vec": np.array([0, 1, 0]),
}
@pytest.fixture
def single_species_collective_spectrum(single_species_collective_args):
"""
Generates an example Thomson scattering spectrum in the collective regime
"""
wavelengths = single_species_collective_args["wavelengths"]
args, kwargs = spectral_density_args_kwargs(single_species_collective_args)
alpha, Skw = thomson.spectral_density(*args, **kwargs)
return (alpha, wavelengths, Skw)
@pytest.mark.slow
def test_single_species_collective_spectrum(single_species_collective_spectrum) -> None:
"""
Compares the generated spectrum to previously determined values
"""
alpha, wavelength, Skw = single_species_collective_spectrum
# Check that alpha is correct
assert np.isclose(
alpha, 1.801, atol=0.01
), f"Collective case alpha returns {alpha} instead of expected 1.801"
i_width = width_at_value(wavelength.value, Skw.value, 2e-13)
e_width = width_at_value(wavelength.value, Skw.value, 0.2e-13)
# Check that the widths of the ion and electron features match expectations
assert np.isclose(i_width, 0.1599, 1e-3), (
"Collective case ion feature "
f"width is {i_width}"
"instead of expected 0.1599"
)
assert np.isclose(e_width, 17.7899, 1e-3), (
"Collective case electron "
f"feature width is {e_width} "
"instead of expected 17.7899"
)
@pytest.mark.parametrize(
("notch", "notch_num"),
[
# one notch
(np.array([531, 533]) * u.nm, 1),
# two notches
(np.array([np.array([520, 525]), np.array([530, 540])]) * u.nm, 2),
],
)
def test_notched_spectrum(notch, notch_num, single_species_collective_args) -> None:
"""
Compares notched and unnotched spectra
"""
# make a copy of the input args
args_fixture_copy = copy.copy(single_species_collective_args)
wavelengths = single_species_collective_args["wavelengths"]
# Compute spectrum with no notch included
args, kwargs = spectral_density_args_kwargs(single_species_collective_args)
alpha_unnotched, Skw_unnotched = thomson.spectral_density(*args, **kwargs)
# Compute same spectrum with notch
args, kwargs = spectral_density_args_kwargs(args_fixture_copy)
kwargs["notch"] = notch
alpha_notched, Skw_notched = thomson.spectral_density(*args, **kwargs)
# Check that notch does not affect alpha
assert np.isclose(alpha_notched, alpha_unnotched)
if notch_num == 1:
# Record wavelength array indices corresponding to notch
x0 = np.argwhere(wavelengths > notch[0])[0][0]
x1 = np.argwhere(wavelengths > notch[1])[0][0]
# Check that regions outside the notch are the same for both Skws
assert np.allclose(Skw_notched[:x0], Skw_unnotched[:x0])
assert np.allclose(Skw_notched[x1:], Skw_unnotched[x1:])
# Check that region inside the notch is 0 for notched Skw
assert np.allclose(Skw_notched[x0:x1], np.zeros(x1 - x0))
elif notch_num == 2:
x0 = np.argwhere(wavelengths > notch[0, 0])[0][0]
x1 = np.argwhere(wavelengths > notch[0, 1])[0][0]
x2 = np.argwhere(wavelengths > notch[1, 0])[0][0]
x3 = np.argwhere(wavelengths > notch[1, 1])[0][0]
# Check that regions outside the notches are the same for both Skws
assert np.allclose(Skw_notched[:x0], Skw_unnotched[:x0])
assert np.allclose(Skw_notched[x1:x2], Skw_unnotched[x1:x2])
assert np.allclose(Skw_notched[x3:], Skw_unnotched[x3:])
# Check that region inside the notches is 0 for notched Skw
assert np.allclose(Skw_notched[x0:x1], np.zeros(x1 - x0))
assert np.allclose(Skw_notched[x2:x3], np.zeros(x3 - x2))
@pytest.mark.parametrize(
("notch"),
[
(np.array([533, 531]) * u.nm),  # Elements not in monotonic increasing order
(np.array([530, 531, 533]) * u.nm), # Not exactly 2 elements
],
)
def test_notch_errors(notch, single_species_collective_args) -> None:
"""
Check notch input validation
"""
args, kwargs = spectral_density_args_kwargs(single_species_collective_args)
kwargs["notch"] = notch
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(*args, **kwargs)
@pytest.mark.slow
def test_spectral_density_minimal_arguments(single_species_collective_args) -> None:
"""
Check that spectral density runs with minimal arguments
"""
single_species_collective_args["wavelengths"]
args, kwargs = spectral_density_args_kwargs(single_species_collective_args)
# Delete the arguments that have default values
optional_keys = [
"efract",
"ifract",
"ions",
"electron_vel",
"ion_vel",
"probe_vec",
"scatter_vec",
"instr_func",
"notch",
]
for key in optional_keys:
if key in kwargs:
del kwargs[key]
alpha, Skw = thomson.spectral_density(*args, **kwargs)
def test_single_species_collective_lite(single_species_collective_args) -> None:
# Make a copy of the input args
args_fixture_copy = copy.copy(single_species_collective_args)
args, kwargs = spectral_density_args_kwargs(single_species_collective_args)
alpha1, Skw1 = thomson.spectral_density(*args, **kwargs)
lite_kwargs = args_to_lite_args(args_fixture_copy)
args, kwargs = spectral_density_args_kwargs(lite_kwargs)
alpha2, Skw2 = thomson.spectral_density.lite(*args, **kwargs)
assert np.isclose(alpha1, alpha2)
assert np.allclose(Skw1.to(u.s / u.rad).value, Skw2)
def test_spectral_density_lite_minimal_arguments(
single_species_collective_args,
) -> None:
lite_kwargs = args_to_lite_args(single_species_collective_args)
args, kwargs = spectral_density_args_kwargs(lite_kwargs)
# Delete the arguments that have default values
optional_keys = [
"instr_func_arr",
]
for key in optional_keys:
if key in kwargs:
del kwargs[key]
alpha, Skw = thomson.spectral_density.lite(*args, **kwargs)
@pytest.fixture
def multiple_species_collective_args():
"""
Standard args
Includes both kwargs and args: separated by the function
spectral_density_args_kwargs
"""
kwargs = {
"wavelengths": np.arange(520, 545, 0.01) * u.nm,
"probe_wavelength": 532 * u.nm,
"n": 5e17 * u.cm**-3,
"T_e": 10 * u.eV,
}
kwargs["T_i"] = np.array([25, 25]) * u.eV
kwargs["ions"] = [Particle("p+"), Particle("C-12 5+")]
kwargs["probe_vec"] = np.array([1, 0, 0])
kwargs["scatter_vec"] = np.array([0, 1, 0])
kwargs["efract"] = np.array([1.0])
kwargs["ifract"] = np.array([0.7, 0.3])
kwargs["electron_vel"] = np.array([[0, 0, 0]]) * u.km / u.s
kwargs["ion_vel"] = np.array([[-100, 0, 0], [0, 100, 0]]) * u.km / u.s
return kwargs
def test_efract_sum_error(single_species_collective_args) -> None:
args, kwargs = spectral_density_args_kwargs(single_species_collective_args)
kwargs["efract"] = np.array([2.0]) # Sum is not 1
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(*args, **kwargs)
def test_ifract_sum_error(single_species_collective_args) -> None:
args, kwargs = spectral_density_args_kwargs(single_species_collective_args)
kwargs["ifract"] = np.array([0.5, 1.2]) # Sum is not 1
with pytest.raises(ValueError):
alpha, Skw = thomson.spectral_density(*args, **kwargs)
@pytest.fixture
def multiple_species_collective_spectrum(multiple_species_collective_args):
"""
Generates an example Thomson scattering spectrum for multiple ion species
that also have drift velocities. Parameters are set to be in the
collective regime where ion species are important.
"""
wavelengths = multiple_species_collective_args["wavelengths"]
args, kwargs = spectral_density_args_kwargs(multiple_species_collective_args)
alpha, Skw = thomson.spectral_density(*args, **kwargs)
return (alpha, wavelengths, Skw)
def test_multiple_species_collective_spectrum(
multiple_species_collective_spectrum,
) -> None:
"""
Compares the generated spectrum to previously determined values
"""
alpha, wavelength, Skw = multiple_species_collective_spectrum
# Compute the width and max of the spectrum, and the wavelength
# of the max (sensitive to ion vel)
max_skw = np.nanmax(Skw.value)
width = width_at_value(wavelength.value, Skw.value, 2e-12)
max_wavelength = wavelength.value[np.argmax(Skw.value)]
# Check width
assert np.isclose(width, 0.17, 1e-2), (
f"Multiple ion species case spectrum width is {width} instead of "
"expected 0.17"
)
# Check max value
assert np.isclose(max_skw, 6e-12, 1e-11), (
f"Multiple ion species case spectrum max is {max_skw} instead of "
"expected 6e-12"
)
# Check max peak location
assert np.isclose(max_wavelength, 532, 1e-2), (
"Multiple ion species case spectrum peak wavelength is "
f"{max_wavelength} instead of expected 532"
)
@pytest.fixture
def single_species_non_collective_args():
"""
Standard args
Includes both kwargs and args: separated by the function
spectral_density_args_kwargs
"""
kwargs = {
"wavelengths": np.arange(500, 570, 0.01) * u.nm,
"probe_wavelength": 532 * u.nm,
"n": 5e15 * u.cm**-3,
"T_e": 100 * u.eV,
}
kwargs["T_i"] = np.array([10]) * u.eV
kwargs["efract"] = np.array([1.0])
kwargs["ifract"] = np.array([1.0])
kwargs["ions"] = ["H+"]
kwargs["electron_vel"] = np.array([[0, 0, 0]]) * u.km / u.s
kwargs["ion_vel"] = np.array([[0, 0, 0]]) * u.km / u.s
kwargs["probe_vec"] = np.array([1, 0, 0])
kwargs["scatter_vec"] = np.array([0, 1, 0])
return kwargs
@pytest.fixture
def single_species_non_collective_spectrum(single_species_non_collective_args):
"""
Generates an example Thomson scattering spectrum in the non-collective
regime
"""
wavelengths = single_species_non_collective_args["wavelengths"]
args, kwargs = spectral_density_args_kwargs(single_species_non_collective_args)
alpha, Skw = thomson.spectral_density(*args, **kwargs)
return (alpha, wavelengths, Skw)
@pytest.mark.slow
def test_single_species_non_collective_spectrum(
single_species_non_collective_spectrum,
) -> None:
"""
Compares the generated spectrum to previously determined values
"""
alpha, wavelength, Skw = single_species_non_collective_spectrum
# Check that alpha is correct
assert np.isclose(
alpha, 0.05707, atol=0.01
), f"Non-collective case alpha returns {alpha} instead of expected 0.05707"
e_width = width_at_value(wavelength.value, Skw.value, 0.2e-13)
# Check that the widths of the electron feature matches expectations
assert np.isclose(e_width, 22.6699, 1e-3), (
"Non-collective case electron "
f"feature width is {e_width} "
"instead of expected 22.6699"
)
@pytest.mark.parametrize(
("kwargs", "error", "msg"),
[
# Ion species provided but empty
(
{"ions": []},
ValueError,
"At least one ion species needs to be defined.",
),
# Inconsistent number of ion parameters
(
{
"ifract": [0.5, 0.5],
"T_i": 5 * u.eV,
},
ValueError,
"Inconsistent number of ion species in ifract",
),
(
{"ifract": [0.5, 0.5], "ion_vel": np.array([[100, 0, 0]]) * u.km / u.s},
ValueError,
"Inconsistent number of ion species in ifract",
),
# Inconsistent number of electron parameters
(
{
"efract": [0.5, 0.5],
"T_e": np.array(
[
5,
]
)
* u.eV,
},
ValueError,
"number of electron populations",
),
(
{
"efract": [0.5, 0.5],
"electron_vel": np.array([[100, 0, 0]]) * u.km / u.s,
},
ValueError,
"number of electron populations",
),
# List of strings
(
{
"ions": [
"p+",
]
},
None,
None,
),
# List of Particles
(
{
"ions": [
Particle("p+"),
]
},
None,
None,
),
# Particle list
({"ions": ParticleList(["p+"])}, None, None),
# ValueError when an ion is negative
(
{"ions": ParticleList(["p-"])},
ValueError,
"All ions must be positively charged.",
),
# ValueError when an ion charge information is not provided
(
{"ions": ParticleList(["He"])},
ValueError,
"All ions must be positively charged.",
),
],
)
def test_spectral_density_input_errors(
kwargs, error, msg, single_species_collective_args
) -> None:
"""
Validate errors with invalid argument and keyword arguments in
spectral_density
"""
args = single_species_collective_args
# Replace any modified keys
for key, value in kwargs.items():
args[key] = value
# Separate the arguments into args and kwargs for spectral_density
args, kwargs = spectral_density_args_kwargs(args)
if error is None:
alpha, Skw = thomson.spectral_density(*args, **kwargs)
else:
with pytest.raises(error) as excinfo:
alpha, Skw = thomson.spectral_density(*args, **kwargs)
# If msg is not None, check that this string is a subset of the
# error message
if msg is not None:
assert msg in str(excinfo.value)
@pytest.mark.slow
def test_split_populations() -> None:
"""
Make sure that splitting a single population of ions or electrons
into two identical halves returns the same result.
"""
wavelengths = np.arange(520, 545, 0.01) * u.nm
probe_wavelength = 532 * u.nm
n = 5e17 * u.cm**-3
probe_vec = np.array([1, 0, 0])
scatter_vec = np.array([0, 1, 0])
# Combined
T_e = np.array([10]) * u.eV
T_i = np.array([10]) * u.eV
ions = ["H+"]
ifract = np.array([1.0])
efract = np.array([1.0])
alpha, Skw0 = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
T_e=T_e,
T_i=T_i,
ifract=ifract,
efract=efract,
ions=ions,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Split e and i populations into two parts
# this should not change the results since the parts are identical
T_e = np.array([10, 10]) * u.eV
T_i = np.array([10, 10]) * u.eV
ions = ["H+", "H+"]
ifract = np.array([0.2, 0.8])
efract = np.array([0.8, 0.2])
alpha, Skw1 = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
T_e=T_e,
T_i=T_i,
ifract=ifract,
efract=efract,
ions=ions,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Calculate the deviation between the two spectra
# (any differences should be in the noise)
deviation = (Skw0 - Skw1) / Skw0 * 100
assert np.all(deviation < 1e-6), "Failed split populations test"
def test_thomson_with_instrument_function(single_species_collective_args) -> None:
"""
Generates an example Thomson scattering spectrum with an instrument
function applied
"""
wavelengths = single_species_collective_args["wavelengths"]
args, kwargs = spectral_density_args_kwargs(single_species_collective_args)
alpha, Skw_with = thomson.spectral_density(
*args, **kwargs, instr_func=example_instr_func
)
alpha, Skw_without = thomson.spectral_density(*args, **kwargs)
# Assert that the instrument function has made the IAW peak wider
w1 = width_at_value(wavelengths.value, Skw_with.value, 2e-13)
w2 = width_at_value(wavelengths.value, Skw_without.value, 2e-13)
assert w1 > w2
@pytest.mark.parametrize("instr_func", invalid_instr_func_list)
def test_thomson_with_invalid_instrument_function(
instr_func,
single_species_collective_args,
) -> None:
"""
Verifies that an exception is raised if the provided instrument function
is invalid.
"""
args, kwargs = spectral_density_args_kwargs(single_species_collective_args)
kwargs["instr_func"] = instr_func
with pytest.raises(ValueError):
alpha, Skw_with = thomson.spectral_density(*args, **kwargs)
def test_param_to_array_fcns() -> None:
"""
Tests a few low-level routines used to convert lmfit scalar parameters
into array input for `spectral_density` based on a naming convention
"""
params = Parameters()
# Create two groups of test variables, one of scalars and one of vectors
prefix = "T_e"
for i in range(3):
params.add(f"{prefix}_{i}", value=2)
prefix = "ion_vel"
for i in range(2):
for j in ("x", "y", "z"):
params.add(f"{prefix}_{j}_{i}", value=2)
arr = thomson._params_to_array(params, "T_e", vector=False)
assert arr.shape == (3,)
assert np.mean(arr) == 2
arr = thomson._params_to_array(params, "ion_vel", vector=True)
assert arr.shape == (2, 3)
assert np.mean(arr) == 2
def run_fit(
wavelengths,
params,
settings,
noise_amp: float = 0.05,
fit_method: str = "differential_evolution",
fit_kws={}, # noqa: B006
max_iter=None,
check_errors: bool = True, # noqa: ARG001
require_redchi: float = 1.0,
# If false, don't perform the actual fit but instead just create the Model
run_fit: bool = True,
) -> None:
"""
Take a Parameters object, generate some synthetic data near it,
perturb the initial values, then try a fit.
Note: `ions` is passed in settings here (instead of parameters)
because we need the full ions list to make the spectrum. They are then
moved to parameters later in this function.
"""
wavelengths = (wavelengths * u.m).to(u.nm)
true_params = copy.deepcopy(params) # noqa: F841
skeys = list(settings.keys())
pkeys = list(params.keys())
# Fill any missing required parameters
if "efract_0" not in pkeys:
params.add("efract_0", value=1.0, vary=False)
if "ifract_0" not in pkeys:
params.add("ifract_0", value=1.0, vary=False)
if "electron_speed" not in pkeys:
params.add("electron_speed_0", value=0.0, vary=False)
if "ion_speed" not in pkeys:
params.add("ion_speed_0", value=0.0, vary=False)
# LOAD FROM PARAMS
n = params["n"]
T_e = thomson._params_to_array(params, "T_e")
T_i = thomson._params_to_array(params, "T_i")
efract = thomson._params_to_array(params, "efract")
ifract = thomson._params_to_array(params, "ifract")
electron_speed = thomson._params_to_array(params, "electron_speed")
ion_speed = thomson._params_to_array(params, "ion_speed")
if "instr_func" not in skeys:
settings["instr_func"] = None
if "notch" not in skeys:
settings["notch"] = None
# LOAD FROM SETTINGS
ions = settings["ions"]
probe_vec = settings["probe_vec"]
scatter_vec = settings["scatter_vec"]
probe_wavelength = settings["probe_wavelength"]
instr_func = settings["instr_func"]
notch = settings["notch"]
electron_vdir = settings.get("electron_vdir", np.ones([len(T_e), 3]))
ion_vdir = settings.get("ion_vdir", np.ones([len(T_i), 3]))
electron_vel = electron_speed[:, np.newaxis] * electron_vdir
ion_vel = ion_speed[:, np.newaxis] * ion_vdir
# Create the synthetic data
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength * u.m,
n * u.m**-3,
T_e=T_e * u.eV,
T_i=T_i * u.eV,
ifract=ifract,
efract=efract,
ions=ions,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
electron_vel=electron_vel * u.m / u.s,
ion_vel=ion_vel * u.m / u.s,
instr_func=instr_func,
notch=notch,
)
data = Skw
data *= 1 + np.random.normal( # noqa: NPY002
loc=0, scale=noise_amp, size=wavelengths.size
)
data *= 1 / np.nanmax(data)
# Randomly choose the starting values of the parameters within the
# search space (to make the algorithm do some work!)
for p in list(params.keys()):
if params[p].vary:
params[p].value = np.random.uniform( # noqa: NPY002
low=params[p].min, high=params[p].max, size=1
)
# Make the model, then perform the fit
model = thomson.spectral_density_model(
wavelengths.to(u.m).value,
settings,
params,
)
if run_fit:
result = model.fit(
data,
params,
wavelengths=wavelengths.to(u.m).value,
method=fit_method,
max_nfev=max_iter,
fit_kws=fit_kws,
)
# Assert that the fit reduced chi2 is under the requirement specified
assert result.redchi < require_redchi
def spectral_density_model_settings_params(kwargs):
"""
Separate a settings dict and a parameters object from a provided
dictionary.
This is useful for testing the spectral_density_model function.
Values intended as fit parameters must be provided as lmfit Parameter
objects; recognized setting names are collected into the settings dict.
"""
if "wavelengths" in kwargs:
wavelengths = kwargs["wavelengths"]
else:
raise ValueError("Kwargs must include 'wavelengths'")
settings = {}
setting_names = [
"probe_wavelength",
"probe_vec",
"scatter_vec",
"ions",
"electron_vdir",
"ion_vdir",
"instr_func",
"notch",
]
params = Parameters()
for k, v in kwargs.items():
# If key is a setting, add the value to the settings
if k == "wavelengths":
pass
elif k in setting_names:
settings[k] = v
# If v is a parameter, add to the params
elif isinstance(v, Parameter):
params.add(v)
else:
raise ValueError(f"Invalid key: {k}")
return wavelengths, params, settings
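# Illustrative sketch (not part of the test suite) of how the helper above
# splits a kwargs dictionary: plain values whose keys appear in
# `setting_names` become settings, lmfit Parameter objects become fit
# parameters, and "wavelengths" is returned separately.
#
#     kwargs = {
#         "wavelengths": np.linspace(530e-9, 534e-9, 256),
#         "probe_vec": np.array([1, 0, 0]),                 # -> settings
#         "T_e_0": Parameter("T_e_0", value=10, vary=True), # -> params
#     }
#     wavelengths, params, settings = spectral_density_model_settings_params(kwargs)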
@pytest.fixture
def epw_single_species_settings_params():
"""
Standard input for the spectral_density_model function
Includes both settings and params: separated by the function
spectral_density_model_settings_params
"""
probe_wavelength = 532 * u.nm
scattering_angle = np.deg2rad(63)
scatter_vec = np.array([np.cos(scattering_angle), np.sin(scattering_angle), 0])
notch = np.array([531, 533]) * u.nm
kwargs = {"probe_wavelength": probe_wavelength.to(u.m).value}
kwargs["probe_vec"] = np.array([1, 0, 0])
kwargs["scatter_vec"] = scatter_vec
kwargs["notch"] = notch
kwargs["ions"] = ["H+"]
kwargs["n"] = Parameter(
"n", value=2e17 * 1e6, vary=True, min=8e16 * 1e6, max=6e17 * 1e6
)
kwargs["T_e_0"] = Parameter("T_e_0", value=10, vary=True, min=5, max=20)
kwargs["T_i_0"] = Parameter("T_i_0", value=20, vary=False, min=5, max=70)
w0 = probe_wavelength.value
kwargs["wavelengths"] = (
(np.linspace(w0 - 40, w0 + 40, num=512) * u.nm).to(u.m).value
)
return kwargs
@pytest.fixture
def epw_multi_species_settings_params():
"""
Standard input for the spectral_density_model function
Includes both settings and params: separated by the function
spectral_density_model_settings_params
"""
probe_wavelength = 532 * u.nm
probe_vec = np.array([1, 0, 0])
scattering_angle = np.deg2rad(63)
scatter_vec = np.array([np.cos(scattering_angle), np.sin(scattering_angle), 0])
notch = np.array([531, 533]) * u.nm
kwargs = {"probe_wavelength": probe_wavelength.to(u.m).value}
kwargs["probe_vec"] = probe_vec
kwargs["scatter_vec"] = scatter_vec
kwargs["notch"] = notch
kwargs["ions"] = ["H+"]
kwargs["n"] = Parameter(
"n", value=2e17 * 1e6, vary=True, min=8e16 * 1e6, max=6e17 * 1e6
)
kwargs["T_e_0"] = Parameter("T_e_0", value=10, vary=True, min=5, max=20)
kwargs["T_e_1"] = Parameter("T_e_1", value=35, vary=True, min=5, max=20)
kwargs["T_i_0"] = Parameter("T_i_0", value=20, vary=False, min=5, max=70)
kwargs["efract_0"] = Parameter("efract_0", value=0.5, vary=False)
kwargs["efract_1"] = Parameter("efract_1", value=0.5, vary=False)
w0 = probe_wavelength.value
kwargs["wavelengths"] = (
(np.linspace(w0 - 40, w0 + 40, num=512) * u.nm).to(u.m).value
)
return kwargs
@pytest.fixture
def iaw_single_species_settings_params():
"""
Standard input for the spectral_density_model function
Includes both settings and params: separated by the function
spectral_density_model_settings_params
"""
probe_wavelength = 532 * u.nm
probe_vec = np.array([1, 0, 0])
scattering_angle = np.deg2rad(90)
scatter_vec = np.array([np.cos(scattering_angle), np.sin(scattering_angle), 0])
kwargs = {
"probe_wavelength": probe_wavelength.to(u.m).value,
"probe_vec": probe_vec,
"scatter_vec": scatter_vec,
"ions": ["H+"],
"ion_vdir": np.array([[1, 0, 0]]),
"electron_vdir": np.array([[1, 0, 0]]),
"n": Parameter("n", value=2e17 * 1e6, vary=False),
"T_e_0": Parameter("T_e_0", value=10, vary=False, min=5, max=20),
"T_i_0": Parameter("T_i_0", value=20, vary=True, min=5, max=70),
"ifract_0": Parameter("ifract_0", value=1.0, vary=False),
"ion_speed_0": Parameter("ion_speed_0", value=0, vary=False),
"electron_speed_0": Parameter("electron_speed_0", value=0, vary=False),
}
w0 = probe_wavelength.value
kwargs["wavelengths"] = (np.linspace(w0 - 5, w0 + 5, num=512) * u.nm).to(u.m).value
return kwargs
@pytest.fixture
def iaw_multi_species_settings_params():
"""
Standard input for the spectral_density_model function
Includes both settings and params: separated by the function
spectral_density_model_settings_params
"""
probe_wavelength = 532 * u.nm
probe_vec = np.array([1, 0, 0])
scattering_angle = np.deg2rad(63)
scatter_vec = np.array([np.cos(scattering_angle), np.sin(scattering_angle), 0])
kwargs = {
"probe_wavelength": probe_wavelength.to(u.m).value,
"probe_vec": probe_vec,
"scatter_vec": scatter_vec,
"ions": ["H+", "H+", "C-12 +4"],
"ion_vdir": np.array([[0.5, 0.5, 0]]),
"electron_vdir": np.array([[0, 0.2, 0.7]]),
"n": Parameter("n", value=1e19 * 1e6, vary=False),
"T_e_0": Parameter("T_e_0", value=500, vary=False, min=5, max=1000),
"T_i_0": Parameter("T_i_0", value=200, vary=True, min=5, max=1000),
"T_i_1": Parameter("T_i_1", value=500, vary=True, min=5, max=1000),
"T_i_2": Parameter("T_i_2", value=400, vary=False, min=5, max=1000),
"ifract_0": Parameter("ifract_0", value=0.4, vary=False, min=0.2, max=0.8),
"ifract_1": Parameter("ifract_1", value=0.3, vary=False, min=0.2, max=0.8),
"ifract_2": Parameter("ifract_2", value=0.3, vary=False, min=0.2, max=0.8),
"ion_speed_0": Parameter("ion_speed_0", value=0, vary=False),
"ion_speed_1": Parameter("ion_speed_1", value=1e5, vary=True, min=0, max=5e5),
"ion_speed_2": Parameter("ion_speed_2", value=2e5, vary=False, min=0, max=5e5),
"electron_speed_0": Parameter("electron_speed_0", value=0, vary=False),
}
w0 = probe_wavelength.value
kwargs["wavelengths"] = (np.linspace(w0 - 5, w0 + 5, num=512) * u.nm).to(u.m).value
return kwargs
@pytest.fixture
def noncollective_single_species_settings_params():
"""
Standard input for the spectral_density_model function
Includes both settings and params: separated by the function
spectral_density_model_settings_params
"""
probe_wavelength = 532 * u.nm
probe_vec = np.array([1, 0, 0])
scattering_angle = np.deg2rad(30)
scatter_vec = np.array([np.cos(scattering_angle), np.sin(scattering_angle), 0])
kwargs = {
"probe_wavelength": probe_wavelength.to(u.m).value,
"probe_vec": probe_vec,
"scatter_vec": scatter_vec,
"ions": ["H+"],
"ion_vdir": np.array([[1, 0, 0]]),
"electron_vdir": np.array([[1, 0, 0]]),
"n": Parameter(
"n", value=2e17 * 1e6, vary=True, min=8e16 * 1e6, max=6e17 * 1e6
),
"T_e_0": Parameter("T_e_0", value=10, vary=True, min=5, max=20),
"T_i_0": Parameter("T_i_0", value=120, vary=False, min=5, max=70),
"efract_0": Parameter("efract_0", value=1.0, vary=False),
"electron_speed_0": Parameter("electron_speed_0", value=0, vary=False),
}
w0 = probe_wavelength.value
kwargs["wavelengths"] = (
(np.linspace(w0 - 60, w0 + 60, num=512) * u.nm).to(u.m).value
)
return kwargs
@pytest.mark.slow
def test_fit_epw_single_species(epw_single_species_settings_params) -> None:
wavelengths, params, settings = spectral_density_model_settings_params(
epw_single_species_settings_params
)
run_fit(wavelengths, params, settings)
@pytest.mark.slow
def test_fit_epw_multi_species(epw_multi_species_settings_params) -> None:
wavelengths, params, settings = spectral_density_model_settings_params(
epw_multi_species_settings_params
)
run_fit(wavelengths, params, settings)
@pytest.mark.slow
def test_fit_iaw_single_species(iaw_single_species_settings_params) -> None:
wavelengths, params, settings = spectral_density_model_settings_params(
iaw_single_species_settings_params
)
run_fit(wavelengths, params, settings)
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_fit_iaw_instr_func(iaw_single_species_settings_params) -> None:
"""
Tests fitting with an instrument function
"""
wavelengths, params, settings = spectral_density_model_settings_params(
iaw_single_species_settings_params
)
settings["instr_func"] = example_instr_func
run_fit(wavelengths, params, settings)
@pytest.mark.slow
def test_fit_ion_mu_and_z(iaw_single_species_settings_params) -> None:
"""
Tests fitting with ion parameters explicitly set and allowed to vary
"""
wavelengths, params, settings = spectral_density_model_settings_params(
iaw_single_species_settings_params
)
for i, ion in enumerate(settings["ions"]):
_ion = Particle(ion)
mass = _ion.mass.to(u.kg).value
Z = _ion.charge_number
params.add(f"ion_mu_{i!s}", value=mass, vary=True, min=0.5, max=10)
params.add(f"ion_z_{i!s}", value=Z, vary=True, min=1, max=10)
run_fit(wavelengths, params, settings)
@pytest.mark.slow
def test_fit_iaw_multi_species(iaw_multi_species_settings_params) -> None:
wavelengths, params, settings = spectral_density_model_settings_params(
iaw_multi_species_settings_params
)
run_fit(wavelengths, params, settings)
@pytest.mark.slow
def test_fit_noncollective_single_species(
noncollective_single_species_settings_params,
) -> None:
wavelengths, params, settings = spectral_density_model_settings_params(
noncollective_single_species_settings_params
)
run_fit(wavelengths, params, settings)
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_fit_with_instr_func(epw_single_species_settings_params) -> None:
"""
Check that fitting works with an instrument function.
It specifically tests the case where a notch is being used in the filter,
because this can cause a potential error with the instrument function.
"""
wavelengths, params, settings = spectral_density_model_settings_params(
epw_single_species_settings_params
)
settings["instr_func"] = example_instr_func
settings["notch"] = np.array([531, 533]) * 1e-9 * u.nm
# Warns that data should not include any NaNs
# This is taken care of in run_fit by deleting the notch region rather than
# replacing it with np.nan
with pytest.warns(UserWarning, match="If an instrument function is included,"):
run_fit(
wavelengths,
params,
settings,
run_fit=False,
)
# Run the same fit using np.delete instead of np.nan values
run_fit(wavelengths, params, settings)
@pytest.mark.parametrize("instr_func", invalid_instr_func_list)
def test_fit_with_invalid_instr_func(
instr_func, iaw_single_species_settings_params
) -> None:
"""
Verifies that an exception is raised if the provided instrument function
is invalid.
"""
wavelengths, params, settings = spectral_density_model_settings_params(
iaw_single_species_settings_params
)
settings["instr_func"] = instr_func
with pytest.raises(ValueError):
run_fit(wavelengths, params, settings)
@pytest.mark.slow
def test_fit_with_minimal_parameters() -> None:
# Create example data for fitting
probe_wavelength = 532 * u.nm
probe_vec = np.array([1, 0, 0])
scattering_angle = np.deg2rad(90)
scatter_vec = np.array([np.cos(scattering_angle), np.sin(scattering_angle), 0])
w0 = probe_wavelength.value
wavelengths = np.linspace(w0 - 5, w0 + 5, num=512) * u.nm
ions = [Particle("H+")]
n = 2e17 * u.cm**-3
T_i = 20 * u.eV
T_e = 10 * u.eV
alpha, Skw = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
T_e=T_e,
T_i=T_i,
ions=ions,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
data = Skw.value
data *= 1 + np.random.normal( # noqa: NPY002
loc=0, scale=0.1, size=wavelengths.size
)
data *= 1 / np.nanmax(data)
# Create settings and params using only the minimal parameters
# intentionally leave out a few required values to check to make sure an
# exception is raised
settings = {"probe_vec": probe_vec, "scatter_vec": scatter_vec, "ions": ions}
params = Parameters()
params.add("T_e_0", value=T_e.value, vary=False, min=5, max=20)
params.add("T_i_0", value=T_i.value, vary=True, min=5, max=70)
params.add("ion_mu_0", value=1, vary=False)
params.add("ion_z_0", value=ions[0].charge_number, vary=False)
# Try creating model: will raise exception because required values
# are missing in settings, eg. 'probe_wavelength'
with pytest.raises(ValueError):
model = thomson.spectral_density_model(wavelengths, settings, params)
# Add back in the required values
settings["probe_wavelength"] = probe_wavelength.to(u.m).value
# Still raises an exception because 'n' is still missing
with pytest.raises(ValueError):
model = thomson.spectral_density_model(wavelengths, settings, params)
params.add("n", value=n.to(u.m**-3).value, vary=False)
# Make the model, then perform the fit
model = thomson.spectral_density_model(wavelengths.to(u.m).value, settings, params)
result = model.fit( # noqa: F841
data,
params,
wavelengths=wavelengths.to(u.m).value,
method="differential_evolution",
max_nfev=2000,
)
@pytest.mark.parametrize(
("control", "error", "msg"),
[
# Required settings
(
{"probe_wavelength": None},
ValueError,
"not provided in settings, but is required",
),
(
{"scatter_vec": None},
ValueError,
"not provided in settings, but is required",
),
({"probe_vec": None}, ValueError, "not provided in settings, but is required"),
(
{"ions": None},
ValueError,
"not provided in settings, but is required",
),
# Required parameters
({"n": None}, ValueError, "was not provided in parameters, but is required."),
(
{"T_e_0": None},
ValueError,
"was not provided in parameters, but is required.",
),
# Two ion temps are required for this multi-ion example
(
{"T_i_0": None},
ValueError,
"was not provided in parameters, but is required.",
),
(
{"T_i_1": None},
ValueError,
"was not provided in parameters, but is required.",
),
# If speed is not zero, vdir must be set
(
{
"electron_speed_0": Parameter("electron_speed_0", 1e5),
"electron_vdir": None,
},
ValueError,
"electron_vdir must be set if electron_speeds",
),
(
{"ion_speed_0": Parameter("ion_speed_0", 1e5), "ion_vdir": None},
ValueError,
"ion_vdir must be set if ion_speeds",
),
],
)
def test_model_input_validation(
control, error, msg, iaw_multi_species_settings_params
) -> None:
kwargs = iaw_multi_species_settings_params
# We'll need to switch from print() to using logging library
print(list(control.keys())) # noqa: T201
# Remove or replace values in kwargs
for k, v in control.items():
if v is None:
del kwargs[k]
else:
kwargs[k] = v
wavelengths, params, settings = spectral_density_model_settings_params(kwargs)
if error is None:
thomson.spectral_density_model(
wavelengths,
settings,
params,
)
else:
with pytest.raises(error) as excinfo:
thomson.spectral_density_model(wavelengths, settings, params)
# If msg is not None, check that this string is a subset of the
# error message
if msg is not None:
print(excinfo.value) # noqa: T201
assert msg in str(excinfo.value)
|
PlasmaPyREPO_NAMEPlasmaPyPATH_START.@PlasmaPy_extracted@PlasmaPy-main@tests@diagnostics@[email protected]_END.py
|
{
"filename": "for_blog.py",
"repo_name": "eggplantbren/DNest4",
"repo_path": "DNest4_extracted/DNest4-master/code/Templates/Builder/for_blog.py",
"type": "Python"
}
|
import numpy as np
import dnest4.builder as bd
data = {"x": np.array([1.0, 2.0, 3.0, 4.0, 5.0]),\
"y": np.array([1.0, 2.0, 3.0, 3.9, 5.1]),\
"N": 5}
# Create the model
model = bd.Model()
# Slope and intercept
model.add_node(bd.Node("m", bd.Uniform(-100.0, 100.0)))
model.add_node(bd.Node("b", bd.Uniform(-100.0, 100.0)))
# Noise standard deviation
model.add_node(bd.Node("log_sigma", bd.Uniform(-10.0, 10.0)))
model.add_node(bd.Node("sigma", bd.Delta("exp(log_sigma)")))
# p(data | parameters)
for i in range(0, data["N"]):
name = "y{index}".format(index=i)
mean = "m*x{index} + b".format(index=i)
model.add_node(bd.Node(name, bd.Normal(mean, "sigma"), observed=True))
# Create the C++ code
bd.generate_h(model, data)
bd.generate_cpp(model, data)
# Compile the C++ code so it's ready to go
import os
os.system("make")
|
eggplantbrenREPO_NAMEDNest4PATH_START.@DNest4_extracted@DNest4-master@code@Templates@Builder@[email protected]_END.py
|
{
"filename": "drs_utils.py",
"repo_name": "njcuk9999/apero-drs",
"repo_path": "apero-drs_extracted/apero-drs-main/apero/core/utils/drs_utils.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
APERO core utility and miscellaneous functionality
Created on 2020-10-2020-10-05 17:43
@author: cook
"""
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
from apero.base import base
from apero.core import constants
from apero.core.core import drs_base_classes as base_class
from apero.core.core import drs_database
from apero.core.core import drs_exceptions
from apero.core.core import drs_log
from apero.core.core import drs_misc
from apero.core.core import drs_text
from apero.io import drs_fits
# =============================================================================
# Define variables
# =============================================================================
__NAME__ = 'drs_utils.py'
__INSTRUMENT__ = 'None'
__PACKAGE__ = base.__PACKAGE__
__version__ = base.__version__
__author__ = base.__author__
__date__ = base.__date__
__release__ = base.__release__
# get display func
display_func = drs_log.display_func
# get time object
Time = base.Time
# Get Logging function
WLOG = drs_log.wlog
# alias pcheck
pcheck = constants.PCheck(wlog=WLOG)
# get parameter dictionary
ParamDict = constants.ParamDict
# get the binary dictionary
BinaryDict = base_class.BinaryDict
# get exceptions
DrsCodedException = drs_exceptions.DrsCodedException
# get databases
FileIndexDatabase = drs_database.FileIndexDatabase
LogDatabase = drs_database.LogDatabase
# get header classes from io.drs_fits
Header = drs_fits.Header
FitsHeader = drs_fits.fits.Header
# =============================================================================
# Define Classes
# =============================================================================
class RecipeLog:
"""
Recipe log class - to store recipe log data
"""
def __init__(self, name: str, sname: str, params: ParamDict, level: int = 0,
logger: Union[None, drs_log.Logger] = None,
database: Union[LogDatabase, None] = None,
flags: Optional[BinaryDict] = None):
"""
Constructor for the recipe log
:param name: str, the recipe name this recipe log belong to
:param params: ParamDict, the constants parameter dictionary
:param level: int, the level of this log 0 is root, higher numbers are
children of the root
:param logger: if set the WLOG (Logger) instance to use
"""
# set class name
self.class_name = 'RecipeLog'
# set function name
_ = drs_misc.display_func('__init__', __NAME__, self.class_name)
# get a database instance
if isinstance(database, LogDatabase):
self.logdbm = database
else:
self.logdbm = LogDatabase(params)
self.logdbm.load_db()
# get the recipe name
self.name = str(name)
self.sname = str(sname)
# the block kind (raw/tmp/red etc)
self.block_kind = 'None'
# the default logging absolute path
self.defaultpath = str(params['DRS_DATA_MSG_FULL'])
# the log fits file name (log.fits)
self.logfitsfile = str(params['DRS_LOG_FITS_NAME'])
# the recipe input directory from recipe.inputdir
self.inputdir = str(params['INPATH'])
# the recipe output directory from recipe.outputdir
self.outputdir = str(params['OUTPATH'])
# the parameter dictionary of constants
self.params = params
# ---------------------------------------------------------------------
self.no_log = False
# deal with no save --> no log
if 'INPUTS' in params:
if 'NOSAVE' in params['INPUTS']:
if params['INPUTS']['NOSAVE']:
self.no_log = True
# ---------------------------------------------------------------------
# the Logger instances (or None)
self.wlog = logger
# set the pid
self.pid = str(params['PID'])
# set the human time
self.htime = str(params['DATE_NOW'])
self.utime = Time(self.htime).unix
self.start_time = str(params['DATE_NOW'])
self.end_time = 'None'
self.log_start = 'None'
self.log_end = 'None'
# set the group name
self.group = str(params['DRS_GROUP'])
# set the night name directory (and deal with no value)
if 'OBS_DIR' not in params:
self.obs_dir = 'other'
elif params['OBS_DIR'] in [None, 'None', '']:
self.obs_dir = 'other'
else:
self.obs_dir = str(params['OBS_DIR'])
# set the log file name (just used to save log directory)
# for log table entry
self.log_file = 'Not Set'
# set the plot file name (just used to save the plot directory) for
# log table entry
self.plot_dir = 'Not Set'
# set the inputs
self.args = ''
self.kwargs = ''
self.skwargs = ''
self.runstring = ''
self.recipe_type = str(params['DRS_RECIPE_TYPE'])
self.recipe_kind = str(params['DRS_RECIPE_KIND'])
self.program_name = str(params['DRS_USER_PROGRAM'])
# set that recipe started
self.started = True
# set the iteration
self.set = []
# set the level (top level=0)
self.level = level
# set the level criteria
self.level_criteria = ''
self.level_iteration = 0
# set qc
self.passed_qc = False
# set qc params
self.qc_string = ''
self.qc_name = ''
self.qc_value = ''
self.qc_pass = ''
self.qc_logic = ''
# keep the flags
self.flags = flags
self.flagnum = 0
self.flagstr = ''
# set flag: in parallel
if 'INPUTS' in params:
in_parallel = params['INPUTS'].get('PARALLEL', False)
self.flags['IN_PARALLEL'] = in_parallel
# set flag: running
self.flags['RUNNING'] = True
# set the errors
self.errors = ''
# get system stats at start
stats = drs_misc.get_system_stats()
# system stats
self.ram_usage_start = stats['ram_used']
self.ram_usage_end = -1
self.ram_total = stats['raw_total']
self.swap_usage_start = stats['swap_used']
self.swap_usage_end = -1
self.swap_total = stats['swap_total']
self.cpu_usage_start = stats['cpu_percent']
self.cpu_usage_end = -1
self.cpu_num = stats['cpu_total']
def __getstate__(self) -> dict:
"""
For when we have to pickle the class
:return:
"""
# set state to __dict__
state = dict(self.__dict__)
# return dictionary state
return state
def __setstate__(self, state: dict):
"""
For when we have to unpickle the class
:param state: dictionary from pickle
:return:
"""
# update dict with state
self.__dict__.update(state)
def __str__(self) -> str:
"""
String representation of this class
:return:
"""
# set function name
_ = drs_misc.display_func('__str__', __NAME__, self.class_name)
# return string representation of RecipeLog
return 'RecipeLog[{0}]'.format(self.name)
def copy(self, rlog: 'RecipeLog'):
"""
Copy another RecipeLog over this one
:param rlog: Another RecipeLog instance
:return:
"""
# set function name
_ = drs_misc.display_func('copy', __NAME__, self.class_name)
# copy parameters
self.name = str(rlog.name)
self.sname = str(rlog.sname)
self.block_kind = str(rlog.block_kind)
self.recipe_type = str(rlog.recipe_type)
self.recipe_kind = str(rlog.recipe_kind)
self.pid = str(rlog.pid)
self.htime = str(rlog.htime)
self.utime = str(rlog.utime)
self.group = str(rlog.group)
self.obs_dir = str(rlog.obs_dir)
self.defaultpath = str(rlog.defaultpath)
self.inputdir = str(rlog.inputdir)
self.outputdir = str(rlog.outputdir)
self.log_file = str(rlog.log_file)
self.plot_dir = str(rlog.plot_dir)
self.runstring = str(rlog.runstring)
self.args = str(rlog.args)
self.kwargs = str(rlog.kwargs)
self.skwargs = str(rlog.skwargs)
self.start_time = str(rlog.start_time)
self.end_time = str(rlog.end_time)
self.level_criteria = str(rlog.level_criteria)
self.passed_qc = bool(rlog.passed_qc)
self.qc_string = str(rlog.qc_string)
self.qc_name = str(rlog.qc_name)
self.qc_value = str(rlog.qc_value)
self.qc_pass = str(rlog.qc_pass)
self.qc_logic = str(rlog.qc_logic)
self.flags = rlog.flags.copy()
self.flagnum = int(rlog.flagnum)
self.flagstr = str(rlog.flagstr)
self.errors = str(rlog.errors)
self.ram_usage_start = float(rlog.ram_usage_start)
self.ram_usage_end = float(rlog.ram_usage_end)
self.ram_total = float(rlog.ram_total)
self.swap_usage_start = float(rlog.swap_usage_start)
self.swap_usage_end = float(rlog.swap_usage_end)
self.swap_total = float(rlog.swap_total)
self.cpu_usage_start = float(rlog.cpu_usage_start)
self.cpu_usage_end = float(rlog.cpu_usage_end)
self.cpu_num = int(rlog.cpu_num)
def set_log_file(self, logfile: Union[str, Path]):
"""
Set the log file
:param logfile: str, the log file
:return:
"""
# set function name
_ = drs_misc.display_func('set_log_file', __NAME__,
self.class_name)
# set the log file
self.log_file = str(logfile)
def set_plot_dir(self, params: ParamDict,
location: Union[str, Path, None] = None,
write: bool = True):
"""
Set the plot directory for RecipeLog and all children
:param params: ParamDict, the constants parameter dictionary
:param location: str or Path, the path of the plot directory
:param write: bool, if True update the log file
:return:
"""
# set function name
_ = drs_misc.display_func('set_plot_dir', __NAME__,
self.class_name)
# deal with location being set
if location is not None:
self.plot_dir = str(location)
# update children
if len(self.set) != 0:
for child in self.set:
child.set_plot_dir(params, location, write=False)
else:
self.plot_dir = 'None'
# whether to write (update) recipe log file
if write:
self.write_logfile()
def add_level(self, params: ParamDict, key: str, value: Any,
write: bool = True) -> 'RecipeLog':
"""
Add a child level to the recipe log i.e. inside a for loop we may want
one log entry for each iteration (level is incremented - root = 0)
:param params: ParamDict, the constants parameter dictionary
:param key: str, text describing this new level (i.e. fiber or
iteration) converted to key = value
e.g. key: fiber
we have a level with the following:
fiber = A
fiber = B
fiber = C
:param value: Any (must be convertible to string) the value of this
iterations key i.e. key = value
:param write: bool, if True writes to RecipeLog fits file
:return: RecipeLog, the child instance of the parent RecipeLog
all children are stored inside a parent
"""
# set function name
# _ = drs_misc.display_func('add_level', __NAME__, self.class_name)
# get new level
level = self.level + 1
# create new log
newlog = RecipeLog(self.name, self.sname, params, level=level,
logger=self.wlog, database=self.logdbm,
flags=self.flags)
# copy from parent
newlog.copy(self)
# set log start time
newlog.log_start = str(Time.now().iso)
# record level criteria
newlog.level_criteria += '{0}={1} '.format(key, value)
# update the level iteration
newlog.level_iteration = len(self.set)
# add newlog to set
self.set.append(newlog)
# ---------------------------------------------------------------------
# whether to write (update) recipe log file
if write:
self.write_logfile()
# return newlog (for use)
return newlog
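# Illustrative usage sketch (the names below are assumptions, not APERO calls):
#     fiber_log = rlog.add_level(params, 'fiber', 'A')
#     ...reduce fiber A...
#     fiber_log.end()
# The child entry gets LEVEL = parent level + 1 and records
# LEVEL_CRIT = "fiber=A ".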
def add_qc(self,
qc_params: Tuple[List[str], List[Any], List[str], List[int]],
passed: Union[int, bool, str], write: bool = True):
"""
Add the quality control criteria (stored in qc_params) to the recipe
log
:param qc_params: the quality control storage, consists of
qc_names, qc_values, qc_logic, qc_pass where
qc_names is a list of variable names,
qc_values is a list of values for each variable
qc_logic is the pass/fail logic for each variable
qc_pass is either 1 for passed qc or 0 for failure
:param passed: int/bool/str if 1 or True or '1' all quality control was
passed (this is stored as a column in log database)
:param write: bool, if True write parameters to log database
:return:
"""
# set function name
_ = drs_misc.display_func('add_qc', __NAME__, self.class_name)
# update passed
if passed in [1, True, '1']:
self.passed_qc = True
else:
self.passed_qc = False
# update qc params
qc_names, qc_values, qc_logic, qc_pass = qc_params
for it in range(len(qc_names)):
# deal with no qc set
if qc_names[it] in ['None', None, '']:
continue
# set up qc pass string
if qc_pass[it]:
pass_str = 'PASSED'
else:
pass_str = 'FAILED'
# deal with qc set
qargs = [qc_names[it], qc_values[it], qc_logic[it], pass_str]
self.qc_string += '{0}={1} [{2}] {3} ||'.format(*qargs)
self.qc_name += '{0}||'.format(qc_names[it])
self.qc_value += '{0}||'.format(qc_values[it])
self.qc_logic += '{0}||'.format(qc_logic[it])
self.qc_pass += '{0}||'.format(qc_pass[it])
# whether to write (update) recipe log file
if write:
self.write_logfile()
def no_qc(self, write: bool = True):
"""
Writes that quality control passed (there were no quality control checks)
:param write: bool, whether to write to log database
:return:
"""
# set function name
_ = drs_misc.display_func('no_qc', __NAME__, self.class_name)
# set passed_qc to True (no qc means automatic pass)
self.passed_qc = True
# all children must also not have qc
instances = self.get_children()
# loop around instances
for inst in instances:
inst.passed_qc = True
# whether to write (update) recipe log file
if write:
self.write_logfile()
def add_error(self, errortype: Union[Exception, str],
errormsg: str, write: bool = True):
"""
Add an error (exception) to the database in the errors column
errors are separated by two ||
ErrorType: ErrorMessage ||
:param errortype: Exception or string, the error exception or a string
representation of it
:param errormsg: str, the error message to store
:param write: bool, if True writes to the log database
:return:
"""
# set function name
_ = drs_misc.display_func('add_error', __NAME__, self.class_name)
# add errors in form ErrorType: ErrorMessage ||
self.errors += '"{0}":"{1}"||'.format(errortype, errormsg)
# whether to write (update) recipe log file
if write:
self.write_logfile()
def get_children(self) -> List['RecipeLog']:
"""
Get all child classes attached to this instance
:return:
"""
# if we have a set then we look for children
if len(self.set) != 0:
children = []
# loop around children and check for children of children
for child in self.set:
children += child.get_children()
# return this list of instances
return children
# if we don't have a set we just return ourself (weirdly we are one of
# our children in this definition)
else:
return [self]
def end(self, write: bool = True, success: bool = True):
"""
Add the row that says recipe finished correctly to database
:param write: bool, whether to write to log database
:param success: bool, if True adds an ended flag
:return:
"""
# set function name
_ = drs_misc.display_func('end', __NAME__, self.class_name)
# add the end time
end_time = str(Time.now().iso)
# both log end (for child) and full end time are updated
self.log_end = end_time
self.end_time = end_time
# set the ended parameter to True
if success:
self.flags['ENDED'] = True
# set the running parameter to False (we have finished whether
# successful or not)
self.flags['RUNNING'] = False
# get system stats at end
stats = drs_misc.get_system_stats()
self.ram_usage_end = stats['ram_used']
self.swap_usage_end = stats['swap_used']
self.cpu_usage_end = stats['cpu_percent']
# whether to write (update) recipe log file
if write:
self.write_logfile()
def write_logfile(self):
"""
Write to the log database
:return: None, unless return_values is True
"""
# set function name
_ = drs_misc.display_func('write_logfile', __NAME__,
self.class_name)
# do not write log if we have the no log flag
if self.no_log:
return
# ---------------------------------------------------------------------
# remove all entries with this pid
self.logdbm.remove_pids(self.pid)
# ---------------------------------------------------------------------
# add instances (if we have a set use the set otherwise just add
# your self)
instances = self.get_children()
# loop around instances
for inst in instances:
# get utime
utime = float(Time(inst.htime).unix)
# convert flags before writing
inst.convert_flags()
# add entries
self.logdbm.add_entries(recipe=inst.name, sname=inst.sname,
block_kind=inst.block_kind,
recipe_type=inst.recipe_type,
recipe_kind=inst.recipe_kind,
program_name=inst.program_name,
pid=inst.pid, htime=inst.htime,
unixtime=utime, group=inst.group,
level=inst.level,
sublevel=inst.level_iteration,
levelcrit=inst.level_criteria,
inpath=inst.inputdir,
outpath=inst.outputdir,
obs_dir=inst.obs_dir,
logfile=inst.log_file,
plotdir=inst.plot_dir,
runstring=inst.runstring, args=inst.args,
kwargs=inst.kwargs, skwargs=inst.skwargs,
start_time=inst.start_time,
# end time has to be taken from parent
end_time=self.end_time,
started=inst.started,
passed_all_qc=inst.passed_qc,
qc_string=inst.qc_string,
qc_names=inst.qc_name,
qc_values=inst.qc_value,
qc_logic=inst.qc_logic,
qc_pass=inst.qc_pass,
errors=inst.errors,
ended=int(inst.flags['ENDED']),
flagnum=inst.flagnum,
flagstr=inst.flagstr,
used=1,
ram_usage_start=inst.ram_usage_start,
ram_usage_end=inst.ram_usage_end,
ram_total=inst.ram_total,
swap_usage_start=inst.swap_usage_start,
swap_usage_end=inst.swap_usage_end,
swap_total=inst.swap_total,
cpu_usage_start=inst.cpu_usage_start,
cpu_usage_end=inst.cpu_usage_end,
cpu_num=inst.cpu_num,
log_start=inst.log_start,
log_end=inst.log_end)
def _make_row(self) -> OrderedDict:
"""
Make a row in the RecipeLog file
:return: OrderedDict the row entry where each key is a column name
"""
# set function name
_ = drs_misc.display_func('_make_row', __NAME__, self.class_name)
# convert flags
self.convert_flags()
# set rows
row = OrderedDict()
row['RECIPE'] = self.name
row['BLOCK_KIND'] = self.block_kind
row['RECIPE_TYPE'] = self.recipe_type
row['RECIPE_KIND'] = self.recipe_kind
row['PID'] = self.pid
row['HTIME'] = self.htime
row['GROUPNAME'] = self.group
row['LEVEL'] = self.level
row['SUBLEVEL'] = self.level_iteration
row['LEVEL_CRIT'] = self.level_criteria
row['INPATH'] = self.inputdir
row['OUTPATH'] = self.outputdir
row['OBS_DIR'] = self.obs_dir
row['LOGFILE'] = self.log_file
row['PLOTDIR'] = self.plot_dir
row['RUNSTRING'] = self.runstring
# add inputs
row['ARGS'] = self.args
row['KWARGS'] = self.kwargs
row['SKWARGS'] = self.skwargs
# add timings
row['START_TIME'] = self.start_time
row['END_TIME'] = self.end_time
# add whether recipe started
row['STARTED'] = self.started
# add whether all qc passed
row['PASSED_ALL_QC'] = self.passed_qc
# qc columns
row['QC_STRING'] = self.qc_string.strip().strip('||').strip()
row['QC_NAMES'] = self.qc_name.strip().strip('||').strip()
row['QC_VALUES'] = self.qc_value.strip().strip('||').strip()
row['QC_LOGIC'] = self.qc_logic.strip().strip('||').strip()
row['QC_PASS'] = self.qc_pass.strip().strip('||').strip()
# add errors
row['ERRORMSGS'] = self.errors
# add flags
row['FLAGNUM'] = self.flagnum
row['FLAGSTR'] = self.flagstr
# add system stats
row['RAM_USAGE_START'] = self.ram_usage_start
row['RAM_USAGE_END'] = self.ram_usage_end
row['RAW_TOTAL'] = self.ram_total
row['SWAP_USAGE_START'] = self.swap_usage_start
row['SWAP_USAGE_END'] = self.swap_usage_end
row['SWAP_TOTAL'] = self.swap_total
row['CPU_USAGE_START'] = self.cpu_usage_start
row['CPU_USAGE_END'] = self.cpu_usage_end
row['CPU_NUM'] = self.cpu_num
row['LOG_START'] = self.log_start
row['LOG_END'] = self.log_end
# return row
return row
def update_flags(self, **kwargs: bool):
"""
Update the log flags
:param kwargs: str, the keys to update
:return:
"""
# loop around flags and update the required ones
for kwarg in kwargs:
self.flags[kwarg] = bool(kwargs[kwarg])
# convert flags for logging
self.convert_flags()
# whether to write (update) recipe log file
self.write_logfile()
def convert_flags(self):
"""
Convert flags from a list to a string (keys separated by |)
and decode the flag number from the individual flags
:return: None, updates flagnum and flagstr
"""
self.flagnum = self.flags.decode()
self.flagstr = '|'.join(list(self.flags.keys()))
def get_rows(self) -> List[OrderedDict]:
"""
Get all rows for an entry (including all rows from the child RecipeLog
entries)
:return:
"""
# set function name
_ = drs_misc.display_func('_get_rows', __NAME__, self.class_name)
# set rows storage
rows = []
# case where we have no sets
if len(self.set) == 0:
rows.append(self._make_row())
else:
# else we have children
for child in self.set:
rows += child.get_rows()
# return rows
return rows
# complex param table return
ParamTableReturn = Tuple[List[str], List[str], list, List[str], List[str],
List[int]]
def get_param_table(self) -> ParamTableReturn:
"""
Make lists of the names, kinds, values, sources, descriptions and counts
for param table addition
Note columns should be rlog.{LOG_DB_COLUMN}
where LOG_DB_COLUMN is lowercase and is checked at the end
:return: tuple of lists (name/kinds/values/sources/descriptions/counts)
"""
# set function name
func_name = display_func('get_param_table', __NAME__, self.class_name)
# storage arrays
names = []
param_kinds = []
values = []
source = []
description = []
count = []
# get log keys
ldb_cols = constants.pload().LOG_DB_COLUMNS()
log_keys = list(ldb_cols.altnames)
log_comments = list(ldb_cols.comments)
# convert the flags
self.convert_flags()
# ---------------------------------------------------------------------
# define the values for each column (must be same length as
# LOG_DB_COLUMNS
log_values = [self.name, self.sname, self.block_kind,
self.recipe_type, self.recipe_kind, self.program_name,
self.pid, self.htime, float(Time(self.htime).unix),
self.group, self.level, self.level_iteration,
self.level_criteria, self.inputdir, self.outputdir,
self.obs_dir, self.log_file, self.plot_dir,
self.runstring, self.args, self.kwargs, self.skwargs,
self.start_time, self.end_time, self.started,
self.passed_qc, self.qc_string,
self.qc_name, self.qc_value, self.qc_logic, self.qc_pass,
self.errors, int(self.flags['ENDED']),
self.flagnum, self.flagstr, 1, self.ram_usage_start,
self.ram_usage_end, self.ram_total, self.swap_usage_start,
self.swap_usage_end, self.swap_total,
self.cpu_usage_start, self.cpu_usage_end, self.cpu_num,
self.log_start, self.log_end]
# ---------------------------------------------------------------------
# loop around all rows and add to params
for it in range(len(log_keys)):
# ended will be zero as we are inside a recipe
# for the rlog table we assume the recipe finished
# (otherwise we would have to update the header at some point
# after the recipe finished)
if log_keys[it].endswith('ENDED'):
value = 1
else:
value = log_values[it]
# set the name (from log key)
names.append(log_keys[it])
# set the parameter kind (rlog)
param_kinds.append('rlog')
# set the value
values.append(value)
# set the source of the value
source.append(func_name)
# set the description using the log comments
description.append(log_comments[it])
# set the count to 1 (always 1)
count.append(1)
# ---------------------------------------------------------------------
# return lists
return names, param_kinds, values, source, description, count
# =============================================================================
# Define functions
# =============================================================================
# complex typing for update_index_db
FileType = Union[List[Path], Path, List[str], str, None]
def update_index_db(params: ParamDict, block_kind: str,
includelist: Union[List[str], None] = None,
excludelist: Union[List[str], None] = None,
filename: FileType = None,
suffix: str = '',
findexdbm: Union[FileIndexDatabase, None] = None
) -> FileIndexDatabase:
"""
Block function to update index database
(if params['INPUTS']['PARALLEL'] is True the database is not updated).
:param params: ParamDict, the parameter dictionary of constants
:param block_kind: str, the block kind (raw/tmp/red)
:param includelist: list of strings or None, if set the observation
directories to include in update
:param excludelist: list of strings or None, if set the observation
directories to exclude in update
:param filename: list of paths, path, list or strings or string or None,
if set the filename or filenames to update
:param suffix: str, the suffix (i.e. extension of filenames) - filters
to only set these files
:param findexdbm: IndexDatabase instance or None, if set will not reload
index database if None will load index database
:return: updated or loaded index database unless
params['INPUTS']['PARALLEL'] is True
"""
# -------------------------------------------------------------------------
# load the index database
if findexdbm is None:
findexdbm = FileIndexDatabase(params)
findexdbm.load_db()
# -------------------------------------------------------------------------
# check whether we are updating the index
update_index = True
if 'INPUTS' in params:
if params['INPUTS'].get('PARALLEL', False):
update_index = False
if not update_index:
return findexdbm
# -------------------------------------------------------------------------
# deal with white list and black list
# no include dirs
if drs_text.null_text(includelist, ['None', 'All', '']):
include_dirs = None
elif includelist in [['All'], ['None'], ['']]:
include_dirs = None
# else use include list dirs
else:
include_dirs = list(includelist)
# no exclude dirs
if drs_text.null_text(excludelist, ['None', 'All', '']):
exclude_dirs = None
elif excludelist in [['All'], ['None'], ['']]:
exclude_dirs = None
# else exclude dirs
else:
exclude_dirs = list(excludelist)
# -------------------------------------------------------------------------
# update index database with raw files
findexdbm.update_entries(block_kind=block_kind,
exclude_directories=exclude_dirs,
include_directories=include_dirs,
filename=filename, suffix=suffix)
# -------------------------------------------------------------------------
# we need to reset some globally stored variables - these should be
# recalculated when used
store = drs_database.PandasDBStorage()
store.reset(subkey=block_kind)
# return the database
return findexdbm
def find_files(params: ParamDict, block_kind: str, filters: Dict[str, str],
columns='ABSPATH',
findexdbm: Union[FileIndexDatabase, None] = None
) -> Union[np.ndarray, pd.DataFrame]:
"""
Find a type of files from the file index database using a set of filters
:param params: ParamDict, the parameter dictionary of constants
:param block_kind: str, the block kind (raw/tmp/red etc)
:param filters: dict, the column names within the file index database
with which to filter by, the values of the dictionary
filter the database. filters are used with "AND" logic
:param columns: str, the columns to return from the database (can use
'*' for all); if a single column is given a numpy array
is returned, otherwise a pandas dataframe is returned
:param findexdbm: FileIndexDatabase class or None, pass a current
file index database class (otherwise reloaded)
:return: if one column a numpy 1D array is returned, otherwise a pandas
dataframe is returned with all the requested columns
"""
# update database
update_index = True
if 'INPUTS' in params:
if params['INPUTS'].get('PARALLEL', False):
update_index = False
# update index database if required
if update_index:
findexdbm = update_index_db(params, block_kind=block_kind,
findexdbm=findexdbm)
# get columns
colnames = findexdbm.database.colnames('*')
# get file list using filters
condition = 'BLOCK_KIND="{0}"'.format(block_kind)
# loop around filters
for fkey in filters:
if fkey in colnames:
_filters = filters[fkey]
# make sure filter is a list
if isinstance(_filters, str):
_filters = [_filters]
# loop around filter elements and combine with "OR"
subconditions = []
for _filter in _filters:
# make sure filter is stripped
_filter = _filter.strip()
# add to subconditions
subconditions.append('{0}="{1}"'.format(fkey, _filter))
# add subconditions to condition
condition += ' AND ({0})'.format(' OR '.join(subconditions))
# get columns for this condition
return findexdbm.get_entries(columns, block_kind=block_kind,
condition=condition)
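# Illustrative usage sketch (the column names and values below are
# assumptions and may not exist in a given file index database):
#     filters = dict(KW_DPRTYPE=['OBJ_FP', 'OBJ_DARK'], KW_OBJNAME='GL699')
#     abspaths = find_files(params, 'tmp', filters, columns='ABSPATH')
# Different filter keys are combined with AND; multiple values for a single
# key are combined with OR, matching the condition string built above.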
def uniform_time_list(times: Union[List[float], np.ndarray], number: int
) -> np.ndarray:
"""
Create a uniformly distributed list of times. Takes the full times vector
and cuts it down to a set of positions (length "number") that are spread
as uniformly as possible in time
:param times: list or numpy 1D array, a 1D vector of times to select from
:param number: int, the number of elements to keep after the cut
:return: np.ndarray, mask, True where time should be used
"""
# if we have less than the required number of files return a mask of all
# files
if len(times) <= number:
return np.ones_like(times).astype(bool)
# convert times to numpy array
times = np.array(times)
# copy the times to new vector
times2 = times[np.argsort(times)]
# loop around until we have N files in times2
while len(times2) > number:
# work out the difference in times between previous and next times
dt1 = np.abs(times2 - np.roll(times2, 1))
dt2 = np.abs(times2 - np.roll(times2, -1))
# find points where the gap to the following time is smaller than the
# gap to the preceding time
dmask = dt2 < dt1
# use the smaller of the two gaps as the effective spacing
dt1[dmask] = dt2[dmask]
# remove smallest delta time
times2 = np.delete(times2, np.argmin(dt1))
# create a mask of positions of times in times2
mask = np.array(np.in1d(times, times2))
# return the mask
return mask
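# Illustrative usage sketch: keep 3 epochs spread as evenly as possible in
# time from a clustered set of 6 (numbers are arbitrary):
#     times = np.array([0.0, 0.1, 0.2, 5.0, 5.1, 10.0])
#     mask = uniform_time_list(times, 3)
#     selected = np.array(times)[mask]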
def display_flag(params: ParamDict):
"""
Print out the binary flags used throughout the logging process
:param params: ParamDict, parameter dictionary of constants
:return: None, prints out flags using logger
"""
# get inputs
inputs = params['INPUTS']
null_text = ['None', '', 'Null']
# flag mode
cond1 = drs_text.null_text(inputs.get('RECIPE', None), null_text)
cond2 = drs_text.null_text(inputs.get('FLAGNUM', None), null_text)
# deal with recipe or flagnum being None
if cond1 or cond2:
return
# get the recipe name (or short name)
recipe = str(params['INPUTS']['recipe'].replace('.py', ''))
# get the flag number
flagnum = params['INPUTS']['flagnum']
# print progress
WLOG(params, '', 'Flag mode: {0}[{1}]'.format(recipe, flagnum))
# load pseudo constants
pconst = constants.pload()
# get the recipe module
rmod = pconst.RECIPEMOD().get()
# get binary flags for recipe
srecipes = rmod.recipes
srecipe = None
found = False
# find recipe in recipes
for srecipe in srecipes:
# remove py from recipe name
rname = srecipe.name.replace('.py', '')
rname = rname.replace(__INSTRUMENT__.lower(), '')
rname = rname.strip('_')
# test recipe and short name
cond3 = recipe.upper() == rname.upper()
cond4 = recipe.upper() == srecipe.shortname.upper()
# if cond 3 or cond 4 we have found our recipe
if cond3 or cond4:
found = True
break
# deal with recipe not being found
if not found:
WLOG(params, 'warning', 'Invalid "recipe" argument.')
return
# get binary flags
flags = srecipe.flags
# deal with non-integer (cannot encode a non-int flag number)
if not isinstance(flagnum, int):
WLOG(params, 'warning', 'Invalid flag number (must be int).')
return
# encode the given number
flags.encode(flagnum)
# print the recipe name
WLOG(params, '', 'recipe = {0}'.format(srecipe.name))
# print the flags
for flag in flags:
WLOG(params, '', '\t{0:20s}: {1}'.format(flag, flags[flag]))
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
print('Hello World')
# =============================================================================
# End of code
# =============================================================================
|
njcuk9999REPO_NAMEapero-drsPATH_START.@apero-drs_extracted@apero-drs-main@apero@core@utils@[email protected]_END.py
|
{
"filename": "resource_sharer.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/multiprocessing/resource_sharer.py",
"type": "Python"
}
|
#
# We use a background thread for sharing fds on Unix, and for sharing sockets on
# Windows.
#
# A client which wants to pickle a resource registers it with the resource
# sharer and gets an identifier in return. The unpickling process will connect
# to the resource sharer, sends the identifier and its pid, and then receives
# the resource.
#
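# Illustrative sketch of that round trip (not part of this module), assuming a
# Unix platform where DupFd is available:
#
#     from multiprocessing.resource_sharer import DupFd
#     dup = DupFd(fd)          # register the fd; `dup` is picklable
#     ... pickle `dup` and send it to another process ...
#     new_fd = dup.detach()    # receiver: connect back, obtain a duplicate fd
#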
import os
import signal
import socket
import sys
import threading
from . import process
from .context import reduction
from . import util
__all__ = ['stop']
if sys.platform == 'win32':
__all__ += ['DupSocket']
class DupSocket(object):
'''Picklable wrapper for a socket.'''
def __init__(self, sock):
new_sock = sock.dup()
def send(conn, pid):
share = new_sock.share(pid)
conn.send_bytes(share)
self._id = _resource_sharer.register(send, new_sock.close)
def detach(self):
'''Get the socket. This should only be called once.'''
with _resource_sharer.get_connection(self._id) as conn:
share = conn.recv_bytes()
return socket.fromshare(share)
else:
__all__ += ['DupFd']
class DupFd(object):
'''Wrapper for fd which can be used at any time.'''
def __init__(self, fd):
new_fd = os.dup(fd)
def send(conn, pid):
reduction.send_handle(conn, new_fd, pid)
def close():
os.close(new_fd)
self._id = _resource_sharer.register(send, close)
def detach(self):
'''Get the fd. This should only be called once.'''
with _resource_sharer.get_connection(self._id) as conn:
return reduction.recv_handle(conn)
class _ResourceSharer(object):
'''Manager for resources using background thread.'''
def __init__(self):
self._key = 0
self._cache = {}
self._lock = threading.Lock()
self._listener = None
self._address = None
self._thread = None
util.register_after_fork(self, _ResourceSharer._afterfork)
def register(self, send, close):
'''Register resource, returning an identifier.'''
with self._lock:
if self._address is None:
self._start()
self._key += 1
self._cache[self._key] = (send, close)
return (self._address, self._key)
@staticmethod
def get_connection(ident):
'''Return connection from which to receive identified resource.'''
from .connection import Client
address, key = ident
c = Client(address, authkey=process.current_process().authkey)
c.send((key, os.getpid()))
return c
def stop(self, timeout=None):
'''Stop the background thread and clear registered resources.'''
from .connection import Client
with self._lock:
if self._address is not None:
c = Client(self._address,
authkey=process.current_process().authkey)
c.send(None)
c.close()
self._thread.join(timeout)
if self._thread.is_alive():
util.sub_warning('_ResourceSharer thread did '
'not stop when asked')
self._listener.close()
self._thread = None
self._address = None
self._listener = None
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
def _afterfork(self):
for key, (send, close) in self._cache.items():
close()
self._cache.clear()
self._lock._at_fork_reinit()
if self._listener is not None:
self._listener.close()
self._listener = None
self._address = None
self._thread = None
def _start(self):
from .connection import Listener
assert self._listener is None, "Already have Listener"
util.debug('starting listener and thread for sending handles')
self._listener = Listener(authkey=process.current_process().authkey, backlog=128)
self._address = self._listener.address
t = threading.Thread(target=self._serve)
t.daemon = True
t.start()
self._thread = t
def _serve(self):
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
while 1:
try:
with self._listener.accept() as conn:
msg = conn.recv()
if msg is None:
break
key, destination_pid = msg
send, close = self._cache.pop(key)
try:
send(conn, destination_pid)
finally:
close()
except:
if not util.is_exiting():
sys.excepthook(*sys.exc_info())
_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@multiprocessing@[email protected]_END.py
|
{
"filename": "config_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/third_party/xla/xla/python/config_test.py",
"type": "Python"
}
|
# Copyright 2024 The OpenXLA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import threading
from absl.testing import absltest
from xla.python import xla_client
config = xla_client._xla.config
class ConfigTest(absltest.TestCase):
def testBasic(self):
c = config.Config(1)
self.assertEqual(c.value, 1)
self.assertEqual(c.get_global(), 1)
self.assertEqual(c.get_local(), config.unset)
c.set_global(2)
self.assertEqual(c.value, 2)
self.assertEqual(c.get_global(), 2)
self.assertEqual(c.get_local(), config.unset)
c.set_local(3)
self.assertEqual(c.value, 3)
self.assertEqual(c.get_global(), 2)
self.assertEqual(c.get_local(), 3)
c.set_global(4)
self.assertEqual(c.value, 3)
self.assertEqual(c.get_global(), 4)
self.assertEqual(c.get_local(), 3)
c.set_local(config.unset)
self.assertEqual(c.value, 4)
self.assertEqual(c.get_global(), 4)
self.assertEqual(c.get_local(), config.unset)
def testThreading(self):
c = config.Config(1)
def Body():
for i in range(100):
c.set_local(i)
self.assertEqual(c.get_local(), i)
self.assertEqual(c.get_global(), 1)
self.assertEqual(c.value, i)
threads = [threading.Thread(target=Body) for _ in range(4)]
for t in threads:
t.start()
for t in threads:
t.join()
if __name__ == "__main__":
absltest.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@third_party@xla@xla@python@[email protected]_END.py
|
{
"filename": "customtags.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/webserver/lasair/templatetags/customtags.py",
"type": "Python"
}
|
from django import template
register = template.Library()
@register.filter
def keyvalue(dict, key):
try:
return dict[key]
except KeyError:
return ''
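# Template usage example (illustrative): {{ mydict|keyvalue:"objectId" }}
# renders mydict["objectId"], or an empty string when the key is missing.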
@register.filter
def replace(value, arg):
"""
Replacing filter
Use `{{ "aaa"|replace:"a|b" }}`
"""
if len(arg.split('|')) != 2:
return value
what, to = arg.split('|')
return value.replace(what, to)
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@webserver@lasair@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/scene/yaxis/title/font/__init__.py",
"type": "Python"
}
|
import sys
if sys.version_info < (3, 7):
from ._size import SizeValidator
from ._family import FamilyValidator
from ._color import ColorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
["._size.SizeValidator", "._family.FamilyValidator", "._color.ColorValidator"],
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@scene@yaxis@title@font@[email protected]_END.py
|
{
"filename": "antenna_state.py",
"repo_name": "RTIP/artip",
"repo_path": "artip_extracted/artip-master/src/main/python/models/antenna_state.py",
"type": "Python"
}
|
from antenna_status import AntennaStatus
class AntennaState:
def __init__(self, antenna_id, polarization, scan_id):
self.antenna = antenna_id
self.scan_id = scan_id
self.polarization = polarization
self.__closure_phase_status = None
self.__R_phase_status = None
def update_closure_phase_status(self, status):
if self._can_update_status(self.__closure_phase_status, status):
self.__closure_phase_status = status
return True
return False
def is_bad(self):
return self.__R_phase_status == AntennaStatus.BAD and \
self.__closure_phase_status == AntennaStatus.BAD
def get_closure_phase_status(self):
return self.__closure_phase_status
def update_R_phase_status(self, status):
if self._can_update_status(self.__R_phase_status, status):
self.__R_phase_status = status
return True
return False
def _can_update_status(self, current_status, new_status):
return (new_status in AntennaStatus.ALL) and (current_status in [None, AntennaStatus.DOUBTFUL])
def get_R_phase_status(self):
return self.__R_phase_status
def __repr__(self):
return str(self.antenna) + "" + str(self.scan_id) + str(self.polarization)
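# Minimal usage sketch (illustrative, not part of the pipeline): a status can
# only be overwritten while it is None or DOUBTFUL, and an antenna is flagged
# bad only once both the R-phase and closure-phase checks report BAD.
if __name__ == "__main__":
    state = AntennaState(antenna_id=3, polarization="RR", scan_id=12)
    state.update_R_phase_status(AntennaStatus.BAD)
    state.update_closure_phase_status(AntennaStatus.BAD)
    print(state.is_bad())  # True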
|
RTIPREPO_NAMEartipPATH_START.@artip_extracted@artip-master@src@main@python@models@[email protected]_END.py
|
{
"filename": "Albedo_module.py",
"repo_name": "joshuakt/Oxygen-False-Positives",
"repo_path": "Oxygen-False-Positives_extracted/Oxygen-False-Positives-main/Albedo_module.py",
"type": "Python"
}
|
import numpy as np
def AB_fun(Tsurf,pH2O,volatile_mass,AL,AH):
if pH2O/1e5<1:
TA = 1000.0
else:
TA = 1000 + 200*np.log10(pH2O/1e5)**2
AB_atmo = 0.5*(AL-AH) * np.tanh((TA-Tsurf)/400.0) + 0.5*(AH+AL)
return AB_atmo
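# Minimal usage sketch (illustrative; the numbers are placeholder assumptions,
# not results from any model run in this repository).
if __name__ == "__main__":
    Tsurf = 1800.0       # surface temperature [K] (assumed)
    pH2O = 260e5         # H2O partial pressure [Pa] (assumed)
    volatile_mass = 0.0  # accepted by AB_fun but not used in the calculation
    AL, AH = 0.3, 0.2    # low- and high-temperature albedo limits (assumed)
    print(AB_fun(Tsurf, pH2O, volatile_mass, AL, AH))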
|
joshuaktREPO_NAMEOxygen-False-PositivesPATH_START.@Oxygen-False-Positives_extracted@Oxygen-False-Positives-main@[email protected]_END.py
|
{
"filename": "makeplot.py",
"repo_name": "VirtualPlanetaryLaboratory/vplanet",
"repo_path": "vplanet_extracted/vplanet-main/examples/CosmicShoreline/makeplot.py",
"type": "Python"
}
|
import os
import pathlib
import subprocess
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import vplot as vpl
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
import vplanet
path = pathlib.Path(__file__).parents[0].absolute()
sys.path.insert(1, str(path.parents[0]))
output = vplanet.run(units = False)
# Plot!
fig = plt.figure(figsize=(8.5, 6))
fxuv_earth = output.log.final.Earth.CumulativeXUVFlux
fxuv = []
fxuv.append(output.log.final.Mercury.CumulativeXUVFlux/fxuv_earth)
fxuv.append(output.log.final.Venus.CumulativeXUVFlux/fxuv_earth)
fxuv.append(output.log.final.Earth.CumulativeXUVFlux/fxuv_earth)
fxuv.append(output.log.final.Mars.CumulativeXUVFlux/fxuv_earth)
fxuv.append(output.log.final.Jupiter.CumulativeXUVFlux/fxuv_earth)
fxuv.append(output.log.final.Saturn.CumulativeXUVFlux/fxuv_earth)
fxuv.append(output.log.final.George.CumulativeXUVFlux/fxuv_earth)
fxuv.append(output.log.final.Neptune.CumulativeXUVFlux/fxuv_earth)
escvel = []
escvel.append(output.log.final.Mercury.EscapeVelocity/1e3)
escvel.append(output.log.final.Venus.EscapeVelocity/1e3)
escvel.append(output.log.final.Earth.EscapeVelocity/1e3)
escvel.append(output.log.final.Mars.EscapeVelocity/1e3)
escvel.append(output.log.final.Jupiter.EscapeVelocity/1e3)
escvel.append(output.log.final.Saturn.EscapeVelocity/1e3)
escvel.append(output.log.final.George.EscapeVelocity/1e3)
escvel.append(output.log.final.Neptune.EscapeVelocity/1e3)
shorelinex = []
shorelinex.append(0.2)
shorelinex.append(60)
shoreliney = []
shoreliney.append(1e-6)
shoreliney.append(1e4)
plt.xlabel('Escape Velocity [km/s]')
plt.ylabel('Normalized Cumulative XUV Flux')
plt.plot(shorelinex,shoreliney,color=vpl.colors.pale_blue)
plt.plot(escvel,fxuv,'o',color='k')
plt.xscale('log')
plt.yscale('log')
plt.ylim(1e-6,1e4)
plt.xlim(0.1,200)
plt.annotate('Mercury',(2.7,9))
plt.annotate('Venus',(10.2,2.6))
plt.annotate('Earth',(11.5,0.5))
plt.annotate('Mars',(5,0.2))
plt.annotate('Jupiter',(60,0.05))
plt.annotate('Saturn',(37,0.014))
plt.annotate('Uranus',(22,0.0034))
plt.annotate('Neptune',(24,0.0006))
# Save figure
fig.savefig(path / "CosmicShoreline.png", bbox_inches="tight", dpi=200)
|
VirtualPlanetaryLaboratoryREPO_NAMEvplanetPATH_START.@vplanet_extracted@vplanet-main@examples@[email protected]@.PATH_END.py
|
{
"filename": "4. Parallax errors.ipynb",
"repo_name": "gaia-unlimited/gaiaunlimited",
"repo_path": "gaiaunlimited_extracted/gaiaunlimited-main/docs/notebooks/Aurora/4. Parallax errors.ipynb",
"type": "Jupyter Notebook"
}
|
# 4. Parallax errors
```python
import os
import pathlib
import numpy as np
from scipy.interpolate import splrep, splev, BSpline
import pandas as pd
from astroquery.gaia import Gaia
import matplotlib as mpl
import matplotlib.pyplot as plt
import config
```
hpx_order=7 --> (hpx_nside=128, hpx_npix=196608)
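This bookkeeping follows the standard HEALPix relations; a quick check (a minimal sketch, using only the order printed above):
```python
# nside doubles with each order; npix = 12 * nside**2
hpx_order = 7
hpx_nside = 2 ** hpx_order        # 128
hpx_npix = 12 * hpx_nside ** 2    # 196608
print(f"hpx_order={hpx_order} --> (hpx_nside={hpx_nside}, hpx_npix={hpx_npix})")
```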
```python
cache_path = pathlib.Path(config.cache_path)
cache_path.mkdir(exist_ok=True)
fig_path = pathlib.Path(config.fig_path)
fig_path.mkdir(exist_ok=True)
```
## Query Gaia EDR3 `gaia_source` table
ADQL Cookbook: https://www.gaia.ac.uk/data/gaia-data-release-1/adql-cookbook
ADQL manual: https://www.ivoa.net/documents/ADQL/20180112/PR-ADQL-2.1-20180112.html
```python
def query_plxerr_stats(num_stars):
# The query is long; we may need to log in first
Gaia.login()
query = \
f"SELECT \
source_id/{config.hpx_base} AS hpx, g_bin, \
AVG(parallax_error) AS parallax_error, \
AVG(visibility_periods_used) AS visibility_periods_used \
FROM ( SELECT source_id, parallax_error, visibility_periods_used, \
((phot_g_mean_mag-{config.G_min})/{config.dG}) AS g_bin \
FROM gaiadr3.gaia_source WHERE random_index < {num_stars}) AS subquery \
GROUP BY hpx, g_bin \
"
job = Gaia.launch_job_async(query)
return job.get_results().to_pandas()
```
```python
file_name = cache_path / 'dr3_plxerr_stats.hdf5'
if os.path.exists(file_name):
pe = pd.read_hdf(file_name, key='df')
else:
pe = query_plxerr_stats(num_stars=10000000)
# Save
pe.to_hdf(file_name, key='df', format='table', data_columns=True, mode='w', index=False)
pe
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>hpx</th>
<th>g_bin</th>
<th>parallax_error</th>
<th>visibility_periods_used</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>115097</td>
<td>90.431837</td>
<td>NaN</td>
<td>5.0</td>
</tr>
<tr>
<th>1</th>
<td>51930</td>
<td>93.920902</td>
<td>1.402338</td>
<td>11.0</td>
</tr>
<tr>
<th>2</th>
<td>51971</td>
<td>96.581129</td>
<td>NaN</td>
<td>7.0</td>
</tr>
<tr>
<th>3</th>
<td>62925</td>
<td>95.783810</td>
<td>2.076251</td>
<td>10.0</td>
</tr>
<tr>
<th>4</th>
<td>129137</td>
<td>72.139887</td>
<td>0.037037</td>
<td>23.0</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>9983129</th>
<td>193153</td>
<td>94.197821</td>
<td>0.916554</td>
<td>15.0</td>
</tr>
<tr>
<th>9983130</th>
<td>9024</td>
<td>96.154636</td>
<td>NaN</td>
<td>12.0</td>
</tr>
<tr>
<th>9983131</th>
<td>114811</td>
<td>91.732077</td>
<td>NaN</td>
<td>7.0</td>
</tr>
<tr>
<th>9983132</th>
<td>12966</td>
<td>92.579426</td>
<td>0.658761</td>
<td>15.0</td>
</tr>
<tr>
<th>9983133</th>
<td>122047</td>
<td>91.663813</td>
<td>0.602893</td>
<td>15.0</td>
</tr>
</tbody>
</table>
<p>9983134 rows × 4 columns</p>
</div>
```python
pe.describe()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>hpx</th>
<th>g_bin</th>
<th>parallax_error</th>
<th>visibility_periods_used</th>
</tr>
</thead>
<tbody>
<tr>
<th>count</th>
<td>9.983134e+06</td>
<td>9.968616e+06</td>
<td>8.096563e+06</td>
<td>9.983134e+06</td>
</tr>
<tr>
<th>mean</th>
<td>1.236445e+05</td>
<td>8.805558e+01</td>
<td>4.633512e-01</td>
<td>1.627032e+01</td>
</tr>
<tr>
<th>std</th>
<td>4.663376e+04</td>
<td>7.747745e+00</td>
<td>4.197207e-01</td>
<td>6.497347e+00</td>
</tr>
<tr>
<th>min</th>
<td>0.000000e+00</td>
<td>4.767463e+00</td>
<td>7.770088e-03</td>
<td>1.000000e+00</td>
</tr>
<tr>
<th>25%</th>
<td>1.147890e+05</td>
<td>8.457711e+01</td>
<td>1.553369e-01</td>
<td>1.200000e+01</td>
</tr>
<tr>
<th>50%</th>
<td>1.216710e+05</td>
<td>9.011402e+01</td>
<td>3.430443e-01</td>
<td>1.600000e+01</td>
</tr>
<tr>
<th>75%</th>
<td>1.665990e+05</td>
<td>9.375248e+01</td>
<td>6.418277e-01</td>
<td>2.100000e+01</td>
</tr>
<tr>
<th>max</th>
<td>1.966070e+05</td>
<td>1.043085e+02</td>
<td>5.097040e+00</td>
<td>3.300000e+01</td>
</tr>
</tbody>
</table>
</div>
```python
# Clean
mask = (0 <= pe['g_bin']) & (pe['g_bin'] < len(config.G_bins))
pe = pe[mask].copy()
pe.dropna(inplace=True)
pe['g_bin'] = pe['g_bin'].astype(int)
len(pe)
```
8088435
# Some plots
```python
# Read RGB stars catalogue
usecols = ['parallax_error', 'phot_g_mean_mag']
rgb = pd.read_hdf(cache_path / 'rgb.hdf5', key='rgb', usecols=usecols)
```
```python
plt.rc('font', size=6.0)
inch = 2.54 ## cm
width, height = 17/inch, 6/inch
plt.figure(figsize=(width, height), layout='constrained', dpi=config.fig_dpi)
mpl.style.use('tableau-colorblind10')
plt.subplot(1, 2, 1)
plt.hist2d(config.G_bins[pe['g_bin']], pe['parallax_error'], bins=(50,3000), cmap='Greys', norm='log', rasterized=True)
plt.hist2d(rgb['phot_g_mean_mag'], rgb['parallax_error'], bins=(50,3000), cmap='inferno', norm='log', rasterized=True)
plt.xlabel(r"$G$ [mag]")
plt.xlim(config.G_bins[0], config.G_bins[-1])
plt.ylabel(r"$\sigma_\varpi$ [mas]")
plt.yscale('log')
plt.ylim(3.33e-3, 4.0)
plt.subplot(1, 2, 2)
nvis_edges = [2, 10, 15, 22, 33]
nvis_edges = [2, 14, 22, 33]
for n in range(len(nvis_edges)-1)[::-1]:
nvis_lo, nvis_hi = nvis_edges[n], nvis_edges[n+1]
mask = (nvis_lo <= pe['visibility_periods_used']) & (pe['visibility_periods_used'] < nvis_hi)
pe_ = pe[mask]
plt.plot(config.G_bins[pe_['g_bin']], pe_['parallax_error'], '.', alpha=0.5, label=f"[{nvis_lo},{nvis_hi})")
plt.legend(loc='lower right')
plt.xlabel(r"$G$ [mag]")
plt.xlim(config.G_bins[0], config.G_bins[-1])
plt.ylabel(r"$\sigma_\varpi$ [mas]")
plt.yscale('log')
plt.ylim(3.33e-3, 4.0)
plt.savefig(fig_path / 'data-plxerr.png')
plt.show()
plt.close()
```
```python
del(rgb)
```
## Fit parallax errors by G given nvis
```python
nvis_bins = [2, 14, 15, 16, 17, 19, 20, 24, 26, 27.5, 33]
pe['nvis_bin'] = pd.cut(pe['visibility_periods_used'], bins=nvis_bins)
gr = pe.groupby(['nvis_bin', 'g_bin'], observed=True).agg(plxerr=('parallax_error', 'median'))
gr.reset_index(inplace=True)
gr['nvis'] = gr['nvis_bin'].map(lambda x: 0.5*(x.left + x.right))
gr['g'] = 0.5*(config.G_bins[gr['g_bin']] + config.G_bins[gr['g_bin']+1])
```
```python
def fit(G, plxerr):
x = G
y = np.log10(plxerr)
s = len(x)
tck = splrep(x, y, s=s)
return tck
plxerr_nvis_g = {}
G_min = 8.0
G_max = config.G_bins.max()
G_bins_ = np.linspace(G_min, G_max, 17)
for bin, gr_ in gr.groupby('nvis_bin', observed=True):
tck = fit(gr_['g'], gr_['plxerr'])
plxerr_nvis_g[str(bin)] = {}
plxerr_nvis_g[str(bin)]['G'] = G_bins_
plxerr_nvis_g[str(bin)]['logsigma_plx'] = BSpline(*tck)(G_bins_)
```
```python
plt.rc('font', size=6.0)
inch = 2.54 ## cm
width, height = 30/inch, 11/inch
plt.figure(figsize=(width, height), layout='constrained')
plt.suptitle(r"$G$ [mag] vs $\sigma_\varpi$ [mas]")
i = 0
for bin, gr_ in gr.groupby('nvis_bin', observed=True):
plt.subplot(2, 5, i+1)
plt.title(f"nvis: {bin}")
G = config.G_bins[gr_['g_bin']]
plt.plot(G, gr_['plxerr'], ls='', marker='.', alpha=0.5)
plt.plot(G, 10**np.interp(G, plxerr_nvis_g[str(bin)]['G'], plxerr_nvis_g[str(bin)]['logsigma_plx']), 'k')
plt.axhline(3e-2, ls=':', c='r', alpha=0.5)
plt.axhline(1e-2, ls=':', c='r', alpha=0.5)
plt.axvline(8.0, ls=':', c='k', alpha=0.5)
plt.axvline(16.0, ls=':', c='k', alpha=0.5)
plt.xlabel(r"$G$ [mag]")
plt.yscale('log')
plt.ylim(3.33e-3, 4.0)
i += 1
plt.savefig(fig_path / 'data-plxerr-fit.pdf')
plt.show()
plt.close()
```
# Save the parallax errors model
```python
d = {}
d['nvis_bins'] = nvis_bins
d['plxerr_nvis_g']= plxerr_nvis_g
np.savez_compressed(cache_path / 'logplxerr_nvis_g.npz', **d)
```
```python
```
|
gaia-unlimitedREPO_NAMEgaiaunlimitedPATH_START.@gaiaunlimited_extracted@gaiaunlimited-main@docs@notebooks@Aurora@4. Parallax [email protected]_END.py
|
{
"filename": "train.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/examples/wmt/train.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Machine Translation example.
This script trains a Transformer on a WMT dataset.
"""
# pytype: disable=wrong-arg-count
# pytype: disable=attribute-error
import collections
import functools
import os
from absl import logging
from clu import metric_writers
from clu import periodic_actions
from flax import jax_utils
from flax import linen as nn
from flax.training import checkpoints
from flax.training import common_utils
from flax.training import dynamic_scale as dynamic_scale_lib
from flax.training import train_state
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import optax
import tensorflow as tf
import bleu
import decode
import input_pipeline
import models
class TrainState(train_state.TrainState):
dynamic_scale: dynamic_scale_lib.DynamicScale
def rsqrt_schedule(
init_value: float,
shift: int = 0,
):
"""Applies a reverse square-root schedule.
The reverse square root schedule is simply `lr = init_value / sqrt(step)`.
Args:
init_value: Base learning rate (before applying the rsqrt schedule).
shift: How many steps the rsqrt should be shifted. Shifting the rsqrt
schedule makes it less steep in the beginning (close to 0).
Returns:
A schedule `count -> learning_rate`.
"""
def schedule(count):
return init_value * (count + shift) ** -0.5 * shift**0.5
return schedule
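# Illustrative numbers for the schedule above (comment-only sketch; the values
# are hypothetical): with init_value=1e-3 and shift=1000 this reduces to
# lr(step) = 1e-3 * sqrt(1000 / (step + 1000)), i.e. 1e-3 at step 0 and
# ~7.1e-4 at step 1000.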
def create_learning_rate_schedule(learning_rate: float, warmup_steps: int):
"""Creates a rsqrt schedule with linear warmup."""
return optax.join_schedules(
[
optax.linear_schedule(
init_value=0,
end_value=learning_rate,
transition_steps=warmup_steps,
),
rsqrt_schedule(init_value=learning_rate, shift=warmup_steps),
],
boundaries=[warmup_steps],
)
def compute_weighted_cross_entropy(
logits, targets, weights=None, label_smoothing=0.0
):
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length].
label_smoothing: label smoothing constant, used to determine the on and off
values.
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError(
"Incorrect shapes. Got shape %s logits and %s targets"
% (str(logits.shape), str(targets.shape))
)
vocab_size = logits.shape[-1]
confidence = 1.0 - label_smoothing
low_confidence = (1.0 - confidence) / (vocab_size - 1)
normalizing_constant = -(
confidence * jnp.log(confidence)
+ (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20)
)
soft_targets = common_utils.onehot(
targets, vocab_size, on_value=confidence, off_value=low_confidence
)
loss = -jnp.sum(soft_targets * nn.log_softmax(logits), axis=-1)
loss = loss - normalizing_constant
normalizing_factor = np.prod(targets.shape)
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
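# Comment-only sketch of the smoothing constants above (hypothetical numbers):
# with label_smoothing=0.1 and a 32000-token vocabulary, confidence = 0.9 and
# low_confidence = 0.1 / 31999 ~= 3.1e-6, so each soft target places 0.9 on the
# true token and spreads the remaining 0.1 over the other entries.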
def compute_weighted_accuracy(logits, targets, weights=None):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length]
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError(
"Incorrect shapes. Got shape %s logits and %s targets"
% (str(logits.shape), str(targets.shape))
)
loss = jnp.equal(jnp.argmax(logits, axis=-1), targets)
normalizing_factor = np.prod(logits.shape[:-1])
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
def compute_metrics(logits, labels, weights, label_smoothing=0.0):
"""Compute summary metrics."""
loss, weight_sum = compute_weighted_cross_entropy(
logits, labels, weights, label_smoothing
)
acc, _ = compute_weighted_accuracy(logits, labels, weights)
metrics = {
"loss": loss,
"accuracy": acc,
"denominator": weight_sum,
}
metrics = jax.lax.psum(metrics, axis_name="batch")
return metrics
# Primary training / eval / decode step functions.
# -----------------------------------------------------------------------------
def train_step(
state,
batch,
config,
learning_rate_fn,
label_smoothing=0.0,
dropout_rng=None,
):
"""Perform a single training step."""
# X_position and X_segmentation are needed only when using "packed examples"
# where multiple sequences are packed into the same example with this
# metadata.
# if such features are not present they are ignored and the example is treated
# like a normal, unpacked sequence example.
train_keys = [
"inputs",
"targets",
"inputs_position",
"targets_position",
"inputs_segmentation",
"targets_segmentation",
]
(
inputs,
targets,
inputs_positions,
targets_positions,
inputs_segmentation,
targets_segmentation,
) = (batch.get(k, None) for k in train_keys)
weights = jnp.where(targets > 0, 1, 0).astype(jnp.float32)
dropout_rng = jax.random.fold_in(dropout_rng, state.step)
def loss_fn(params):
"""loss function used for training."""
logits = models.Transformer(config).apply(
{"params": params},
inputs,
targets,
inputs_positions=inputs_positions,
targets_positions=targets_positions,
inputs_segmentation=inputs_segmentation,
targets_segmentation=targets_segmentation,
rngs={"dropout": dropout_rng},
)
loss, weight_sum = compute_weighted_cross_entropy(
logits, targets, weights, label_smoothing
)
mean_loss = loss / weight_sum
return mean_loss, logits
step = state.step
if state.dynamic_scale:
# dynamic scale takes care of averaging gradients across replicas
grad_fn = state.dynamic_scale.value_and_grad(
loss_fn, has_aux=True, axis_name="batch"
)
dynamic_scale, is_fin, (_, logits), grads = grad_fn(state.params)
state = state.replace(dynamic_scale=dynamic_scale)
else:
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grads = grad_fn(state.params)
grads = jax.lax.pmean(grads, axis_name="batch")
new_state = state.apply_gradients(grads=grads)
metrics = compute_metrics(logits, targets, weights)
metrics["learning_rate"] = learning_rate_fn(step)
if state.dynamic_scale:
# if is_fin == False the gradients contain Inf/NaNs and optimizer state and
# params should be restored (= skip this step).
select_fn = functools.partial(jnp.where, is_fin)
new_state = new_state.replace(
opt_state=jax.tree_util.tree_map(
select_fn, new_state.opt_state, state.opt_state
),
params=jax.tree_util.tree_map(
select_fn, new_state.params, state.params
),
)
metrics["loss_scale"] = dynamic_scale.scale * metrics["denominator"]
return new_state, metrics
def eval_step(params, batch, config, label_smoothing=0.0):
"""Calculate evaluation metrics on a batch."""
inputs, targets = batch["inputs"], batch["targets"]
weights = jnp.where(targets > 0, 1.0, 0.0)
logits = models.Transformer(config).apply({"params": params}, inputs, targets)
return compute_metrics(logits, targets, weights, label_smoothing)
def initialize_cache(inputs, max_decode_len, config):
"""Initialize a cache for a given input shape and max decode length."""
target_shape = (inputs.shape[0], max_decode_len) + inputs.shape[2:]
initial_variables = models.Transformer(config).init(
jax.random.key(0),
jnp.ones(inputs.shape, config.dtype),
jnp.ones(target_shape, config.dtype),
)
return initial_variables["cache"]
def predict_step(
inputs, params, cache, eos_id, max_decode_len, config, beam_size=4
):
"""Predict translation with fast decoding beam search on a batch."""
# Prepare transformer fast-decoder call for beam search: for beam search, we
# need to set up our decoder model to handle a batch size equal to
# batch_size * beam_size, where each batch item's data is expanded in-place
# rather than tiled.
# i.e. if we denote each batch element subtensor as el[n]:
# [el0, el1, el2] --> beamsize=2 --> [el0,el0,el1,el1,el2,el2]
encoded_inputs = decode.flat_batch_beam_expand(
models.Transformer(config).apply(
{"params": params}, inputs, method=models.Transformer.encode
),
beam_size,
)
raw_inputs = decode.flat_batch_beam_expand(inputs, beam_size)
def tokens_ids_to_logits(flat_ids, flat_cache):
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits, new_vars = models.Transformer(config).apply(
{"params": params, "cache": flat_cache},
encoded_inputs,
raw_inputs, # only needed for input padding mask
flat_ids,
mutable=["cache"],
method=models.Transformer.decode,
)
new_flat_cache = new_vars["cache"]
# Remove singleton sequence-length dimension:
# [batch * beam, 1, vocab] --> [batch * beam, vocab]
flat_logits = flat_logits.squeeze(axis=1)
return flat_logits, new_flat_cache
# Using the above-defined single-step decoder function, run a
# beam search over possible sequences given input encoding.
beam_seqs, _ = decode.beam_search(
inputs,
cache,
tokens_ids_to_logits,
beam_size=beam_size,
alpha=0.6,
eos_id=eos_id,
max_decode_len=max_decode_len,
)
# Beam search returns [n_batch, n_beam, n_length + 1] with beam dimension
# sorted in increasing order of log-probability.
# Return the highest scoring beam sequence, drop first dummy 0 token.
return beam_seqs[:, -1, 1:]
# Utils for prediction and BLEU calculation
# -----------------------------------------------------------------------------
def pad_examples(x, desired_batch_size):
"""Expand batch to desired size by repeating last slice."""
batch_pad = desired_batch_size - x.shape[0]
return np.concatenate([x, np.tile(x[-1], (batch_pad, 1))], axis=0)
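# Comment-only sketch: for a 2D batch x of shape (3, 5) and desired_batch_size=4,
# the last row is tiled once and concatenated, giving an array of shape (4, 5).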
def per_host_sum_pmap(in_tree):
"""Execute psum on in_tree's leaves over one device per host."""
host2devices = collections.defaultdict(list)
for d in jax.devices():
host2devices[d.process_index].append(d)
devices = [host2devices[k][0] for k in host2devices]
host_psum = jax.pmap(lambda x: jax.lax.psum(x, "i"), "i", devices=devices)
def pre_pmap(xs):
return jax.tree_util.tree_map(
lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs
)
def post_pmap(xs):
return jax.tree_util.tree_map(lambda x: x[0], xs)
return post_pmap(host_psum(pre_pmap(in_tree)))
def tohost(x):
"""Collect batches from all devices to host and flatten batch dimensions."""
n_device, n_batch, *remaining_dims = x.shape
return np.array(x).reshape((n_device * n_batch,) + tuple(remaining_dims))
def evaluate(
*, p_eval_step, params, eval_ds: tf.data.Dataset, num_eval_steps: int
):
"""Evaluate the params and return a dictionary with the metrics."""
logging.info("Gathering evaluation metrics.")
eval_metrics = []
eval_iter = iter(eval_ds) # pytype: disable=wrong-arg-types
for _, eval_batch in zip(range(num_eval_steps), eval_iter):
eval_batch = jax.tree_util.tree_map(lambda x: x._numpy(), eval_batch) # pylint: disable=protected-access
eval_batch = common_utils.shard(eval_batch)
metrics = p_eval_step(params, eval_batch)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_util.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop("denominator")
eval_summary = jax.tree_util.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums,
)
return eval_summary
def translate_and_calculate_bleu(
*,
p_pred_step,
p_init_cache,
params,
predict_ds: tf.data.Dataset,
decode_tokens,
max_predict_length: int,
):
"""Translates the `predict_ds` and calculates the BLEU score."""
n_devices = jax.local_device_count()
logging.info("Translating evaluation dataset.")
sources, references, predictions = [], [], []
for pred_batch in predict_ds:
pred_batch = jax.tree_util.tree_map(lambda x: x._numpy(), pred_batch) # pylint: disable=protected-access
# Handle final odd-sized batch by padding instead of dropping it.
cur_pred_batch_size = pred_batch["inputs"].shape[0]
if cur_pred_batch_size % n_devices:
padded_size = int(np.ceil(cur_pred_batch_size / n_devices) * n_devices)
pred_batch = jax.tree_util.tree_map(
lambda x: pad_examples(x, padded_size), # pylint: disable=cell-var-from-loop
pred_batch,
)
pred_batch = common_utils.shard(pred_batch)
cache = p_init_cache(pred_batch["inputs"])
predicted = p_pred_step(
pred_batch["inputs"], params, cache, decode.EOS_ID, max_predict_length
)
predicted = tohost(predicted)
inputs = tohost(pred_batch["inputs"])
targets = tohost(pred_batch["targets"])
# Iterate through non-padding examples of batch.
for i, s in enumerate(predicted[:cur_pred_batch_size]):
sources.append(decode_tokens(inputs[i]))
references.append(decode_tokens(targets[i]))
predictions.append(decode_tokens(s))
logging.info(
"Translation: %d predictions %d references %d sources.",
len(predictions),
len(references),
len(sources),
)
# Calculate BLEU score for translated eval corpus against reference.
bleu_matches = bleu.bleu_partial(references, predictions)
all_bleu_matches = per_host_sum_pmap(bleu_matches)
bleu_score = bleu.complete_bleu(*all_bleu_matches)
# Save translation samples for tensorboard.
exemplars = ""
for n in np.random.choice(np.arange(len(predictions)), 8):
exemplars += f"{sources[n]}\n\n{references[n]}\n\n{predictions[n]}\n\n"
return exemplars, bleu_score
def preferred_dtype(config):
platform = jax.local_devices()[0].platform
if config.use_mixed_precision:
if platform == "tpu":
return jnp.bfloat16
elif platform == "gpu":
return jnp.float16
return jnp.float32
def train_and_evaluate(config: ml_collections.ConfigDict, workdir: str):
"""Runs a training and evaluation loop.
Args:
config: Configuration to use.
workdir: Working directory for checkpoints and TF summaries. If this
contains checkpoint training will be resumed from the latest checkpoint.
"""
tf.io.gfile.makedirs(workdir)
vocab_path = config.vocab_path
if vocab_path is None:
vocab_path = os.path.join(workdir, "sentencepiece_model")
config.vocab_path = vocab_path
tf.io.gfile.makedirs(os.path.split(vocab_path)[0])
# Load Dataset
# ---------------------------------------------------------------------------
logging.info("Initializing dataset.")
train_ds, eval_ds, predict_ds, encoder = input_pipeline.get_wmt_datasets(
n_devices=jax.local_device_count(),
config=config,
reverse_translation=config.reverse_translation,
vocab_path=vocab_path,
)
train_iter = iter(train_ds)
vocab_size = int(encoder.vocab_size())
eos_id = decode.EOS_ID # Default Sentencepiece EOS token.
def decode_tokens(toks):
valid_toks = toks[: np.argmax(toks == eos_id) + 1].astype(np.int32)
return encoder.detokenize(valid_toks).numpy().decode("utf-8")
if config.num_predict_steps > 0:
predict_ds = predict_ds.take(config.num_predict_steps)
logging.info("Initializing model, optimizer, and step functions.")
dtype = preferred_dtype(config)
# Build Model and Optimizer
# ---------------------------------------------------------------------------
train_config = models.TransformerConfig(
vocab_size=vocab_size,
output_vocab_size=vocab_size,
share_embeddings=config.share_embeddings,
logits_via_embedding=config.logits_via_embedding,
dtype=dtype,
emb_dim=config.emb_dim,
num_heads=config.num_heads,
num_layers=config.num_layers,
qkv_dim=config.qkv_dim,
mlp_dim=config.mlp_dim,
max_len=max(config.max_target_length, config.max_eval_target_length),
dropout_rate=config.dropout_rate,
attention_dropout_rate=config.attention_dropout_rate,
deterministic=False,
decode=False,
kernel_init=nn.initializers.xavier_uniform(),
bias_init=nn.initializers.normal(stddev=1e-6),
)
eval_config = train_config.replace(deterministic=True)
predict_config = train_config.replace(deterministic=True, decode=True)
start_step = 0
rng = jax.random.key(config.seed)
rng, init_rng = jax.random.split(rng)
input_shape = (config.per_device_batch_size, config.max_target_length)
target_shape = (config.per_device_batch_size, config.max_target_length)
m = models.Transformer(eval_config)
initial_variables = jax.jit(m.init)(
init_rng,
jnp.ones(input_shape, jnp.float32),
jnp.ones(target_shape, jnp.float32),
)
# Create train state with Adam optimizer and weight decay.
learning_rate_fn = create_learning_rate_schedule(
learning_rate=config.learning_rate, warmup_steps=config.warmup_steps
)
dynamic_scale = None
if dtype == jnp.float16:
dynamic_scale = dynamic_scale_lib.DynamicScale()
state = TrainState.create(
apply_fn=m.apply,
params=initial_variables["params"],
tx=optax.adamw(
learning_rate=learning_rate_fn,
b1=0.9,
b2=0.98,
eps=1e-9,
weight_decay=config.weight_decay,
),
dynamic_scale=dynamic_scale,
)
# We access model params only via state.params
del initial_variables
if config.restore_checkpoints:
# Restore unreplicated optimizer + model state from last checkpoint.
state = checkpoints.restore_checkpoint(workdir, state)
# Grab last step.
start_step = int(state.step)
writer = metric_writers.create_default_writer(
workdir, just_logging=jax.process_index() > 0
)
if start_step == 0:
writer.write_hparams(dict(config))
# Replicate state.
state = jax_utils.replicate(state)
# compile multidevice versions of train/eval/predict step and cache init fn.
p_train_step = jax.pmap(
functools.partial(
train_step,
config=train_config,
learning_rate_fn=learning_rate_fn,
label_smoothing=config.label_smoothing,
),
axis_name="batch",
donate_argnums=(0,),
) # pytype: disable=wrong-arg-types
p_eval_step = jax.pmap(
functools.partial(eval_step, config=eval_config), axis_name="batch"
)
p_init_cache = jax.pmap(
functools.partial(
initialize_cache,
max_decode_len=config.max_predict_length,
config=predict_config,
),
axis_name="batch",
)
p_pred_step = jax.pmap(
functools.partial(
predict_step, config=predict_config, beam_size=config.beam_size
),
axis_name="batch",
static_broadcasted_argnums=(3, 4),
) # eos token, max_length are constant
# Main Train Loop
# ---------------------------------------------------------------------------
# We init the first set of dropout PRNG keys, but update it afterwards inside
# the main pmap'd training update for performance.
dropout_rngs = jax.random.split(rng, jax.local_device_count())
del rng
logging.info("Starting training loop.")
hooks = []
report_progress = periodic_actions.ReportProgress(
num_train_steps=config.num_train_steps, writer=writer
)
if jax.process_index() == 0:
hooks += [
report_progress,
periodic_actions.Profile(logdir=workdir, num_profile_steps=5),
]
train_metrics = []
with metric_writers.ensure_flushes(writer):
for step in range(start_step, config.num_train_steps):
is_last_step = step == config.num_train_steps - 1
# Shard data to devices and do a training step.
with jax.profiler.StepTraceAnnotation("train", step_num=step):
batch = common_utils.shard(
jax.tree_util.tree_map(np.asarray, next(train_iter))
)
state, metrics = p_train_step(state, batch, dropout_rng=dropout_rngs)
train_metrics.append(metrics)
# Quick indication that training is happening.
logging.log_first_n(logging.INFO, "Finished training step %d.", 5, step)
for h in hooks:
h(step)
# Periodic metric handling.
if step % config.eval_every_steps == 0 or is_last_step:
with report_progress.timed("training_metrics"):
logging.info("Gathering training metrics.")
train_metrics = common_utils.get_metrics(train_metrics)
lr = train_metrics.pop("learning_rate").mean()
metrics_sums = jax.tree_util.tree_map(jnp.sum, train_metrics)
denominator = metrics_sums.pop("denominator")
summary = jax.tree_util.tree_map(
lambda x: x / denominator, metrics_sums
) # pylint: disable=cell-var-from-loop
summary["learning_rate"] = lr
summary = {"train_" + k: v for k, v in summary.items()}
writer.write_scalars(step, summary)
train_metrics = []
with report_progress.timed("eval"):
eval_results = evaluate(
p_eval_step=p_eval_step,
params=state.params,
eval_ds=eval_ds,
num_eval_steps=config.num_eval_steps,
)
writer.write_scalars(
step, {"eval_" + k: v for k, v in eval_results.items()}
)
with report_progress.timed("translate_and_bleu"):
exemplars, bleu_score = translate_and_calculate_bleu(
p_pred_step=p_pred_step,
p_init_cache=p_init_cache,
params=state.params,
predict_ds=predict_ds,
decode_tokens=decode_tokens,
max_predict_length=config.max_predict_length,
)
writer.write_scalars(step, {"bleu": bleu_score})
writer.write_texts(step, {"samples": exemplars})
# Save a checkpoint on one host after every checkpoint_freq steps.
save_checkpoint = (
step % config.checkpoint_every_steps == 0 or is_last_step
)
if config.save_checkpoints and save_checkpoint:
logging.info("Saving checkpoint step %d.", step)
with report_progress.timed("checkpoint"):
checkpoints.save_checkpoint_multiprocess(
workdir, jax_utils.unreplicate(state), step
)
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@examples@[email protected]@.PATH_END.py
|
{
"filename": "test_pathloss.py",
"repo_name": "spacetelescope/jwst",
"repo_path": "jwst_extracted/jwst-main/jwst/pathloss/tests/test_pathloss.py",
"type": "Python"
}
|
"""
Unit tests for pathloss correction
"""
from stdatamodels.jwst.datamodels import MultiSlitModel, PathlossModel
from jwst.pathloss.pathloss import (calculate_pathloss_vector,
get_aperture_from_model,
get_center,
interpolate_onto_grid,
is_pointsource,
shutter_below_is_closed,
shutter_above_is_closed)
from jwst.pathloss.pathloss import do_correction
import numpy as np
def test_get_center_ifu():
"""get_center assumes IFU targets are centered @ (0.0, 0.0)"""
x_pos, y_pos = get_center("NRS_IFU", None)
assert x_pos == y_pos == 0.0
def test_get_center_attr_err():
"""if no center provided for modes that are not IFU,
center is assigned to (0.0, 0.0)"""
datmod = MultiSlitModel()
x_pos, y_pos = get_center("NRS_MSASPEC", datmod)
assert x_pos == y_pos == 0.0
def test_get_center_exp_type():
"""if exp_type is not in NRS, center is returned (0.0,0.0)"""
datmod = MultiSlitModel()
x_pos, y_pos = get_center("NRC_IMAGE", datmod)
assert x_pos == y_pos == 0.0
def test_get_center_exptype():
""" If exptype is "NRS_MSASPEC" | "NRS_FIXEDSLIT" | "NRS_BRIGHTOBJ" and
source_xpos and source_ypos exist in datamod.slits, make sure they are returned"""
datmod = MultiSlitModel()
datmod.slits.append({'source_xpos': 1, 'source_ypos': 2})
for exptype in ["NRS_MSASPEC", "NRS_FIXEDSLIT", "NRS_BRIGHTOBJ"]:
x_pos, y_pos = get_center(exptype, datmod.slits[0])
assert x_pos == 1
assert y_pos == 2
# Begin get_aperture_from_model tests
def test_get_app_from_model_null():
"""If exp_type isn't the NRS or NIS specific mode,
routine returns None"""
datmod = MultiSlitModel()
datmod.meta.exposure.type = 'NRC_IMAGE'
result = get_aperture_from_model(datmod, None)
assert result is None
def test_get_aper_from_model_fixedslit():
"""For a given exposure's aperture, make sure the correct
aperture reference data is returned for fixedslit mode"""
datmod = PathlossModel()
datmod.apertures.append({'name': 'S200A1'})
datmod.meta.exposure.type = 'NRS_FIXEDSLIT'
result = get_aperture_from_model(datmod, 'S200A1')
assert result == datmod.apertures[0]
def test_get_aper_from_model_msa():
"""For a given exposure's aperture, make sure the correct
aperture reference data is returned for MSA mode"""
datmod = PathlossModel()
datmod.apertures.append({'shutters': 3})
datmod.meta.exposure.type = 'NRS_MSASPEC'
result = get_aperture_from_model(datmod, '11x11')
assert result == datmod.apertures[0]
# Begin calculate_pathloss_vector tests.
def test_calculate_pathloss_vector_pointsource_data():
"""Calculate pathloss vector for 3D pathloss data"""
datmod = PathlossModel()
ref_data = {'pointsource_data': np.ones((10, 10, 10), dtype=np.float32),
'pointsource_wcs': {'crval2': -0.5, 'crpix2': 1.0, 'cdelt2': 0.05,
'cdelt3': 1, 'crval1': -0.5, 'crpix1': 1.0,
'crpix3': 1.0, 'crval3': 1, 'cdelt1': 0.05}}
datmod.apertures.append(ref_data)
wavelength, pathloss, is_inside_slitlet = calculate_pathloss_vector(datmod.apertures[0].pointsource_data,
datmod.apertures[0].pointsource_wcs,
0.0, 0.0)
# Wavelength array is calculated with this: crval3 +(float(i+1) - crpix3)*cdelt3
# Where i is the iteration of np.arange(wavesize) which is the 1st dimension of the pointsource
# data array.
wavelength_comparison = np.array([1 + (float(i + 1) - 1.0) * 1 for i in np.arange(10)])
assert np.allclose(wavelength, wavelength_comparison)
# pathloss vector gets assigned at beginning of calculate_pathloss_vector and in this
# case, doesn't change (np.zeros(wavesize, dtype=np.float32))
pathloss_comparison = np.zeros(10, dtype=np.float32)
assert np.all(pathloss == pathloss_comparison)
# With the current wcs values, the logic should be returning False
assert is_inside_slitlet is False
def test_calculate_pathloss_vector_uniform_data():
"""Calculate the pathloss vector for uniform data arrays."""
datmod = PathlossModel()
ref_data = {'uniform_data': np.ones((10,), dtype=np.float32),
'uniform_wcs': {'crpix1': 1.0, 'cdelt1': 1, 'crval1': 1}}
datmod.apertures.append(ref_data)
wavelength, pathloss, _ = calculate_pathloss_vector(datmod.apertures[0].uniform_data,
datmod.apertures[0].uniform_wcs,
0.0, 0.0)
# Wavelength array is calculated with this: crval1 +(float(i+1) - crpix1)*cdelt1
# Where i is the iteration of np.arange(wavesize) which is the shape of the uniform
# data array.
comparison = np.array([1 + (float(i + 1) - 1) * 1 for i in np.arange(10)])
assert np.all(wavelength == comparison)
# The same array is returned in this case
assert np.all(datmod.apertures[0].uniform_data == pathloss)
def test_calculate_pathloss_vector_interpolation():
"""Calculate the pathloss vector for when interpolation is necessary."""
datmod = PathlossModel()
ref_data = {'pointsource_data': np.ones((10, 10, 10), dtype=np.float32),
'pointsource_wcs': {'crval2': -0.5, 'crpix2': 1.0, 'cdelt2': 0.5,
'cdelt3': 1.0, 'crval1': -0.5, 'crpix1': 1.0,
'crpix3': 1.0, 'crval3': 1.0, 'cdelt1': 0.5}}
datmod.apertures.append(ref_data)
wavelength, pathloss, is_inside_slitlet = calculate_pathloss_vector(datmod.apertures[0].pointsource_data,
datmod.apertures[0].pointsource_wcs,
0.0, 0.0)
# Wavelength array is calculated with this: crval3 +(float(i+1) - crpix3)*cdelt3
# Where i is the iteration of np.arange(wavesize) which is the 1st dimension of the pointsource
# data array.
wavelength_comparison = np.array([1 + (float(i + 1) - 1.0) * 1 for i in np.arange(10)])
assert np.all(wavelength == wavelength_comparison)
# In this instance we interpolate to get the array for pathloss VS wavelength.
# With the current values inside of the pointsource_wcs starting at line 143 of pathloss.py
# dx1 = 1 - int(1) = 0.0
# dx2 = 1 - dx1 = 1.0
# dy1 = 1 - int(1) = 0.0
# dy2 = 1 - dy1 = 1.0
# This means a11 == a12 == a21 == 0 and a22 == 1
# This simplifies the pathloss vector to:
# pathloss_vector = (a22*pathloss_ref[:,i,j]) = (1*pathloss_ref[:,i,j])
# Thus pathloss == the input array to the function.
pathloss_comparison = datmod.apertures[0].pointsource_data
assert np.all(pathloss == pathloss_comparison)
# With the current wcs values, the logic should be returning True
assert is_inside_slitlet is True
def test_calculate_pathloss_vector_interpolation_nontrivial():
"""Calculate the pathloss vector for when interpolation is necessary."""
datmod = PathlossModel()
ref_data = {'pointsource_data': np.arange(10 * 10 * 10, dtype=np.float32).reshape((10, 10, 10)),
'pointsource_wcs': {'crpix1': 1.75, 'crval1': -0.5, 'cdelt1': 0.5,
'crpix2': 1.25, 'crval2': -0.5, 'cdelt2': 0.5,
'crpix3': 1.0, 'crval3': 1.0, 'cdelt3': 1.0}}
datmod.apertures.append(ref_data)
wavelength, pathloss, is_inside_slitlet = calculate_pathloss_vector(datmod.apertures[0].pointsource_data,
datmod.apertures[0].pointsource_wcs,
0.0, 0.0)
# Wavelength array is calculated with this: crval3 +(float(i+1) - crpix3)*cdelt3
# Where i is the iteration of np.arange(wavesize) which is the 1st dimension of the pointsource
# data array.
wavelength_comparison = np.array([1 + (float(i + 1) - 1.0) * 1 for i in np.arange(10)])
assert np.all(wavelength == wavelength_comparison)
# In this instance we interpolate to get the array for pathloss VS wavelength.
# Data point is at x=1.75, y=1.25, so between pixels 1 and 2, but
# closer to 2 in x, closer to 1 in y
# (remember that y comes first for numpy)
ps_data = datmod.apertures[0].pointsource_data
pathloss_comparison = np.sum([0.75 * 0.25 * ps_data[:, 1, 1],
0.75 * 0.75 * ps_data[:, 1, 2],
0.25 * 0.25 * ps_data[:, 2, 1],
0.25 * 0.75 * ps_data[:, 2, 2]], axis=0)
assert np.all(pathloss == pathloss_comparison)
# With the current wcs values, the logic should be returning True
assert is_inside_slitlet is True
def test_is_pointsource():
"""Check to see if the object is a point source"""
point_source = None
result = is_pointsource(point_source)
assert result is False
point_source = 'point'
result = is_pointsource(point_source)
assert result is True
point_source = 'not a point'
result = is_pointsource(point_source)
assert result is False
def test_do_correction_msa_slit_size_eq_0():
"""If slits have size 0, quit calibration."""
datmod = MultiSlitModel()
datmod.slits.append({'data': np.array([])})
pathlossmod = PathlossModel()
datmod.meta.exposure.type = 'NRS_MSASPEC'
result, _ = do_correction(datmod, pathlossmod)
assert result.meta.cal_step.pathloss == 'COMPLETE'
def test_do_correction_fixed_slit_exception():
"""If no matching aperture name found, exit."""
datmod = MultiSlitModel()
# Give input_model aperture name
datmod.slits.append({'data': np.array([]), 'name': 'S200A1'})
# Don't assign pathloss model aperture with similar name.
pathlossmod = PathlossModel()
datmod.meta.exposure.type = 'NRS_FIXEDSLIT'
result, _ = do_correction(datmod, pathlossmod)
assert result.meta.cal_step.pathloss == 'COMPLETE'
def test_do_correction_nis_soss_tso():
"""If observation is tso, skip correction"""
datmod = MultiSlitModel()
pathlossmod = PathlossModel()
datmod.meta.exposure.type = 'NIS_SOSS'
datmod.meta.visit.tsovisit = True
result, _ = do_correction(datmod, pathlossmod)
assert result.meta.cal_step.pathloss == 'SKIPPED'
def test_do_correction_nis_soss_pupil_position_is_none():
"""If pupil_position is None, skip correction"""
datmod = MultiSlitModel()
pathlossmod = PathlossModel()
datmod.meta.exposure.type = 'NIS_SOSS'
datmod.meta.visit.tsovisit = False
datmod.meta.instrument.pupil_position = None
result, _ = do_correction(datmod, pathlossmod)
assert result.meta.cal_step.pathloss == 'SKIPPED'
def test_do_correction_nis_soss_aperture_is_none():
"""If no matching aperture is found, skip correction."""
datmod = MultiSlitModel()
# Is FULL an option for NIRISS?
# The test doesn't care but something to remember.
datmod.slits.append({'data': np.array([]), 'name': 'FULL'})
# Don't assign pathloss model aperture with similar name
pathlossmod = PathlossModel()
datmod.meta.exposure.type = 'NIS_SOSS'
datmod.meta.visit.tsovisit = False
datmod.meta.instrument.pupil_position = 1
result, _ = do_correction(datmod, pathlossmod)
assert result.meta.cal_step.pathloss == 'SKIPPED'
def test_interpolate_onto_grid():
# Mock wavelength vector, grid and pathloss vector.
wavelength_grid = np.arange(1, 101).reshape(10, 10) * 1.1
wavelength_vector = np.arange(1, 11, dtype='float64')
pathloss_vector = np.arange(1, 11, dtype='float64')
# Call interpolate onto grid
result = interpolate_onto_grid(wavelength_grid,
wavelength_vector,
pathloss_vector)
# Before interpolation is done in interpolate_onto_grid, the vectors are padded
# so interpolation that happens outside of the grid are NaN.
extended_pathloss_vector = np.zeros(len(pathloss_vector) + 2)
extended_pathloss_vector[1:-1] = pathloss_vector
extended_pathloss_vector[0] = np.nan
extended_pathloss_vector[-1] = np.nan
extended_wavelength_vector = np.zeros(len(wavelength_vector) + 2)
extended_wavelength_vector[1:-1] = wavelength_vector
extended_wavelength_vector[0] = wavelength_vector[0] - 0.1
extended_wavelength_vector[-1] = wavelength_vector[-1] + 0.1
# Call numpy interpolation to get truth.
result_comparison = np.interp(wavelength_grid, extended_wavelength_vector, extended_pathloss_vector)
np.testing.assert_array_equal(result, result_comparison)
def test_shutter_below_is_closed():
shutter_below_closed = ['x111', 'x', '10x11']
shutter_below_open = ['11x11', '111x', '11x01']
for shutter_state in shutter_below_closed:
assert shutter_below_is_closed(shutter_state)
for shutter_state in shutter_below_open:
assert not shutter_below_is_closed(shutter_state)
def test_shutter_above_is_closed():
shutter_above_closed = ['111x', 'x', '1x011']
shutter_above_open = ['11x11', 'x111', '110x1']
for shutter_state in shutter_above_closed:
assert shutter_above_is_closed(shutter_state)
for shutter_state in shutter_above_open:
assert not shutter_above_is_closed(shutter_state)
|
spacetelescopeREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-main@jwst@pathloss@tests@[email protected]_END.py
|
{
"filename": "2022_07_21_205820_0cf7311d6ea6_add_crashed_state_type.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/server/database/_migrations/versions/postgresql/2022_07_21_205820_0cf7311d6ea6_add_crashed_state_type.py",
"type": "Python"
}
|
"""Add Crashed state type
Revision ID: 0cf7311d6ea6
Revises: bb4dc90d3e29
Create Date: 2022-07-21 20:58:20.807489
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "0cf7311d6ea6"
down_revision = "bb4dc90d3e29"
branch_labels = None
depends_on = None
def upgrade():
op.execute("ALTER TYPE state_type ADD VALUE IF NOT EXISTS 'CRASHED';")
def downgrade():
# removing values from enums is not possible without recreating the column
pass
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@server@database@_migrations@versions@postgresql@[email protected]_END.py
|
{
"filename": "conf.py",
"repo_name": "adrn/thejoker",
"repo_path": "thejoker_extracted/thejoker-main/docs/conf.py",
"type": "Python"
}
|
import importlib.metadata
import os
project = "thejoker"
copyright = "2024, Adrian Price-Whelan"
author = "Adrian Price-Whelan"
version = release = importlib.metadata.version("thejoker")
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"nbsphinx",
# "sphinx_autodoc_typehints",
"sphinx_copybutton",
"sphinx_automodapi.automodapi",
"sphinx_automodapi.smart_resolver",
"rtds_action",
"matplotlib.sphinxext.plot_directive",
"matplotlib.sphinxext.figmpl_directive",
]
source_suffix = [".rst", ".md"]
exclude_patterns = [
"_build",
"**.ipynb_checkpoints",
"Thumbs.db",
".DS_Store",
".env",
".venv",
]
# HTML theme
html_theme = "sphinx_book_theme"
html_copy_source = True
html_show_sourcelink = True
html_sourcelink_suffix = ""
html_title = "thejoker"
html_logo = "_static/thejoker.png"
html_favicon = "_static/icon.ico"
html_static_path = ["_static"]
html_css_files = ["custom.css"]
html_theme_options = {
"path_to_docs": "docs",
"repository_url": "https://github.com/adrn/thejoker",
"repository_branch": "main",
"launch_buttons": {
"binderhub_url": "https://mybinder.org",
"notebook_interface": "classic",
},
"use_edit_page_button": True,
"use_issues_button": True,
"use_repository_button": True,
"use_download_button": True,
}
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"pymc": ("https://www.pymc.io/projects/docs/en/stable", None),
"h5py": ("http://docs.h5py.org/en/latest/", None),
"twobody": ("https://twobody.readthedocs.io/en/latest/", None),
"schwimmbad": ("https://schwimmbad.readthedocs.io/en/latest/", None),
"numpy": (
"https://numpy.org/doc/stable/",
(None, "http://data.astropy.org/intersphinx/numpy.inv"),
),
"scipy": (
"https://docs.scipy.org/doc/scipy/",
(None, "http://data.astropy.org/intersphinx/scipy.inv"),
),
"matplotlib": (
"https://matplotlib.org/stable/",
(None, "http://data.astropy.org/intersphinx/matplotlib.inv"),
),
"astropy": ("https://docs.astropy.org/en/stable/", None),
}
nitpick_ignore = [
("py:class", "_io.StringIO"),
("py:class", "_io.BytesIO"),
]
plot_srcset = ["2.0x"] # for retina displays
plot_rcparams = {"font.size": 16, "font.family": "serif", "figure.figsize": (6, 4)}
plot_apply_rcparams = True
always_document_param_types = True
# We execute the tutorial notebooks using GitHub Actions and upload to RTD:
nbsphinx_execute = "never"
# The name of your GitHub repository
rtds_action_github_repo = "adrn/thejoker"
# The path where the artifact should be extracted
# Note: this is relative to the conf.py file!
rtds_action_path = "examples"
# The "prefix" used in the `upload-artifact` step of the action
rtds_action_artifact_prefix = "notebooks-for-"
# A GitHub personal access token is required, more info below
rtds_action_github_token = os.environ.get("GITHUB_TOKEN", "")
# Whether or not to raise an error on Read the Docs if the
# artifact containing the notebooks can't be downloaded (optional)
rtds_action_error_if_missing = False
|
adrnREPO_NAMEthejokerPATH_START.@thejoker_extracted@thejoker-main@[email protected]@.PATH_END.py
|
{
"filename": "irafukey.py",
"repo_name": "iraf-community/pyraf",
"repo_path": "pyraf_extracted/pyraf-main/pyraf/irafukey.py",
"type": "Python"
}
|
"""
implement IRAF ukey functionality
"""
import os
import sys
import termios
from . import wutil
from .tools import capable, irafutils
# This class emulates the IRAF ukey parameter mechanism. IRAF calls for
# a ukey parameter and expects that the user will type a character in
# response. The value of this character is then returned to the iraf task
def getSingleTTYChar(): # return type str in all Python versions
"""Returns None if Control-C is typed or any other exception occurs"""
# Ripped off from python FAQ
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
new = termios.tcgetattr(fd)
new[3] = new[3] & ~(termios.ICANON | termios.ECHO | termios.ISIG)
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(fd, termios.TCSANOW, new)
c = None
try:
# allow Tk mainloop to run while waiting...
# vanilla version would be c = os.read(fd, 1)
if capable.OF_GRAPHICS:
c = irafutils.tkread(fd, 1)
else:
c = os.read(fd, 1).decode(errors='replace')
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, old)
return c
def ukey():
"""Returns the string expected for the IRAF ukey parameter"""
# set focus to terminal if it is not already there
wutil.focusController.setFocusTo('terminal')
char = getSingleTTYChar()
if not char:
# on control-C, raise KeyboardInterrupt
raise KeyboardInterrupt()
elif char == '\004':
# on control-D, raise EOF
raise EOFError()
elif ord(char) <= ord(' '):
# convert to octal ascii representation
returnStr = f'\\{ord(char):03o}'
elif char == ':':
# suck in colon string until newline is encountered
done = 0
sys.stdout.write(':')
sys.stdout.flush()
colonString = ''
while not done:
char = getSingleTTYChar()
if (not char) or (char == '\n'):
done = 1
elif char == '\b':
# backspace
colonString = colonString[:-1]
sys.stdout.write('\b \b')
sys.stdout.flush()
elif ord(char) >= ord(' '):
colonString = colonString + char
sys.stdout.write(char)
sys.stdout.flush()
else:
# ignore all other characters
pass
returnStr = ': ' + colonString
else:
returnStr = char
return returnStr
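# Illustrative return values (comment-only sketch): a printable key such as 'q'
# is returned unchanged, a control character such as Ctrl-A (0x01) comes back
# as the octal escape string '\\001', and a colon command typed as ':wq<Enter>'
# is returned as ': wq'. Per the behaviour above, Ctrl-C ends up raising
# KeyboardInterrupt and Ctrl-D raises EOFError instead of returning a string.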
|
iraf-communityREPO_NAMEpyrafPATH_START.@pyraf_extracted@pyraf-main@[email protected]@.PATH_END.py
|
{
"filename": "definitions.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/frontends/_skeleton/definitions.py",
"type": "Python"
}
|
# This file is often empty. It can hold definitions related to a frontend.
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@frontends@[email protected]@.PATH_END.py
|
{
"filename": "AnIsochrone.py",
"repo_name": "jobovy/isodist",
"repo_path": "isodist_extracted/isodist-main/isodist/AnIsochrone.py",
"type": "Python"
}
|
import os, os.path
import csv
import math
import numpy
import gzip
from isodist.Isochrone import Isochrone, FEH2Z, Z2FEH, dict2recarray
from isodist.PadovaIsochrone import _DATADIR
_ANZSOLAR= 0.0176
_ZS= [-0.1,-0.2,-0.3,-0.5,-1.,-1.5,-2.,-3.,0.,0.1,0.2,0.4]
class AnIsochrone (Isochrone):
"""Class that represents a An+08 isochrone"""
def __init__(self,Z=None,filters=None,corrected=True):
"""
NAME:
__init__
PURPOSE:
initialize
INPUT:
corrected= if False, use un-corrected isochrones
Z= load only this metallicity (can be list)
OUTPUT:
HISTORY:
2011-08-05 - Written - Bovy (NYU)
BUGS:
Z determination needs to account for dY/dZ
"""
self._filters= ['u','g','r','i','z']
#Read the files
dicts= []
if Z is None: #Z here is actually FeH, we correct this later
ZS= _ZS
else:
if isinstance(Z,(list,numpy.ndarray)):
ZS= Z
else:
ZS= [Z]
for Zm in ZS:
if Zm >= 0.: signstr= 'p'
else: signstr= 'm'
if corrected: corrstr= 'corr'
else: corrstr= 'marcs'
dicts.append(read_an_isochrone(os.path.join(_DATADIR,
'an_isochrones',
signstr+'%03i_' % (int(numpy.fabs(100.*Zm)))
+corrstr+'.txt'),
filters=self._filters))
self._ZS= numpy.array([FEH2Z(z,zsolar=_ANZSOLAR) for z in ZS])
self._dicts= dicts
#Gather ages
self._logages= numpy.array(sorted(list(set(self._dicts[0]['logage']))))
return None
def __call__(self,logage,Z=None,feh=None,afe=None,maxm=None,
asrecarray=False,stage=None):
"""
NAME:
__call__
PURPOSE:
get a single isochrone from the library
INPUT:
logage - log_10 age
Z= or feh= metallicity (use Z_\odot=0.0176)
afe= None (not supported for An; linear relation between afe and
feh is assumed)
maxm= maximum mass to consider (m_ini)
stage= if set, only show this evolutionary stage (NOT IMPLEMENTED FOR AN)
KEYWORDS:
asrecarray= if True, return recarray, otherwise dict
OUTPUT:
isochrone
HISTORY:
2011-08-04 - Written - Bovy (NYU)
"""
if not afe is None:
raise NotImplementedError("'afe=' not implemented for An isochrones")
if not feh is None:
Z= 10.**(feh+math.log10(_ANZSOLAR))
indx= (self._ZS == Z)
ii= 0
while (ii < len(self._dicts) and not indx[ii]): ii+= 1
if ii == len(self._dicts):
raise IOError("No isochrone found that matches this metallicity")
thisDict= self._dicts[ii]
if maxm is None:
indx= (thisDict['logage'] == logage)
else:
indx= (thisDict['logage'] == logage)*(thisDict['Mass'] < maxm)
if numpy.sum(indx) == 0:
raise IOError("No isochrone found that matches this logage")
outDict= {}
for key in thisDict.keys():
outDict[key]= thisDict[key][indx]
if asrecarray:
return dict2recarray(outDict)
else:
return outDict
def read_an_isochrone(name,filters=None):
"""
NAME:
read_an_isochrone
PURPOSE:
read an An isochrone file
INPUT:
name- name of the file
filters= list of filters in the file
OUTPUT:
dictionary with the table
HISTORY:
2011-08-04 - Written - Bovy (NYU)
"""
dialect= csv.excel
dialect.skipinitialspace=True
if name[-2:] == 'gz':
file= gzip.open(name,'r')
else:
file= open(name,'r')
reader= csv.reader(file,delimiter=' ',
dialect=dialect)
logage=[]
Mass= []
logL= []
logTe= []
logg= []
mbol= []
mags= []
for row in reader:
try:
if row[0][0:4] == 'Mass': #Header line to skip
continue
except IndexError:
pass
try:
if row[0] == 'Cluster': #Header line to extract age from
thislogage= numpy.log10(float(row[4]))
continue
except IndexError:
pass
logage.append(thislogage) #from the header, see above
Mass.append(float(row[0]))
logTe.append(numpy.log10(float(row[1])))
logL.append(float(row[2]))
logg.append(float(row[3]))
mbol.append(float(row[4]))
r= float(row[5])
gr = float(row[6])
gi = float(row[7])
gz = float(row[8])
ug = float(row[9])
mags.append([r+gr+ug, #u
r+gr, #g
r,
-gi+gr+r, #i
-gz+gr+r]) #z
#Load everything into a dictionary
outDict= {}
outDict['logage']= numpy.array(logage)
outDict['Mass']= numpy.array(Mass)
outDict['logL']= numpy.array(logL)
outDict['logTe']= numpy.array(logTe)
outDict['logg']= numpy.array(logg)
outDict['mbol']= numpy.array(mbol)
for ii in range(len(filters)):
thismag= []
for jj in range(len(mags)):
thismag.append(mags[jj][ii])
outDict[filters[ii]]= numpy.array(thismag)
return outDict
|
jobovyREPO_NAMEisodistPATH_START.@isodist_extracted@isodist-main@[email protected]@.PATH_END.py
|
{
"filename": "_tempita.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/_build_utils/tempita/_tempita.py",
"type": "Python"
}
|
"""
A small templating language
This implements a small templating language. This language implements
if/elif/else, for/continue/break, expressions, and blocks of Python
code. The syntax is::
{{any expression (function calls etc)}}
{{any expression | filter}}
{{for x in y}}...{{endfor}}
{{if x}}x{{elif y}}y{{else}}z{{endif}}
{{py:x=1}}
{{py:
def foo(bar):
return 'baz'
}}
{{default var = default_value}}
{{# comment}}
You use this with the ``Template`` class or the ``sub`` shortcut.
The ``Template`` class takes the template string and the name of
the template (for errors) and a default namespace. Then (like
``string.Template``) you can call the ``tmpl.substitute(**kw)``
method to make a substitution (or ``tmpl.substitute(a_dict)``).
``sub(content, **kw)`` substitutes the template immediately. You
can use ``__name='tmpl.html'`` to set the name of the template.
If there are syntax errors ``TemplateError`` will be raised.
"""
import re
import sys
import os
import tokenize
from io import StringIO
from ._looper import looper
__all__ = ["TemplateError", "Template", "sub", "bunch"]
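# Minimal usage sketch based on the module docstring above (illustrative only,
# not part of the original module):
#   Template("Hi {{name}}!").substitute(name="world")      # -> 'Hi world!'
#   sub("{{for i in [1, 2]}}{{i}} {{endfor}}")              # -> '1 2 '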
in_re = re.compile(r"\s+in\s+")
var_re = re.compile(r"^[a-z_][a-z0-9_]*$", re.I)
basestring_ = (bytes, str)
def coerce_text(v):
if not isinstance(v, basestring_):
if hasattr(v, "__str__"):
return str(v)
else:
return bytes(v)
return v
class TemplateError(Exception):
"""Exception raised while parsing a template"""
def __init__(self, message, position, name=None):
Exception.__init__(self, message)
self.position = position
self.name = name
def __str__(self):
msg = " ".join(self.args)
if self.position:
msg = "%s at line %s column %s" % (msg, self.position[0], self.position[1])
if self.name:
msg += " in %s" % self.name
return msg
class _TemplateContinue(Exception):
pass
class _TemplateBreak(Exception):
pass
def get_file_template(name, from_template):
path = os.path.join(os.path.dirname(from_template.name), name)
return from_template.__class__.from_filename(
path, namespace=from_template.namespace, get_template=from_template.get_template
)
class Template:
default_namespace = {
"start_braces": "{{",
"end_braces": "}}",
"looper": looper,
}
default_encoding = "utf8"
default_inherit = None
def __init__(
self,
content,
name=None,
namespace=None,
stacklevel=None,
get_template=None,
default_inherit=None,
line_offset=0,
delimiters=None,
):
self.content = content
# set delimiters
if delimiters is None:
delimiters = (
self.default_namespace["start_braces"],
self.default_namespace["end_braces"],
)
else:
# assert len(delimiters) == 2 and all([isinstance(delimiter, basestring)
# for delimiter in delimiters])
self.default_namespace = self.__class__.default_namespace.copy()
self.default_namespace["start_braces"] = delimiters[0]
self.default_namespace["end_braces"] = delimiters[1]
self.delimiters = delimiters
self._unicode = isinstance(content, str)
if name is None and stacklevel is not None:
try:
caller = sys._getframe(stacklevel)
except ValueError:
pass
else:
globals = caller.f_globals
lineno = caller.f_lineno
if "__file__" in globals:
name = globals["__file__"]
if name.endswith(".pyc") or name.endswith(".pyo"):
name = name[:-1]
elif "__name__" in globals:
name = globals["__name__"]
else:
name = "<string>"
if lineno:
name += ":%s" % lineno
self.name = name
self._parsed = parse(
content, name=name, line_offset=line_offset, delimiters=self.delimiters
)
if namespace is None:
namespace = {}
self.namespace = namespace
self.get_template = get_template
if default_inherit is not None:
self.default_inherit = default_inherit
def from_filename(
cls,
filename,
namespace=None,
encoding=None,
default_inherit=None,
get_template=get_file_template,
):
with open(filename, "rb") as f:
c = f.read()
if encoding:
c = c.decode(encoding)
return cls(
content=c,
name=filename,
namespace=namespace,
default_inherit=default_inherit,
get_template=get_template,
)
from_filename = classmethod(from_filename)
def __repr__(self):
return "<%s %s name=%r>" % (
self.__class__.__name__,
hex(id(self))[2:],
self.name,
)
def substitute(self, *args, **kw):
if args:
if kw:
raise TypeError("You can only give positional *or* keyword arguments")
if len(args) > 1:
raise TypeError("You can only give one positional argument")
if not hasattr(args[0], "items"):
raise TypeError(
"If you pass in a single argument, you must pass in a "
"dictionary-like object (with a .items() method); you gave %r"
% (args[0],)
)
kw = args[0]
ns = kw
ns["__template_name__"] = self.name
if self.namespace:
ns.update(self.namespace)
result, defs, inherit = self._interpret(ns)
if not inherit:
inherit = self.default_inherit
if inherit:
result = self._interpret_inherit(result, defs, inherit, ns)
return result
def _interpret(self, ns):
__traceback_hide__ = True
parts = []
defs = {}
self._interpret_codes(self._parsed, ns, out=parts, defs=defs)
if "__inherit__" in defs:
inherit = defs.pop("__inherit__")
else:
inherit = None
return "".join(parts), defs, inherit
def _interpret_inherit(self, body, defs, inherit_template, ns):
__traceback_hide__ = True
if not self.get_template:
raise TemplateError(
"You cannot use inheritance without passing in get_template",
position=None,
name=self.name,
)
templ = self.get_template(inherit_template, self)
self_ = TemplateObject(self.name)
for name, value in defs.items():
setattr(self_, name, value)
self_.body = body
ns = ns.copy()
ns["self"] = self_
return templ.substitute(ns)
def _interpret_codes(self, codes, ns, out, defs):
__traceback_hide__ = True
for item in codes:
if isinstance(item, basestring_):
out.append(item)
else:
self._interpret_code(item, ns, out, defs)
def _interpret_code(self, code, ns, out, defs):
__traceback_hide__ = True
name, pos = code[0], code[1]
if name == "py":
self._exec(code[2], ns, pos)
elif name == "continue":
raise _TemplateContinue()
elif name == "break":
raise _TemplateBreak()
elif name == "for":
vars, expr, content = code[2], code[3], code[4]
expr = self._eval(expr, ns, pos)
self._interpret_for(vars, expr, content, ns, out, defs)
elif name == "cond":
parts = code[2:]
self._interpret_if(parts, ns, out, defs)
elif name == "expr":
parts = code[2].split("|")
base = self._eval(parts[0], ns, pos)
for part in parts[1:]:
func = self._eval(part, ns, pos)
base = func(base)
out.append(self._repr(base, pos))
elif name == "default":
var, expr = code[2], code[3]
if var not in ns:
result = self._eval(expr, ns, pos)
ns[var] = result
elif name == "inherit":
expr = code[2]
value = self._eval(expr, ns, pos)
defs["__inherit__"] = value
elif name == "def":
name = code[2]
signature = code[3]
parts = code[4]
ns[name] = defs[name] = TemplateDef(
self, name, signature, body=parts, ns=ns, pos=pos
)
elif name == "comment":
return
else:
assert 0, "Unknown code: %r" % name
def _interpret_for(self, vars, expr, content, ns, out, defs):
__traceback_hide__ = True
for item in expr:
if len(vars) == 1:
ns[vars[0]] = item
else:
if len(vars) != len(item):
raise ValueError(
"Need %i items to unpack (got %i items)"
% (len(vars), len(item))
)
for name, value in zip(vars, item):
ns[name] = value
try:
self._interpret_codes(content, ns, out, defs)
except _TemplateContinue:
continue
except _TemplateBreak:
break
def _interpret_if(self, parts, ns, out, defs):
__traceback_hide__ = True
# @@: if/else/else gets through
for part in parts:
assert not isinstance(part, basestring_)
name, pos = part[0], part[1]
if name == "else":
result = True
else:
result = self._eval(part[2], ns, pos)
if result:
self._interpret_codes(part[3], ns, out, defs)
break
def _eval(self, code, ns, pos):
__traceback_hide__ = True
try:
try:
value = eval(code, self.default_namespace, ns)
except SyntaxError as e:
raise SyntaxError("invalid syntax in expression: %s" % code)
return value
except Exception as e:
if getattr(e, "args", None):
arg0 = e.args[0]
else:
arg0 = coerce_text(e)
e.args = (self._add_line_info(arg0, pos),)
raise
def _exec(self, code, ns, pos):
__traceback_hide__ = True
try:
exec(code, self.default_namespace, ns)
except Exception as e:
if e.args:
e.args = (self._add_line_info(e.args[0], pos),)
else:
e.args = (self._add_line_info(None, pos),)
raise
def _repr(self, value, pos):
__traceback_hide__ = True
try:
if value is None:
return ""
if self._unicode:
try:
value = str(value)
except UnicodeDecodeError:
value = bytes(value)
else:
if not isinstance(value, basestring_):
value = coerce_text(value)
if isinstance(value, str) and self.default_encoding:
value = value.encode(self.default_encoding)
except Exception as e:
e.args = (self._add_line_info(e.args[0], pos),)
raise
else:
if self._unicode and isinstance(value, bytes):
if not self.default_encoding:
raise UnicodeDecodeError(
"Cannot decode bytes value %r into unicode "
"(no default_encoding provided)" % value
)
try:
value = value.decode(self.default_encoding)
except UnicodeDecodeError as e:
raise UnicodeDecodeError(
e.encoding,
e.object,
e.start,
e.end,
e.reason + " in string %r" % value,
)
elif not self._unicode and isinstance(value, str):
if not self.default_encoding:
raise UnicodeEncodeError(
"Cannot encode unicode value %r into bytes "
"(no default_encoding provided)" % value
)
value = value.encode(self.default_encoding)
return value
def _add_line_info(self, msg, pos):
msg = "%s at line %s column %s" % (msg, pos[0], pos[1])
if self.name:
msg += " in file %s" % self.name
return msg
def sub(content, delimiters=None, **kw):
name = kw.get("__name")
tmpl = Template(content, name=name, delimiters=delimiters)
return tmpl.substitute(kw)
def paste_script_template_renderer(content, vars, filename=None):
tmpl = Template(content, name=filename)
return tmpl.substitute(vars)
class bunch(dict):
def __init__(self, **kw):
for name, value in kw.items():
setattr(self, name, value)
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, key):
if "default" in self:
try:
return dict.__getitem__(self, key)
except KeyError:
return dict.__getitem__(self, "default")
else:
return dict.__getitem__(self, key)
def __repr__(self):
return "<%s %s>" % (
self.__class__.__name__,
" ".join(["%s=%r" % (k, v) for k, v in sorted(self.items())]),
)
class TemplateDef:
def __init__(
self, template, func_name, func_signature, body, ns, pos, bound_self=None
):
self._template = template
self._func_name = func_name
self._func_signature = func_signature
self._body = body
self._ns = ns
self._pos = pos
self._bound_self = bound_self
def __repr__(self):
return "<tempita function %s(%s) at %s:%s>" % (
self._func_name,
self._func_signature,
self._template.name,
self._pos,
)
def __str__(self):
return self()
def __call__(self, *args, **kw):
values = self._parse_signature(args, kw)
ns = self._ns.copy()
ns.update(values)
if self._bound_self is not None:
ns["self"] = self._bound_self
out = []
subdefs = {}
self._template._interpret_codes(self._body, ns, out, subdefs)
return "".join(out)
def __get__(self, obj, type=None):
if obj is None:
return self
return self.__class__(
self._template,
self._func_name,
self._func_signature,
self._body,
self._ns,
self._pos,
bound_self=obj,
)
def _parse_signature(self, args, kw):
values = {}
sig_args, var_args, var_kw, defaults = self._func_signature
extra_kw = {}
for name, value in kw.items():
if not var_kw and name not in sig_args:
raise TypeError("Unexpected argument %s" % name)
if name in sig_args:
                values[name] = value
else:
extra_kw[name] = value
args = list(args)
sig_args = list(sig_args)
while args:
while sig_args and sig_args[0] in values:
sig_args.pop(0)
if sig_args:
name = sig_args.pop(0)
values[name] = args.pop(0)
elif var_args:
values[var_args] = tuple(args)
break
else:
raise TypeError(
"Extra position arguments: %s" % ", ".join([repr(v) for v in args])
)
for name, value_expr in defaults.items():
if name not in values:
values[name] = self._template._eval(value_expr, self._ns, self._pos)
for name in sig_args:
if name not in values:
raise TypeError("Missing argument: %s" % name)
if var_kw:
values[var_kw] = extra_kw
return values
class TemplateObject:
def __init__(self, name):
self.__name = name
self.get = TemplateObjectGetter(self)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.__name)
class TemplateObjectGetter:
def __init__(self, template_obj):
self.__template_obj = template_obj
def __getattr__(self, attr):
return getattr(self.__template_obj, attr, Empty)
def __repr__(self):
return "<%s around %r>" % (self.__class__.__name__, self.__template_obj)
class _Empty:
def __call__(self, *args, **kw):
return self
def __str__(self):
return ""
def __repr__(self):
return "Empty"
def __unicode__(self):
return ""
def __iter__(self):
return iter(())
def __bool__(self):
return False
Empty = _Empty()
del _Empty
############################################################
## Lexing and Parsing
############################################################
def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None):
"""
Lex a string into chunks:
>>> lex('hey')
['hey']
>>> lex('hey {{you}}')
['hey ', ('you', (1, 7))]
>>> lex('hey {{')
Traceback (most recent call last):
...
TemplateError: No }} to finish last expression at line 1 column 7
>>> lex('hey }}')
Traceback (most recent call last):
...
TemplateError: }} outside expression at line 1 column 7
>>> lex('hey {{ {{')
Traceback (most recent call last):
...
TemplateError: {{ inside expression at line 1 column 10
"""
if delimiters is None:
delimiters = (
Template.default_namespace["start_braces"],
Template.default_namespace["end_braces"],
)
in_expr = False
chunks = []
last = 0
last_pos = (line_offset + 1, 1)
token_re = re.compile(
r"%s|%s" % (re.escape(delimiters[0]), re.escape(delimiters[1]))
)
for match in token_re.finditer(s):
expr = match.group(0)
pos = find_position(s, match.end(), last, last_pos)
if expr == delimiters[0] and in_expr:
raise TemplateError(
"%s inside expression" % delimiters[0], position=pos, name=name
)
elif expr == delimiters[1] and not in_expr:
raise TemplateError(
"%s outside expression" % delimiters[1], position=pos, name=name
)
if expr == delimiters[0]:
part = s[last:match.start()]
if part:
chunks.append(part)
in_expr = True
else:
chunks.append((s[last: match.start()], last_pos))
in_expr = False
last = match.end()
last_pos = pos
if in_expr:
raise TemplateError(
"No %s to finish last expression" % delimiters[1],
name=name,
position=last_pos,
)
part = s[last:]
if part:
chunks.append(part)
if trim_whitespace:
chunks = trim_lex(chunks)
return chunks
statement_re = re.compile(r"^(?:if |elif |for |def |inherit |default |py:)")
single_statements = ["else", "endif", "endfor", "enddef", "continue", "break"]
trail_whitespace_re = re.compile(r"\n\r?[\t ]*$")
lead_whitespace_re = re.compile(r"^[\t ]*\n")
def trim_lex(tokens):
r"""
Takes a lexed set of tokens, and removes whitespace when there is
a directive on a line by itself:
>>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
>>> tokens
[('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
>>> trim_lex(tokens)
[('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
"""
last_trim = None
for i, current in enumerate(tokens):
if isinstance(current, basestring_):
# we don't trim this
continue
item = current[0]
if not statement_re.search(item) and item not in single_statements:
continue
if not i:
prev = ""
else:
prev = tokens[i - 1]
if i + 1 >= len(tokens):
next_chunk = ""
else:
next_chunk = tokens[i + 1]
if not isinstance(next_chunk, basestring_) or not isinstance(prev, basestring_):
continue
prev_ok = not prev or trail_whitespace_re.search(prev)
if i == 1 and not prev.strip():
prev_ok = True
if last_trim is not None and last_trim + 2 == i and not prev.strip():
prev_ok = "last"
if prev_ok and (
not next_chunk
or lead_whitespace_re.search(next_chunk)
or (i == len(tokens) - 2 and not next_chunk.strip())
):
if prev:
if (i == 1 and not prev.strip()) or prev_ok == "last":
tokens[i - 1] = ""
else:
m = trail_whitespace_re.search(prev)
# +1 to leave the leading \n on:
prev = prev[: m.start() + 1]
tokens[i - 1] = prev
if next_chunk:
last_trim = i
if i == len(tokens) - 2 and not next_chunk.strip():
tokens[i + 1] = ""
else:
m = lead_whitespace_re.search(next_chunk)
next_chunk = next_chunk[m.end():]
tokens[i + 1] = next_chunk
return tokens
def find_position(string, index, last_index, last_pos):
"""Given a string and index, return (line, column)"""
lines = string.count("\n", last_index, index)
if lines > 0:
column = index - string.rfind("\n", last_index, index)
else:
column = last_pos[1] + (index - last_index)
return (last_pos[0] + lines, column)
def parse(s, name=None, line_offset=0, delimiters=None):
r"""
Parses a string into a kind of AST
>>> parse('{{x}}')
[('expr', (1, 3), 'x')]
>>> parse('foo')
['foo']
>>> parse('{{if x}}test{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
>>> parse('series->{{for x in y}}x={{x}}{{endfor}}')
['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
>>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
[('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
>>> parse('{{py:x=1}}')
[('py', (1, 3), 'x=1')]
>>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] # noqa: E501
Some exceptions::
>>> parse('{{continue}}')
Traceback (most recent call last):
...
TemplateError: continue outside of for loop at line 1 column 3
>>> parse('{{if x}}foo')
Traceback (most recent call last):
...
TemplateError: No {{endif}} at line 1 column 3
>>> parse('{{else}}')
Traceback (most recent call last):
...
TemplateError: else outside of an if block at line 1 column 3
>>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Unexpected endif at line 1 column 25
>>> parse('{{if}}{{endif}}')
Traceback (most recent call last):
...
TemplateError: if with no expression at line 1 column 3
>>> parse('{{for x y}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
>>> parse('{{py:x=1\ny=2}}')
Traceback (most recent call last):
...
TemplateError: Multi-line py blocks must start with a newline at line 1 column 3
"""
if delimiters is None:
delimiters = (
Template.default_namespace["start_braces"],
Template.default_namespace["end_braces"],
)
tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters)
result = []
while tokens:
next_chunk, tokens = parse_expr(tokens, name)
result.append(next_chunk)
return result
def parse_expr(tokens, name, context=()):
if isinstance(tokens[0], basestring_):
return tokens[0], tokens[1:]
expr, pos = tokens[0]
expr = expr.strip()
if expr.startswith("py:"):
expr = expr[3:].lstrip(" \t")
if expr.startswith("\n") or expr.startswith("\r"):
expr = expr.lstrip("\r\n")
if "\r" in expr:
expr = expr.replace("\r\n", "\n")
expr = expr.replace("\r", "")
expr += "\n"
else:
if "\n" in expr:
raise TemplateError(
"Multi-line py blocks must start with a newline",
position=pos,
name=name,
)
return ("py", pos, expr), tokens[1:]
elif expr in ("continue", "break"):
if "for" not in context:
raise TemplateError("continue outside of for loop", position=pos, name=name)
return (expr, pos), tokens[1:]
elif expr.startswith("if "):
return parse_cond(tokens, name, context)
elif expr.startswith("elif ") or expr == "else":
raise TemplateError(
"%s outside of an if block" % expr.split()[0], position=pos, name=name
)
elif expr in ("if", "elif", "for"):
raise TemplateError("%s with no expression" % expr, position=pos, name=name)
elif expr in ("endif", "endfor", "enddef"):
raise TemplateError("Unexpected %s" % expr, position=pos, name=name)
elif expr.startswith("for "):
return parse_for(tokens, name, context)
elif expr.startswith("default "):
return parse_default(tokens, name, context)
elif expr.startswith("inherit "):
return parse_inherit(tokens, name, context)
elif expr.startswith("def "):
return parse_def(tokens, name, context)
elif expr.startswith("#"):
return ("comment", pos, tokens[0][0]), tokens[1:]
return ("expr", pos, tokens[0][0]), tokens[1:]
def parse_cond(tokens, name, context):
start = tokens[0][1]
pieces = []
context = context + ("if",)
while 1:
if not tokens:
raise TemplateError("Missing {{endif}}", position=start, name=name)
if isinstance(tokens[0], tuple) and tokens[0][0] == "endif":
return ("cond", start) + tuple(pieces), tokens[1:]
next_chunk, tokens = parse_one_cond(tokens, name, context)
pieces.append(next_chunk)
def parse_one_cond(tokens, name, context):
(first, pos), tokens = tokens[0], tokens[1:]
content = []
if first.endswith(":"):
first = first[:-1]
if first.startswith("if "):
part = ("if", pos, first[3:].lstrip(), content)
elif first.startswith("elif "):
part = ("elif", pos, first[5:].lstrip(), content)
elif first == "else":
part = ("else", pos, None, content)
else:
assert 0, "Unexpected token %r at %s" % (first, pos)
while 1:
if not tokens:
raise TemplateError("No {{endif}}", position=pos, name=name)
if isinstance(tokens[0], tuple) and (
tokens[0][0] == "endif"
or tokens[0][0].startswith("elif ")
or tokens[0][0] == "else"
):
return part, tokens
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_for(tokens, name, context):
first, pos = tokens[0]
tokens = tokens[1:]
context = ("for",) + context
content = []
assert first.startswith("for "), first
if first.endswith(":"):
first = first[:-1]
first = first[3:].strip()
match = in_re.search(first)
if not match:
raise TemplateError('Bad for (no "in") in %r' % first, position=pos, name=name)
vars = first[: match.start()]
if "(" in vars:
raise TemplateError(
"You cannot have () in the variable section of a for loop (%r)" % vars,
position=pos,
name=name,
)
vars = tuple([v.strip() for v in first[: match.start()].split(",") if v.strip()])
expr = first[match.end():]
while 1:
if not tokens:
raise TemplateError("No {{endfor}}", position=pos, name=name)
if isinstance(tokens[0], tuple) and tokens[0][0] == "endfor":
return ("for", pos, vars, expr, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_default(tokens, name, context):
first, pos = tokens[0]
assert first.startswith("default ")
first = first.split(None, 1)[1]
parts = first.split("=", 1)
if len(parts) == 1:
raise TemplateError(
"Expression must be {{default var=value}}; no = found in %r" % first,
position=pos,
name=name,
)
var = parts[0].strip()
if "," in var:
raise TemplateError(
"{{default x, y = ...}} is not supported", position=pos, name=name
)
if not var_re.search(var):
raise TemplateError(
"Not a valid variable name for {{default}}: %r" % var,
position=pos,
name=name,
)
expr = parts[1].strip()
return ("default", pos, var, expr), tokens[1:]
def parse_inherit(tokens, name, context):
first, pos = tokens[0]
assert first.startswith("inherit ")
expr = first.split(None, 1)[1]
return ("inherit", pos, expr), tokens[1:]
def parse_def(tokens, name, context):
first, start = tokens[0]
tokens = tokens[1:]
assert first.startswith("def ")
first = first.split(None, 1)[1]
if first.endswith(":"):
first = first[:-1]
if "(" not in first:
func_name = first
sig = ((), None, None, {})
elif not first.endswith(")"):
raise TemplateError(
"Function definition doesn't end with ): %s" % first,
position=start,
name=name,
)
else:
first = first[:-1]
func_name, sig_text = first.split("(", 1)
sig = parse_signature(sig_text, name, start)
context = context + ("def",)
content = []
while 1:
if not tokens:
raise TemplateError("Missing {{enddef}}", position=start, name=name)
if isinstance(tokens[0], tuple) and tokens[0][0] == "enddef":
return ("def", start, func_name, sig, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_signature(sig_text, name, pos):
tokens = tokenize.generate_tokens(StringIO(sig_text).readline)
sig_args = []
var_arg = None
var_kw = None
defaults = {}
def get_token(pos=False):
try:
tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens)
except StopIteration:
return tokenize.ENDMARKER, ""
if pos:
return tok_type, tok_string, (srow, scol), (erow, ecol)
else:
return tok_type, tok_string
while 1:
var_arg_type = None
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER:
break
if tok_type == tokenize.OP and (tok_string == "*" or tok_string == "**"):
var_arg_type = tok_string
tok_type, tok_string = get_token()
if tok_type != tokenize.NAME:
raise TemplateError(
"Invalid signature: (%s)" % sig_text, position=pos, name=name
)
var_name = tok_string
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER or (
tok_type == tokenize.OP and tok_string == ","
):
if var_arg_type == "*":
var_arg = var_name
elif var_arg_type == "**":
var_kw = var_name
else:
sig_args.append(var_name)
if tok_type == tokenize.ENDMARKER:
break
continue
if var_arg_type is not None:
raise TemplateError(
"Invalid signature: (%s)" % sig_text, position=pos, name=name
)
if tok_type == tokenize.OP and tok_string == "=":
nest_type = None
unnest_type = None
nest_count = 0
start_pos = end_pos = None
parts = []
while 1:
tok_type, tok_string, s, e = get_token(True)
if start_pos is None:
start_pos = s
end_pos = e
if tok_type == tokenize.ENDMARKER and nest_count:
raise TemplateError(
"Invalid signature: (%s)" % sig_text, position=pos, name=name
)
if not nest_count and (
tok_type == tokenize.ENDMARKER
or (tok_type == tokenize.OP and tok_string == ",")
):
default_expr = isolate_expression(sig_text, start_pos, end_pos)
defaults[var_name] = default_expr
sig_args.append(var_name)
break
parts.append((tok_type, tok_string))
if nest_count and tok_type == tokenize.OP and tok_string == nest_type:
nest_count += 1
elif (
nest_count and tok_type == tokenize.OP and tok_string == unnest_type
):
nest_count -= 1
if not nest_count:
nest_type = unnest_type = None
elif (
not nest_count
and tok_type == tokenize.OP
and tok_string in ("(", "[", "{")
):
nest_type = tok_string
nest_count = 1
unnest_type = {"(": ")", "[": "]", "{": "}"}[nest_type]
return sig_args, var_arg, var_kw, defaults
def isolate_expression(string, start_pos, end_pos):
srow, scol = start_pos
srow -= 1
erow, ecol = end_pos
erow -= 1
lines = string.splitlines(True)
if srow == erow:
return lines[srow][scol:ecol]
parts = [lines[srow][scol:]]
parts.extend(lines[srow + 1:erow])
if erow < len(lines):
# It'll sometimes give (end_row_past_finish, 0)
parts.append(lines[erow][:ecol])
return "".join(parts)
_fill_command_usage = """\
%prog [OPTIONS] TEMPLATE arg=value
Use py:arg=value to set a Python value; otherwise all values are
strings.
"""
def fill_command(args=None):
import sys
import optparse
import pkg_resources
import os
if args is None:
args = sys.argv[1:]
dist = pkg_resources.get_distribution("Paste")
parser = optparse.OptionParser(version=coerce_text(dist), usage=_fill_command_usage)
parser.add_option(
"-o",
"--output",
dest="output",
metavar="FILENAME",
help="File to write output to (default stdout)",
)
parser.add_option(
"--env",
dest="use_env",
action="store_true",
help="Put the environment in as top-level variables",
)
options, args = parser.parse_args(args)
if len(args) < 1:
print("You must give a template filename")
sys.exit(2)
template_name = args[0]
args = args[1:]
vars = {}
if options.use_env:
vars.update(os.environ)
for value in args:
if "=" not in value:
print("Bad argument: %r" % value)
sys.exit(2)
name, value = value.split("=", 1)
if name.startswith("py:"):
            name = name[3:]
value = eval(value)
vars[name] = value
if template_name == "-":
template_content = sys.stdin.read()
template_name = "<stdin>"
else:
with open(template_name, "rb") as f:
template_content = f.read()
template = Template(template_content, name=template_name)
result = template.substitute(vars)
if options.output:
with open(options.output, "wb") as f:
f.write(result)
else:
sys.stdout.write(result)
if __name__ == "__main__":
fill_command()
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@_build_utils@tempita@[email protected]_END.py
|
{
"filename": "XID+example_run_script_SED.ipynb",
"repo_name": "H-E-L-P/XID_plus",
"repo_path": "XID_plus_extracted/XID_plus-master/docs/notebooks/examples/XID+example_run_script_SED.ipynb",
"type": "Jupyter Notebook"
}
|
# XID+ Example Run Script
(This is based on a Jupyter notebook, available in the [XID+ package](https://github.com/H-E-L-P/XID_plus/tree/master/docs/notebooks/examples/) and can be interactively run and edited)
XID+ is a probabilistic deblender for confusion-dominated maps. It is designed to:
1. Use an MCMC-based approach to get the FULL posterior
2. Provide a natural framework to introduce additional prior information
3. Allow a more accurate estimate of flux density errors for each source
4. Provide a platform for doing science with the maps (e.g. XID+ hierarchical stacking, luminosity functions from the map, etc.)
Cross-identification tends to be done with catalogues, then science with the matched catalogues.
XID+ takes a different philosophy. Catalogues are a form of data compression: fine in some cases, but not in others, e.g. confused images, where catalogue compression loses correlation information. Ideally, science should be done without compression.
XID+ provides a framework to cross-identify galaxies we know about in different maps, with the idea that it can be extended to do science with the maps!!
Philosophy:
- build a probabilistic generative model for the SPIRE maps
- Infer model on SPIRE maps
Bayes Theorem
$p(\mathbf{f}|\mathbf{d}) \propto p(\mathbf{d}|\mathbf{f}) \times p(\mathbf{f})$
In order to carry out Bayesian inference, we need a model to carry out inference on.
For the SPIRE maps, our model is quite simple, with likelihood defined as:
$L = p(\mathbf{d}|\mathbf{f}) \propto |\mathbf{N_d}|^{-1/2} \exp\big\{ -\frac{1}{2}(\mathbf{d}-\mathbf{Af})^T\mathbf{N_d}^{-1}(\mathbf{d}-\mathbf{Af})\big\}$
where:
$\mathbf{N_{d,ii}} =\sigma_{inst.,ii}^2+\sigma_{conf.}^2$
Simplest model for XID+ assumes following:
* All sources are known and have positive flux (fi)
* A global background (B) contributes to all pixels
* PRF is fixed and known
* Confusion noise is constant and not correlated across pixels
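The assumptions above can be illustrated with a toy numerical sketch (made-up array sizes, a random pointing matrix and arbitrary noise levels, not the real SPIRE maps): the map is generated as $\mathbf{d} = \mathbf{Af} + B + \mathrm{noise}$, and the Gaussian log-likelihood follows directly.
```python
# Toy sketch of the XID+ data model (hypothetical numbers, not the real maps)
import numpy as np

rng = np.random.default_rng(0)
npix, nsrc = 100, 5
A = rng.random((npix, nsrc)) * 0.1           # pointing matrix: PRF weight of each source in each pixel
f_true = rng.uniform(0, 50, nsrc)            # true source fluxes (mJy)
B = -2.0                                     # global background (mJy)
sigma_inst = rng.uniform(1.5, 2.5, npix)     # instrumental noise per pixel
sigma_conf = 1.6                             # confusion noise
N_d = sigma_inst**2 + sigma_conf**2          # diagonal of the noise covariance
d = A @ f_true + B + rng.normal(0.0, np.sqrt(N_d))   # simulated map

def log_like(f, bkg):
    # Gaussian log-likelihood (up to a constant), as in the expression above
    resid = d - (A @ f + bkg)
    return -0.5 * np.sum(resid**2 / N_d) - 0.5 * np.sum(np.log(N_d))

print(log_like(f_true, B), log_like(np.zeros(nsrc), 0.0))   # the true fluxes score higher
```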
----
Because we are getting the joint probability distribution, our model is generative:
* Given parameters, we can generate data, and vice versa
Compare this to a discriminative model (e.g. a neural network), which only obtains a conditional probability distribution:
* A neural network maps given inputs to outputs; it can't go the other way
A generative model is a full probabilistic model. It allows more complex relationships between observed and target variables
### XID+ SPIRE
XID+ applied to GALFORM simulation of COSMOS field
* SAM simulation (with dust) run through the SMAP pipeline, with similar depth and size to COSMOS
* Use galaxies with an observed 100 micron flux greater than $50\,\mathbf{\mu Jy}$. Gives 64823 sources
* Uninformative prior: uniform $0 - 10^{3}\,\mathbf{mJy}$
Import required modules
```python
from astropy.io import ascii, fits
import pylab as plt
%matplotlib inline
from astropy import wcs
import numpy as np
import xidplus
from xidplus import moc_routines
import pickle
```
Set image and catalogue filenames
```python
xidplus.__path__[0]
```
'/Users/pdh21/Work/Astro/XID_plus/xidplus'
```python
#Folder containing maps
imfolder=xidplus.__path__[0]+'/../test_files/'
pswfits=imfolder+'cosmos_itermap_lacey_07012015_simulated_observation_w_noise_PSW_hipe.fits.gz'#SPIRE 250 map
pmwfits=imfolder+'cosmos_itermap_lacey_07012015_simulated_observation_w_noise_PMW_hipe.fits.gz'#SPIRE 350 map
plwfits=imfolder+'cosmos_itermap_lacey_07012015_simulated_observation_w_noise_PLW_hipe.fits.gz'#SPIRE 500 map
#Folder containing prior input catalogue
catfolder=xidplus.__path__[0]+'/../test_files/'
#prior catalogue
prior_cat='lacey_07012015_MillGas.ALLVOLS_cat_PSW_COSMOS_test.fits'
#output folder
output_folder='./'
```
Load in images, noise maps, header info and WCS information
```python
#-----250-------------
hdulist = fits.open(pswfits)
im250phdu=hdulist[0].header
im250hdu=hdulist[1].header
im250=hdulist[1].data*1.0E3 #convert to mJy
nim250=hdulist[2].data*1.0E3 #convert to mJy
w_250 = wcs.WCS(hdulist[1].header)
pixsize250=3600.0*w_250.wcs.cd[1,1] #pixel size (in arcseconds)
hdulist.close()
#-----350-------------
hdulist = fits.open(pmwfits)
im350phdu=hdulist[0].header
im350hdu=hdulist[1].header
im350=hdulist[1].data*1.0E3 #convert to mJy
nim350=hdulist[2].data*1.0E3 #convert to mJy
w_350 = wcs.WCS(hdulist[1].header)
pixsize350=3600.0*w_350.wcs.cd[1,1] #pixel size (in arcseconds)
hdulist.close()
#-----500-------------
hdulist = fits.open(plwfits)
im500phdu=hdulist[0].header
im500hdu=hdulist[1].header
im500=hdulist[1].data*1.0E3 #convert to mJy
nim500=hdulist[2].data*1.0E3 #convert to mJy
w_500 = wcs.WCS(hdulist[1].header)
pixsize500=3600.0*w_500.wcs.cd[1,1] #pixel size (in arcseconds)
hdulist.close()
```
Load in catalogue you want to fit (and make any cuts)
```python
hdulist = fits.open(catfolder+prior_cat)
fcat=hdulist[1].data
hdulist.close()
inra=fcat['RA']
indec=fcat['DEC']
# select only sources with 100micron flux greater than 50 microJy
sgood=fcat['S100']>0.050
inra=inra[sgood]
indec=indec[sgood]
```
XID+ uses Multi Order Coverage (MOC) maps for cutting down maps and catalogues so they cover the same area. It can also take in MOCs as selection functions to carry out additional cuts. Let's use the python module [pymoc](http://pymoc.readthedocs.io/en/latest/) to create a MOC centered on a specific position we are interested in. We will use a HEALPix order of 15 (higher order means higher resolution) and a radius of 100 arcseconds, centered on an R.A. of 150.74 degrees and Declination of 2.03 degrees.
```python
from astropy.coordinates import SkyCoord
from astropy import units as u
c = SkyCoord(ra=[150.74]*u.degree, dec=[2.03]*u.degree)
import pymoc
moc=pymoc.util.catalog.catalog_to_moc(c,100,15)
```
XID+ is built around two python classes: a prior and a posterior class. There should be a prior class for each map being fitted. It is initialised with a map, noise map, primary header and map header, and can be set with a MOC. It also requires an input prior catalogue and point spread function.
```python
#---prior250--------
prior250=xidplus.prior(im250,nim250,im250phdu,im250hdu, moc=moc)#Initialise with map, uncertainty map, wcs info and primary header
prior250.prior_cat(inra,indec,prior_cat)#Set input catalogue
prior250.prior_bkg(-5.0,5)#Set prior on background (assumes Gaussian pdf with mu and sigma)
#---prior350--------
prior350=xidplus.prior(im350,nim350,im350phdu,im350hdu, moc=moc)
prior350.prior_cat(inra,indec,prior_cat)
prior350.prior_bkg(-5.0,5)
#---prior500--------
prior500=xidplus.prior(im500,nim500,im500phdu,im500hdu, moc=moc)
prior500.prior_cat(inra,indec,prior_cat)
prior500.prior_bkg(-5.0,5)
```
Set the PSF. For SPIRE, the PSF can be assumed to be Gaussian with a FWHM of 18.15, 25.15 and 36.3'' for 250, 350 and 500 $\mathrm{\mu m}$ respectively. Let's use the astropy module to construct a Gaussian PSF and assign it to the three XID+ prior classes.
```python
#pixsize array (size of pixels in arcseconds)
pixsize=np.array([pixsize250,pixsize350,pixsize500])
#point response function for the three bands
prfsize=np.array([18.15,25.15,36.3])
#use Gaussian2DKernel to create prf (requires stddev rather than fwhm, hence fwhm/2.355)
from astropy.convolution import Gaussian2DKernel
##---------fit using Gaussian beam-----------------------
prf250=Gaussian2DKernel(prfsize[0]/2.355,x_size=101,y_size=101)
prf250.normalize(mode='peak')
prf350=Gaussian2DKernel(prfsize[1]/2.355,x_size=101,y_size=101)
prf350.normalize(mode='peak')
prf500=Gaussian2DKernel(prfsize[2]/2.355,x_size=101,y_size=101)
prf500.normalize(mode='peak')
pind250=np.arange(0,101,1)*1.0/pixsize[0] #get 250 scale in terms of pixel scale of map
pind350=np.arange(0,101,1)*1.0/pixsize[1] #get 350 scale in terms of pixel scale of map
pind500=np.arange(0,101,1)*1.0/pixsize[2] #get 500 scale in terms of pixel scale of map
prior250.set_prf(prf250.array,pind250,pind250)#requires psf as 2d grid, and x and y bins for grid (in pixel scale)
prior350.set_prf(prf350.array,pind350,pind350)
prior500.set_prf(prf500.array,pind500,pind500)
```
```python
print('fitting '+ str(prior250.nsrc)+' sources \n')
print('using ' + str(prior250.snpix)+', '+ str(prior350.snpix)+' and '+ str(prior500.snpix)+' pixels')
```
fitting 51 sources
using 870, 870 and 219 pixels
Before fitting, the prior classes need to take the PSF and calculate how much each source contributes to each pixel. This process provides what we call a pointing matrix. Let's calculate the pointing matrix for each prior class.
```python
prior250.get_pointing_matrix()
prior350.get_pointing_matrix()
prior500.get_pointing_matrix()
```
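The pointing matrix itself can be thought of as a sparse matrix whose (row, column, value) triplets record how much of source $j$'s flux lands in pixel $i$; these are the arrays that appear later in the Stan data dictionary as `amat_row`, `amat_col` and `amat_data`. A minimal sketch (toy numbers, not taken from the maps) of how such a matrix turns source fluxes into a model map:
```python
# Sketch only: a toy sparse pointing matrix built from (row, col, value) triplets
import numpy as np
from scipy.sparse import coo_matrix

rows = np.array([0, 1, 1, 2])                        # pixel indices
cols = np.array([0, 0, 1, 1])                        # source indices
vals = np.array([0.9, 0.1, 0.8, 0.2])                # PRF weights
A = coo_matrix((vals, (rows, cols)), shape=(3, 2))   # 3 pixels, 2 sources
f = np.array([10.0, 5.0])                            # source fluxes (mJy)
model_map = A.dot(f) + (-2.0)                        # predicted map = A f + background
print(model_map)                                     # [ 7.  3. -1.]
```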
The default prior on flux is a uniform distribution, with a minimum and maximum of 0.00 and 1000.0 $\mathrm{mJy}$ respectively for each source. Running the function `upper_lim_map` resets the upper limit to the maximum flux value (plus a 5 sigma background value) found in any map to which the source contributes.
```python
prior250.upper_lim_map()
prior350.upper_lim_map()
prior500.upper_lim_map()
```
Now fit using the XID+ interface to pystan
```python
from xidplus.stan_fit import SPIRE
fit_basic=SPIRE.all_bands(prior250,prior350,prior500,iter=1000)
```
INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_b1d99dae621f9d8744cd35a49a4302a4 NOW.
/XID+SPIRE not found or wrong version. Compiling
Initialise the posterior class with the fit object from pystan, and save alongside the prior classes
```python
posterior=xidplus.posterior_stan(fit_basic,[prior250,prior350,prior500])
#xidplus.save([prior250,prior350,prior500],posterior,'XID+SPIRE')
```
```python
plt.hist(np.log10(fcat['S100'][sgood]), bins=np.arange(-3,3, 0.1));
```

```python
SEDs=np.load('./SED_SPIRE_PACS100.npy')
```
```python
data={
'nsrc':prior250.nsrc,
'bkg_prior':[prior250.bkg[0],prior350.bkg[0],prior500.bkg[0]],
'bkg_prior_sig':[prior250.bkg[1],prior350.bkg[1],prior500.bkg[1]],
'z':fcat['Z_OBS'][sgood][prior250.ID-1],
'z_sig':fcat['Z_OBS'][sgood][prior250.ID-1]/10.0,
'npix_psw':prior250.snpix,
'nnz_psw':prior250.amat_data.size,
'db_psw':prior250.sim,
'sigma_psw':prior250.snim,
'Val_psw':prior250.amat_data,
'Row_psw': prior250.amat_row.astype(np.long),
'Col_psw': prior250.amat_col.astype(np.long),
'npix_pmw':prior350.snpix,
'nnz_pmw':prior350.amat_data.size,
'db_pmw':prior350.sim,
'sigma_pmw':prior350.snim,
'Val_pmw':prior350.amat_data,
'Row_pmw': prior350.amat_row.astype(np.long),
'Col_pmw': prior350.amat_col.astype(np.long),
'npix_plw':prior500.snpix,
'nnz_plw':prior500.amat_data.size,
'db_plw':prior500.sim,
'sigma_plw':prior500.snim,
'Val_plw':prior500.amat_data,
'Row_plw': prior500.amat_row.astype(np.long),
'Col_plw': prior500.amat_col.astype(np.long),
'nTemp':SEDs.shape[0],
'nz':SEDs.shape[2],
'nband':SEDs.shape[1],
'SEDs':SEDs,
'src_f_obs':fcat['S100'][sgood][prior250.ID-1][:,None],
'src_f_sig':fcat['S100'][sgood][prior250.ID-1][:,None]/20.0,
'src_f_cut': np.array([0.050])
}
```
```python
import pystan
sm=pystan.StanModel(file='/Users/pdh21/Work/Astro/XID_plus/stan_models/XID+SED.stan')
```
INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_77f0b102f537d7990bd876fcc53010ee NOW.
```python
fit=sm.sampling(data=data,verbose=True)
```
```python
fit
```
Inference for Stan model: anon_model_77f0b102f537d7990bd876fcc53010ee.
4 chains, each with iter=2000; warmup=1000; thin=1;
post-warmup draws per chain=1000, total post-warmup draws=4000.
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
Nbb[0] 11.35 5.5e-3 0.18 11.04 11.22 11.33 11.47 11.76 1130 1.0
Nbb[1] 11.61 6.8e-3 0.2 11.17 11.48 11.63 11.76 11.91 839 1.01
Nbb[2] 11.78 4.3e-3 0.08 11.61 11.73 11.78 11.82 11.93 349 1.01
Nbb[3] 11.54 7.7e-3 0.16 11.28 11.44 11.52 11.62 11.91 412 1.01
Nbb[4] 11.44 6.5e-3 0.15 11.19 11.33 11.42 11.51 11.78 524 1.01
Nbb[5] 12.4 9.0e-4 0.06 12.3 12.37 12.4 12.44 12.53 4000 1.0
Nbb[6] 11.07 0.01 0.13 10.9 10.98 11.03 11.13 11.36 130 1.03
Nbb[7] 10.96 0.02 0.26 10.28 10.88 11.02 11.12 11.31 252 1.01
Nbb[8] 9.72 5.6e-3 0.16 9.47 9.61 9.7 9.8 10.16 861 1.01
Nbb[9] 11.58 0.01 0.26 11.13 11.33 11.62 11.8 11.99 498 1.01
Nbb[10] 9.91 0.01 0.22 9.56 9.74 9.87 10.05 10.41 476 1.0
Nbb[11] 12.04 1.5e-3 0.07 11.9 12.0 12.04 12.09 12.17 2153 1.0
Nbb[12] 9.47 5.2e-3 0.17 9.21 9.36 9.45 9.57 9.84 1025 1.01
Nbb[13] 10.59 0.01 0.28 10.1 10.36 10.56 10.84 11.07 550 1.01
Nbb[14] 8.97 7.9e-3 0.18 8.7 8.85 8.95 9.07 9.46 494 1.01
Nbb[15] 10.95 4.2e-3 0.14 10.7 10.85 10.95 11.05 11.22 1091 1.0
Nbb[16] 11.7 0.01 0.22 11.28 11.5 11.74 11.88 12.05 475 1.01
Nbb[17] 11.81 1.1e-3 0.07 11.67 11.77 11.81 11.86 11.96 4000 1.0
Nbb[18] 9.62 8.6e-3 0.19 9.34 9.49 9.59 9.71 10.15 488 1.01
Nbb[19] 8.11 2.2e-3 0.09 8.01 8.05 8.1 8.16 8.32 1462 1.0
Nbb[20] 10.53 0.02 0.24 10.13 10.34 10.52 10.72 10.95 190 1.02
Nbb[21] 9.43 8.5e-3 0.17 9.18 9.32 9.4 9.51 9.87 401 1.01
Nbb[22] 10.72 0.01 0.28 10.27 10.5 10.68 10.94 11.25 667 1.0
Nbb[23] 10.81 0.01 0.27 10.34 10.6 10.8 11.05 11.25 482 1.01
Nbb[24] 11.23 2.3e-3 0.1 11.05 11.16 11.23 11.29 11.42 1853 1.0
Nbb[25] 10.47 0.01 0.29 9.97 10.23 10.47 10.73 10.94 563 1.01
Nbb[26] 11.27 0.01 0.36 10.7 10.97 11.23 11.55 11.95 592 1.01
Nbb[27] 10.44 0.01 0.28 9.95 10.2 10.42 10.69 10.9 512 1.0
Nbb[28] 11.9 2.2e-3 0.07 11.76 11.86 11.9 11.95 12.03 1060 1.0
Nbb[29] 11.29 0.01 0.21 10.96 11.15 11.24 11.44 11.72 333 1.01
Nbb[30] 11.35 0.01 0.24 10.92 11.16 11.35 11.54 11.78 478 1.01
Nbb[31] 11.17 4.6e-3 0.13 10.95 11.08 11.16 11.26 11.47 822 1.01
Nbb[32] 11.09 1.8e-3 0.07 10.97 11.05 11.09 11.14 11.23 1458 1.0
Nbb[33] 11.21 3.2e-3 0.11 10.94 11.16 11.22 11.28 11.4 1235 1.0
Nbb[34] 10.83 0.01 0.3 10.38 10.6 10.77 11.06 11.47 480 1.01
Nbb[35] 11.36 6.2e-3 0.18 11.01 11.23 11.37 11.49 11.68 840 1.0
Nbb[36] 11.0 9.1e-3 0.12 10.75 10.94 11.0 11.07 11.21 184 1.02
Nbb[37] 10.21 0.01 0.26 9.8 10.0 10.17 10.41 10.72 335 1.0
Nbb[38] 10.49 0.01 0.12 10.13 10.44 10.5 10.56 10.69 132 1.03
Nbb[39] 12.57 8.6e-4 0.05 12.47 12.53 12.57 12.61 12.68 4000 1.0
Nbb[40] 10.71 5.2e-3 0.19 10.38 10.57 10.7 10.84 11.13 1350 1.0
Nbb[41] 11.66 0.02 0.26 11.17 11.43 11.72 11.88 12.08 167 1.01
Nbb[42] 10.7 5.1e-3 0.15 10.43 10.59 10.69 10.81 11.02 891 1.0
Nbb[43] 10.29 4.3e-3 0.16 10.02 10.17 10.27 10.4 10.62 1336 1.0
Nbb[44] 11.37 1.7e-3 0.08 11.22 11.31 11.36 11.41 11.54 2338 1.0
Nbb[45] 10.49 4.8e-3 0.16 10.21 10.37 10.49 10.6 10.84 1140 1.0
Nbb[46] 11.58 8.3e-3 0.23 11.16 11.41 11.59 11.76 11.98 776 1.01
Nbb[47] 11.08 4.3e-3 0.15 10.83 10.98 11.07 11.17 11.44 1182 1.0
Nbb[48] 11.95 2.2e-3 0.08 11.78 11.9 11.95 12.01 12.11 1433 1.0
Nbb[49] 11.04 0.01 0.34 10.47 10.73 11.05 11.33 11.62 560 1.01
Nbb[50] 11.89 3.5e-3 0.1 11.67 11.83 11.9 11.96 12.06 813 1.0
src_f[0,0] 1.17 0.03 0.9 0.23 0.59 0.86 1.51 3.53 836 1.0
src_f[1,0] 2.65 0.04 1.34 0.55 1.63 2.5 3.58 5.4 950 1.01
src_f[2,0] 19.53 0.26 3.36 11.22 17.93 20.09 21.85 24.64 171 1.02
src_f[3,0] 1.1 0.04 0.73 0.23 0.65 0.94 1.32 3.17 351 1.01
src_f[4,0] 0.9 0.03 0.58 0.2 0.56 0.77 1.04 2.67 429 1.01
src_f[5,0] 13.21 0.02 1.45 10.42 12.22 13.17 14.21 16.08 4000 1.0
src_f[6,0] 15.88 0.03 1.61 12.73 14.77 15.9 16.98 19.02 4000 1.0
src_f[7,0] 1.39 0.04 0.76 0.13 0.91 1.35 1.82 3.07 456 1.01
src_f[8,0] 0.65 0.01 0.39 0.19 0.39 0.57 0.79 1.62 743 1.01
src_f[9,0] 1.25 0.03 0.77 0.27 0.56 1.15 1.8 2.94 583 1.01
src_f[10,0] 0.22 0.01 0.24 0.03 0.08 0.14 0.26 0.97 368 1.01
src_f[11,0] 5.92 0.02 1.19 3.64 5.08 5.87 6.73 8.25 4000 1.0
src_f[12,0] 0.11 5.5e-3 0.11 0.02 0.06 0.08 0.13 0.37 430 1.01
src_f[13,0] 0.77 0.03 0.67 0.09 0.24 0.51 1.2 2.35 581 1.01
src_f[14,0] 0.55 0.02 0.32 0.17 0.34 0.47 0.66 1.4 195 1.02
src_f[15,0] 0.57 0.01 0.31 0.19 0.35 0.5 0.71 1.35 616 1.01
src_f[16,0] 1.92 0.05 1.13 0.41 0.89 1.84 2.73 4.34 552 1.01
src_f[17,0] 36.43 0.03 1.71 33.07 35.29 36.39 37.58 39.89 4000 1.0
src_f[18,0] 4.85 0.27 3.18 1.37 2.65 3.82 6.05 13.45 140 1.02
src_f[19,0] 0.06 1.6e-3 0.02 0.02 0.04 0.05 0.06 0.12 199 1.02
src_f[20,0] 1.36 0.1 1.18 0.17 0.44 0.93 1.95 4.28 143 1.03
src_f[21,0] 0.16 0.02 0.21 0.03 0.08 0.11 0.16 0.8 178 1.03
src_f[22,0] 0.93 0.04 0.92 0.11 0.28 0.52 1.32 3.23 634 1.0
src_f[23,0] 1.14 0.04 0.92 0.13 0.38 0.83 1.74 3.3 500 1.01
src_f[24,0] 7.06 0.03 1.55 4.08 6.02 7.05 8.09 10.21 2418 1.0
src_f[25,0] 0.59 0.02 0.49 0.06 0.18 0.43 0.93 1.7 600 1.01
src_f[26,0] 1.87 0.09 2.09 0.16 0.41 1.05 2.52 7.95 576 1.01
src_f[27,0] 0.76 0.03 0.66 0.07 0.22 0.5 1.22 2.29 483 1.0
src_f[28,0] 22.87 0.1 2.79 16.63 21.22 23.11 24.82 27.69 765 1.0
src_f[29,0] 0.54 0.02 0.4 0.1 0.29 0.4 0.68 1.59 451 1.01
src_f[30,0] 1.38 0.04 0.99 0.27 0.58 1.13 1.93 3.74 531 1.01
src_f[31,0] 2.03 0.03 1.01 0.67 1.3 1.82 2.53 4.49 856 1.0
src_f[32,0] 11.56 0.02 1.49 8.7 10.57 11.55 12.55 14.48 4000 1.0
src_f[33,0] 4.48 0.04 1.34 1.75 3.63 4.54 5.36 6.99 1400 1.0
src_f[34,0] 0.46 0.02 0.45 0.06 0.15 0.3 0.63 1.65 652 1.01
src_f[35,0] 2.07 0.04 1.21 0.49 1.11 1.81 2.77 4.96 1092 1.0
src_f[36,0] 3.33 0.05 1.0 1.34 2.7 3.26 3.94 5.46 407 1.01
src_f[37,0] 0.52 0.03 0.54 0.06 0.16 0.28 0.73 1.97 279 1.0
src_f[38,0] 3.35 0.07 1.01 0.85 2.78 3.34 3.96 5.37 223 1.02
src_f[39,0] 21.28 0.05 1.95 17.24 19.96 21.34 22.6 24.91 1510 1.0
src_f[40,0] 0.71 0.02 0.57 0.14 0.34 0.54 0.91 2.33 1069 1.0
src_f[41,0] 1.46 0.06 1.0 0.16 0.6 1.31 2.11 3.69 298 1.01
src_f[42,0] 1.29 0.03 0.81 0.31 0.73 1.1 1.61 3.47 688 1.01
src_f[43,0] 0.51 0.01 0.37 0.12 0.26 0.42 0.63 1.53 1158 1.0
src_f[44,0] 11.53 0.02 1.56 8.49 10.49 11.55 12.54 14.61 4000 1.0
src_f[45,0] 0.47 10.0e-3 0.34 0.12 0.25 0.38 0.58 1.41 1135 1.0
src_f[46,0] 2.45 0.05 1.62 0.44 1.04 2.12 3.43 6.24 876 1.01
src_f[47,0] 0.43 0.01 0.27 0.1 0.26 0.36 0.5 1.22 582 1.01
src_f[48,0] 6.06 0.03 1.29 3.78 5.16 5.97 6.87 8.79 2188 1.0
src_f[49,0] 0.8 0.03 0.7 0.09 0.22 0.59 1.2 2.64 720 1.0
src_f[50,0] 5.02 0.05 1.46 2.29 4.02 4.92 5.98 8.09 824 1.0
src_f[0,1] 1.12 0.03 0.86 0.22 0.54 0.9 1.41 3.48 793 1.0
src_f[1,1] 2.84 0.05 1.53 0.52 1.63 2.63 3.86 6.05 980 1.0
src_f[2,1] 13.23 0.17 2.39 7.77 11.85 13.45 14.88 17.3 203 1.02
src_f[3,1] 1.11 0.04 0.69 0.22 0.65 0.98 1.4 3.0 319 1.01
src_f[4,1] 0.91 0.03 0.57 0.19 0.54 0.79 1.12 2.43 447 1.01
src_f[5,1] 17.23 0.02 1.47 14.33 16.24 17.22 18.2 20.04 4000 1.0
src_f[6,1] 10.55 0.05 1.51 7.49 9.56 10.54 11.56 13.47 843 1.0
src_f[7,1] 1.97 0.06 1.17 0.1 1.17 1.98 2.71 4.43 331 1.01
src_f[8,1] 0.34 8.8e-3 0.24 0.08 0.19 0.28 0.42 0.97 738 1.01
src_f[9,1] 1.95 0.06 1.45 0.3 0.68 1.62 2.94 5.19 600 1.01
src_f[10,1] 0.2 0.02 0.29 0.02 0.06 0.1 0.2 1.18 318 1.01
src_f[11,1] 7.28 0.03 1.28 4.77 6.41 7.25 8.15 9.76 2577 1.0
src_f[12,1] 0.08 5.5e-3 0.1 0.01 0.04 0.06 0.09 0.27 357 1.02
src_f[13,1] 0.8 0.04 0.81 0.06 0.21 0.43 1.23 2.97 468 1.01
src_f[14,1] 0.28 0.02 0.2 0.07 0.15 0.22 0.32 0.83 171 1.02
src_f[15,1] 0.51 0.01 0.3 0.15 0.29 0.44 0.66 1.25 883 1.01
src_f[16,1] 2.2 0.06 1.37 0.39 1.01 2.01 3.11 5.36 553 1.01
src_f[17,1] 26.4 0.03 1.7 22.93 25.27 26.44 27.54 29.71 4000 1.0
src_f[18,1] 2.38 0.16 1.84 0.56 1.13 1.69 3.04 7.6 130 1.02
src_f[19,1] 0.03 9.6e-4 0.01 0.01 0.02 0.03 0.03 0.06 168 1.03
src_f[20,1] 1.25 0.12 1.27 0.1 0.33 0.69 1.66 4.43 118 1.04
src_f[21,1] 0.11 0.01 0.17 0.02 0.04 0.07 0.1 0.69 168 1.03
src_f[22,1] 0.93 0.04 1.04 0.08 0.24 0.47 1.3 3.75 562 1.0
src_f[23,1] 1.17 0.05 1.08 0.1 0.34 0.72 1.84 3.83 510 1.01
src_f[24,1] 5.35 0.04 1.48 2.61 4.3 5.3 6.36 8.38 1578 1.0
src_f[25,1] 0.69 0.03 0.71 0.05 0.16 0.37 1.1 2.49 592 1.01
src_f[26,1] 2.14 0.1 2.41 0.14 0.44 1.07 3.07 8.54 577 1.01
src_f[27,1] 0.87 0.05 0.94 0.05 0.18 0.39 1.29 3.22 429 1.0
src_f[28,1] 17.54 0.11 2.98 11.09 15.57 17.8 19.66 22.56 715 1.0
src_f[29,1] 0.65 0.02 0.52 0.09 0.31 0.46 0.8 2.05 445 1.01
src_f[30,1] 1.48 0.05 1.11 0.22 0.63 1.15 2.01 4.31 503 1.01
src_f[31,1] 1.39 0.02 0.73 0.37 0.84 1.24 1.82 3.18 1193 1.0
src_f[32,1] 14.01 0.04 1.78 10.6 12.82 13.95 15.19 17.57 2221 1.0
src_f[33,1] 2.92 0.02 0.93 1.1 2.3 2.93 3.57 4.73 1541 1.0
src_f[34,1] 0.56 0.02 0.61 0.05 0.16 0.3 0.71 2.31 691 1.01
src_f[35,1] 1.92 0.04 1.1 0.42 1.07 1.71 2.56 4.54 939 1.0
src_f[36,1] 4.0 0.07 1.2 1.3 3.3 4.01 4.73 6.31 310 1.01
src_f[37,1] 0.54 0.04 0.7 0.03 0.12 0.22 0.62 2.55 267 1.0
src_f[38,1] 3.28 0.07 1.0 0.59 2.71 3.25 3.87 5.37 196 1.02
src_f[39,1] 18.84 0.07 2.21 14.29 17.4 18.9 20.38 22.96 1140 1.01
src_f[40,1] 0.62 0.02 0.53 0.1 0.27 0.47 0.78 2.23 1067 1.0
src_f[41,1] 1.73 0.08 1.21 0.15 0.67 1.56 2.52 4.47 233 1.01
src_f[42,1] 1.01 0.03 0.75 0.18 0.53 0.83 1.22 3.05 583 1.0
src_f[43,1] 0.36 8.6e-3 0.27 0.07 0.17 0.29 0.45 1.15 1014 1.0
src_f[44,1] 7.05 0.02 1.34 4.55 6.1 7.01 7.95 9.8 4000 1.0
src_f[45,1] 0.38 7.9e-3 0.27 0.08 0.19 0.32 0.47 1.13 1203 1.0
src_f[46,1] 2.56 0.06 1.78 0.38 1.16 2.1 3.55 6.81 776 1.01
src_f[47,1] 0.45 0.01 0.3 0.1 0.26 0.39 0.56 1.29 567 1.01
src_f[48,1] 7.82 0.04 1.53 4.84 6.78 7.82 8.86 10.8 1583 1.0
src_f[49,1] 1.17 0.05 1.14 0.08 0.25 0.7 1.91 3.88 602 1.01
src_f[50,1] 7.76 0.07 1.92 3.83 6.48 7.79 9.04 11.42 799 1.0
src_f[0,2] 0.9 0.02 0.74 0.16 0.39 0.71 1.15 2.9 964 1.0
src_f[1,2] 2.04 0.03 1.17 0.35 1.14 1.88 2.75 4.67 1115 1.0
src_f[2,2] 4.8 0.05 1.35 2.32 3.87 4.74 5.66 7.59 739 1.01
src_f[3,2] 1.13 0.04 0.83 0.23 0.54 0.95 1.46 3.37 401 1.01
src_f[4,2] 0.76 0.02 0.49 0.18 0.39 0.66 1.01 2.01 537 1.01
src_f[5,2] 14.21 0.03 1.97 10.51 12.84 14.15 15.53 18.19 4000 1.0
src_f[6,2] 7.08 0.1 1.75 3.77 5.86 7.07 8.27 10.51 313 1.02
src_f[7,2] 1.66 0.06 1.17 0.07 0.78 1.52 2.39 4.36 414 1.01
src_f[8,2] 0.14 4.7e-3 0.12 0.03 0.07 0.11 0.17 0.43 611 1.01
src_f[9,2] 1.77 0.05 1.46 0.22 0.64 1.3 2.52 5.55 726 1.01
src_f[10,2] 0.12 0.01 0.21 8.5e-3 0.03 0.06 0.11 0.86 302 1.01
src_f[11,2] 4.57 0.02 1.18 2.5 3.73 4.5 5.34 6.99 4000 1.0
src_f[12,2] 0.04 3.2e-3 0.06 4.7e-3 0.02 0.03 0.04 0.16 352 1.02
src_f[13,2] 0.54 0.03 0.64 0.03 0.13 0.26 0.76 2.46 490 1.01
src_f[14,2] 0.1 6.7e-3 0.08 0.03 0.05 0.08 0.12 0.34 154 1.02
src_f[15,2] 0.37 6.5e-3 0.25 0.08 0.19 0.3 0.48 1.02 1468 1.0
src_f[16,2] 2.0 0.05 1.37 0.27 0.95 1.66 2.8 5.26 654 1.0
src_f[17,2] 11.67 0.03 2.01 7.88 10.33 11.63 12.99 15.77 4000 1.0
src_f[18,2] 0.88 0.06 0.71 0.18 0.4 0.6 1.15 2.9 133 1.02
src_f[19,2] 0.01 3.8e-4 4.9e-3 3.9e-3 9.5e-3 0.01 0.01 0.02 162 1.03
src_f[20,2] 0.89 0.1 1.08 0.05 0.17 0.37 1.06 3.72 108 1.04
src_f[21,2] 0.05 7.4e-3 0.09 5.8e-3 0.02 0.03 0.05 0.36 163 1.03
src_f[22,2] 0.64 0.03 0.81 0.04 0.15 0.3 0.77 3.07 603 1.0
src_f[23,2] 0.88 0.04 0.99 0.05 0.21 0.45 1.27 3.7 581 1.01
src_f[24,2] 2.6 0.03 1.04 1.07 1.84 2.42 3.18 5.17 1309 1.0
src_f[25,2] 0.49 0.02 0.57 0.02 0.1 0.22 0.72 2.05 630 1.01
src_f[26,2] 1.72 0.08 1.99 0.09 0.33 0.82 2.31 6.96 600 1.01
src_f[27,2] 0.59 0.03 0.72 0.02 0.1 0.22 0.82 2.46 431 1.0
src_f[28,2] 10.3 0.08 2.57 5.58 8.53 10.2 11.94 15.75 1003 1.0
src_f[29,2] 0.61 0.02 0.5 0.09 0.28 0.45 0.77 1.95 479 1.01
src_f[30,2] 1.36 0.06 1.29 0.15 0.52 0.95 1.8 4.93 532 1.01
src_f[31,2] 0.83 0.02 0.52 0.17 0.42 0.72 1.11 2.13 1004 1.0
src_f[32,2] 6.85 0.03 1.56 4.07 5.74 6.77 7.81 10.22 2494 1.0
src_f[33,2] 1.7 0.01 0.65 0.58 1.25 1.65 2.11 3.11 1983 1.0
src_f[34,2] 0.52 0.04 0.7 0.04 0.13 0.24 0.56 2.82 390 1.02
src_f[35,2] 1.32 0.03 0.81 0.23 0.7 1.17 1.77 3.29 1017 1.0
src_f[36,2] 3.59 0.07 1.29 0.86 2.84 3.53 4.33 6.33 365 1.01
src_f[37,2] 0.36 0.03 0.53 0.02 0.06 0.12 0.37 1.85 267 1.0
src_f[38,2] 1.91 0.04 0.65 0.27 1.53 1.86 2.27 3.29 233 1.02
src_f[39,2] 15.1 0.06 2.62 10.05 13.35 15.08 16.85 20.15 1857 1.0
src_f[40,2] 0.37 10.0e-3 0.35 0.05 0.16 0.28 0.47 1.39 1241 1.0
src_f[41,2] 1.72 0.07 1.33 0.16 0.66 1.42 2.43 4.96 345 1.01
src_f[42,2] 0.52 0.02 0.45 0.08 0.26 0.42 0.63 1.73 588 1.0
src_f[43,2] 0.19 5.3e-3 0.17 0.03 0.09 0.15 0.23 0.67 964 1.0
src_f[44,2] 3.51 0.02 1.06 1.82 2.75 3.37 4.13 5.94 1826 1.0
src_f[45,2] 0.22 5.0e-3 0.18 0.04 0.11 0.18 0.28 0.66 1256 1.0
src_f[46,2] 2.04 0.05 1.5 0.26 0.91 1.65 2.79 5.88 972 1.01
src_f[47,2] 0.38 9.5e-3 0.26 0.09 0.19 0.32 0.52 1.03 771 1.0
src_f[48,2] 5.83 0.04 1.74 2.83 4.53 5.73 7.0 9.44 1983 1.0
src_f[49,2] 1.06 0.05 1.15 0.05 0.2 0.53 1.67 3.91 602 1.01
src_f[50,2] 5.38 0.04 1.62 2.29 4.27 5.39 6.45 8.62 1329 1.0
src_f[0,3] 0.27 2.2e-4 0.01 0.24 0.26 0.27 0.28 0.3 4000 1.0
src_f[1,3] 0.25 2.0e-4 0.01 0.23 0.24 0.25 0.26 0.27 4000 1.0
src_f[2,3] 3.93 3.2e-3 0.2 3.53 3.79 3.93 4.06 4.32 4000 1.0
src_f[3,3] 0.25 2.1e-4 0.01 0.22 0.24 0.25 0.26 0.28 4000 1.0
src_f[4,3] 0.2 1.6e-4 0.01 0.18 0.2 0.2 0.21 0.22 4000 1.0
src_f[5,3] 0.15 1.2e-4 7.3e-3 0.14 0.15 0.15 0.16 0.17 4000 1.0
src_f[6,3] 6.57 5.2e-3 0.33 5.92 6.35 6.56 6.78 7.2 4000 1.0
src_f[7,3] 0.09 7.1e-5 4.5e-3 0.08 0.09 0.09 0.09 0.1 4000 1.0
src_f[8,3] 0.75 6.0e-4 0.04 0.68 0.73 0.75 0.78 0.83 4000 1.0
src_f[9,3] 0.11 9.0e-5 5.7e-3 0.1 0.11 0.11 0.12 0.13 4000 1.0
src_f[10,3] 0.06 4.8e-5 3.0e-3 0.05 0.06 0.06 0.06 0.07 4000 1.0
src_f[11,3] 0.23 1.8e-4 0.01 0.2 0.22 0.23 0.23 0.25 4000 1.0
src_f[12,3] 0.06 4.8e-5 3.0e-3 0.05 0.06 0.06 0.06 0.06 4000 1.0
src_f[13,3] 0.11 8.9e-5 5.6e-3 0.1 0.11 0.11 0.12 0.12 4000 1.0
src_f[14,3] 1.06 8.4e-4 0.05 0.95 1.02 1.06 1.09 1.16 4000 1.0
src_f[15,3] 0.3 2.4e-4 0.02 0.27 0.29 0.3 0.31 0.33 4000 1.0
src_f[16,3] 0.18 1.5e-4 9.3e-3 0.17 0.18 0.18 0.19 0.2 4000 1.0
src_f[17,3] 6.79 5.1e-3 0.32 6.18 6.57 6.79 7.0 7.42 4000 1.0
src_f[18,3] 6.13 4.9e-3 0.31 5.53 5.92 6.13 6.35 6.73 4000 1.0
src_f[19,3] 0.06 4.7e-5 3.0e-3 0.05 0.06 0.06 0.06 0.06 4000 1.0
src_f[20,3] 0.25 2.0e-4 0.01 0.22 0.24 0.25 0.26 0.27 4000 1.0
src_f[21,3] 0.09 7.6e-5 4.8e-3 0.08 0.09 0.09 0.1 0.1 4000 1.0
src_f[22,3] 0.15 1.2e-4 7.6e-3 0.14 0.15 0.15 0.16 0.17 4000 1.0
src_f[23,3] 0.17 1.3e-4 8.3e-3 0.15 0.16 0.17 0.17 0.18 4000 1.0
src_f[24,3] 2.05 1.6e-3 0.1 1.84 1.98 2.05 2.12 2.25 4000 1.0
src_f[25,3] 0.08 6.3e-5 4.0e-3 0.07 0.08 0.08 0.08 0.09 4000 1.0
src_f[26,3] 0.13 1.1e-4 6.7e-3 0.12 0.13 0.13 0.14 0.15 4000 1.0
src_f[27,3] 0.11 8.9e-5 5.6e-3 0.1 0.1 0.11 0.11 0.12 4000 1.0
src_f[28,3] 1.46 1.1e-3 0.07 1.32 1.41 1.46 1.51 1.6 4000 1.0
src_f[29,3] 0.07 5.4e-5 3.4e-3 0.06 0.07 0.07 0.07 0.07 4000 1.0
src_f[30,3] 0.19 1.5e-4 9.2e-3 0.17 0.18 0.19 0.19 0.2 4000 1.0
src_f[31,3] 1.32 1.1e-3 0.07 1.19 1.27 1.32 1.37 1.46 4000 1.0
src_f[32,3] 0.7 5.4e-4 0.03 0.64 0.68 0.7 0.73 0.77 4000 1.0
src_f[33,3] 0.4 3.2e-4 0.02 0.36 0.39 0.4 0.42 0.44 4000 1.0
src_f[34,3] 0.05 5.6e-5 3.5e-3 0.05 0.05 0.05 0.06 0.06 4000 1.0
src_f[35,3] 0.41 3.2e-4 0.02 0.37 0.4 0.41 0.42 0.45 4000 1.0
src_f[36,3] 0.2 1.5e-4 9.7e-3 0.18 0.19 0.2 0.21 0.22 4000 1.0
src_f[37,3] 0.1 8.5e-5 5.3e-3 0.09 0.1 0.1 0.11 0.11 4000 1.0
src_f[38,3] 0.28 2.2e-4 0.01 0.25 0.27 0.28 0.29 0.31 4000 1.0
src_f[39,3] 1.02 7.9e-4 0.05 0.92 0.99 1.02 1.06 1.12 4000 1.0
src_f[40,3] 0.22 1.8e-4 0.01 0.2 0.22 0.22 0.23 0.25 4000 1.0
src_f[41,3] 0.1 7.7e-5 4.9e-3 0.09 0.1 0.1 0.1 0.11 4000 1.0
src_f[42,3] 0.68 5.2e-4 0.03 0.61 0.65 0.68 0.7 0.74 4000 1.0
src_f[43,3] 0.27 2.1e-4 0.01 0.24 0.26 0.27 0.27 0.29 4000 1.0
src_f[44,3] 2.85 2.2e-3 0.14 2.58 2.76 2.85 2.95 3.13 4000 1.0
src_f[45,3] 0.2 1.6e-4 0.01 0.18 0.19 0.2 0.2 0.22 4000 1.0
src_f[46,3] 0.3 2.4e-4 0.02 0.27 0.29 0.3 0.31 0.33 4000 1.0
src_f[47,3] 0.12 9.3e-5 5.9e-3 0.11 0.12 0.12 0.12 0.13 4000 1.0
src_f[48,3] 0.21 1.7e-4 0.01 0.19 0.2 0.21 0.22 0.23 4000 1.0
src_f[49,3] 0.07 5.2e-5 3.3e-3 0.06 0.06 0.07 0.07 0.07 4000 1.0
src_f[50,3] 0.16 1.2e-4 7.9e-3 0.14 0.15 0.16 0.16 0.17 4000 1.0
bkg[0] -2.04 2.2e-3 0.14 -2.32 -2.13 -2.03 -1.94 -1.76 4000 1.0
bkg[1] -3.85 3.0e-3 0.19 -4.22 -3.98 -3.85 -3.72 -3.48 4000 1.0
bkg[2] -3.98 6.5e-3 0.41 -4.78 -4.27 -3.98 -3.69 -3.2 4000 1.0
sigma_conf[0] 1.55 3.2e-3 0.21 1.13 1.41 1.56 1.7 1.93 4000 1.0
sigma_conf[1] 1.66 3.2e-3 0.2 1.25 1.53 1.66 1.8 2.05 4000 1.0
sigma_conf[2] 2.33 7.9e-3 0.5 1.26 2.0 2.36 2.67 3.24 4000 1.0
p[0,0] 0.04 1.6e-3 0.085.8e-10 1.2e-4 4.6e-3 0.05 0.28 2234 1.0
p[1,0] 0.02 1.1e-3 0.05 1.1e-9 9.8e-6 3.3e-4 4.9e-3 0.18 1838 1.0
p[2,0] 4.6e-3 6.2e-4 0.013.4e-11 2.0e-6 1.3e-4 2.7e-3 0.04 413 1.01
p[3,0] 0.05 2.5e-3 0.082.7e-12 2.3e-4 0.01 0.07 0.27 976 1.0
p[4,0] 0.04 1.4e-3 0.074.6e-12 9.3e-5 5.2e-3 0.05 0.24 2300 1.0
p[5,0] 1.4e-6 2.9e-7 1.8e-52.6e-222.5e-161.6e-131.0e-10 3.2e-6 3650 nan
p[6,0] 1.3e-3 2.2e-4 0.012.0e-1409.3e-961.4e-711.8e-14 5.4e-3 2834 1.0
# Posterior summary table for the fitted parameters p[i,j] (one row per parameter element;
# columns appear to be mean, se_mean, sd, the 2.5%/25%/50%/75%/97.5% posterior quantiles,
# n_eff, and Rhat). The raw per-row values were garbled during extraction and are omitted here.
p[35,18] 0.03 1.6e-3 0.072.8e-276.5e-13 1.5e-7 1.3e-3 0.28 2060 1.0
p[36,18] 3.6e-4 3.0e-4 0.01 0.0 0.0 0.02.9e-3092.3e-48 1318 1.0
p[37,18] 0.02 1.7e-3 0.08 0.02.5e-561.0e-15 1.2e-4 0.31 2294 1.0
p[38,18] 6.0e-4 2.2e-4 0.01 0.0 0.0 0.0 0.05.7e-54 4000 1.0
p[39,18] 2.3e-5 1.0e-5 5.7e-45.5e-225.1e-153.6e-12 1.3e-9 8.5e-6 3078 nan
p[40,18] 0.04 1.8e-3 0.19.9e-452.2e-14 1.7e-6 6.3e-3 0.38 2812 1.0
p[41,18] 0.03 2.1e-3 0.071.9e-214.1e-10 4.3e-6 0.01 0.26 1018 1.0
p[42,18] 0.03 1.9e-3 0.11.3e-1041.2e-20 6.9e-9 1.1e-3 0.42 2785 1.0
p[43,18] 0.04 2.1e-3 0.112.3e-892.4e-18 4.8e-7 8.0e-3 0.41 2605 1.0
p[44,18] 1.2e-4 6.5e-5 2.0e-32.6e-1707.2e-595.7e-433.4e-293.4e-10 974 1.0
p[45,18] 0.04 1.9e-3 0.110.0e-382.5e-12 1.3e-5 0.02 0.38 2814 1.0
p[46,18] 0.02 1.5e-3 0.061.0e-261.7e-12 2.2e-7 8.0e-4 0.26 1936 1.0
p[47,18] 0.07 2.9e-3 0.099.3e-10 3.9e-5 9.2e-3 0.12 0.3 1061 1.0
p[48,18] 1.7e-7 1.4e-7 8.7e-68.2e-571.2e-361.6e-283.7e-2210.0e-13 4000 nan
p[49,18] 0.02 1.7e-3 0.078.5e-1312.7e-292.5e-11 3.8e-4 0.27 1705 1.01
p[50,18] 2.4e-8 1.6e-8 1.0e-63.8e-721.8e-448.2e-356.6e-272.3e-14 4000 nan
p[0,19] 0.04 1.8e-3 0.11.2e-11 4.0e-6 1.5e-3 0.03 0.36 2942 1.0
p[1,19] 0.01 1.1e-3 0.055.5e-16 5.8e-9 5.0e-6 8.8e-4 0.16 2332 1.0
p[2,19] 4.2e-4 1.1e-4 5.3e-32.9e-171.2e-10 5.5e-8 8.4e-6 2.3e-3 2425 1.0
p[3,19] 0.06 2.8e-3 0.126.4e-13 6.9e-5 7.5e-3 0.07 0.44 1696 1.0
p[4,19] 0.05 2.1e-3 0.15.1e-13 2.5e-5 3.4e-3 0.05 0.37 2293 1.0
p[5,19] 4.3e-8 1.6e-8 9.9e-74.6e-371.4e-261.2e-216.6e-17 3.0e-9 3923 nan
p[6,19] 0.01 2.5e-3 0.031.8e-552.7e-376.8e-27 1.5e-4 0.1 157 1.03
p[7,19] 6.6e-3 1.1e-3 0.042.0e-1003.6e-561.4e-238.9e-12 0.06 1440 1.0
p[8,19] 0.03 2.1e-3 0.11.3e-16 2.0e-8 3.5e-5 4.3e-3 0.37 2391 1.0
p[9,19] 0.03 2.0e-3 0.087.1e-20 1.0e-8 6.4e-5 0.01 0.29 1654 1.0
p[10,19] 0.03 1.6e-3 0.091.3e-79 1.8e-8 1.6e-4 0.01 0.31 3226 1.0
p[11,19] 2.3e-6 8.5e-7 4.1e-51.1e-226.4e-166.4e-133.0e-10 3.3e-6 2299 nan
p[12,19] 0.03 1.7e-3 0.096.8e-36 4.7e-7 8.5e-4 0.02 0.33 3100 1.0
p[13,19] 0.02 1.5e-3 0.085.9e-722.3e-14 3.2e-6 2.7e-3 0.28 2875 1.0
p[14,19] 0.02 2.3e-3 0.13.2e-302.6e-10 2.9e-7 2.2e-4 0.39 1748 1.0
p[15,19] 0.05 2.2e-3 0.111.8e-12 7.3e-7 6.3e-4 0.02 0.44 2615 1.0
p[16,19] 0.04 2.1e-3 0.091.7e-12 1.9e-6 3.4e-4 0.02 0.32 1667 1.0
p[17,19] 2.1e-4 3.2e-5 1.5e-34.7e-335.2e-155.0e-11 9.9e-8 1.9e-3 2317 1.0
p[18,19] 0.04 3.4e-3 0.112.0e-961.3e-10 2.3e-6 1.8e-3 0.42 1064 1.0
p[19,19] 6.1e-3 3.4e-3 0.045.5e-141.1e-104.5e-10 3.2e-9 0.09 130 1.02
p[20,19] 0.02 1.3e-3 0.073.9e-1013.8e-26 6.8e-7 3.2e-3 0.23 3113 1.0
p[21,19] 0.03 1.7e-3 0.099.6e-64 1.2e-7 6.4e-4 0.01 0.31 2941 1.0
p[22,19] 0.03 1.5e-3 0.084.6e-591.4e-11 1.9e-5 4.7e-3 0.28 2899 1.0
p[23,19] 0.02 1.7e-3 0.081.1e-681.6e-14 9.0e-7 1.9e-3 0.23 2161 1.0
p[24,19] 6.8e-3 4.3e-4 0.022.7e-3110.0e-9 3.1e-5 2.9e-3 0.06 2713 1.0
p[25,19] 0.02 1.3e-3 0.078.9e-894.5e-19 1.7e-7 1.7e-3 0.24 2757 1.0
p[26,19] 0.02 1.5e-3 0.071.6e-313.8e-13 2.0e-6 4.6e-3 0.27 2424 1.0
p[27,19] 0.02 1.4e-3 0.087.4e-1022.9e-25 1.8e-7 1.7e-3 0.24 2963 1.0
p[28,19] 5.6e-6 3.7e-6 2.4e-41.3e-471.6e-241.0e-191.8e-15 6.8e-9 4000 nan
p[29,19] 0.05 2.1e-3 0.092.3e-14 5.0e-5 4.5e-3 0.06 0.33 1727 1.0
p[30,19] 0.04 1.9e-3 0.099.3e-17 6.2e-7 3.9e-4 0.02 0.35 2338 1.0
p[31,19] 0.03 1.6e-3 0.092.9e-11 4.3e-6 2.6e-4 8.9e-3 0.31 3412 1.0
p[32,19] 1.8e-4 2.1e-4 6.0e-37.4e-1676.7e-1241.6e-1031.5e-857.1e-56 796 1.0
p[33,19] 5.1e-4 1.4e-4 7.3e-33.2e-312.6e-202.8e-159.3e-11 3.7e-4 2615 1.0
p[34,19] 0.03 1.7e-3 0.081.6e-2910.0e-9 1.5e-4 0.02 0.32 2414 1.0
p[35,19] 0.03 1.4e-3 0.081.3e-12 3.4e-6 6.2e-4 0.01 0.3 3327 1.0
p[36,19] 3.3e-4 3.0e-4 7.7e-33.8e-1361.3e-966.0e-791.5e-609.8e-12 647 1.01
p[37,19] 0.03 1.6e-3 0.091.6e-1019.7e-14 2.1e-5 5.5e-3 0.3 2932 1.0
p[38,19] 6.4e-4 4.9e-4 0.013.1e-1483.6e-1091.9e-902.8e-72 1.1e-7 608 1.01
p[39,19] 4.2e-5 1.1e-5 5.7e-47.9e-152.0e-10 1.3e-8 5.2e-7 1.4e-4 2847 nan
p[40,19] 0.03 1.6e-3 0.095.1e-15 3.3e-6 5.4e-4 0.01 0.3 3502 1.0
p[41,19] 0.03 2.4e-3 0.071.5e-16 1.1e-7 9.9e-5 0.02 0.28 937 1.0
p[42,19] 0.04 1.8e-3 0.11.3e-15 5.3e-6 1.6e-3 0.02 0.35 2820 1.0
p[43,19] 0.03 1.6e-3 0.091.3e-16 7.9e-7 4.4e-4 0.01 0.31 3098 1.0
p[44,19] 1.3e-3 9.7e-5 5.4e-32.2e-226.1e-10 5.1e-7 1.0e-4 0.01 3159 1.0
p[45,19] 0.04 1.7e-3 0.11.5e-12 8.6e-6 6.5e-4 0.02 0.36 3398 1.0
p[46,19] 0.03 1.4e-3 0.083.9e-14 2.4e-7 1.3e-4 0.01 0.3 2896 1.0
p[47,19] 0.05 2.2e-3 0.11.2e-12 1.0e-5 3.2e-3 0.05 0.39 2212 1.0
p[48,19] 5.4e-6 2.9e-6 1.7e-43.7e-289.8e-206.2e-161.1e-12 9.4e-8 3456 nan
p[49,19] 0.02 1.6e-3 0.074.6e-474.9e-15 4.8e-7 3.4e-3 0.27 2009 1.0
p[50,19] 10.0e-7 6.3e-7 4.0e-53.3e-365.5e-244.2e-193.6e-15 8.7e-9 4000 nan
p[0,20] 0.02 1.7e-3 0.078.0e-591.7e-212.0e-10 6.6e-4 0.27 1855 1.0
p[1,20] 0.07 2.0e-3 0.116.8e-23 1.1e-3 0.02 0.08 0.41 2823 1.0
p[2,20] 1.9e-3 3.5e-4 0.01 8.7e-7 7.4e-5 2.8e-4 7.8e-4 9.8e-3 1261 1.0
p[3,20] 0.01 1.6e-3 0.053.5e-931.6e-392.2e-228.8e-12 0.17 1165 1.0
p[4,20] 3.0e-3 4.4e-4 0.027.7e-982.6e-435.3e-252.0e-13 0.03 2107 1.0
p[5,20] 1.3e-4 1.6e-5 7.3e-41.7e-12 6.6e-9 2.6e-7 8.4e-6 1.2e-3 2067 nan
p[6,20] 0.02 2.0e-3 0.071.2e-222.2e-148.9e-10 8.8e-4 0.18 1272 1.0
p[7,20] 8.9e-3 1.2e-3 0.062.0e-481.4e-25 1.2e-9 2.1e-5 0.09 2154 1.0
p[8,20] 0.03 1.9e-3 0.127.5e-152.4e-10 8.6e-6 1.4e-3 0.45 4000 1.0
p[9,20] 0.04 1.9e-3 0.077.2e-634.8e-19 4.7e-4 0.04 0.28 1518 1.0
p[10,20] 0.03 2.1e-3 0.122.2e-37 1.4e-7 3.1e-4 6.3e-3 0.36 3217 1.0
p[11,20] 7.1e-3 3.3e-4 0.02 7.2e-6 3.0e-4 1.6e-3 6.4e-3 0.05 2357 1.0
p[12,20] 0.03 2.2e-3 0.138.8e-15 5.8e-8 2.4e-4 4.7e-3 0.46 3497 1.0
p[13,20] 0.03 1.6e-3 0.091.8e-33 3.9e-8 1.4e-4 8.3e-3 0.25 3223 1.0
p[14,20] 0.03 2.5e-3 0.131.5e-168.4e-12 2.9e-7 1.2e-4 0.58 2836 1.0
p[15,20] 8.8e-3 8.7e-4 0.051.5e-349.1e-15 2.6e-9 4.3e-5 0.09 3262 1.0
p[16,20] 0.06 3.1e-3 0.15.9e-674.1e-17 2.6e-3 0.09 0.34 980 1.0
p[17,20] 6.6e-4 2.7e-5 1.5e-35.9e-13 4.2e-6 7.5e-5 6.2e-4 4.7e-3 3106 1.0
p[18,20] 0.03 2.7e-3 0.141.4e-415.3e-12 1.2e-6 6.6e-4 0.58 2469 1.0
p[19,20] 1.7e-3 5.6e-4 0.032.0e-166.7e-142.3e-131.4e-12 2.7e-4 3470 1.0
p[20,20] 0.02 1.8e-3 0.11.7e-481.4e-11 5.7e-5 3.0e-3 0.19 2974 1.0
p[21,20] 0.03 1.8e-3 0.114.5e-29 3.7e-910.0e-5 3.7e-3 0.31 4000 1.0
p[22,20] 0.03 1.9e-3 0.14.3e-27 4.6e-8 1.4e-4 9.2e-3 0.3 2643 1.0
p[23,20] 0.03 1.7e-3 0.11.0e-31 1.8e-8 1.9e-4 9.2e-3 0.3 3220 1.0
p[24,20] 0.02 1.3e-3 0.073.6e-12 3.3e-4 2.2e-3 8.8e-3 0.14 3187 1.0
p[25,20] 0.03 1.8e-3 0.13.0e-426.4e-10 3.6e-5 5.1e-3 0.3 2779 1.0
p[26,20] 0.03 1.5e-3 0.096.6e-387.3e-10 3.1e-5 9.3e-3 0.3 3301 1.0
p[27,20] 0.02 1.9e-3 0.14.8e-492.2e-12 2.1e-5 3.7e-3 0.24 2527 1.0
p[28,20] 4.3e-5 6.4e-6 4.0e-45.5e-19 1.0e-9 5.5e-8 1.7e-6 2.7e-4 3822 nan
p[29,20] 0.02 2.0e-3 0.062.2e-1145.7e-383.3e-21 1.4e-4 0.22 856 1.0
p[30,20] 0.07 3.0e-3 0.133.3e-468.3e-11 1.7e-3 0.07 0.48 1876 1.0
p[31,20] 0.01 1.2e-3 0.072.5e-15 1.0e-8 5.8e-6 1.0e-3 0.13 3683 1.0
p[32,20] 1.7e-4 1.7e-4 6.4e-33.0e-841.5e-619.0e-512.4e-419.5e-26 1493 1.0
p[33,20] 2.1e-3 5.3e-4 0.033.8e-12 4.1e-8 2.6e-6 8.5e-5 9.7e-3 2482 1.0
p[34,20] 0.04 1.7e-3 0.092.3e-496.9e-14 1.1e-5 0.01 0.34 3015 1.0
p[35,20] 0.06 2.6e-3 0.137.1e-20 7.1e-5 8.3e-3 0.05 0.49 2568 1.0
p[36,20] 5.5e-4 5.0e-4 0.012.6e-672.9e-463.6e-371.3e-27 2.3e-5 777 1.0
p[37,20] 0.03 2.0e-3 0.116.7e-498.0e-10 9.8e-5 4.9e-3 0.32 2995 1.0
p[38,20] 9.5e-4 6.6e-4 0.021.2e-734.2e-532.8e-436.0e-34 4.4e-6 1269 1.0
p[39,20] 0.06 6.8e-4 0.04 4.4e-3 0.02 0.05 0.08 0.17 4000 1.0
p[40,20] 0.04 2.3e-3 0.121.5e-16 2.3e-6 1.0e-3 0.02 0.43 2754 1.0
p[41,20] 0.05 3.5e-3 0.091.0e-992.1e-21 3.4e-3 0.07 0.31 579 1.01
p[42,20] 0.04 2.4e-3 0.131.3e-12 2.0e-6 7.6e-4 9.3e-3 0.48 3065 1.0
p[43,20] 0.04 2.6e-3 0.149.8e-13 7.4e-8 2.2e-4 4.9e-3 0.55 2888 1.0
p[44,20] 3.9e-3 2.1e-4 0.01 3.1e-8 1.4e-4 8.1e-4 2.9e-3 0.03 4000 1.0
p[45,20] 0.03 1.8e-3 0.111.2e-13 8.9e-7 6.8e-4 0.01 0.35 3515 1.0
p[46,20] 0.06 2.2e-3 0.118.6e-38 4.8e-8 6.7e-3 0.07 0.39 2445 1.0
p[47,20] 5.0e-3 6.6e-4 0.031.1e-858.8e-381.6e-213.7e-11 0.06 2267 1.0
p[48,20] 4.7e-4 3.2e-5 1.8e-3 2.9e-9 2.1e-6 2.8e-5 2.2e-4 4.1e-3 3131 1.0
p[49,20] 0.03 1.6e-3 0.081.5e-407.7e-12 7.8e-6 6.8e-3 0.29 2622 1.0
p[50,20] 3.8e-4 1.0e-4 5.1e-35.3e-13 1.5e-8 7.5e-7 1.7e-5 2.3e-3 2380 1.0
p[0,21] 0.03 1.7e-3 0.075.0e-291.1e-10 9.2e-5 0.03 0.24 1566 1.0
p[1,21] 0.05 1.3e-3 0.075.1e-11 1.5e-3 0.02 0.08 0.25 2775 1.0
p[2,21] 3.8e-3 5.7e-4 0.011.0e-10 3.6e-6 1.9e-4 2.9e-3 0.02 399 1.01
p[3,21] 0.02 2.0e-3 0.064.9e-451.2e-17 2.3e-9 7.4e-5 0.24 974 1.0
p[4,21] 0.01 1.5e-3 0.055.5e-462.1e-191.4e-10 1.7e-5 0.18 1067 1.0
p[5,21] 4.4e-5 6.5e-6 3.0e-44.8e-143.9e-10 2.7e-8 1.2e-6 3.5e-4 2135 nan
p[6,21] 2.1e-4 3.9e-5 2.0e-36.2e-1467.1e-1006.0e-755.4e-16 6.7e-4 2703 1.0
p[7,21] 7.4e-3 1.0e-3 0.038.1e-1112.8e-572.3e-16 1.6e-7 0.11 761 1.0
p[8,21] 0.03 1.5e-3 0.059.9e-29 6.7e-7 1.3e-3 0.05 0.17 983 1.0
p[9,21] 0.04 1.9e-3 0.083.8e-28 2.4e-8 9.0e-4 0.05 0.27 1598 1.0
p[10,21] 0.03 1.2e-3 0.055.3e-107 2.6e-7 1.7e-3 0.04 0.17 1637 1.0
p[11,21] 1.9e-3 1.1e-4 5.0e-3 1.3e-6 7.4e-5 3.9e-4 1.6e-3 0.01 2036 1.0
p[12,21] 0.03 1.1e-3 0.052.6e-66 1.9e-6 2.8e-3 0.04 0.19 2212 1.0
p[13,21] 0.02 1.2e-3 0.052.8e-805.3e-10 2.7e-4 0.02 0.18 1526 1.0
p[14,21] 0.03 1.5e-3 0.051.4e-100 1.4e-7 4.6e-4 0.04 0.17 1069 1.0
p[15,21] 0.02 1.0e-3 0.051.7e-20 1.7e-9 2.8e-5 6.8e-3 0.19 2070 1.0
p[16,21] 0.06 2.7e-3 0.091.5e-30 6.1e-7 0.02 0.09 0.31 1045 1.0
p[17,21] 2.1e-4 4.1e-5 2.0e-39.5e-461.3e-181.5e-13 2.8e-9 6.5e-4 2491 1.0
p[18,21] 0.03 1.5e-3 0.052.0e-267 4.5e-9 6.0e-5 0.04 0.17 1001 1.0
p[19,21] 3.9e-4 1.2e-4 4.1e-37.2e-13 3.3e-8 1.3e-7 7.8e-7 1.5e-4 1267 1.0
p[20,21] 0.02 1.0e-3 0.041.5e-1601.3e-38 3.6e-6 0.02 0.16 1683 1.0
p[21,21] 0.03 1.3e-3 0.054.1e-123 8.4e-7 2.3e-3 0.05 0.18 1392 1.0
p[22,21] 0.03 1.2e-3 0.051.9e-62 1.1e-8 6.6e-4 0.03 0.19 1711 1.0
p[23,21] 0.02 1.2e-3 0.051.6e-773.9e-10 1.6e-4 0.02 0.17 1507 1.0
p[24,21] 4.2e-3 2.6e-4 0.013.2e-559.0e-10 7.5e-6 1.1e-3 0.04 2255 1.0
p[25,21] 0.02 1.3e-3 0.058.5e-1036.7e-14 3.3e-5 0.02 0.17 1326 1.01
p[26,21] 0.03 1.2e-3 0.062.3e-25 1.2e-8 1.3e-4 0.02 0.21 2162 1.0
p[27,21] 0.02 1.3e-3 0.046.4e-1321.5e-24 1.1e-5 0.02 0.16 1128 1.0
p[28,21] 3.5e-5 1.5e-5 9.3e-47.4e-433.5e-173.0e-133.1e-10 8.0e-6 4000 nan
p[29,21] 0.03 2.9e-3 0.071.5e-541.7e-16 1.1e-8 0.01 0.24 527 1.01
p[30,21] 0.05 1.5e-3 0.079.0e-26 1.7e-5 5.1e-3 0.06 0.26 2541 1.0
p[31,21] 0.03 1.3e-3 0.051.5e-11 6.2e-6 1.6e-3 0.05 0.19 1663 1.0
p[32,21] 8.0e-6 7.5e-6 4.7e-41.9e-2901.5e-2154.9e-1793.3e-1472.3e-96 4000 nan
p[33,21] 1.8e-3 3.1e-4 0.017.6e-201.8e-11 1.9e-8 7.4e-6 0.02 1618 1.0
p[34,21] 0.04 1.4e-3 0.071.2e-30 1.9e-8 4.8e-4 0.03 0.24 2291 1.0
p[35,21] 0.05 1.3e-3 0.075.4e-11 1.6e-3 0.02 0.08 0.23 2741 1.0
p[36,21] 2.4e-4 2.3e-4 4.1e-31.9e-1941.0e-1342.5e-1081.5e-813.9e-10 312 1.01
p[37,21] 0.02 1.3e-3 0.042.8e-1401.1e-11 2.0e-4 0.03 0.17 1235 1.0
p[38,21] 6.9e-4 4.8e-4 8.7e-31.4e-2791.7e-2013.9e-1656.3e-133 1.1e-9 330 1.01
p[39,21] 0.01 2.8e-4 0.01 9.0e-4 6.7e-3 0.01 0.02 0.04 1492 1.0
p[40,21] 0.04 1.2e-3 0.054.7e-13 7.2e-5 7.2e-3 0.05 0.19 2267 1.0
p[41,21] 0.05 2.3e-3 0.073.3e-47 7.0e-9 6.5e-3 0.07 0.25 1000 1.0
p[42,21] 0.04 1.2e-3 0.052.6e-20 1.6e-5 8.5e-3 0.05 0.19 2058 1.0
p[43,21] 0.03 1.2e-3 0.052.9e-17 4.3e-6 2.5e-3 0.04 0.19 1818 1.0
p[44,21] 1.6e-3 9.5e-5 5.2e-35.3e-364.5e-10 5.4e-7 1.4e-4 0.02 2994 1.0
p[45,21] 0.04 1.2e-3 0.052.3e-11 5.5e-5 8.5e-3 0.06 0.19 2029 1.0
p[46,21] 0.05 1.5e-3 0.071.3e-18 1.4e-4 0.01 0.08 0.25 2369 1.0
p[47,21] 0.01 1.4e-3 0.056.7e-411.3e-17 2.4e-9 8.5e-5 0.19 1416 1.0
p[48,21] 1.1e-4 1.1e-5 6.2e-42.2e-13 1.5e-8 9.5e-7 1.7e-5 9.5e-4 2949 nan
p[49,21] 0.03 1.3e-3 0.063.5e-392.7e-10 4.2e-5 0.01 0.23 2080 1.0
p[50,21] 7.6e-5 2.2e-5 1.1e-36.3e-185.4e-11 1.2e-8 8.1e-7 4.6e-4 2214 1.0
p[0,22] 0.02 1.8e-3 0.095.5e-765.3e-281.1e-13 5.3e-5 0.35 2567 1.0
p[1,22] 0.11 3.5e-3 0.182.1e-29 1.3e-4 0.01 0.15 0.63 2784 1.0
p[2,22] 8.5e-3 8.4e-4 0.041.5e-204.8e-11 2.3e-7 1.5e-4 0.11 1963 1.0
p[3,22] 4.8e-3 7.2e-4 0.022.2e-841.7e-354.5e-208.9e-11 0.06 1139 1.0
p[4,22] 5.4e-3 7.9e-4 0.031.9e-888.9e-393.1e-225.9e-12 0.06 1555 1.0
p[5,22] 1.4e-4 2.0e-5 9.7e-49.3e-14 1.5e-9 1.1e-7 5.1e-6 1.2e-3 2329 nan
p[6,22] 5.7e-6 4.6e-6 2.9e-4 0.04.9e-2701.5e-2075.0e-544.4e-14 4000 nan
p[7,22] 8.4e-3 1.2e-3 0.053.2e-1992.4e-1011.5e-276.6e-11 0.09 2076 1.0
p[8,22] 0.03 1.9e-3 0.112.3e-947.5e-13 3.1e-5 4.2e-3 0.4 3295 1.0
p[9,22] 0.04 2.0e-3 0.081.4e-465.0e-14 2.7e-4 0.05 0.26 1443 1.0
p[10,22] 0.03 1.6e-3 0.099.8e-2202.2e-11 3.1e-5 8.7e-3 0.34 2954 1.0
p[11,22] 0.04 1.4e-3 0.07 7.9e-6 1.9e-3 0.01 0.05 0.24 2454 1.0
p[12,22] 0.03 1.4e-3 0.083.0e-1572.8e-10 5.3e-510.0e-3 0.29 3210 1.0
p[13,22] 0.03 1.7e-3 0.093.1e-1542.3e-16 4.2e-7 1.7e-3 0.36 2823 1.0
p[14,22] 0.04 2.1e-3 0.122.9e-2915.0e-14 2.9e-5 3.0e-3 0.47 3301 1.0
p[15,22] 5.2e-3 6.5e-4 0.041.9e-496.5e-203.6e-11 1.0e-6 0.04 3407 1.0
p[16,22] 0.06 2.7e-3 0.092.7e-541.2e-13 5.8e-3 0.09 0.3 1011 1.0
p[17,22] 6.8e-5 4.8e-5 2.4e-31.4e-1051.5e-452.6e-331.7e-23 1.3e-9 2555 1.0
p[18,22] 0.02 1.8e-3 0.09 0.08.3e-374.7e-10 3.2e-4 0.28 2583 1.0
p[19,22] 2.9e-4 5.1e-5 3.2e-31.9e-15 1.1e-5 2.6e-5 7.2e-5 1.2e-3 4000 1.0
p[20,22] 0.02 1.3e-3 0.07 0.01.2e-893.8e-11 5.7e-4 0.23 2497 1.0
p[21,22] 0.03 1.3e-3 0.084.3e-2873.1e-12 1.7e-5 5.6e-3 0.32 4000 1.0
p[22,22] 0.02 1.4e-3 0.084.2e-1163.5e-13 1.3e-6 1.6e-3 0.32 3388 1.0
p[23,22] 0.03 1.8e-3 0.093.5e-1499.3e-17 3.1e-7 1.0e-3 0.37 2529 1.0
p[24,22] 2.3e-3 3.0e-4 0.021.8e-1352.6e-261.4e-15 4.3e-9 0.02 3110 1.0
p[25,22] 0.02 1.5e-3 0.081.2e-1921.3e-24 3.2e-8 5.6e-4 0.27 2601 1.0
p[26,22] 0.03 1.8e-3 0.112.2e-572.4e-15 1.6e-7 2.1e-3 0.44 3510 1.0
p[27,22] 0.02 1.5e-3 0.075.0e-2586.3e-52 4.9e-9 8.5e-4 0.27 2541 1.0
p[28,22] 8.3e-6 4.7e-6 2.9e-48.0e-821.4e-311.5e-233.1e-17 8.2e-9 4000 nan
p[29,22] 0.03 2.7e-3 0.062.7e-662.2e-205.2e-11 7.0e-3 0.22 537 1.01
p[30,22] 0.04 2.5e-3 0.124.5e-616.9e-15 2.0e-5 9.4e-3 0.46 2191 1.0
p[31,22] 0.03 1.9e-3 0.14.1e-15 9.4e-7 2.6e-4 9.9e-3 0.38 2666 1.0
p[32,22] 2.6e-11 2.6e-11 1.6e-9 0.0 0.0 0.0 0.02.1e-216 4000 nan
p[33,22] 4.2e-3 6.8e-4 0.032.5e-391.3e-185.0e-13 2.5e-8 0.03 2286 1.0
p[34,22] 0.03 1.8e-3 0.13.8e-693.7e-20 3.7e-8 2.8e-3 0.41 3501 1.0
p[35,22] 0.07 2.8e-3 0.158.4e-29 4.7e-7 5.8e-4 0.04 0.59 3051 1.0
p[36,22] 5.3e-4 3.3e-4 0.01 0.02.1e-2765.9e-2223.5e-1662.4e-18 1342 1.0
p[37,22] 0.02 1.6e-3 0.083.0e-2864.5e-23 8.7e-7 2.7e-3 0.28 2435 1.0
p[38,22] 8.5e-4 4.9e-4 0.01 0.0 0.0 0.01.5e-3075.6e-26 861 1.01
p[39,22] 0.22 3.6e-3 0.16 6.2e-3 0.08 0.18 0.34 0.58 2081 1.0
p[40,22] 0.04 2.0e-3 0.111.7e-23 9.6e-8 9.8e-5 7.1e-3 0.42 2943 1.0
p[41,22] 0.05 2.9e-3 0.076.2e-541.5e-10 7.1e-3 0.07 0.25 633 1.01
p[42,22] 0.03 1.4e-3 0.083.4e-52 2.3e-8 1.3e-4 0.02 0.29 2994 1.0
p[43,22] 0.03 1.6e-3 0.081.4e-42 7.7e-8 2.5e-4 0.01 0.33 2870 1.0
p[44,22] 8.9e-4 1.7e-4 9.3e-36.6e-901.0e-251.7e-173.1e-11 2.5e-3 2960 1.0
p[45,22] 0.04 1.7e-3 0.11.1e-17 5.7e-7 3.3e-4 0.01 0.39 3385 1.0
p[46,22] 0.06 2.6e-3 0.147.3e-498.8e-11 6.7e-4 0.04 0.54 2862 1.0
p[47,22] 5.8e-3 7.8e-4 0.043.7e-921.2e-402.6e-232.3e-12 0.07 2383 1.0
p[48,22] 1.9e-3 2.3e-4 0.011.3e-179.1e-10 8.6e-7 8.5e-5 0.02 2647 1.0
p[49,22] 0.03 1.7e-3 0.12.6e-675.3e-20 1.1e-8 8.5e-4 0.37 3238 1.0
p[50,22] 1.1e-3 2.5e-4 0.012.0e-241.0e-12 3.3e-9 1.7e-6 5.5e-3 3113 1.0
p[0,23] 0.05 1.9e-3 0.1 3.9e-9 3.9e-4 5.7e-3 0.03 0.4 2918 1.0
p[1,23] 0.01 1.3e-3 0.061.2e-10 3.1e-6 1.6e-4 2.3e-3 0.15 1932 1.0
p[2,23] 1.2e-3 2.1e-4 7.2e-31.3e-208.0e-12 3.0e-8 1.7e-5 0.01 1192 1.0
p[3,23] 0.05 2.4e-3 0.12.2e-10 8.8e-4 0.01 0.06 0.36 1620 1.0
p[4,23] 0.05 1.9e-3 0.13.6e-10 1.1e-3 0.01 0.06 0.33 2784 1.0
p[5,23] 1.5e-6 3.2e-7 1.8e-52.5e-224.0e-163.3e-131.8e-10 3.0e-6 3109 nan
p[6,23] 5.8e-5 3.7e-5 2.3e-34.3e-3038.2e-2121.2e-1611.8e-40 1.2e-9 3925 1.0
p[7,23] 5.4e-3 8.8e-4 0.032.4e-1811.6e-952.2e-285.1e-12 0.06 1449 1.0
p[8,23] 0.03 1.3e-3 0.079.5e-72 1.3e-9 1.1e-4 0.01 0.27 3168 1.0
p[9,23] 0.03 2.2e-3 0.071.7e-13 1.1e-5 9.7e-4 0.03 0.26 1118 1.0
p[10,23] 0.03 1.3e-3 0.078.0e-1889.1e-10 1.8e-4 0.02 0.24 2727 1.0
p[11,23] 1.1e-5 1.5e-6 7.2e-51.4e-12 3.7e-9 9.1e-8 1.7e-6 9.1e-5 2332 nan
p[12,23] 0.03 1.1e-3 0.065.2e-129 8.6e-9 1.7e-4 0.02 0.22 3101 1.0
p[13,23] 0.02 1.3e-3 0.074.1e-1373.4e-16 5.1e-6 4.5e-3 0.23 2668 1.0
p[14,23] 0.03 1.4e-3 0.083.4e-2271.4e-10 6.9e-5 9.3e-3 0.3 3156 1.0
p[15,23] 0.05 2.2e-3 0.11 3.8e-9 8.7e-5 4.2e-3 0.04 0.37 2455 1.0
p[16,23] 0.03 2.1e-3 0.08 1.5e-8 1.3e-4 1.7e-3 0.03 0.27 1257 1.0
p[17,23] 7.7e-5 4.6e-5 2.6e-32.8e-892.6e-383.2e-281.0e-19 5.9e-8 3260 1.0
p[18,23] 0.03 1.2e-3 0.07 0.01.2e-26 5.6e-7 1.5e-3 0.3 4000 1.0
p[19,23] 1.0e-3 2.4e-4 0.011.0e-11 1.2e-5 3.5e-5 1.1e-4 2.8e-3 2713 1.0
p[20,23] 0.02 1.1e-3 0.051.5e-2881.2e-756.4e-10 1.1e-3 0.19 2017 1.0
p[21,23] 0.03 1.3e-3 0.063.2e-232 1.7e-9 9.5e-5 0.01 0.23 2531 1.0
p[22,23] 0.03 1.4e-3 0.082.2e-1064.5e-11 5.2e-5 9.3e-3 0.25 3274 1.0
p[23,23] 0.02 1.2e-3 0.063.8e-1322.4e-16 1.3e-6 2.7e-3 0.19 2702 1.0
p[24,23] 1.6e-3 1.7e-4 9.3e-32.6e-1091.1e-215.5e-13 1.0e-7 0.02 2895 1.0
p[25,23] 0.02 1.2e-3 0.069.3e-1732.3e-24 1.5e-7 2.7e-3 0.21 2545 1.0
p[26,23] 0.02 1.5e-3 0.086.3e-308.8e-10 5.7e-5 6.8e-3 0.28 2822 1.0
p[27,23] 0.02 1.3e-3 0.068.1e-2266.8e-46 3.8e-8 2.0e-3 0.21 2211 1.0
p[28,23] 1.6e-6 1.2e-6 7.4e-51.9e-762.4e-315.1e-242.0e-182.2e-10 4000 nan
p[29,23] 0.05 1.8e-3 0.081.3e-11 7.5e-4 9.8e-3 0.06 0.29 2063 1.0
p[30,23] 0.03 1.7e-3 0.091.0e-14 1.6e-5 1.3e-3 0.02 0.3 2619 1.0
p[31,23] 0.05 1.9e-3 0.09 6.9e-9 2.2e-4 5.4e-3 0.07 0.34 2389 1.0
p[32,23] 8.8e-9 8.8e-9 5.6e-7 0.0 0.0 0.06.2e-2711.2e-179 4000 nan
p[33,23] 4.9e-4 1.6e-4 7.2e-33.6e-371.7e-203.7e-152.0e-10 9.8e-4 1997 1.0
p[34,23] 0.03 1.7e-3 0.091.4e-33 1.6e-6 1.3e-3 0.02 0.29 2579 1.0
p[35,23] 0.03 1.4e-3 0.08 1.1e-9 5.4e-5 1.7e-3 0.02 0.25 2887 1.0
p[36,23] 2.0e-4 1.9e-4 5.3e-3 0.02.8e-2367.7e-1912.2e-1442.8e-18 743 1.0
p[37,23] 0.02 1.2e-3 0.063.4e-2452.3e-21 7.3e-6 5.8e-3 0.23 2579 1.0
p[38,23] 7.8e-4 4.4e-4 0.01 0.0 0.01.2e-3091.0e-2502.4e-20 622 1.01
p[39,23] 5.0e-5 2.2e-6 1.3e-4 1.5e-8 1.7e-6 1.0e-5 4.2e-5 3.6e-4 3147 nan
p[40,23] 0.04 1.7e-3 0.091.8e-16 6.2e-6 1.5e-3 0.03 0.31 2682 1.0
p[41,23] 0.03 2.4e-3 0.077.5e-12 5.3e-5 1.1e-3 0.02 0.23 807 1.0
p[42,23] 0.03 1.3e-3 0.075.3e-43 1.9e-7 4.0e-4 0.03 0.24 2892 1.0
p[43,23] 0.03 1.2e-3 0.061.1e-35 3.2e-7 4.8e-4 0.02 0.23 2778 1.0
p[44,23] 5.5e-4 9.1e-5 4.8e-33.0e-748.8e-223.5e-15 1.1e-9 2.8e-3 2761 1.0
p[45,23] 0.04 1.4e-3 0.085.5e-13 2.3e-5 2.2e-3 0.03 0.27 3150 1.0
p[46,23] 0.03 1.6e-3 0.082.6e-10 1.9e-5 8.4e-4 0.01 0.26 2289 1.0
p[47,23] 0.05 2.0e-3 0.11 1.6e-9 8.7e-4 8.5e-3 0.05 0.4 2934 1.0
p[48,23] 3.8e-6 1.4e-6 8.5e-58.1e-231.1e-141.4e-11 2.9e-9 6.2e-6 3456 nan
p[49,23] 0.02 1.8e-3 0.088.7e-591.1e-11 2.5e-5 6.8e-3 0.27 2053 1.0
p[50,23] 2.0e-6 1.1e-6 6.8e-51.7e-294.8e-183.6e-144.2e-11 1.5e-6 3992 nan
p[0,24] 0.05 2.4e-3 0.131.2e-18 1.1e-7 5.9e-5 0.01 0.5 2755 1.0
p[1,24] 0.01 1.4e-3 0.072.2e-311.7e-179.0e-12 4.6e-7 0.14 2325 1.0
p[2,24] 7.4e-3 2.4e-3 0.085.5e-582.7e-341.3e-242.8e-16 1.3e-4 1053 1.0
p[3,24] 0.06 2.6e-3 0.146.9e-14 1.5e-6 2.6e-4 0.02 0.56 3078 1.0
p[4,24] 0.06 2.9e-3 0.141.7e-14 3.2e-6 6.1e-4 0.03 0.54 2376 1.0
p[5,24] 1.5e-12 1.0e-126.4e-114.2e-673.5e-491.2e-405.6e-321.1e-17 4000 nan
p[6,24] 6.0e-7 6.0e-7 3.8e-5 0.0 0.0 0.04.9e-1082.7e-29 4000 nan
p[7,24] 5.7e-3 1.1e-3 0.06 0.08.5e-2135.9e-711.8e-31 1.5e-3 2783 1.0
p[8,24] 0.04 4.1e-3 0.137.2e-1886.5e-351.2e-13 7.3e-5 0.56 1091 1.0
p[9,24] 0.03 2.3e-3 0.12.2e-355.3e-174.1e-10 1.2e-4 0.42 2125 1.0
p[10,24] 0.03 3.1e-3 0.15 0.07.1e-303.1e-14 1.0e-7 0.66 2225 1.0
p[11,24] 7.6e-8 4.5e-8 2.6e-61.7e-402.9e-283.6e-232.8e-181.4e-10 3185 nan
p[12,24] 0.04 3.5e-3 0.168.9e-2981.7e-291.4e-15 9.5e-8 0.78 2103 1.0
p[13,24] 0.03 2.6e-3 0.134.1e-3021.2e-422.6e-16 1.2e-7 0.57 2476 1.0
p[14,24] 0.04 3.1e-3 0.1 0.06.0e-382.0e-12 6.7e-4 0.4 1077 1.0
p[15,24] 0.04 2.9e-3 0.131.7e-17 2.3e-8 1.7e-5 1.9e-3 0.53 2047 1.0
p[16,24] 0.02 1.8e-3 0.091.6e-265.4e-14 9.3e-9 1.3e-4 0.35 2489 1.0
p[17,24] 1.2e-3 1.2e-3 0.031.5e-2136.1e-995.9e-753.2e-546.6e-24 765 1.0
p[18,24] 0.03 3.0e-3 0.1 0.09.7e-806.2e-28 6.3e-7 0.41 1010 1.0
p[19,24] 8.7e-3 5.2e-4 0.031.9e-33 9.6e-5 9.2e-4 5.1e-3 0.08 2576 1.0
p[20,24] 0.02 2.3e-3 0.12 0.07.4e-1791.4e-304.0e-12 0.21 2603 1.0
p[21,24] 0.03 3.4e-3 0.15 0.01.8e-332.3e-16 1.1e-7 0.67 1869 1.0
p[22,24] 0.03 2.5e-3 0.133.6e-2371.3e-3010.0e-13 3.9e-6 0.59 2821 1.0
p[23,24] 0.02 2.0e-3 0.111.9e-2892.3e-431.7e-16 1.7e-7 0.18 2800 1.0
p[24,24] 0.01 3.1e-3 0.11.5e-2574.3e-611.3e-382.5e-24 9.1e-5 974 1.01
p[25,24] 0.02 2.5e-3 0.11 0.02.1e-612.5e-20 3.3e-8 0.33 2120 1.0
p[26,24] 0.03 1.9e-3 0.15.5e-749.0e-273.4e-11 7.2e-5 0.38 2672 1.0
p[27,24] 0.02 2.2e-3 0.11 0.05.1e-1121.6e-237.7e-10 0.15 2350 1.0
p[28,24] 3.3e-11 3.3e-11 2.1e-97.2e-1813.4e-797.9e-624.0e-485.4e-28 4000 nan
p[29,24] 0.06 3.3e-3 0.175.6e-234.4e-10 5.6e-6 4.2e-3 0.69 2551 1.0
p[30,24] 0.03 2.0e-3 0.19.7e-402.1e-14 7.9e-8 4.4e-4 0.43 2560 1.0
p[31,24] 0.04 3.2e-3 0.163.2e-274.4e-13 1.9e-8 7.5e-5 0.77 2383 1.0
p[32,24] 2.1e-20 2.1e-201.3e-18 0.0 0.0 0.0 0.0 0.0 4000 nan
p[33,24] 1.5e-3 6.6e-4 0.037.9e-941.5e-533.1e-404.2e-28 3.5e-9 2787 1.0
p[34,24] 0.03 2.0e-3 0.116.9e-775.4e-16 2.2e-7 6.8e-4 0.45 2797 1.0
p[35,24] 0.03 2.5e-3 0.132.2e-273.9e-13 7.1e-8 2.4e-4 0.57 2649 1.0
p[36,24] 7.3e-4 4.2e-4 0.02 0.0 0.0 0.03.1e-3202.5e-48 2764 1.0
p[37,24] 0.02 2.7e-3 0.13 0.04.0e-561.3e-18 3.3e-9 0.5 2430 1.0
p[38,24] 4.8e-4 2.7e-4 0.02 0.0 0.0 0.0 0.04.6e-57 4000 1.0
p[39,24] 2.0e-6 1.4e-6 6.9e-56.5e-311.7e-211.7e-175.5e-14 1.6e-8 2402 nan
p[40,24] 0.03 2.8e-3 0.153.7e-441.1e-15 6.1e-9 3.5e-5 0.68 2805 1.0
p[41,24] 0.02 2.2e-3 0.11.8e-308.6e-188.2e-11 1.4e-5 0.33 2135 1.0
p[42,24] 0.03 3.5e-3 0.141.4e-1102.8e-242.6e-14 9.0e-8 0.61 1517 1.0
p[43,24] 0.04 4.4e-3 0.173.2e-938.2e-235.5e-13 9.1e-7 0.83 1509 1.0
p[44,24] 3.2e-3 1.9e-3 0.054.4e-1824.5e-615.4e-441.5e-29 2.6e-9 742 1.0
p[45,24] 0.05 3.7e-3 0.175.8e-394.4e-15 2.3e-9 6.2e-5 0.79 2171 1.0
p[46,24] 0.02 1.9e-3 0.092.3e-292.4e-14 1.5e-8 1.5e-4 0.37 2499 1.0
p[47,24] 0.07 3.6e-3 0.152.2e-12 5.2e-6 8.6e-4 0.04 0.59 1884 1.0
p[48,24] 2.0e-7 1.4e-7 9.1e-66.0e-591.7e-392.5e-311.5e-243.4e-14 4000 nan
p[49,24] 0.02 2.2e-3 0.091.6e-1283.6e-318.5e-13 4.5e-5 0.35 1890 1.01
p[50,24] 4.8e-8 4.7e-8 3.0e-61.3e-739.9e-477.1e-377.7e-291.3e-15 4000 nan
p[0,25] 0.07 0.02 0.261.3e-1571.5e-801.6e-461.4e-24 1.0 170 1.01
p[1,25] 5.5e-3 5.5e-3 0.074.5e-2371.4e-1506.2e-1139.5e-797.5e-22 181 1.02
p[2,25] 3.3e-11 3.3e-11 2.1e-98.3e-2661.5e-1773.4e-1387.0e-1035.1e-47 4000 nan
p[3,25] 0.12 0.04 0.332.5e-1226.6e-632.3e-392.0e-21 1.0 62 1.04
p[4,25] 0.08 0.02 0.272.8e-1209.8e-601.2e-361.4e-22 1.0 126 1.03
p[5,25] 1.1e-92 9.6e-936.1e-91 0.0 0.01.3e-2771.4e-2274.3e-142 4000 nan
p[6,25] 5.1e-10 3.6e-10 2.3e-8 0.0 0.0 0.07.5e-1414.8e-37 4000 nan
p[7,25] 4.9e-3 3.9e-3 0.07 0.0 0.03.0e-3225.6e-1892.6e-21 323 1.01
p[8,25] 0.04 9.5e-3 0.183.0e-2203.6e-749.4e-432.9e-21 0.97 372 1.01
p[9,25] 1.3e-3 8.3e-4 0.047.4e-2661.0e-1438.3e-955.9e-522.8e-16 1823 1.0
p[10,25] 0.02 5.0e-3 0.13 0.01.9e-1245.4e-609.0e-28 4.0e-4 733 1.0
p[11,25] 7.0e-56 5.5e-563.5e-547.0e-2973.9e-2192.1e-1831.3e-14910.0e-98 3993 nan
p[12,25] 0.03 5.5e-3 0.17 0.01.6e-803.5e-431.5e-21 0.97 1002 1.0
p[13,25] 0.01 5.5e-3 0.1 0.09.7e-2223.5e-1043.3e-48 2.8e-8 329 1.01
p[14,25] 0.04 0.01 0.19 0.09.1e-934.2e-494.5e-23 0.99 202 1.01
p[15,25] 0.1 0.03 0.32.7e-1111.8e-515.1e-281.7e-14 1.0 116 1.02
p[16,25] 0.01 9.1e-3 0.112.1e-1952.5e-1144.4e-815.4e-501.1e-13 137 1.03
p[17,25] 2.9e-7 2.9e-7 1.8e-5 0.01.7e-2492.4e-1972.6e-1511.3e-75 4000 nan
p[18,25] 0.05 0.01 0.22 0.02.1e-1326.8e-603.0e-28 0.99 310 1.02
p[19,25] 0.91 0.03 0.261.3e-21 0.98 1.0 1.0 1.0 59 1.06
p[20,25] 0.02 6.3e-3 0.14 0.0 0.02.9e-1142.8e-47 4.7e-3 523 1.01
p[21,25] 0.03 4.5e-3 0.15 0.02.3e-744.1e-424.8e-22 0.65 1198 1.01
p[22,25] 0.04 0.01 0.18 0.01.5e-1838.7e-843.1e-38 1.0 295 1.01
p[23,25] 0.03 0.01 0.18 0.04.3e-2261.3e-1109.8e-51 1.0 184 1.02
p[24,25] 2.0e-3 1.4e-3 0.04 0.02.6e-1722.5e-1161.7e-771.7e-27 914 1.0
p[25,25] 0.02 6.6e-3 0.13 0.09.1e-2753.1e-1226.1e-52 6.6e-6 393 1.0
p[26,25] 0.02 7.7e-3 0.16 0.01.5e-1932.0e-1087.3e-50 0.64 409 1.0
p[27,25] 0.01 3.9e-3 0.11 0.0 0.01.5e-1221.1e-49 1.1e-7 848 1.01
p[28,25] 2.0e-63 2.0e-631.3e-61 0.0 0.03.8e-2811.1e-2315.4e-151 4000 nan
p[29,25] 0.04 0.03 0.195.2e-1625.0e-843.3e-509.5e-30 1.0 54 1.04
p[30,25] 9.3e-3 7.0e-3 0.11.5e-2444.7e-1242.2e-821.5e-453.6e-11 185 1.02
p[31,25] 0.04 9.9e-3 0.27.0e-1064.6e-496.1e-286.6e-16 1.0 389 1.01
p[32,25] 4.1e-26 4.1e-262.6e-24 0.0 0.0 0.0 0.0 0.0 4000 nan
p[33,25] 1.6e-8 1.6e-8 1.0e-6 0.09.7e-2812.4e-2232.6e-1708.3e-76 4000 nan
p[34,25] 0.03 0.01 0.17 0.09.1e-1371.0e-792.8e-40 1.0 232 1.01
p[35,25] 5.8e-3 3.6e-3 0.085.4e-2001.5e-1184.3e-812.0e-481.6e-11 441 1.01
p[36,25] 5.9e-8 5.9e-8 3.7e-6 0.0 0.0 0.0 0.02.5e-196 4000 nan
p[37,25] 0.01 2.9e-3 0.11 0.01.3e-2186.6e-776.9e-34 6.5e-7 1342 1.0
p[38,25] 1.0e-3 4.8e-4 0.03 0.0 0.0 0.0 0.05.8e-129 4000 1.0
p[39,25] 2.2e-37 2.1e-371.3e-355.6e-2225.1e-1615.8e-1353.5e-1111.7e-73 4000 nan
p[40,25] 0.03 0.01 0.183.4e-2261.7e-983.2e-571.1e-26 1.0 279 1.02
p[41,25] 0.03 0.02 0.177.1e-2031.8e-1181.3e-801.5e-46 1.0 99 1.03
p[42,25] 0.04 8.1e-3 0.182.4e-2506.1e-798.3e-461.4e-23 0.98 523 1.0
p[43,25] 0.04 7.0e-3 0.181.1e-2615.1e-771.4e-401.1e-18 0.98 678 1.01
p[44,25] 5.5e-4 5.5e-4 0.02 0.03.4e-1826.4e-1403.0e-1011.1e-45 1583 1.0
p[45,25] 0.05 0.01 0.211.8e-1841.4e-778.1e-453.8e-21 1.0 339 1.02
p[46,25] 0.02 0.01 0.132.7e-2171.1e-1281.5e-891.7e-537.3e-11 133 1.03
p[47,25] 0.1 0.03 0.296.2e-1206.4e-607.3e-366.2e-20 1.0 99 1.04
p[48,25] 1.5e-56 1.5e-569.2e-55 0.02.0e-2716.0e-2261.4e-1872.6e-123 4000 nan
p[49,25] 0.02 9.1e-3 0.12 0.03.1e-2171.6e-1171.3e-54 2.0e-9 185 1.03
p[50,25] 2.2e-65 2.2e-651.4e-63 0.02.0e-3131.1e-2595.2e-2144.5e-137 4000 nan
p[0,26] 0.04 1.7e-3 0.18.4e-11 7.4e-5 2.6e-3 0.02 0.35 3286 1.0
p[1,26] 0.02 1.4e-3 0.07 7.5e-8 7.1e-5 8.6e-4 5.9e-3 0.21 2538 1.0
p[2,26] 0.01 1.3e-3 0.02 1.4e-7 2.4e-4 3.7e-3 0.02 0.07 290 1.01
p[3,26] 0.05 2.4e-3 0.111.1e-12 1.1e-4 4.3e-3 0.04 0.39 2027 1.0
p[4,26] 0.04 1.4e-3 0.081.9e-12 4.7e-5 1.9e-3 0.02 0.29 3503 1.0
p[5,26] 3.0e-6 4.6e-7 2.6e-52.7e-177.6e-139.0e-11 1.1e-8 1.8e-5 3223 nan
p[6,26] 1.3e-3 1.8e-4 9.8e-32.8e-1288.9e-873.3e-642.2e-12 9.1e-3 3053 1.0
p[7,26] 7.0e-3 1.1e-3 0.036.3e-904.5e-461.2e-13 9.4e-7 0.09 900 1.0
p[8,26] 0.03 1.6e-3 0.061.1e-24 4.3e-7 8.8e-4 0.03 0.22 1514 1.0
p[9,26] 0.04 2.2e-3 0.09 5.6e-9 4.8e-5 1.1e-3 0.02 0.34 1721 1.0
p[10,26] 0.03 1.2e-3 0.053.5e-87 2.7e-7 2.9e-3 0.04 0.16 1486 1.0
p[11,26] 1.9e-5 1.7e-6 9.9e-52.9e-10 8.7e-8 8.8e-7 6.9e-6 1.5e-4 3248 nan
p[12,26] 0.03 1.1e-3 0.054.3e-53 1.0e-6 3.7e-3 0.05 0.16 1833 1.0
p[13,26] 0.02 1.2e-3 0.063.5e-65 5.7e-9 3.9e-4 0.02 0.21 2282 1.0
p[14,26] 0.03 1.5e-3 0.057.0e-89 9.2e-8 4.4e-4 0.03 0.19 1221 1.0
p[15,26] 0.03 1.5e-3 0.079.2e-13 9.1e-8 2.6e-4 0.02 0.26 2322 1.0
p[16,26] 0.04 2.2e-3 0.09 6.5e-8 1.6e-4 1.7e-3 0.02 0.34 1791 1.0
p[17,26] 1.5e-3 1.9e-4 9.1e-32.3e-351.7e-13 1.6e-9 2.5e-6 0.01 2416 1.0
p[18,26] 0.03 1.8e-3 0.061.0e-242 8.4e-9 1.5e-4 0.04 0.21 1027 1.0
p[19,26] 5.9e-4 2.6e-4 6.8e-37.0e-13 2.0e-8 8.1e-8 5.0e-7 1.3e-4 699 1.0
p[20,26] 0.02 1.4e-3 0.041.1e-1332.8e-30 1.9e-5 0.03 0.14 864 1.01
p[21,26] 0.03 1.4e-3 0.051.8e-102 3.9e-7 1.8e-3 0.04 0.19 1465 1.0
p[22,26] 0.03 1.3e-3 0.062.6e-50 4.9e-8 6.1e-4 0.02 0.22 2167 1.0
p[23,26] 0.02 1.2e-3 0.054.5e-63 2.1e-9 2.6e-4 0.02 0.2 1906 1.0
p[24,26] 0.02 7.0e-4 0.042.9e-4410.0e-7 6.5e-4 0.02 0.13 2605 1.0
p[25,26] 0.02 1.2e-3 0.052.1e-831.4e-11 4.7e-5 0.01 0.18 1675 1.0
p[26,26] 0.02 1.6e-3 0.086.4e-17 1.1e-7 2.0e-4 8.0e-3 0.28 2649 1.0
p[27,26] 0.02 1.4e-3 0.053.6e-1086.7e-19 2.2e-5 0.02 0.17 1142 1.0
p[28,26] 5.4e-5 1.7e-5 1.1e-32.3e-341.1e-131.3e-10 2.8e-8 8.0e-5 4000 1.0
p[29,26] 0.04 1.5e-3 0.091.8e-12 2.0e-4 3.9e-3 0.04 0.35 4000 1.0
p[30,26] 0.04 1.9e-3 0.11.0e-10 1.5e-4 2.6e-3 0.02 0.38 2899 1.0
p[31,26] 0.02 1.1e-3 0.053.3e-13 7.6e-7 3.2e-4 0.02 0.17 1781 1.0
p[32,26] 2.1e-5 2.3e-5 8.3e-42.0e-2463.3e-1816.0e-1502.2e-1229.3e-79 1297 nan
p[33,26] 2.2e-3 3.3e-4 0.011.0e-15 2.2e-9 5.1e-7 6.0e-5 0.02 1556 1.0
p[34,26] 0.03 1.8e-3 0.092.3e-17 8.2e-6 1.3e-3 0.02 0.32 2591 1.0
p[35,26] 0.04 1.5e-3 0.08 1.2e-7 8.4e-4 7.3e-3 0.03 0.29 2870 1.0
p[36,26] 3.4e-4 3.2e-4 5.3e-31.2e-1604.6e-1111.1e-882.5e-66 3.3e-8 274 1.01
p[37,26] 0.02 1.4e-3 0.042.7e-1152.8e-11 2.6e-4 0.03 0.16 873 1.0
p[38,26] 1.0e-3 7.3e-4 0.015.0e-2396.6e-1715.7e-1393.6e-111 1.4e-7 240 1.01
p[39,26] 4.0e-5 1.1e-6 7.0e-5 1.1e-7 3.5e-6 1.4e-5 4.6e-5 2.2e-4 4000 nan
p[40,26] 0.04 1.3e-3 0.064.7e-12 6.5e-5 6.6e-3 0.04 0.23 2469 1.0
p[41,26] 0.03 2.0e-3 0.088.5e-12 2.2e-5 5.6e-4 0.01 0.28 1424 1.0
p[42,26] 0.04 1.2e-3 0.054.1e-17 1.5e-5 0.01 0.06 0.15 1468 1.0
p[43,26] 0.03 1.2e-3 0.042.9e-15 1.5e-6 3.5e-3 0.05 0.15 1330 1.0
p[44,26] 0.01 4.9e-4 0.033.4e-28 6.7e-7 1.2e-4 6.1e-3 0.1 2953 1.0
p[45,26] 0.04 1.2e-3 0.061.9e-11 2.7e-5 8.4e-3 0.05 0.2 2217 1.0
p[46,26] 0.03 1.5e-3 0.09 9.0e-9 1.5e-4 2.0e-3 0.02 0.29 3323 1.0
p[47,26] 0.04 1.8e-3 0.16.2e-12 3.0e-5 2.0e-3 0.03 0.34 2953 1.0
p[48,26] 8.8e-6 1.5e-6 8.0e-56.6e-13 2.8e-9 8.0e-8 1.2e-6 6.4e-5 2986 nan
p[49,26] 0.02 1.7e-3 0.081.1e-30 2.7e-8 1.2e-4 6.4e-3 0.27 2302 1.0
p[50,26] 6.5e-6 1.5e-6 8.8e-51.5e-165.0e-11 4.3e-9 1.6e-7 4.1e-5 3509 nan
p[0,27] 0.04 1.9e-3 0.084.5e-18 6.4e-7 1.8e-3 0.03 0.29 1950 1.0
p[1,27] 0.04 1.6e-3 0.08 5.0e-8 7.8e-4 8.1e-3 0.04 0.31 2610 1.0
p[2,27] 0.05 2.5e-3 0.122.5e-12 3.0e-6 5.1e-4 0.02 0.5 2410 1.0
p[3,27] 0.04 1.6e-3 0.091.2e-13 5.9e-5 2.8e-3 0.03 0.33 3107 1.0
p[4,27] 0.04 1.8e-3 0.12.1e-13 3.2e-5 1.8e-3 0.02 0.33 2718 1.0
p[5,27] 7.7e-6 1.2e-6 5.7e-56.7e-154.7e-11 3.1e-9 1.5e-7 6.0e-5 2230 nan
p[6,27] 1.3e-4 5.5e-5 3.0e-33.2e-2231.5e-1542.7e-1171.8e-27 3.8e-6 2910 1.0
p[7,27] 9.4e-3 1.1e-3 0.046.7e-1428.2e-721.3e-19 3.7e-8 0.17 1511 1.0
p[8,27] 0.03 1.3e-3 0.064.6e-49 4.0e-7 1.9e-4 0.04 0.2 1828 1.0
p[9,27] 0.04 2.4e-3 0.11 2.0e-9 9.4e-5 1.0e-3 0.02 0.38 2015 1.0
p[10,27] 0.03 1.5e-3 0.099.2e-1471.6e-10 4.1e-5 7.7e-3 0.36 3531 1.0
p[11,27] 1.2e-4 7.2e-6 3.4e-4 1.3e-7 6.7e-6 2.9e-5 1.1e-4 7.6e-4 2218 nan
p[12,27] 0.03 1.3e-3 0.071.2e-96 8.2e-8 2.3e-4 0.02 0.27 3172 1.0
p[13,27] 0.03 1.6e-3 0.073.0e-1061.3e-13 6.3e-6 4.8e-3 0.3 2294 1.0
p[14,27] 0.03 1.6e-3 0.075.0e-155 1.8e-7 8.3e-5 0.04 0.24 1678 1.0
p[15,27] 9.0e-3 7.5e-4 0.048.0e-353.7e-14 1.9e-8 1.4e-4 0.13 2650 1.0
p[16,27] 0.04 2.2e-3 0.1 1.6e-7 3.6e-4 2.0e-3 0.02 0.36 2082 1.0
p[17,27] 5.9e-4 1.7e-4 9.2e-31.8e-652.1e-265.9e-198.5e-13 1.4e-4 2951 1.0
p[18,27] 0.02 1.2e-3 0.05 0.07.5e-16 5.5e-6 0.01 0.2 1879 1.0
p[19,27] 3.1e-4 6.8e-5 3.1e-31.3e-12 5.7e-7 2.0e-6 8.8e-6 4.9e-4 2045 1.0
p[20,27] 0.02 1.3e-3 0.071.2e-2264.8e-56 1.3e-7 1.7e-3 0.29 3081 1.0
p[21,27] 0.03 1.3e-3 0.069.0e-178 6.5e-8 2.7e-4 0.03 0.21 2142 1.0
p[22,27] 0.03 1.3e-3 0.072.2e-801.4e-11 8.5e-6 5.0e-3 0.29 3125 1.0
p[23,27] 0.03 1.5e-3 0.082.0e-1021.1e-13 3.9e-6 5.5e-3 0.29 2368 1.0
p[24,27] 0.02 1.3e-3 0.079.6e-833.3e-14 5.8e-8 2.0e-4 0.28 3020 1.0
p[25,27] 0.02 1.3e-3 0.077.0e-1356.5e-19 2.4e-7 2.2e-3 0.28 2659 1.0
p[26,27] 0.03 1.4e-3 0.071.9e-25 1.4e-8 1.8e-4 0.02 0.24 2348 1.0
p[27,27] 0.02 1.4e-3 0.072.7e-1763.7e-33 5.1e-8 1.4e-3 0.29 2909 1.0
p[28,27] 1.6e-4 7.0e-5 4.4e-32.6e-542.1e-204.3e-154.5e-11 1.3e-5 4000 1.0
p[29,27] 0.05 2.3e-3 0.132.3e-10 2.7e-4 2.3e-3 0.02 0.51 3266 1.0
p[30,27] 0.04 1.5e-3 0.094.7e-18 1.1e-4 4.5e-3 0.04 0.3 3327 1.0
p[31,27] 0.02 1.0e-3 0.061.4e-241.5e-10 1.6e-6 9.4e-4 0.2 3075 1.0
p[32,27] 3.7e-7 3.7e-7 2.3e-5 0.08.0e-3071.2e-2564.7e-2113.6e-138 4000 nan
p[33,27] 0.01 1.2e-3 0.069.1e-254.8e-12 2.5e-8 3.6e-5 0.2 2310 1.0
p[34,27] 0.04 1.6e-3 0.083.5e-25 1.8e-6 1.4e-3 0.03 0.29 2864 1.0
p[35,27] 0.06 1.8e-3 0.093.1e-17 2.5e-4 0.01 0.11 0.32 2737 1.0
p[36,27] 7.7e-4 4.3e-4 0.011.8e-2672.7e-1852.7e-1481.4e-1101.4e-12 992 1.01
p[37,27] 0.03 1.4e-3 0.086.7e-1932.2e-18 2.1e-6 3.1e-3 0.32 3181 1.0
p[38,27] 8.2e-4 5.6e-4 0.01 0.09.0e-2924.4e-2398.9e-1932.5e-13 430 1.01
p[39,27] 6.0e-4 1.2e-5 4.9e-4 3.3e-5 2.6e-4 5.2e-4 8.3e-4 1.7e-3 1635 nan
p[40,27] 0.04 1.5e-3 0.087.3e-28 1.9e-8 1.7e-4 0.02 0.32 3065 1.0
p[41,27] 0.03 2.6e-3 0.114.7e-10 2.5e-5 3.0e-4 4.7e-3 0.3 1692 1.0
p[42,27] 0.03 1.4e-3 0.083.0e-30 4.5e-7 3.8e-4 0.01 0.31 3266 1.0
p[43,27] 0.03 1.5e-3 0.086.9e-25 9.6e-8 2.0e-4 0.01 0.33 3089 1.0
p[44,27] 0.01 1.1e-3 0.061.0e-535.1e-14 3.0e-9 1.5e-5 0.16 2778 1.0
p[45,27] 0.03 1.5e-3 0.099.6e-24 6.5e-8 1.3e-4 0.01 0.34 3144 1.0
p[46,27] 0.04 1.6e-3 0.093.7e-12 3.3e-4 7.6e-3 0.05 0.32 3259 1.0
p[47,27] 0.03 1.9e-3 0.093.3e-15 9.6e-7 8.2e-4 0.02 0.29 2036 1.0
p[48,27] 5.1e-5 5.1e-6 2.7e-45.4e-15 2.8e-9 3.3e-7 8.7e-6 4.5e-4 2931 nan
p[49,27] 0.03 1.4e-3 0.073.2e-43 1.2e-9 9.0e-5 0.01 0.25 2711 1.0
p[50,27] 3.2e-5 7.3e-6 3.7e-46.1e-201.2e-11 4.6e-9 5.4e-7 2.3e-4 2571 nan
p[0,28] 0.06 3.0e-3 0.145.1e-261.7e-10 1.3e-5 0.03 0.56 2276 1.0
p[1,28] 6.1e-3 8.7e-4 0.045.6e-432.2e-231.3e-15 1.7e-9 0.05 2618 1.0
p[2,28] 1.3e-5 5.6e-6 2.8e-47.6e-1009.7e-638.1e-473.4e-337.8e-13 2439 nan
p[3,28] 0.06 2.6e-3 0.137.0e-18 2.4e-7 1.2e-4 0.04 0.5 2447 1.0
p[4,28] 0.08 3.2e-3 0.148.9e-14 5.2e-6 4.8e-3 0.09 0.53 2022 1.0
p[5,28] 9.3e-14 3.9e-142.4e-121.0e-731.8e-522.5e-422.3e-337.5e-19 3931 nan
p[6,28] 2.7e-13 2.7e-131.7e-11 0.0 0.0 0.01.9e-1632.0e-49 4000 nan
p[7,28] 5.8e-3 1.1e-3 0.05 0.0 0.05.3e-1141.7e-52 0.01 2309 1.0
p[8,28] 0.04 2.7e-3 0.091.1e-2657.0e-512.5e-19 4.8e-4 0.34 1185 1.0
p[9,28] 0.02 1.6e-3 0.082.5e-488.9e-192.7e-10 1.1e-4 0.25 2548 1.0
p[10,28] 0.03 1.9e-3 0.09 0.07.8e-523.0e-21 1.3e-5 0.36 2142 1.0
p[11,28] 2.3e-7 1.6e-7 7.8e-63.2e-458.5e-301.9e-234.6e-181.4e-10 2323 nan
p[12,28] 0.03 2.1e-3 0.09 0.01.5e-472.4e-20 1.3e-4 0.34 1770 1.0
p[13,28] 0.02 1.9e-3 0.09 0.01.8e-711.2e-284.5e-10 0.34 2201 1.0
p[14,28] 0.04 3.2e-3 0.09 0.04.6e-551.5e-18 5.4e-4 0.36 884 1.01
p[15,28] 0.09 4.2e-3 0.184.2e-25 6.6e-8 1.1e-3 0.08 0.65 1789 1.0
p[16,28] 0.02 1.8e-3 0.081.3e-291.5e-13 1.8e-8 1.6e-4 0.26 2025 1.0
p[17,28] 4.2e-6 4.2e-6 2.2e-4 0.06.6e-1627.5e-1267.8e-943.4e-46 2809 nan
p[18,28] 0.02 3.0e-3 0.07 0.03.7e-1142.3e-411.2e-10 0.3 585 1.0
p[19,28] 5.3e-3 4.3e-4 0.028.4e-52 3.1e-6 7.4e-5 1.1e-3 0.05 2533 1.0
p[20,28] 0.01 1.3e-3 0.06 0.05.4e-2788.4e-546.1e-18 0.22 2145 1.0
p[21,28] 0.03 2.3e-3 0.09 0.02.2e-511.2e-21 1.2e-4 0.35 1593 1.0
p[22,28] 0.02 1.7e-3 0.09 0.01.4e-529.4e-22 1.7e-6 0.36 3076 1.0
p[23,28] 0.02 1.5e-3 0.08 0.04.8e-721.3e-297.5e-11 0.26 2968 1.0
p[24,28] 3.4e-5 2.1e-5 8.7e-4 0.03.7e-1029.0e-684.2e-458.7e-13 1759 nan
p[25,28] 0.02 1.5e-3 0.08 0.01.2e-1011.7e-361.2e-11 0.28 2807 1.0
p[26,28] 0.02 1.7e-3 0.095.5e-1111.6e-379.6e-17 3.7e-6 0.28 3127 1.0
p[27,28] 0.02 1.4e-3 0.08 0.01.5e-1811.9e-425.8e-14 0.28 2870 1.0
p[28,28] 5.6e-21 5.6e-213.5e-192.0e-2822.0e-1291.1e-1022.1e-815.0e-50 4000 nan
p[29,28] 0.05 2.9e-3 0.111.4e-19 7.2e-8 3.6e-4 0.05 0.42 1579 1.0
p[30,28] 0.03 2.5e-3 0.17.6e-603.8e-216.7e-12 2.0e-5 0.38 1635 1.0
p[31,28] 0.07 3.2e-3 0.146.0e-385.9e-12 2.7e-4 0.05 0.51 1892 1.0
p[32,28] 4.9e-33 4.9e-333.1e-31 0.0 0.0 0.0 0.0 0.0 4000 nan
p[33,28] 5.9e-5 3.7e-5 2.3e-34.7e-1524.8e-891.1e-684.9e-501.8e-19 3992 1.0
p[34,28] 0.03 1.9e-3 0.118.7e-1244.3e-227.5e-11 2.4e-4 0.4 3167 1.0
p[35,28] 0.02 1.7e-3 0.086.9e-468.8e-234.7e-14 7.8e-7 0.24 2077 1.0
p[36,28] 6.2e-4 6.2e-4 0.02 0.0 0.0 0.0 0.02.2e-82 759 1.0
p[37,28] 0.02 2.0e-3 0.09 0.05.7e-961.6e-30 1.4e-8 0.33 1922 1.0
p[38,28] 2.6e-4 1.2e-4 7.6e-3 0.0 0.0 0.0 0.02.1e-95 4000 1.0
p[39,28] 1.2e-5 6.9e-6 4.4e-46.4e-306.7e-206.9e-162.4e-12 1.4e-7 3981 nan
p[40,28] 0.03 2.2e-3 0.115.0e-752.0e-273.3e-13 4.6e-4 0.43 2343 1.0
p[41,28] 0.03 2.7e-3 0.081.5e-301.5e-12 1.7e-7 1.1e-3 0.29 932 1.0
p[42,28] 0.03 2.2e-3 0.091.0e-1751.4e-395.9e-19 5.9e-5 0.35 1612 1.0
p[43,28] 0.04 2.8e-3 0.118.1e-1518.4e-361.8e-14 1.6e-3 0.4 1384 1.0
p[44,28] 3.0e-5 2.7e-5 1.2e-33.6e-2795.6e-1035.8e-772.4e-541.1e-21 1864 1.0
p[45,28] 0.04 2.5e-3 0.113.3e-672.0e-243.9e-11 1.7e-3 0.43 1925 1.0
p[46,28] 0.02 1.6e-3 0.084.2e-416.7e-204.2e-12 2.4e-6 0.22 2315 1.0
p[47,28] 0.08 3.6e-3 0.155.1e-17 1.2e-6 1.0e-3 0.07 0.59 1843 1.0
p[48,28] 2.4e-10 2.3e-10 1.5e-81.6e-861.4e-557.3e-437.7e-331.2e-18 4000 nan
p[49,28] 0.02 1.5e-3 0.096.3e-2054.9e-451.3e-18 9.3e-7 0.29 3277 1.0
p[50,28] 1.2e-10 1.1e-10 7.2e-95.8e-1119.9e-682.6e-522.7e-408.2e-22 4000 nan
p[0,29] 0.02 1.3e-3 0.067.7e-661.7e-244.4e-12 1.5e-4 0.2 1762 1.0
p[1,29] 0.08 2.4e-3 0.125.5e-26 3.5e-3 0.04 0.12 0.43 2364 1.0
p[2,29] 0.05 2.3e-3 0.04 1.2e-4 0.01 0.04 0.07 0.15 346 1.01
p[3,29] 9.1e-3 1.1e-3 0.051.8e-652.5e-273.5e-15 4.3e-8 0.12 1684 1.0
p[4,29] 4.0e-3 5.9e-4 0.032.8e-688.5e-306.2e-17 3.7e-9 0.05 2088 1.0
p[5,29] 1.1e-4 8.6e-6 3.9e-4 2.1e-9 3.9e-7 4.1e-6 4.2e-5 1.1e-3 2017 nan
p[6,29] 8.6e-4 1.3e-4 4.9e-34.6e-981.6e-654.9e-48 1.3e-8 9.7e-3 1484 1.0
p[7,29] 0.01 1.3e-3 0.052.0e-692.2e-348.1e-10 5.5e-5 0.15 1250 1.0
p[8,29] 0.03 1.4e-3 0.051.8e-16 1.7e-7 2.0e-3 0.05 0.17 1147 1.0
p[9,29] 0.06 3.0e-3 0.121.8e-346.0e-10 4.5e-3 0.05 0.46 1678 1.0
p[10,29] 0.03 1.1e-3 0.059.7e-68 5.0e-8 1.1e-3 0.04 0.18 2128 1.0
p[11,29] 0.01 5.5e-4 0.02 3.8e-4 2.5e-3 5.5e-3 0.01 0.06 1621 1.0
p[12,29] 0.03 1.1e-3 0.055.4e-40 6.8e-7 3.7e-3 0.04 0.17 2113 1.0
p[13,29] 0.03 1.5e-3 0.066.7e-50 2.3e-9 2.1e-4 0.03 0.23 1880 1.0
p[14,29] 0.03 1.4e-3 0.051.2e-65 2.5e-8 8.9e-4 0.03 0.17 1259 1.0
p[15,29] 3.1e-3 4.1e-4 0.021.7e-531.5e-224.9e-13 1.1e-7 0.03 2975 1.0
p[16,29] 0.06 3.0e-3 0.115.0e-419.0e-10 9.4e-3 0.07 0.4 1342 1.0
p[17,29] 2.6e-3 1.9e-4 9.4e-39.0e-26 2.7e-9 1.5e-6 2.0e-4 0.04 2552 1.0
p[18,29] 0.03 1.7e-3 0.051.3e-184 3.2e-9 5.9e-4 0.04 0.18 986 1.0
p[19,29] 2.6e-4 1.2e-4 2.6e-36.5e-149.3e-10 3.9e-9 2.6e-8 1.2e-4 440 1.01
p[20,29] 0.03 1.4e-3 0.058.9e-1053.9e-22 9.1e-5 0.03 0.17 1206 1.01
p[21,29] 0.03 1.3e-3 0.051.7e-79 2.1e-7 3.4e-3 0.05 0.17 1400 1.0
p[22,29] 0.03 1.4e-3 0.061.8e-40 1.1e-9 9.6e-5 0.02 0.25 2198 1.0
p[23,29] 0.03 1.4e-3 0.077.1e-488.1e-10 1.9e-4 0.03 0.25 2146 1.0
p[24,29] 0.03 1.0e-3 0.054.7e-33 1.4e-4 9.0e-3 0.06 0.16 2094 1.0
p[25,29] 0.03 1.5e-3 0.069.6e-652.8e-12 3.1e-5 0.02 0.23 1550 1.0
p[26,29] 0.03 1.4e-3 0.087.7e-473.2e-11 5.2e-5 0.02 0.31 3464 1.0
p[27,29] 0.03 1.6e-3 0.051.4e-842.6e-18 2.1e-5 0.02 0.2 1216 1.0
p[28,29] 4.4e-4 4.9e-5 3.1e-37.8e-24 6.6e-9 8.0e-7 2.7e-5 3.4e-3 4000 1.0
p[29,29] 0.03 2.8e-3 0.091.2e-488.4e-15 7.2e-8 6.6e-3 0.32 1104 1.0
p[30,29] 0.05 2.4e-3 0.17.5e-521.7e-12 1.2e-3 0.07 0.35 1605 1.0
p[31,29] 9.1e-3 6.9e-4 0.039.3e-223.0e-10 1.2e-6 1.1e-3 0.11 1831 1.0
p[32,29] 3.9e-5 4.6e-5 1.2e-31.6e-1955.0e-1435.7e-1186.3e-969.4e-61 727 1.0
p[33,29] 0.01 8.8e-4 0.034.5e-10 1.0e-5 3.1e-4 5.8e-3 0.12 1353 1.0
p[34,29] 0.03 1.5e-3 0.081.4e-541.6e-15 4.7e-6 0.02 0.29 2778 1.0
p[35,29] 0.06 2.4e-3 0.11.0e-30 4.3e-7 0.01 0.08 0.38 1898 1.0
p[36,29] 7.8e-4 6.9e-4 9.3e-32.7e-1273.7e-875.2e-691.2e-50 1.2e-5 185 1.02
p[37,29] 0.03 1.4e-3 0.051.1e-901.1e-12 1.3e-4 0.03 0.19 1473 1.0
p[38,29] 1.3e-3 9.2e-4 0.013.4e-1892.6e-1347.7e-1091.5e-86 8.9e-7 173 1.02
p[39,29] 0.04 5.1e-4 0.03 9.4e-3 0.02 0.03 0.05 0.13 4000 1.0
p[40,29] 0.04 1.4e-3 0.079.3e-26 1.6e-8 7.8e-4 0.04 0.24 2134 1.0
p[41,29] 0.04 2.0e-3 0.11.5e-39 1.3e-7 3.0e-3 0.03 0.35 2230 1.0
p[42,29] 0.04 1.2e-3 0.051.7e-16 6.6e-6 6.3e-3 0.06 0.18 2025 1.0
p[43,29] 0.03 1.2e-3 0.056.6e-16 3.4e-7 1.9e-3 0.04 0.17 1722 1.0
p[44,29] 0.02 5.4e-4 0.033.5e-20 1.5e-4 3.9e-3 0.03 0.11 4000 1.0
p[45,29] 0.03 1.2e-3 0.067.8e-20 6.3e-8 5.8e-4 0.03 0.21 2379 1.0
p[46,29] 0.06 2.3e-3 0.12.4e-42 2.7e-9 9.6e-3 0.08 0.37 1879 1.0
p[47,29] 4.5e-3 5.9e-4 0.035.0e-749.0e-331.2e-187.9e-10 0.06 2189 1.0
p[48,29] 1.8e-3 7.7e-5 3.6e-3 2.4e-7 8.6e-5 6.1e-4 2.1e-3 0.01 2201 1.0
p[49,29] 0.03 1.5e-3 0.083.7e-465.1e-13 1.9e-5 0.01 0.3 2898 1.0
p[50,29] 1.2e-3 2.0e-4 7.3e-35.8e-10 2.8e-6 4.1e-5 3.7e-4 8.7e-3 1412 1.0
p[0,30] 4.5e-3 8.1e-4 0.031.2e-1655.6e-657.9e-348.6e-14 0.04 1410 1.0
p[1,30] 0.06 2.7e-3 0.113.0e-69 2.7e-9 3.1e-3 0.1 0.38 1551 1.0
p[2,30] 0.19 2.7e-3 0.12 4.8e-3 0.1 0.18 0.27 0.47 2083 1.0
p[3,30] 6.6e-4 1.5e-4 8.1e-31.0e-1734.1e-773.6e-464.8e-26 9.9e-4 3065 1.0
p[4,30] 2.2e-4 10.0e-5 6.2e-31.5e-1827.5e-841.4e-505.3e-29 2.5e-5 3877 1.0
p[5,30] 1.6e-3 2.7e-5 1.7e-3 4.5e-5 3.6e-4 9.4e-4 2.3e-3 6.4e-3 4000 1.0
p[6,30] 5.8e-4 9.5e-5 3.9e-32.1e-951.9e-633.7e-46 2.8e-8 5.4e-3 1684 1.0
p[7,30] 0.02 1.6e-3 0.061.2e-582.1e-28 4.6e-9 9.4e-4 0.24 1542 1.0
p[8,30] 0.03 1.2e-3 0.062.3e-15 7.7e-8 1.2e-3 0.03 0.21 2299 1.0
p[9,30] 0.05 2.7e-3 0.099.8e-996.0e-32 7.8e-5 0.07 0.31 1014 1.01
p[10,30] 0.03 1.2e-3 0.075.5e-585.8e-10 1.6e-4 0.02 0.28 4000 1.0
p[11,30] 0.14 1.5e-3 0.09 0.03 0.07 0.11 0.17 0.38 4000 1.0
p[12,30] 0.03 1.2e-3 0.071.9e-35 2.3e-7 1.6e-3 0.03 0.25 2969 1.0
p[13,30] 0.04 1.6e-3 0.093.5e-505.2e-13 3.9e-5 0.02 0.34 3123 1.0
p[14,30] 0.03 1.4e-3 0.066.0e-66 9.4e-9 4.7e-4 0.02 0.24 2148 1.0
p[15,30] 3.1e-4 1.1e-4 7.1e-31.8e-1147.3e-504.0e-287.0e-17 2.1e-5 3827 1.0
p[16,30] 0.02 1.3e-3 0.064.1e-1143.9e-32 1.2e-6 8.0e-3 0.23 2088 1.0
p[17,30] 6.1e-3 3.0e-4 0.026.8e-21 4.1e-7 7.0e-5 2.4e-3 0.06 2854 1.0
p[18,30] 0.03 1.4e-3 0.061.2e-185 1.1e-9 3.0e-4 0.02 0.21 1818 1.0
p[19,30] 1.1e-4 5.8e-5 1.2e-32.2e-143.7e-10 1.5e-9 1.0e-8 4.1e-5 443 1.01
p[20,30] 0.03 1.5e-3 0.077.8e-941.6e-19 6.3e-5 0.02 0.27 2262 1.0
p[21,30] 0.03 1.4e-3 0.068.7e-72 8.8e-8 2.2e-3 0.03 0.22 1925 1.0
p[22,30] 0.03 1.5e-3 0.082.6e-481.6e-14 3.3e-6 9.6e-3 0.33 3048 1.0
p[23,30] 0.04 1.8e-3 0.092.1e-509.9e-14 4.8e-5 0.02 0.34 2635 1.0
p[24,30] 0.07 2.0e-3 0.094.6e-29 1.5e-3 0.03 0.1 0.32 2215 1.0
p[25,30] 0.03 1.6e-3 0.096.4e-561.3e-16 3.8e-6 0.01 0.34 3166 1.0
p[26,30] 0.04 1.9e-3 0.082.3e-1162.8e-27 1.4e-5 0.02 0.29 1812 1.0
p[27,30] 0.03 1.6e-3 0.082.4e-704.5e-20 4.0e-6 9.6e-3 0.32 2700 1.0
p[28,30] 3.5e-3 1.8e-4 9.7e-39.0e-18 6.7e-6 1.9e-4 2.1e-3 0.03 2751 1.0
p[29,30] 8.9e-3 9.1e-4 0.041.2e-1301.1e-441.1e-25 2.3e-6 0.12 1736 1.0
p[30,30] 0.03 2.4e-3 0.081.3e-1325.3e-351.0e-10 5.3e-3 0.3 1267 1.0
p[31,30] 2.4e-3 2.9e-4 0.021.9e-321.7e-156.1e-10 5.6e-6 0.02 3342 1.0
p[32,30] 4.8e-5 4.8e-5 1.7e-31.8e-1793.6e-1292.5e-1068.7e-862.0e-53 1213 1.0
p[33,30] 0.06 2.3e-3 0.1 3.9e-7 2.2e-3 0.02 0.06 0.39 1835 1.0
p[34,30] 0.03 1.9e-3 0.087.3e-1403.1e-412.8e-14 2.4e-3 0.28 1794 1.0
p[35,30] 0.03 1.6e-3 0.074.0e-703.2e-17 8.2e-7 6.2e-3 0.27 2077 1.0
p[36,30] 1.7e-3 1.4e-3 0.022.7e-1115.0e-758.0e-592.6e-42 4.4e-4 273 1.01
p[37,30] 0.03 1.4e-3 0.072.1e-771.3e-15 1.5e-5 0.01 0.29 2871 1.0
p[38,30] 1.9e-3 1.3e-3 0.022.5e-1761.1e-1231.6e-991.8e-78 9.2e-7 236 1.01
p[39,30] 0.09 1.0e-3 0.06 0.01 0.05 0.08 0.12 0.25 4000 1.0
p[40,30] 0.03 1.5e-3 0.082.8e-451.9e-14 8.4e-7 6.3e-3 0.32 2899 1.0
p[41,30] 0.03 2.1e-3 0.075.4e-1081.9e-24 4.2e-4 0.03 0.25 1100 1.0
p[42,30] 0.03 1.4e-3 0.072.2e-18 5.3e-7 1.4e-3 0.03 0.26 2771 1.0
p[43,30] 0.03 1.3e-3 0.071.7e-18 3.3e-8 3.8e-4 0.02 0.28 2737 1.0
p[44,30] 0.06 1.5e-3 0.086.5e-17 2.9e-3 0.03 0.1 0.27 2778 1.0
p[45,30] 0.02 1.4e-3 0.078.2e-311.7e-11 1.7e-6 3.5e-3 0.26 2500 1.0
p[46,30] 0.04 2.2e-3 0.099.4e-1091.6e-27 1.8e-7 0.02 0.31 1627 1.0
p[47,30] 2.6e-4 1.0e-4 5.6e-38.0e-1942.7e-891.6e-533.0e-30 3.7e-5 3033 1.0
p[48,30] 0.06 1.5e-3 0.06 3.0e-4 0.01 0.04 0.09 0.23 1653 1.0
p[49,30] 0.04 2.1e-3 0.092.3e-1161.5e-26 1.2e-6 0.02 0.33 1824 1.0
p[50,30] 0.03 1.7e-3 0.05 7.3e-6 1.4e-3 5.7e-3 0.02 0.17 891 1.0
p[0,31] 4.4e-7 4.2e-7 2.7e-5 0.0 0.0 0.01.1e-1552.8e-28 4000 nan
p[1,31] 9.7e-6 5.0e-6 3.2e-4 0.04.6e-1151.4e-486.0e-22 1.5e-8 4000 nan
p[2,31] 0.23 9.0e-3 0.22 9.2e-5 0.04 0.16 0.38 0.76 617 1.0
p[3,31] 1.8e-8 1.8e-8 1.2e-6 0.0 0.0 0.02.6e-2129.6e-41 4000 nan
p[4,31] 1.8e-12 1.8e-121.1e-10 0.0 0.0 0.02.1e-2341.4e-53 4000 nan
p[5,31] 0.9 3.5e-3 0.17 0.32 0.89 0.97 0.99 1.0 2495 1.0
p[6,31] 2.8e-3 4.2e-4 0.012.3e-469.9e-306.6e-21 4.1e-4 0.03 1130 1.01
p[7,31] 0.1 4.8e-3 0.193.8e-1459.0e-22 2.1e-9 0.12 0.64 1520 1.0
p[8,31] 0.03 1.6e-3 0.13.1e-15 1.2e-9 7.7e-5 7.8e-3 0.36 3597 1.0
p[9,31] 4.1e-4 1.0e-4 6.1e-3 0.09.3e-2282.0e-452.2e-14 6.4e-4 3663 1.0
p[10,31] 0.04 2.8e-3 0.112.0e-485.6e-18 3.5e-9 4.6e-4 0.41 1545 1.0
p[11,31] 8.1e-4 1.2e-4 6.9e-33.2e-214.0e-11 5.1e-8 9.6e-6 4.0e-3 3500 1.0
p[12,31] 0.03 2.0e-3 0.11.5e-20 5.1e-9 5.9e-5 7.8e-3 0.35 2374 1.0
p[13,31] 0.06 4.4e-3 0.151.5e-1532.6e-372.2e-13 3.1e-3 0.5 1099 1.0
p[14,31] 0.03 1.9e-3 0.112.5e-275.7e-11 1.3e-5 3.6e-3 0.39 3083 1.0
p[15,31] 1.2e-13 1.2e-137.5e-12 0.0 0.07.2e-1936.3e-1182.7e-45 4000 nan
p[16,31] 1.2e-6 4.6e-7 2.9e-5 0.04.6e-2411.3e-642.1e-28 3.2e-9 4000 nan
p[17,31] 0.05 6.8e-4 0.04 6.0e-9 0.01 0.05 0.09 0.13 4000 1.0
p[18,31] 0.03 1.6e-3 0.091.6e-888.5e-12 3.3e-5 4.2e-3 0.33 3287 1.0
p[19,31] 6.5e-5 3.4e-5 5.3e-43.4e-168.4e-133.4e-122.3e-11 7.2e-4 242 nan
p[20,31] 0.05 2.7e-3 0.122.3e-537.5e-17 3.6e-7 0.02 0.43 1851 1.0
p[21,31] 0.03 1.6e-3 0.0910.0e-38 3.6e-9 1.9e-4 9.9e-3 0.32 3345 1.0
p[22,31] 0.05 4.7e-3 0.148.4e-1872.6e-548.1e-21 1.4e-5 0.53 946 1.0
p[23,31] 0.06 3.9e-3 0.144.0e-1692.7e-393.0e-14 8.9e-4 0.51 1286 1.0
p[24,31] 0.16 3.9e-3 0.194.4e-14 0.01 0.09 0.24 0.68 2306 1.0
p[25,31] 0.06 4.0e-3 0.141.5e-1392.2e-343.4e-14 1.3e-3 0.48 1160 1.0
p[26,31] 0.09 8.9e-3 0.25 0.02.0e-2671.2e-62 8.6e-8 0.95 771 1.0
p[27,31] 0.05 3.4e-3 0.121.1e-692.7e-262.4e-12 1.1e-3 0.42 1248 1.0
p[28,31] 0.19 3.8e-3 0.14 1.8e-7 0.07 0.18 0.28 0.5 1319 1.0
p[29,31] 1.8e-4 1.2e-4 7.4e-3 0.01.7e-2625.4e-1593.9e-50 1.3e-8 3988 1.0
p[30,31] 1.4e-5 6.3e-6 4.0e-4 0.0 0.05.8e-1261.2e-40 1.9e-8 4000 nan
p[31,31] 8.4e-5 6.9e-5 4.4e-31.2e-835.0e-413.5e-265.3e-15 2.5e-6 4000 1.0
p[32,31] 5.7e-5 6.8e-5 1.5e-31.8e-1023.7e-731.1e-591.2e-471.3e-28 479 1.0
p[33,31] 0.34 5.1e-3 0.21 5.1e-7 0.2 0.35 0.48 0.75 1645 1.0
p[34,31] 7.9e-3 1.1e-3 0.07 0.0 0.01.7e-1548.8e-30 0.02 3605 1.0
p[35,31] 3.4e-6 1.8e-6 1.2e-4 0.03.1e-1436.2e-682.7e-32 2.5e-9 4000 nan
p[36,31] 5.9e-3 1.8e-3 0.046.4e-666.3e-439.4e-337.2e-23 0.07 531 1.0
p[37,31] 0.03 2.4e-3 0.16.8e-533.6e-232.0e-10 4.5e-4 0.38 1729 1.0
p[38,31] 1.8e-3 1.3e-3 0.025.5e-992.0e-681.9e-542.6e-42 1.5e-5 266 1.01
p[39,31] 3.1e-8 1.6e-8 1.0e-68.9e-368.2e-233.6e-182.3e-14 7.3e-9 4000 nan
p[40,31] 0.01 1.4e-3 0.063.9e-1689.1e-623.4e-281.9e-12 0.18 1783 1.0
p[41,31] 2.8e-3 5.5e-4 0.03 0.02.5e-1501.5e-272.0e-10 0.01 2387 1.0
p[42,31] 0.03 2.2e-3 0.094.4e-252.3e-10 3.5e-6 2.5e-3 0.32 1593 1.0
p[43,31] 0.03 1.9e-3 0.093.3e-262.0e-11 5.8e-7 1.0e-3 0.33 2349 1.0
p[44,31] 0.22 3.5e-3 0.19 1.7e-7 0.07 0.18 0.31 0.7 2846 1.0
p[45,31] 8.5e-3 1.3e-3 0.051.7e-901.5e-352.5e-187.3e-10 0.1 1823 1.0
p[46,31] 9.8e-6 5.9e-6 3.7e-4 0.09.5e-2783.7e-974.6e-36 6.1e-9 4000 nan
p[47,31] 3.5e-19 3.5e-191.6e-17 0.0 0.0 0.01.5e-2738.8e-62 2005 nan
p[48,31] 0.11 4.0e-3 0.191.9e-10 4.3e-4 0.01 0.13 0.68 2143 1.0
p[49,31] 0.03 2.9e-3 0.14 0.05.9e-2653.2e-48 3.8e-8 0.54 2294 1.0
p[50,31] 0.32 0.01 0.322.5e-10 0.01 0.19 0.6 0.94 848 1.0
lp__ -2653 0.32 11.81 -2677 -2661 -2653 -2645 -2631 1337 1.0
Samples were drawn using NUTS at Fri Jul 28 17:35:52 2017.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at
convergence, Rhat=1).
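As a quick check on the convergence diagnostics described above, the summary can be pulled into a table and filtered on `Rhat` and `n_eff`. This is a minimal sketch assuming PyStan 2.x, where `fit.summary()` returns the summary array together with its row and column names; the thresholds 1.1 and 400 are illustrative choices, not values from the original notebook.
```python
import pandas as pd

# Collect the printed summary into a DataFrame (PyStan 2.x API).
s = fit.summary()
summary_df = pd.DataFrame(s['summary'],
                          index=s['summary_rownames'],
                          columns=s['summary_colnames'])

# Flag parameters whose chains look poorly mixed (illustrative thresholds).
poorly_mixed = summary_df[(summary_df['Rhat'] > 1.1) | (summary_df['n_eff'] < 400)]
print(poorly_mixed[['mean', 'n_eff', 'Rhat']])
```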
```python
samples = fit.extract()  # posterior draws as a dict of arrays, keyed by parameter name
```
```python
import pandas as pd
import seaborn as sns

sns.set(color_codes=True)
sns.set_style("white")
```
```python
# Posterior samples for source s1: greybody normalisation Nbb and the four band fluxes.
s1 = 5
df = pd.DataFrame(np.vstack((samples['Nbb'][:, s1],
                             samples['src_f'][:, s1, 0],
                             samples['src_f'][:, s1, 1],
                             samples['src_f'][:, s1, 2],
                             samples['src_f'][:, s1, 3])).T,
                  columns=['Nbb', 'S250', 'S350', 'S500', 'S100'])
g = sns.PairGrid(df, size=5)
g.map_diag(plt.hist)
g.map_lower(sns.kdeplot, cmap="Reds_d", alpha=0.5, n_levels=10, normed=True)
# Overplot the true catalogue fluxes as green dashed lines on each panel.
for i in g.axes[:, 1]: i.axvline(x=fcat['S250'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
for i in g.axes[1, [0]]: i.axhline(y=fcat['S250'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
for i in g.axes[:, 2]: i.axvline(x=fcat['S350'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
for i in g.axes[2, [0, 1]]: i.axhline(y=fcat['S350'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
for i in g.axes[:, 3]: i.axvline(x=fcat['S500'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
for i in g.axes[3, [0, 1, 2]]: i.axhline(y=fcat['S500'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
for i in g.axes[:, 4]: i.axvline(x=fcat['S100'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
for i in g.axes[4, [0, 1, 2, 3]]: i.axhline(y=fcat['S100'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
```

```python
# Joint flux posteriors for source s1 = 17: this run in red, a second posterior object in blue.
s1 = 17
df = pd.DataFrame(np.vstack((samples['src_f'][:, s1, 0],
                             samples['src_f'][:, s1, 1],
                             samples['src_f'][:, s1, 2])).T,
                  columns=['S250', 'S350', 'S500'])
g = sns.PairGrid(df, size=5)
g.map_diag(sns.kdeplot)
g.map_lower(sns.kdeplot, cmap="Reds_d", alpha=0.5, n_levels=10, normed=True)
g.map_upper(plt.scatter, color='red', alpha=0.1)
# Swap in the samples from the other posterior object and overplot them in blue.
g.data = pd.DataFrame(np.vstack((posterior.samples['src_f'][:, 0, s1],
                                 posterior.samples['src_f'][:, 1, s1],
                                 posterior.samples['src_f'][:, 2, s1])).T,
                      columns=['S250', 'S350', 'S500'])
g.map_diag(sns.kdeplot)
g.map_lower(sns.kdeplot, cmap="Blues_d", alpha=0.5, n_levels=10, normed=True)
g.map_upper(plt.scatter, color='blue', alpha=0.1)
# True catalogue fluxes as green dashed lines.
for i in g.axes[:, 0]: i.axvline(x=fcat['S250'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
for i in g.axes[0, [1, 2]]: i.axhline(y=fcat['S250'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
for i in g.axes[:, 1]: i.axvline(x=fcat['S350'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
for i in g.axes[1, [0, 2]]: i.axhline(y=fcat['S350'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
for i in g.axes[:, 2]: i.axvline(x=fcat['S500'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
for i in g.axes[2, [0, 1]]: i.axhline(y=fcat['S500'][sgood][prior250.ID-1][s1], c='g', linestyle='--')
#for i in g.axes[:,4]: i.axvline(x=fcat['S100'][sgood][prior250.ID-1][s1],c='g',linestyle='--')
#for i in g.axes[4,[0,1,2,3,]]: i.axhline(y=fcat['S100'][sgood][prior250.ID-1][s1],c='g',linestyle='--')
```

```python
posterior.samples['src_f'].shape
```
(2000, 3, 51)
```python
plt.plot(np.log10(samples['src_f'][:,28,0]))  # trace of log10(S250) draws for source 28
```
[<matplotlib.lines.Line2D at 0x17fcc5198>]

```python
plt.plot(samples['lp__'])  # trace of the log posterior
```
[<matplotlib.lines.Line2D at 0x12d12f780>]

```python
from xidplus import plots
maps=plots.plot_map([prior250,prior350,prior500])
```

```python
for i in np.arange(0, prior250.nsrc):
    maps[0][0].add_label(prior250.sra[i], prior250.sdec[i]+0.0005, str(i), relative=False, size=20, color='white')
```
```python
i=22
maps[0][1].add_label(prior250.sra[i], prior250.sdec[i]+0.0005, str(i), relative=False,size=20,color='white')
i=26
maps[0][2].add_label(prior250.sra[i], prior250.sdec[i]+0.0005, str(i), relative=False,size=20,color='white')
```
```python
maps[1]
```

```python
df = pd.DataFrame(samples['src_f'][:,[28,22],0], columns=['S250 28', 'S250 22'])
g = sns.PairGrid(df,size=5)
g.map_diag(plt.hist)
g.map_lower(sns.kdeplot, cmap="Reds_d",alpha=1.0,n_levels=10,normed=True)
```
<seaborn.axisgrid.PairGrid at 0x17fcaeb70>

```python
fcat['Z_OBS'][sgood][prior250.ID-1][39]
```
2.3612638
```python
fcat[sgood][prior250.ID-1][21]
```
(' 226002890000018', ' 22600190410000018', 0.09461537, 0.44664088, 0.51100528, 0.32646787, 0.14614774, 23.095448, 22.963345, 22.325649, 21.71051, 21.510946, 2.0177048852087465, 0.0, 2.5751139e+11, 1.5233551e+09, 3.0002962e+09, 150.74933723718902, 0.65092659, 0.65018713)
```python
fcat.columns
```
ColDefs(
name = 'DHALOID'; format = '22A'
name = 'GALAXYID'; format = '22A'
name = 'S100'; format = 'E'
name = 'S160'; format = 'E'
name = 'S250'; format = 'E'
name = 'S350'; format = 'E'
name = 'S500'; format = 'E'
name = 'APPUSO_TOT_EXT'; format = 'E'
name = 'APPGSO_TOT_EXT'; format = 'E'
name = 'APPRSO_TOT_EXT'; format = 'E'
name = 'APPISO_TOT_EXT'; format = 'E'
name = 'APPZSO_TOT_EXT'; format = 'E'
name = 'DEC'; format = 'D'
name = 'IS_CENTRAL'; format = 'E'
name = 'MHHALO'; format = 'E'
name = 'MSTARDOT'; format = 'E'
name = 'MSTARS_TOT'; format = 'E'
name = 'RA'; format = 'D'
name = 'Z_COS'; format = 'E'
name = 'Z_OBS'; format = 'E'
)
```python
import pandas as pd
```
```python
SEDS_full=pd.read_pickle('./SEDS_full.pkl')
```
```python
from astropy.cosmology import Planck13
import astropy.units as u
# 4*pi*D_L^2 in cgs units, for converting SED luminosities to observed fluxes
# (requires a redshift z to be defined; the next cell sets z per source and recomputes this).
div = (4.0*np.pi * np.square(Planck13.luminosity_distance(z).cgs))
div = div.value
```
```python
s1 = 17
z = fcat['Z_OBS'][sgood][prior250.ID-1][s1]
div = (4.0*np.pi * np.square(Planck13.luminosity_distance(z).cgs))
div = div.value
# Overplot posterior SED draws (blue), the corresponding posterior band fluxes (green)
# and the true catalogue fluxes (red) for source s1.
for s in range(0, 4000, 10):
    plt.loglog((z+1.0)*SEDS_full['wave'],
               np.power(10.0, samples['Nbb'][s, s1])*(1.0+z)
               * SEDS_full[SEDS_full.columns[np.arange(1, samples['p'].shape[2]+1)
                                             [np.random.multinomial(1, samples['p'][s, s1, :]) == 1]]]/div,
               alpha=0.1, c='b')
    plt.plot([250, 350, 500, 100], samples['src_f'][s, s1, 0:4], 'go', alpha=0.1)
    plt.plot([250, 350, 500, 100], np.array([fcat['S250'][sgood][prior250.ID-1][s1], fcat['S350'][sgood][prior250.ID-1][s1],
                                             fcat['S500'][sgood][prior250.ID-1][s1], fcat['S100'][sgood][prior250.ID-1][s1]]), 'ro')
```
[<matplotlib.lines.Line2D at 0x1a311c588>]

|
H-E-L-PREPO_NAMEXID_plusPATH_START.@XID_plus_extracted@XID_plus-master@docs@notebooks@examples@[email protected]_END.py
|
{
"filename": "LICENSE.md",
"repo_name": "gmzsebastian/SLSNe",
"repo_path": "SLSNe_extracted/SLSNe-main/LICENSE.md",
"type": "Markdown"
}
|
The MIT License (MIT)
Copyright (c) 2023 Sebastian Gomez ([email protected])
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
{
"filename": "test_cleanup_plugin.py",
"repo_name": "cosmo-ethz/hide",
"repo_path": "hide_extracted/hide-master/test/test_cleanup_plugin.py",
"type": "Python"
}
|
# HIDE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HIDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HIDE. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Sep 14, 2015
author: jakeret
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
from ivy.utils.struct import Struct
from hide.plugins import clean_up
class TestCleanupPlugin(object):
def test_clean_up(self):
ctx = Struct(tod_vx = np.zeros((1,1)),
tod_vy = np.zeros((1,1)),
frequencies = np.zeros((1,1)),
strategy_coords = [(1,2,3,4,5)],
beams = [(1,2,3,4,5)],
tod_vx_rfi = np.ones((1,1))
)
plugin = clean_up.Plugin(ctx)
plugin()
assert not ctx.has_key("tod_vx")
assert not ctx.has_key("tod_vy")
assert not ctx.has_key("frequencies")
assert not ctx.has_key("strategy_coords")
assert not ctx.has_key("beams")
assert not ctx.has_key("tod_vx_rfi")
|
{
"filename": "question.md",
"repo_name": "TeamLEGWORK/LEGWORK",
"repo_path": "LEGWORK_extracted/LEGWORK-main/.github/ISSUE_TEMPLATE/question.md",
"type": "Markdown"
}
|
---
name: Question
about: Confused about something in LEGWORK? Not sure how to use a function? Feel free to ask questions here!
title: ''
labels: question
assignees: ''
---
Use this issue to ask any questions about how LEGWORK functions work, how best to use LEGWORK for your project or anything else you need help with! Wherever possible please include code examples to make it easier to help you!
|
{
"filename": "ex_var.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/examples/tsa/ex_var.py",
"type": "Python"
}
|
import numpy as np
import statsmodels.api as sm
from statsmodels.tsa.api import VAR
# some example data
mdata = sm.datasets.macrodata.load().data
mdata = mdata[['realgdp','realcons','realinv']]
names = mdata.dtype.names
data = mdata.view((float,3))
use_growthrate = False #True #False
if use_growthrate:
data = 100 * 4 * np.diff(np.log(data), axis=0)
model = VAR(data, names=names)
res = model.fit(4)
nobs_all = data.shape[0]
#in-sample 1-step ahead forecasts
fc_in = np.array([np.squeeze(res.forecast(model.y[t-20:t], 1))
for t in range(nobs_all-6,nobs_all)])
print(fc_in - res.fittedvalues[-6:])
#out-of-sample 1-step ahead forecasts
fc_out = np.array([np.squeeze(VAR(data[:t]).fit(2).forecast(data[t-20:t], 1))
for t in range(nobs_all-6,nobs_all)])
print(fc_out - data[nobs_all-6:nobs_all])
print(fc_out - res.fittedvalues[-6:])
#out-of-sample h-step ahead forecasts
h = 2
fc_out = np.array([VAR(data[:t]).fit(2).forecast(data[t-20:t], h)[-1]
for t in range(nobs_all-6-h+1,nobs_all-h+1)])
print(fc_out - data[nobs_all-6:nobs_all]) #out-of-sample forecast error
print(fc_out - res.fittedvalues[-6:])
#import matplotlib.pyplot as plt
res.plot_forecast(20)
#plt.show()
|
{
"filename": "_decomp_schur.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/linalg/_decomp_schur.py",
"type": "Python"
}
|
"""Schur decomposition functions."""
import numpy as np
from numpy import asarray_chkfinite, single, asarray, array
from numpy.linalg import norm
# Local imports.
from ._misc import LinAlgError, _datacopied
from .lapack import get_lapack_funcs
from ._decomp import eigvals
__all__ = ['schur', 'rsf2csf']
_double_precision = ['i', 'l', 'd']
def schur(a, output='real', lwork=None, overwrite_a=False, sort=None,
check_finite=True):
"""
Compute Schur decomposition of a matrix.
The Schur decomposition is::
A = Z T Z^H
where Z is unitary and T is either upper-triangular, or for real
Schur decomposition (output='real'), quasi-upper triangular. In
the quasi-triangular form, 2x2 blocks describing complex-valued
eigenvalue pairs may extrude from the diagonal.
Parameters
----------
a : (M, M) array_like
Matrix to decompose
output : {'real', 'complex'}, optional
When the dtype of `a` is real, this specifies whether to compute
the real or complex Schur decomposition.
When the dtype of `a` is complex, this argument is ignored, and the
complex Schur decomposition is computed.
lwork : int, optional
Work array size. If None or -1, it is automatically computed.
overwrite_a : bool, optional
Whether to overwrite data in a (may improve performance).
sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
Specifies whether the upper eigenvalues should be sorted. A callable
may be passed that, given an eigenvalue, returns a boolean denoting
whether the eigenvalue should be sorted to the top-left (True).
- If ``output='complex'`` OR the dtype of `a` is complex, the callable
should have one argument: the eigenvalue expressed as a complex number.
- If ``output='real'`` AND the dtype of `a` is real, the callable should have
two arguments: the real and imaginary parts of the eigenvalue, respectively.
Alternatively, string parameters may be used::
'lhp' Left-hand plane (real(eigenvalue) < 0.0)
'rhp' Right-hand plane (real(eigenvalue) >= 0.0)
'iuc' Inside the unit circle (abs(eigenvalue) <= 1.0)
'ouc' Outside the unit circle (abs(eigenvalue) > 1.0)
Defaults to None (no sorting).
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
T : (M, M) ndarray
Schur form of A. It is real-valued for the real Schur decomposition.
Z : (M, M) ndarray
        A unitary Schur transformation matrix for A.
It is real-valued for the real Schur decomposition.
sdim : int
If and only if sorting was requested, a third return value will
contain the number of eigenvalues satisfying the sort condition.
Note that complex conjugate pairs for which the condition is true
for either eigenvalue count as 2.
Raises
------
LinAlgError
Error raised under three conditions:
1. The algorithm failed due to a failure of the QR algorithm to
compute all eigenvalues.
2. If eigenvalue sorting was requested, the eigenvalues could not be
reordered due to a failure to separate eigenvalues, usually because
of poor conditioning.
3. If eigenvalue sorting was requested, roundoff errors caused the
leading eigenvalues to no longer satisfy the sorting condition.
See Also
--------
rsf2csf : Convert real Schur form to complex Schur form
Examples
--------
>>> import numpy as np
>>> from scipy.linalg import schur, eigvals
>>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]])
>>> T, Z = schur(A)
>>> T
array([[ 2.65896708, 1.42440458, -1.92933439],
[ 0. , -0.32948354, -0.49063704],
[ 0. , 1.31178921, -0.32948354]])
>>> Z
array([[0.72711591, -0.60156188, 0.33079564],
[0.52839428, 0.79801892, 0.28976765],
[0.43829436, 0.03590414, -0.89811411]])
>>> T2, Z2 = schur(A, output='complex')
>>> T2
array([[ 2.65896708, -1.22839825+1.32378589j, 0.42590089+1.51937378j], # may vary
[ 0. , -0.32948354+0.80225456j, -0.59877807+0.56192146j],
[ 0. , 0. , -0.32948354-0.80225456j]])
>>> eigvals(T2)
array([2.65896708, -0.32948354+0.80225456j, -0.32948354-0.80225456j]) # may vary
A custom eigenvalue-sorting condition that sorts by positive imaginary part
is satisfied by only one eigenvalue.
>>> _, _, sdim = schur(A, output='complex', sort=lambda x: x.imag > 1e-15)
>>> sdim
1
When ``output='real'`` and the array `a` is real, the `sort` callable must accept
the real and imaginary parts as separate arguments. Note that now the complex
eigenvalues ``-0.32948354+0.80225456j`` and ``-0.32948354-0.80225456j`` will be
treated as a complex conjugate pair, and according to the `sdim` documentation,
complex conjugate pairs for which the condition is True for *either* eigenvalue
increase `sdim` by *two*.
>>> _, _, sdim = schur(A, output='real', sort=lambda x, y: y > 1e-15)
>>> sdim
2
"""
if output not in ['real', 'complex', 'r', 'c']:
raise ValueError("argument must be 'real', or 'complex'")
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if np.issubdtype(a1.dtype, np.integer):
a1 = asarray(a, dtype=np.dtype("long"))
if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
raise ValueError('expected square matrix')
typ = a1.dtype.char
if output in ['complex', 'c'] and typ not in ['F', 'D']:
if typ in _double_precision:
a1 = a1.astype('D')
else:
a1 = a1.astype('F')
# accommodate empty matrix
if a1.size == 0:
t0, z0 = schur(np.eye(2, dtype=a1.dtype))
if sort is None:
return (np.empty_like(a1, dtype=t0.dtype),
np.empty_like(a1, dtype=z0.dtype))
else:
return (np.empty_like(a1, dtype=t0.dtype),
np.empty_like(a1, dtype=z0.dtype), 0)
overwrite_a = overwrite_a or (_datacopied(a1, a))
gees, = get_lapack_funcs(('gees',), (a1,))
if lwork is None or lwork == -1:
# get optimal work array
result = gees(lambda x: None, a1, lwork=-1)
lwork = result[-2][0].real.astype(np.int_)
if sort is None:
sort_t = 0
def sfunction(x, y=None):
return None
else:
sort_t = 1
if callable(sort):
sfunction = sort
elif sort == 'lhp':
def sfunction(x, y=None):
return x.real < 0.0
elif sort == 'rhp':
def sfunction(x, y=None):
return x.real >= 0.0
elif sort == 'iuc':
def sfunction(x, y=None):
z = x if y is None else x + y*1j
return abs(z) <= 1.0
elif sort == 'ouc':
def sfunction(x, y=None):
z = x if y is None else x + y*1j
return abs(z) > 1.0
else:
raise ValueError("'sort' parameter must either be 'None', or a "
"callable, or one of ('lhp','rhp','iuc','ouc')")
result = gees(sfunction, a1, lwork=lwork, overwrite_a=overwrite_a,
sort_t=sort_t)
info = result[-1]
if info < 0:
raise ValueError(f'illegal value in {-info}-th argument of internal gees')
elif info == a1.shape[0] + 1:
raise LinAlgError('Eigenvalues could not be separated for reordering.')
elif info == a1.shape[0] + 2:
raise LinAlgError('Leading eigenvalues do not satisfy sort condition.')
elif info > 0:
raise LinAlgError("Schur form not found. Possibly ill-conditioned.")
if sort is None:
return result[0], result[-3]
else:
return result[0], result[-3], result[1]
eps = np.finfo(float).eps
feps = np.finfo(single).eps
_array_kind = {'b': 0, 'h': 0, 'B': 0, 'i': 0, 'l': 0,
'f': 0, 'd': 0, 'F': 1, 'D': 1}
_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1}
_array_type = [['f', 'd'], ['F', 'D']]
def _commonType(*arrays):
kind = 0
precision = 0
for a in arrays:
t = a.dtype.char
kind = max(kind, _array_kind[t])
precision = max(precision, _array_precision[t])
return _array_type[kind][precision]
def _castCopy(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.char == type:
cast_arrays = cast_arrays + (a.copy(),)
else:
cast_arrays = cast_arrays + (a.astype(type),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def rsf2csf(T, Z, check_finite=True):
"""
Convert real Schur form to complex Schur form.
Convert a quasi-diagonal real-valued Schur form to the upper-triangular
complex-valued Schur form.
Parameters
----------
T : (M, M) array_like
Real Schur form of the original array
Z : (M, M) array_like
Schur transformation matrix
check_finite : bool, optional
Whether to check that the input arrays contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
T : (M, M) ndarray
Complex Schur form of the original array
Z : (M, M) ndarray
Schur transformation matrix corresponding to the complex form
See Also
--------
schur : Schur decomposition of an array
Examples
--------
>>> import numpy as np
>>> from scipy.linalg import schur, rsf2csf
>>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]])
>>> T, Z = schur(A)
>>> T
array([[ 2.65896708, 1.42440458, -1.92933439],
[ 0. , -0.32948354, -0.49063704],
[ 0. , 1.31178921, -0.32948354]])
>>> Z
array([[0.72711591, -0.60156188, 0.33079564],
[0.52839428, 0.79801892, 0.28976765],
[0.43829436, 0.03590414, -0.89811411]])
>>> T2 , Z2 = rsf2csf(T, Z)
>>> T2
array([[2.65896708+0.j, -1.64592781+0.743164187j, -1.21516887+1.00660462j],
[0.+0.j , -0.32948354+8.02254558e-01j, -0.82115218-2.77555756e-17j],
[0.+0.j , 0.+0.j, -0.32948354-0.802254558j]])
>>> Z2
array([[0.72711591+0.j, 0.28220393-0.31385693j, 0.51319638-0.17258824j],
[0.52839428+0.j, 0.24720268+0.41635578j, -0.68079517-0.15118243j],
[0.43829436+0.j, -0.76618703+0.01873251j, -0.03063006+0.46857912j]])
"""
if check_finite:
Z, T = map(asarray_chkfinite, (Z, T))
else:
Z, T = map(asarray, (Z, T))
for ind, X in enumerate([Z, T]):
if X.ndim != 2 or X.shape[0] != X.shape[1]:
raise ValueError(f"Input '{'ZT'[ind]}' must be square.")
if T.shape[0] != Z.shape[0]:
message = f"Input array shapes must match: Z: {Z.shape} vs. T: {T.shape}"
raise ValueError(message)
N = T.shape[0]
t = _commonType(Z, T, array([3.0], 'F'))
Z, T = _castCopy(t, Z, T)
for m in range(N-1, 0, -1):
if abs(T[m, m-1]) > eps*(abs(T[m-1, m-1]) + abs(T[m, m])):
mu = eigvals(T[m-1:m+1, m-1:m+1]) - T[m, m]
r = norm([mu[0], T[m, m-1]])
c = mu[0] / r
s = T[m, m-1] / r
G = array([[c.conj(), s], [-s, c]], dtype=t)
T[m-1:m+1, m-1:] = G.dot(T[m-1:m+1, m-1:])
T[:m+1, m-1:m+1] = T[:m+1, m-1:m+1].dot(G.conj().T)
Z[:, m-1:m+1] = Z[:, m-1:m+1].dot(G.conj().T)
T[m, m-1] = 0.0
return T, Z
|
{
"filename": "merge_benchmark.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/benchmarks/layer_benchmark/merge_benchmark.py",
"type": "Python"
}
|
"""Benchmark merge layers.
To run benchmarks, see the following command for an example, please change the
flag to your custom value:
```
python3 -m benchmarks.layer_benchmark.merge_benchmark \
--benchmark_name=benchmark_add \
--num_samples=2048 \
--batch_size=256 \
--jit_compile=True
```
"""
from absl import app
from absl import flags
from benchmarks.layer_benchmark.base_benchmark import LayerBenchmark
FLAGS = flags.FLAGS
def benchmark_add(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Add"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[[256, 256], [256, 256]],
flat_call_inputs=False,
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_average(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Average"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[[256, 256], [256, 256]],
flat_call_inputs=False,
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_concatenate(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Concatenate"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[[256, 256], [256, 256]],
flat_call_inputs=False,
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_dot(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Dot"
init_args = {"axes": [2, 1]}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[[256, 32], [32, 64]],
flat_call_inputs=False,
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_maximum(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Maximum"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[[256, 256], [256, 256]],
flat_call_inputs=False,
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_minimum(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Minimum"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[[256, 256], [256, 256]],
flat_call_inputs=False,
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_multiply(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Multiply"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[[256, 64], [256, 64]],
flat_call_inputs=False,
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
def benchmark_subtract(
num_samples,
batch_size,
jit_compile=True,
):
layer_name = "Subtract"
init_args = {}
benchmark = LayerBenchmark(
layer_name,
init_args,
input_shape=[[256, 256], [256, 256]],
flat_call_inputs=False,
jit_compile=jit_compile,
)
benchmark.benchmark_predict(
num_samples=num_samples,
batch_size=batch_size,
)
benchmark.benchmark_train(
num_samples=num_samples,
batch_size=batch_size,
)
BENCHMARK_NAMES = {
"benchmark_add": benchmark_add,
"benchmark_average": benchmark_average,
"benchmark_concatenate": benchmark_concatenate,
"benchmark_dot": benchmark_dot,
"benchmark_maximum": benchmark_maximum,
"benchmark_minimum": benchmark_minimum,
"benchmark_multiply": benchmark_multiply,
"benchmark_subtract": benchmark_subtract,
}
def main(_):
benchmark_name = FLAGS.benchmark_name
num_samples = FLAGS.num_samples
batch_size = FLAGS.batch_size
jit_compile = FLAGS.jit_compile
if benchmark_name is None:
for name, benchmark_fn in BENCHMARK_NAMES.items():
benchmark_fn(num_samples, batch_size, jit_compile)
return
if benchmark_name not in BENCHMARK_NAMES:
raise ValueError(
f"Invalid benchmark name: {benchmark_name}, `benchmark_name` must "
f"be one of {BENCHMARK_NAMES.keys()}"
)
benchmark_fn = BENCHMARK_NAMES[benchmark_name]
benchmark_fn(num_samples, batch_size, jit_compile)
if __name__ == "__main__":
app.run(main)
|
{
"filename": "_title.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/surface/colorbar/_title.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(self, plotly_name="title", parent_name="surface.colorbar", **kwargs):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Title"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
""",
),
**kwargs
)
|
{
"filename": "_fastica.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/decomposition/_fastica.py",
"type": "Python"
}
|
"""
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral, Real
import numpy as np
from scipy import linalg
from ..base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from ..exceptions import ConvergenceWarning
from ..utils import as_float_array, check_array, check_random_state
from ..utils._param_validation import Interval, Options, StrOptions, validate_params
from ..utils.validation import check_is_fitted, validate_data
__all__ = ["fastica", "FastICA"]
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W.
Parameters
----------
w : ndarray of shape (n,)
Array to be orthogonalized
W : ndarray of shape (p, n)
Null space definition
j : int < p
The no of (from the first) rows of Null space W wrt which w is
orthogonalized.
Notes
-----
Assumes that W is orthogonal
w changed in place
"""
w -= np.linalg.multi_dot([w, W[:j].T, W[:j]])
return w
def _sym_decorrelation(W):
"""Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
s, u = linalg.eigh(np.dot(W, W.T))
# Avoid sqrt of negative values because of rounding errors. Note that
# np.sqrt(tiny) is larger than tiny and therefore this clipping also
# prevents division by zero in the next step.
s = np.clip(s, a_min=np.finfo(W.dtype).tiny, a_max=None)
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W])
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X.dtype)
n_iter = []
# j is the index of the extracted component
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w**2).sum())
for i in range(max_iter):
gwtx, g_wtx = g(np.dot(w.T, X), fun_args)
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1**2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
n_iter.append(i + 1)
W[j, :] = w
return W, max(n_iter)
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
"""Parallel FastICA.
Used internally by FastICA --main loop
"""
W = _sym_decorrelation(w_init)
del w_init
p_ = float(X.shape[1])
for ii in range(max_iter):
gwtx, g_wtx = g(np.dot(W, X), fun_args)
W1 = _sym_decorrelation(np.dot(gwtx, X.T) / p_ - g_wtx[:, np.newaxis] * W)
del gwtx, g_wtx
        # builtin max, abs are faster than numpy counterparts.
# np.einsum allows having the lowest memory footprint.
# It is faster than np.diag(np.dot(W1, W.T)).
lim = max(abs(abs(np.einsum("ij,ij->i", W1, W)) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn(
(
"FastICA did not converge. Consider increasing "
"tolerance or the maximum number of iterations."
),
ConvergenceWarning,
)
return W, ii + 1
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(x, fun_args=None):
alpha = fun_args.get("alpha", 1.0) # comment it out?
x *= alpha
gx = np.tanh(x, x) # apply the tanh inplace
g_x = np.empty(x.shape[0], dtype=x.dtype)
# XXX compute in chunks to avoid extra allocation
for i, gx_i in enumerate(gx): # please don't vectorize.
g_x[i] = (alpha * (1 - gx_i**2)).mean()
return gx, g_x
def _exp(x, fun_args):
exp = np.exp(-(x**2) / 2)
gx = x * exp
g_x = (1 - x**2) * exp
return gx, g_x.mean(axis=-1)
def _cube(x, fun_args):
return x**3, (3 * x**2).mean(axis=-1)
@validate_params(
{
"X": ["array-like"],
"return_X_mean": ["boolean"],
"compute_sources": ["boolean"],
"return_n_iter": ["boolean"],
},
prefer_skip_nested_validation=False,
)
def fastica(
X,
n_components=None,
*,
algorithm="parallel",
whiten="unit-variance",
fun="logcosh",
fun_args=None,
max_iter=200,
tol=1e-04,
w_init=None,
whiten_solver="svd",
random_state=None,
return_X_mean=False,
compute_sources=True,
return_n_iter=False,
):
"""Perform Fast Independent Component Analysis.
The implementation is based on [1]_.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
n_components : int, default=None
Number of components to use. If None is passed, all are used.
algorithm : {'parallel', 'deflation'}, default='parallel'
Specify which algorithm to use for FastICA.
whiten : str or bool, default='unit-variance'
Specify the whitening strategy to use.
- If 'arbitrary-variance', a whitening with variance
arbitrary is used.
- If 'unit-variance', the whitening matrix is rescaled to ensure that
each recovered source has unit variance.
- If False, the data is already considered to be whitened, and no
whitening is performed.
.. versionchanged:: 1.3
The default value of `whiten` changed to 'unit-variance' in 1.3.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example::
def my_g(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}.
max_iter : int, default=200
Maximum number of iterations to perform.
tol : float, default=1e-4
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : ndarray of shape (n_components, n_components), default=None
Initial un-mixing array. If `w_init=None`, then an array of values
drawn from a normal distribution is used.
whiten_solver : {"eigh", "svd"}, default="svd"
The solver to use for whitening.
- "svd" is more stable numerically if the problem is degenerate, and
often faster when `n_samples <= n_features`.
- "eigh" is generally more memory efficient when
`n_samples >= n_features`, and can be faster when
`n_samples >= 50 * n_features`.
.. versionadded:: 1.2
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
return_X_mean : bool, default=False
If True, X_mean is returned too.
compute_sources : bool, default=True
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
Returns
-------
K : ndarray of shape (n_components, n_features) or None
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : ndarray of shape (n_components, n_components)
The square matrix that unmixes the data after whitening.
The mixing matrix is the pseudo-inverse of matrix ``W K``
if K is not None, else it is the inverse of W.
S : ndarray of shape (n_samples, n_components) or None
Estimated source matrix.
X_mean : ndarray of shape (n_features,)
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to `un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
While FastICA was proposed to estimate as many sources
as features, it is possible to estimate less by setting
    n_components < n_features. In this case K is not a square matrix
and the estimated A is the pseudo-inverse of ``W K``.
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
References
----------
.. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.decomposition import fastica
>>> X, _ = load_digits(return_X_y=True)
>>> K, W, S = fastica(X, n_components=7, random_state=0, whiten='unit-variance')
>>> K.shape
(7, 64)
>>> W.shape
(7, 7)
>>> S.shape
(1797, 7)
"""
est = FastICA(
n_components=n_components,
algorithm=algorithm,
whiten=whiten,
fun=fun,
fun_args=fun_args,
max_iter=max_iter,
tol=tol,
w_init=w_init,
whiten_solver=whiten_solver,
random_state=random_state,
)
est._validate_params()
S = est._fit_transform(X, compute_sources=compute_sources)
if est.whiten in ["unit-variance", "arbitrary-variance"]:
K = est.whitening_
X_mean = est.mean_
else:
K = None
X_mean = None
returned_values = [K, est._unmixing, S]
if return_X_mean:
returned_values.append(X_mean)
if return_n_iter:
returned_values.append(est.n_iter_)
return returned_values
class FastICA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""FastICA: a fast algorithm for Independent Component Analysis.
The implementation is based on [1]_.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
n_components : int, default=None
Number of components to use. If None is passed, all are used.
algorithm : {'parallel', 'deflation'}, default='parallel'
Specify which algorithm to use for FastICA.
whiten : str or bool, default='unit-variance'
Specify the whitening strategy to use.
- If 'arbitrary-variance', a whitening with variance
arbitrary is used.
- If 'unit-variance', the whitening matrix is rescaled to ensure that
each recovered source has unit variance.
- If False, the data is already considered to be whitened, and no
whitening is performed.
.. versionchanged:: 1.3
The default value of `whiten` changed to 'unit-variance' in 1.3.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example::
def my_g(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}.
max_iter : int, default=200
Maximum number of iterations during fit.
tol : float, default=1e-4
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : array-like of shape (n_components, n_components), default=None
Initial un-mixing array. If `w_init=None`, then an array of values
drawn from a normal distribution is used.
whiten_solver : {"eigh", "svd"}, default="svd"
The solver to use for whitening.
- "svd" is more stable numerically if the problem is degenerate, and
often faster when `n_samples <= n_features`.
- "eigh" is generally more memory efficient when
`n_samples >= n_features`, and can be faster when
`n_samples >= 50 * n_features`.
.. versionadded:: 1.2
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
The linear operator to apply to the data to get the independent
sources. This is equal to the unmixing matrix when ``whiten`` is
False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when
``whiten`` is True.
mixing_ : ndarray of shape (n_features, n_components)
The pseudo-inverse of ``components_``. It is the linear operator
that maps independent sources to the data.
mean_ : ndarray of shape(n_features,)
The mean over features. Only set if `self.whiten` is True.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge.
whitening_ : ndarray of shape (n_components, n_features)
Only set if whiten is 'True'. This is the pre-whitening matrix
that projects data onto the first `n_components` principal components.
See Also
--------
PCA : Principal component analysis (PCA).
IncrementalPCA : Incremental principal components analysis (IPCA).
KernelPCA : Kernel Principal component analysis (KPCA).
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
SparsePCA : Sparse Principal Components Analysis (SparsePCA).
References
----------
.. [1] A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.decomposition import FastICA
>>> X, _ = load_digits(return_X_y=True)
>>> transformer = FastICA(n_components=7,
... random_state=0,
... whiten='unit-variance')
>>> X_transformed = transformer.fit_transform(X)
>>> X_transformed.shape
(1797, 7)
"""
_parameter_constraints: dict = {
"n_components": [Interval(Integral, 1, None, closed="left"), None],
"algorithm": [StrOptions({"parallel", "deflation"})],
"whiten": [
StrOptions({"arbitrary-variance", "unit-variance"}),
Options(bool, {False}),
],
"fun": [StrOptions({"logcosh", "exp", "cube"}), callable],
"fun_args": [dict, None],
"max_iter": [Interval(Integral, 1, None, closed="left")],
"tol": [Interval(Real, 0.0, None, closed="left")],
"w_init": ["array-like", None],
"whiten_solver": [StrOptions({"eigh", "svd"})],
"random_state": ["random_state"],
}
def __init__(
self,
n_components=None,
*,
algorithm="parallel",
whiten="unit-variance",
fun="logcosh",
fun_args=None,
max_iter=200,
tol=1e-4,
w_init=None,
whiten_solver="svd",
random_state=None,
):
super().__init__()
self.n_components = n_components
self.algorithm = algorithm
self.whiten = whiten
self.fun = fun
self.fun_args = fun_args
self.max_iter = max_iter
self.tol = tol
self.w_init = w_init
self.whiten_solver = whiten_solver
self.random_state = random_state
def _fit_transform(self, X, compute_sources=False):
"""Fit the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
compute_sources : bool, default=False
            If False, sources are not computed but only the rotation matrix.
This can save memory when working with big data. Defaults to False.
Returns
-------
S : ndarray of shape (n_samples, n_components) or None
Sources matrix. `None` if `compute_sources` is `False`.
"""
XT = validate_data(
self,
X,
copy=self.whiten,
dtype=[np.float64, np.float32],
ensure_min_samples=2,
).T
fun_args = {} if self.fun_args is None else self.fun_args
random_state = check_random_state(self.random_state)
alpha = fun_args.get("alpha", 1.0)
if not 1 <= alpha <= 2:
raise ValueError("alpha must be in [1,2]")
if self.fun == "logcosh":
g = _logcosh
elif self.fun == "exp":
g = _exp
elif self.fun == "cube":
g = _cube
elif callable(self.fun):
def g(x, fun_args):
return self.fun(x, **fun_args)
n_features, n_samples = XT.shape
n_components = self.n_components
if not self.whiten and n_components is not None:
n_components = None
warnings.warn("Ignoring n_components with whiten=False.")
if n_components is None:
n_components = min(n_samples, n_features)
if n_components > min(n_samples, n_features):
n_components = min(n_samples, n_features)
warnings.warn(
"n_components is too large: it will be set to %s" % n_components
)
if self.whiten:
# Centering the features of X
X_mean = XT.mean(axis=-1)
XT -= X_mean[:, np.newaxis]
# Whitening and preprocessing by PCA
if self.whiten_solver == "eigh":
# Faster when num_samples >> n_features
d, u = linalg.eigh(XT.dot(X))
sort_indices = np.argsort(d)[::-1]
eps = np.finfo(d.dtype).eps * 10
degenerate_idx = d < eps
if np.any(degenerate_idx):
warnings.warn(
"There are some small singular values, using "
"whiten_solver = 'svd' might lead to more "
"accurate results."
)
d[degenerate_idx] = eps # For numerical issues
np.sqrt(d, out=d)
d, u = d[sort_indices], u[:, sort_indices]
elif self.whiten_solver == "svd":
u, d = linalg.svd(XT, full_matrices=False, check_finite=False)[:2]
# Give consistent eigenvectors for both svd solvers
u *= np.sign(u[0])
K = (u / d).T[:n_components] # see (6.33) p.140
del u, d
X1 = np.dot(K, XT)
# see (13.6) p.267 Here X1 is white and data
# in X has been projected onto a subspace by PCA
X1 *= np.sqrt(n_samples)
else:
# X must be casted to floats to avoid typing issues with numpy
# 2.0 and the line below
X1 = as_float_array(XT, copy=False) # copy has been taken care of
w_init = self.w_init
if w_init is None:
w_init = np.asarray(
random_state.normal(size=(n_components, n_components)), dtype=X1.dtype
)
else:
w_init = np.asarray(w_init)
if w_init.shape != (n_components, n_components):
raise ValueError(
"w_init has invalid shape -- should be %(shape)s"
% {"shape": (n_components, n_components)}
)
kwargs = {
"tol": self.tol,
"g": g,
"fun_args": fun_args,
"max_iter": self.max_iter,
"w_init": w_init,
}
if self.algorithm == "parallel":
W, n_iter = _ica_par(X1, **kwargs)
elif self.algorithm == "deflation":
W, n_iter = _ica_def(X1, **kwargs)
del X1
self.n_iter_ = n_iter
if compute_sources:
if self.whiten:
S = np.linalg.multi_dot([W, K, XT]).T
else:
S = np.dot(W, XT).T
else:
S = None
if self.whiten:
if self.whiten == "unit-variance":
if not compute_sources:
S = np.linalg.multi_dot([W, K, XT]).T
S_std = np.std(S, axis=0, keepdims=True)
S /= S_std
W /= S_std.T
self.components_ = np.dot(W, K)
self.mean_ = X_mean
self.whitening_ = K
else:
self.components_ = W
self.mixing_ = linalg.pinv(self.components_, check_finite=False)
self._unmixing = W
return S
@_fit_context(prefer_skip_nested_validation=True)
def fit_transform(self, X, y=None):
"""Fit the model and recover the sources from X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Estimated sources obtained by transforming the data with the
estimated unmixing matrix.
"""
return self._fit_transform(X, compute_sources=True)
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit the model to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit_transform(X, compute_sources=False)
return self
def transform(self, X, copy=True):
"""Recover the sources from X (apply the unmixing matrix).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data to transform, where `n_samples` is the number of samples
and `n_features` is the number of features.
copy : bool, default=True
If False, data passed to fit can be overwritten. Defaults to True.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Estimated sources obtained by transforming the data with the
estimated unmixing matrix.
"""
check_is_fitted(self)
X = validate_data(
self,
X,
copy=(copy and self.whiten),
dtype=[np.float64, np.float32],
reset=False,
)
if self.whiten:
X -= self.mean_
return np.dot(X, self.components_.T)
def inverse_transform(self, X, copy=True):
"""Transform the sources back to the mixed data (apply mixing matrix).
Parameters
----------
X : array-like of shape (n_samples, n_components)
Sources, where `n_samples` is the number of samples
and `n_components` is the number of components.
copy : bool, default=True
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : ndarray of shape (n_samples, n_features)
Reconstructed data obtained with the mixing matrix.
"""
check_is_fitted(self)
X = check_array(X, copy=(copy and self.whiten), dtype=[np.float64, np.float32])
X = np.dot(X, self.mixing_.T)
if self.whiten:
X += self.mean_
return X
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.components_.shape[0]
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
return tags
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/choroplethmapbox/unselected/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._marker import Marker
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(__name__, [], ["._marker.Marker"])
|
{
"filename": "test_redchi2.py",
"repo_name": "light-curve/light-curve-python",
"repo_path": "light-curve-python_extracted/light-curve-python-master/light-curve/tests/light_curve_py/features/test_redchi2.py",
"type": "Python"
}
|
import numpy as np
from numpy.testing import assert_allclose
from light_curve.light_curve_py import ReducedChi2
def test_redchi2_equal_sigma():
m = np.array([1.0, 1.0, 2.0, 3.0, 4.0, 5.0])
sigma = np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5])
feature = ReducedChi2()
desired = feature(np.linspace(0, 1, len(m)), m, sigma)
actual = 10.666667
assert_allclose(actual, desired)
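# Quick check of the expected value above (assuming the usual reduced chi^2
# definition with the error-weighted mean): with equal errors sigma = 0.5 the
# weighted mean is 16/6, sum((m - mean)**2) = 40/3, chi2 = (40/3) / 0.25 = 160/3,
# and dividing by (N - 1) = 5 gives 32/3 = 10.666667.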
def test_redchi2_different_sigma():
m = np.arange(6)
sigma = np.array([0.5, 1.0, 0.5, 1.0, 0.5, 1.0])
feature = ReducedChi2()
desired = feature(np.linspace(0, 1, len(m)), m, sigma)
actual = 8.48
assert_allclose(actual, desired)
|
{
"filename": "domwidget.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipywidgets/py2/ipywidgets/widgets/domwidget.py",
"type": "Python"
}
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Contains the DOMWidget class"""
from traitlets import Unicode
from .widget import Widget, widget_serialization
from .trait_types import InstanceDict, TypedTuple
from .widget_layout import Layout
from .widget_style import Style
class DOMWidget(Widget):
"""Widget that can be inserted into the DOM"""
_model_name = Unicode('DOMWidgetModel').tag(sync=True)
_dom_classes = TypedTuple(trait=Unicode(), help="CSS classes applied to widget DOM element").tag(sync=True)
layout = InstanceDict(Layout).tag(sync=True, **widget_serialization)
def add_class(self, className):
"""
Adds a class to the top level element of the widget.
Doesn't add the class if it already exists.
"""
if className not in self._dom_classes:
self._dom_classes = list(self._dom_classes) + [className]
return self
def remove_class(self, className):
"""
Removes a class from the top level element of the widget.
Doesn't remove the class if it doesn't exist.
"""
if className in self._dom_classes:
self._dom_classes = [c for c in self._dom_classes if c != className]
return self
def _repr_keys(self):
for key in super(DOMWidget, self)._repr_keys():
# Exclude layout if it had the default value
if key == 'layout':
value = getattr(self, key)
if repr(value) == '%s()' % value.__class__.__name__:
continue
yield key
# We also need to include _dom_classes in repr for reproducibility
if self._dom_classes:
yield '_dom_classes'
|
{
"filename": "_pep440.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/_utils/_pep440.py",
"type": "Python"
}
|
"""Utility to compare pep440 compatible version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
# Copyright (c) Donald Stufft and individual contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import collections
import itertools
import re
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN",
]
# BEGIN packaging/_structures.py
class Infinity:
def __repr__(self):
return "Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __neg__(self):
return NegativeInfinity
Infinity = Infinity()
class NegativeInfinity:
def __repr__(self):
return "-Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __neg__(self):
return Infinity
# BEGIN packaging/version.py
NegativeInfinity = NegativeInfinity()
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
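# Illustrative ordering under PEP 440, as implemented by Version below:
#   parse("1.0.dev1") < parse("1.0a1") < parse("1.0") < parse("1.0.post1")
# Strings that are not valid PEP 440 versions fall back to LegacyVersion.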
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion:
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
# We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
# greater than or equal to 0. This will effectively put the LegacyVersion,
# which uses the defacto standard originally implemented by setuptools,
# as before all PEP 440 versions.
epoch = -1
# This scheme is taken from pkg_resources.parse_version setuptools prior to
# its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We assume there is an implicit 0 in a pre-release if there is
# no numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower-case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume that if we are given a number but not given a letter,
# then this is using the implicit post release syntax (e.g., 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
# When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the now
# leading zeros until we come to something non-zero, then take the rest,
# re-reverse it back into the correct order, and make it a tuple and use
# that for our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre-segment, but we _only_ want to do this
# if there is no pre- or a post-segment. If we have one of those, then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post-segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alphanumeric segments sort before numeric segments
# - Alphanumeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@_utils@[email protected]_END.py
|
{
"filename": "_meta.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/ohlc/_meta.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="meta", parent_name="ohlc", **kwargs):
super(MetaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@ohlc@[email protected]_END.py
|
{
"filename": "_templateitemname.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/parcoords/line/colorbar/tickformatstop/_templateitemname.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="templateitemname",
parent_name="parcoords.line.colorbar.tickformatstop",
**kwargs,
):
super(TemplateitemnameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@parcoords@line@colorbar@tickformatstop@[email protected]_END.py
|
{
"filename": "test_medabsdev.py",
"repo_name": "light-curve/light-curve-python",
"repo_path": "light-curve-python_extracted/light-curve-python-master/light-curve/tests/light_curve_py/features/test_medabsdev.py",
"type": "Python"
}
|
import numpy as np
from numpy.testing import assert_allclose
from light_curve.light_curve_py import MedianAbsoluteDeviation
def test_medabsdev():
m = [1.0, 1.0, 3.0, 4.0, 7.0]
feature = MedianAbsoluteDeviation()
actual = feature(np.linspace(0, 1, len(m)), m, None)
desired = 2.0
assert_allclose(actual, desired)
|
light-curveREPO_NAMElight-curve-pythonPATH_START.@light-curve-python_extracted@light-curve-python-master@light-curve@tests@light_curve_py@features@[email protected]_END.py
|
{
"filename": "test_void_size_function.ipynb",
"repo_name": "GabrieleParimbelli/COLIBRI",
"repo_path": "COLIBRI_extracted/COLIBRI-master/notebooks/test_void_size_function.ipynb",
"type": "Jupyter Notebook"
}
|
# Test void size function routines
This notebook demonstrates the void size function routines of the ``cosmo`` class
```python
import colibri.cosmology as cc
import colibri.useful_functions as UU
import numpy as np
import matplotlib.pyplot as plt
plt.rc('text',usetex=True)
plt.rc('font',size=25,family='serif')
```
### Fixed parameters for the plot
```python
DNL = -0.8 # Underdensity for voids (typical values are -0.8, -0.7)
IMAX = 200 # Max index of sum (must be >= 200)
```
### Define a ``cosmo`` instance
```python
C=cc.cosmo(Omega_m=0.26,Omega_b=0.044,ns=0.96,As=2.168e-9,h=0.715)
```
### Arrays of radii (Mpc/h), scales (h/Mpc) and redshifts
```python
RR = np.geomspace(0.1,50.,101) # Radii of voids
zz = np.linspace(0.,5.,11)
kk = np.logspace(-4.,2,1001)
```
### Power spectra
Compute linear spectra and extrapolate them up to very large wavenumbers
```python
#===========
# Linear power spectra
#===========
_,pk=C.camb_Pk(z=zz,k=kk)
#===========
# Extrapolate linear power spectra
#===========
k_arr = []
pk_arr = []
for iz in range(len(np.atleast_1d(zz))):
k_ext,pk_ext = UU.extrapolate_log(kk,pk[iz],1e-4,1e5)
pk_arr.append(pk_ext)
k_arr = np.array(k_ext )
pk_arr = np.array(pk_arr)
```
### Void size functions
We compare three models, the linear one, the Sheth-van de Weygaert (SvdW) one and the volume-conserving (Vdn) one,
with different critical densities (the default for ``delta_c`` is 1.686).
```python
RL_L,VSF_L = C.void_size_function(R=RR,z=zz,k=k_arr,pk=pk_arr,Delta_NL=DNL,
model = 'linear',max_index=IMAX)
RL_S,VSF_S = C.void_size_function(R=RR,z=zz,k=k_arr,pk=pk_arr,Delta_NL=DNL,
model = 'SvdW' ,max_index=IMAX)
RL_V,VSF_V = C.void_size_function(R=RR,z=zz,k=k_arr,pk=pk_arr,Delta_NL=DNL,
model = 'Vdn' ,max_index=IMAX)
RL_L,VSF_Ll = C.void_size_function(R=RR,z=zz,k=k_arr,pk=pk_arr,Delta_NL=DNL,
model = 'linear',max_index=IMAX,delta_c=1.06)
RL_S,VSF_Sl = C.void_size_function(R=RR,z=zz,k=k_arr,pk=pk_arr,Delta_NL=DNL,
model = 'SvdW' ,max_index=IMAX,delta_c=1.06)
RL_V,VSF_Vl = C.void_size_function(R=RR,z=zz,k=k_arr,pk=pk_arr,Delta_NL=DNL,
model = 'Vdn' ,max_index=IMAX,delta_c=1.06)
```
### Plot
```python
plt.figure(figsize=(10,7))
L,B,R,T=0.15,0.15,0.95,0.95
plt.subplots_adjust(L,B,R,T)
LW = 3.0
# VSF with high delta_c
plt.loglog(RL_L,VSF_L[0],'dodgerblue',lw=LW,label='linear')
plt.loglog(RL_S,VSF_S[0],'orange' ,lw=LW,label='SVdW')
plt.loglog(RL_V,VSF_V[0],'gray' ,lw=LW,label='Vdn')
# VSF with low delta_c
plt.loglog(RL_L,VSF_Ll[0],'dodgerblue',lw=LW,ls='--')
plt.loglog(RL_S,VSF_Sl[0],'orange' ,lw=LW,ls='--')
plt.loglog(RL_V,VSF_Vl[0],'gray' ,lw=LW,ls='--')
# For legend
plt.plot(np.nan,np.nan,'k',ls='-' ,label='$\delta_c=1.686$')
plt.plot(np.nan,np.nan,'k',ls='--',label='$\delta_c=1.06$')
plt.legend()
# Labels
plt.xlabel('$R \ [\mathrm{Mpc}/h]$')
plt.ylabel('$\\frac{dn}{d\ln R} \ [(h/\mathrm{Mpc})^3]$')
# Limits
plt.xlim(0.3,30)
plt.ylim(1e-7,1e0)
plt.show()
```

```python
```
|
GabrieleParimbelliREPO_NAMECOLIBRIPATH_START.@COLIBRI_extracted@COLIBRI-master@notebooks@[email protected]_END.py
|
{
"filename": "README.md",
"repo_name": "oliverphilcox/Parity-Odd-4PCF",
"repo_path": "Parity-Odd-4PCF_extracted/Parity-Odd-4PCF-main/README.md",
"type": "Markdown"
}
|
# Parity-Odd-4PCF
Analysis pipeline for probing parity-violation with the 4-point correlation function of BOSS CMASS galaxies. This is described in detail in [Philcox 2022](https://arxiv.org/abs/2206.04227), and makes use of the [encore](https://github.com/oliverphilcox/encore) NPCF algorithm described in [Philcox et al. 2021](https://arxiv.org/abs/2105.08722) and the Gaussian NPCF covariance matrices from [Hou et al. 2021](https://arxiv.org/abs/2108.01714).
Note that the main analysis was performed blindly, *i.e.* the analysis pipeline and plotting routines were all created before the data was revealed. This includes the Chern-Simons 4PCF template, computed [here](compute_cs_4pcf.py). The main routines in the associated [notebook](BOSS%20Odd-Parity%204PCF%20(CS%20template).ipynb) were not modified after unblinding, except for replacing fake data with the truth and implementing various cosmetic improvements. We additionally include a [notebook](Nseries%20Odd-Parity%204PCF.ipynb) containing the analysis of Nseries mock catalogs. For posterity, a copy of the paper draft pre-unblinding can be found [here](paper/blinded_draft.pdf).
The BOSS data can also be used to constrain inflationary models of parity-violation, as discussed in [Cabass, Ivanov, \& Philcox (2022)](https://arxiv.org/abs/2210.16320). To this end, we provide code implementing the inflationary templates, and notebooks computing the corresponding amplitude constraints. The analysis notebooks can be found [here](BOSS%20Odd-Parity%204PCF%20(ghost%20template).ipynb) and [here](BOSS%20Odd-Parity%204PCF%20(collider%20template).ipynb), with the models provided in the ```templates/``` directory.
To run the main analysis notebook, the simulation data will be required. Almost all of this is contained in the ```data/``` directories, except for two large files ```all_patchy2048_fourpcf.npz``` and ```all_nseries-patchy2048_fourpcf.npz```. These can be downloaded from Dropbox ([file 1](https://www.dropbox.com/s/594iol702s7gk86/all_patchy2048_fourpcf.npz?dl=0) and [file 2](https://www.dropbox.com/s/r5ezfez15ou93ws/all_nseries-patchy2048_fourpcf.npz?dl=0)) and should be placed in the ```data/``` directory.
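For convenience, a minimal download sketch is given below; it assumes the standard ```requests``` package is installed and simply reuses the Dropbox links above with ```dl=1``` for a direct download (adjust the output paths if your ```data/``` directory lives elsewhere).
```python
# Sketch: fetch the two large 4PCF files into data/ (assumes `requests` is installed;
# the URLs are the Dropbox links above with dl=0 replaced by dl=1 for a direct download).
import requests

files = {
    "data/all_patchy2048_fourpcf.npz":
        "https://www.dropbox.com/s/594iol702s7gk86/all_patchy2048_fourpcf.npz?dl=1",
    "data/all_nseries-patchy2048_fourpcf.npz":
        "https://www.dropbox.com/s/r5ezfez15ou93ws/all_nseries-patchy2048_fourpcf.npz?dl=1",
}
for path, url in files.items():
    response = requests.get(url)
    response.raise_for_status()
    with open(path, "wb") as handle:
        handle.write(response.content)
print("Downloaded", len(files), "files.")
```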
### Authors
- [Oliver Philcox](mailto:[email protected]) (Princeton / Institute for Advanced Study / Columbia / Simons Foundation)
|
oliverphilcoxREPO_NAMEParity-Odd-4PCFPATH_START.@Parity-Odd-4PCF_extracted@[email protected]@.PATH_END.py
|
{
"filename": "get_data.py",
"repo_name": "ethankruse/koi3278",
"repo_path": "koi3278_extracted/koi3278-master/get_data.py",
"type": "Python"
}
|
"""
Download the necessary files if they don't already exist.
"""
import inputs as inp
from glob import glob
import subprocess
import os
dataloc = inp.keplerdata
KIC = 3342467
KICstr = str(int(KIC))
files = glob(dataloc + 'kplr*' + KICstr + '*llc.fits')
# change this if we actually had to download something
dload = 0
# can't find the light curves
if len(files) == 0:
# move to the download location
cwd = os.getcwd()
os.chdir(dataloc)
# run the wget script to get the light curves from MAST
subprocess.check_call(['./kepler_wget.sh'])
os.chdir(cwd)
dload += 1
# check for the WD models
files = glob(inp.wdfiles)
if len(files) == 0:
# move to the download location
cwd = os.getcwd()
os.chdir('./wdmodels/')
# run the wget script to get the WD models from Bergeron website
subprocess.check_call(['./bergeron_wdmodels_wget.sh'])
os.chdir(cwd)
dload += 1
if dload:
print('Downloaded necessary data.')
else:
print('All data already downloaded. Continuing.')
|
ethankruseREPO_NAMEkoi3278PATH_START.@koi3278_extracted@koi3278-master@[email protected]_END.py
|
{
"filename": "motorhead_memory.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/memory/motorhead_memory.ipynb",
"type": "Jupyter Notebook"
}
|
# Motörhead
>[Motörhead](https://github.com/getmetal/motorhead) is a memory server implemented in Rust. It automatically handles incremental summarization in the background and allows for stateless applications.
## Setup
See instructions at [Motörhead](https://github.com/getmetal/motorhead) for running the server locally.
```python
from langchain_community.memory.motorhead_memory import MotorheadMemory
```
## Example
```python
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI
template = """You are a chatbot having a conversation with a human.
{chat_history}
Human: {human_input}
AI:"""
prompt = PromptTemplate(
input_variables=["chat_history", "human_input"], template=template
)
memory = MotorheadMemory(
session_id="testing-1", url="http://localhost:8080", memory_key="chat_history"
)
await memory.init()
# loads previous state from Motörhead 🤘
llm_chain = LLMChain(
llm=OpenAI(),
prompt=prompt,
verbose=True,
memory=memory,
)
```
```python
llm_chain.run("hi im bob")
```
> Entering new LLMChain chain...
Prompt after formatting:
You are a chatbot having a conversation with a human.
Human: hi im bob
AI:
> Finished chain.
' Hi Bob, nice to meet you! How are you doing today?'
```python
llm_chain.run("whats my name?")
```
> Entering new LLMChain chain...
Prompt after formatting:
You are a chatbot having a conversation with a human.
Human: hi im bob
AI: Hi Bob, nice to meet you! How are you doing today?
Human: whats my name?
AI:
> Finished chain.
' You said your name is Bob. Is that correct?'
```python
llm_chain.run("whats for dinner?")
```
> Entering new LLMChain chain...
Prompt after formatting:
You are a chatbot having a conversation with a human.
Human: hi im bob
AI: Hi Bob, nice to meet you! How are you doing today?
Human: whats my name?
AI: You said your name is Bob. Is that correct?
Human: whats for dinner?
AI:
> Finished chain.
" I'm sorry, I'm not sure what you're asking. Could you please rephrase your question?"
```python
```
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@memory@[email protected]_END.py
|
{
"filename": "mcmc_gauss.py",
"repo_name": "HETDEX/hetdex_api",
"repo_path": "hetdex_api_extracted/hetdex_api-master/hetdex_tools/mcmc_gauss.py",
"type": "Python"
}
|
"""
Cloned from ELiXer mcmc_gauss.py
Fits an emission line with a simple, single Gaussian for four parameters:
line center (mu), line width (sigma), area (A), and y offset (y)
* Note the y offset is not a line fit (that is, the slope is zero)
"""
from __future__ import print_function
import numpy as np
from scipy.signal import medfilt
import emcee
import copy
import warnings
import logging
from hetdex_api.input_utils import setup_logging
#uncomment the next three includes AND uncomment the visualize function at the bottom if you want them
#import matplotlib.pyplot as plt
#import corner
#import io
SNR_SIGMA_WIDTH = 2 #line center +/- this width over which to compute the noise
SNR_LINEFLUX_FRACTION = 0.954 #0.682 for 1 sigma, 0.954 for 2 sigma, 0.996 for 3 sigma, assume 1.0 thereafter
UncertaintyRange = [16,50,84] #e.g. for the uncertainty as distribution
def getnearpos(array,value):
"""
Nearest, but works best (with less than and greater than) if monotonically increasing. Otherwise,
lt and gt are (almost) meaningless
:param array:
:param value:
:return: nearest index, nearest index less than the value, nearest index greater than the value
None if there is no less than or greater than
"""
if type(array) == list:
array = np.array(array)
idx = (np.abs(array-value)).argmin()
if array[idx] == value:
lt = idx
gt = idx
elif array[idx] < value:
lt = idx
gt = idx + 1
else:
lt = idx - 1
gt = idx
if lt < 0:
lt = None
if gt > len(array) -1:
gt = None
return idx, lt, gt
class MCMC_Gauss:
def __init__(self,logger=None):
        #initial values are meant to be near the truth
#and are expected to come from, say, some initial "best" fit
self.log = logger
if self.log is None:
self.log = setup_logging()
self.log.setLevel(logging.INFO)
self.initial_mu = None
self.initial_sigma = None
self.initial_A = None #note: set to a negative value if this is an absorption line
self.initial_y = None
self.initial_peak = None
self.max_sigma = 20.0
self.min_sigma = 0.0
self.range_mu = 5.0
self.max_A_mult = 2.0
self.max_y_mult = 2.0
self.min_y = -10.0
self.data_x = None
self.data_y = None
self.err_x = None
self.err_y = None
#just for reference ... MCMC itself does not need to know about this
#the caller DOES though and needs to adjust the line_flux accordingly
#self.dx = None #original bin width IF NOT part of the data_y already
#this is mostly a guess ... no great way to automate, but this is pretty quick
        #and since the initials are from a scipy curve fit, we stabilize pretty fast
self.burn_in = 100
self.main_run = 1000
self.walkers = 100
self.sampler = None #mcmc sampler
self.samples = None #resulting samples
#####################
# Outputs
#####################
#3-tuples [0] = fit, [1] = fit +16%, [2] = fit - 16%
self.mcmc_mu = None
self.mcmc_sigma = None
        self.mcmc_A = None #note: typically this is over the HETDEX 2AA bins, so if using the area as the integrated
# lineflux you need to divide by 2AA and scale appropriately (e.g. as 1e-17)
self.mcmc_y = None
#not tuple, just single floats
self.mcmc_snr = None
self.mcmc_snr_err = 0
def approx_symmetric_error(self,parm): #parm is assumed to be a 3 vector as [0] = mean, [1] = +error, [2] = -error
try:
if parm is None or (len(parm)!= 3) :
return None
p1 = abs(parm[1])
p2 = abs(parm[2])
avg = 0.5*(p1+p2)
if avg == 0:
return 0
similarity = abs(p1-p2)/avg
if similarity > 0.1:
self.log.warning("Warning! Asymmetric uncertainty similarity = %0.3g (%0.3g, %0.3g)" %(similarity,p1,p2))
#for now, do it anyway
return avg
except:
return None
def noise_model(self):
#todo: fill in some specialized model for the noise
return 0.0
def compute_model(self,x,mu, sigma, A, y):
try:
return A * (np.exp(-np.power((x - mu) / sigma, 2.) / 2.) / np.sqrt(2 * np.pi * sigma ** 2)) + y
except:
return np.nan
def model(self,x,theta):
mu, sigma, A, y, ln_f = theta #note: not using ln_f here
if (x is None) or (mu is None) or (sigma is None):
return None
try:
value = self.compute_model(x,mu, sigma, A, y)
# note: noise is separate and included in the lnlike() function
except:
value = np.nan
return value
def lnlike(self, theta, x, y, yerr):
ln_f = theta[-1] #last parameter in theta
model = self.model(x, theta)
noise = self.noise_model()
diff = y - (model + noise)
#assumes some additional uncertainties in y based on an underestimation in the model by some factor f
# assume that the (distribution of) errors in y are known and independent
sigma2 = (self.err_y ** 2)
return -0.5 * (np.sum((diff ** 2) / sigma2 + np.log(sigma2)))
# if any are zero, the whole prior is zero
    # all priors here are uninformative ... i.e. they are all flat ... either zero or one
def lnprior(self, theta): # theta is a n-tuple (_,_,_ ... )
mu, sigma, A, y, ln_f = theta
        # note: could take some other dynamic maximum for y (like compute the peak ... y can't be greater than that)
if self.initial_A < 0 : #same as emission, but "A" is negative (flip sign) and y is between a max and zero
if (-self.range_mu < mu - self.initial_mu < self.range_mu) and \
(self.min_sigma < sigma < self.max_sigma) and \
(self.max_A_mult * self.initial_A < A < 0.0) and \
(self.min_y < y < self.max_y_mult * self.initial_peak):
return 0.0 # remember this is ln(prior) so a return of 0.0 == 1 (since ln(1) == 0.0)
else:
if (-self.range_mu < mu - self.initial_mu < self.range_mu) and \
(self.min_sigma < sigma < self.max_sigma) and \
(0.0 < A < self.max_A_mult * self.initial_A) and \
(self.min_y < y < self.max_y_mult * self.initial_peak):
return 0.0 # remember this is ln(prior) so a return of 0.0 == 1 (since ln(1) == 0.0)
return -np.inf # -999999999 #-np.inf #roughly ln(0) == -inf
def lnprob(self, theta, x, y, yerr):
"""
ln(probability)
:param theta: parameters to check
:param x: THE data (x axis or wavelengths, in this case)
:param y: THE data (y axis or flux counts, in this case)
:param yerr: The error on the y axis data flux counts
:return:
"""
lp = self.lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + self.lnlike(theta, x, y, yerr) # again, since logs, this is a sum ln(likelihood) + ln(prior)
def sanity_check_init(self):
"""
evaluation that the initialization data makes sense before running MCMC
:return:
"""
try:
#if the error on y is None or if it is all zeros, set to all ones
if self.err_y is None:
self.err_y = np.ones(np.shape(self.data_y))
elif not np.any(self.err_y):
self.err_y = np.ones(np.shape(self.data_y))
if self.err_x is None:
self.err_x = np.ones(np.shape(self.data_x))
if (self.data_x is None) or (self.data_y is None) or (self.err_y is None):
self.log.debug("Sanity check failed. data_x or data_y or err_y is None")
return False
if len(self.data_x) == len(self.data_y) == len(self.err_y): #leave off self.err_x as could be None
if (self.err_x is not None):
if len(self.data_x) != len(self.err_x):
self.log.debug("Sanity check failed. len(data_x) != len(err_x)")
return False
if (self.initial_sigma is None) or (self.initial_mu is None) or (self.initial_A is None) or (self.initial_y is None):
self.log.debug("Sanity check failed. initial sigma, mu, A, or y is None")
return False
if self.initial_peak is None:
left,*_ = getnearpos(self.data_x,self.initial_mu-self.initial_sigma*4)
right,*_ = getnearpos(self.data_x,self.initial_mu+self.initial_sigma*4)
self.initial_peak = max(self.data_y[left:right])
                if self.initial_sigma < 0.0: #self.initial_A < 0.0 ... actually, leave A alone .. might allow absorption later
self.log.debug("Sanity check failed. initial sigma < 0")
return False
if ((self.initial_A > 0) and (self.initial_y > self.initial_peak) or \
(self.initial_A < 0) and (self.initial_y < self.initial_peak) ):
#i.e. if an emission (A > 0) then y must be less than the peak
# else if an absorption line (A < 0) then y must be greater than the peak
self.log.debug("Sanity check failed. y offset inconsistent with area (A) and initial peak.")
return False
else:
self.log.debug("Sanity check failed. lengths of data_x, data_y, and err_y do not match.")
return False
return True
except:
self.log.warning("Exception in mcmc_gauss::sanity_check",exc_info=True)
return False
def run_mcmc(self):
#cannot have nans
#note: assumes data_x (the spectral axis) and err_x have none since they are on a known grid
data_nans = np.isnan(self.data_y)
err_nans = np.isnan(self.err_y)
if (np.sum(data_nans) > 0) or (np.sum(err_nans) > 0):
self.data_y = copy.copy(self.data_y)[~data_nans]
self.err_y = copy.copy(self.err_y)[~data_nans]
self.data_x = copy.copy(self.data_x)[~data_nans]
self.err_x = copy.copy(self.err_x)[~data_nans]
#and clean up any other nan's in the error array for y
try:
err_nans = np.isnan(self.err_y)
self.err_y[err_nans] = np.nanmax(self.err_y*10)
except:
self.err_y[err_nans] = 9e9 #just set to a large value
if not self.sanity_check_init():
self.log.info("Sanity check failed. Cannot conduct MCMC.")
return False
result = True
#here for initial positions of the walkers, sample from narrow gaussian (hence the randn or randNormal)
#centered on each of the maximum likelihood selected parameter values
#mu, sigma, A, y, ln_f = theta #note f or ln(f) is another uncertainty ...an underestimation of the variance
# by some factor (f) .... e.g. variance = variance + f * model
initial_pos = [self.initial_mu,self.initial_sigma,self.initial_A,self.initial_y,0.0]
ndim = len(initial_pos)
#even with the random nudging the pos values must be greater than (or less than for absorption) these values
try:
####################################################################################################
# This is an alternate way to control the jitter in the initial positions,
# Set the boolean below to False to use this vs. the original method (smaller, normal distro jitter)
####################################################################################################
if True:
ip_mu = initial_pos[0] + np.random.uniform(-1.0*(self.data_x[1]-self.data_x[0]),1.0*(self.data_x[1]-self.data_x[0]),self.walkers)
#sigma cannot go zero or below
ip_sigma = initial_pos[1] + np.random.uniform(-0.5*self.initial_sigma,0.5*self.initial_sigma,self.walkers)
#area cannot flip signs
ip_A = initial_pos[2] + np.random.uniform(0,1.0*self.initial_A,self.walkers)
                #y should not exceed min/max data value, but won't cause an error if it does
# ... should technically never be negative regardless of absorption or emission
ip_y = np.random.uniform(0,max(self.data_y),self.walkers)
ip_lnf = np.random.uniform(0.005,0.015,self.walkers) #np.zeros(self.walkers) #np.random.uniform(0.005,0.015,self.walkers) #np.zeros(self.walkers)
pos = np.column_stack((ip_mu,ip_sigma,ip_A,ip_y,ip_lnf))
# #for p in pos: #just a debug check
# # print(f"{p[0]:0.4g},{p[1]:0.4g},{p[2]:0.4g},{p[3]:0.4g},{p[4]:0.4g}")
else:
##############################################################################################
# OTHERWISE, keep the block below
# this is generally for older emcee (version < 3)
#############################################################################################
#mostly for the A (area)
if self.initial_A < 0: #absorber
max_pos = [np.inf, np.inf, 0.0, max(self.data_y), np.inf]
min_pos = [ 0.0, 0.01, -np.inf, -np.inf, -np.inf]
else:
#here, because of the max check, none mu, sigma, or A will be negative
max_pos = [np.inf, np.inf,np.inf,max(self.data_y), np.inf] #must be less than this
min_pos = [ 0.0, 0.01, 0.01, -np.inf,-np.inf] #must be greater than this
scale = np.array([10.,5.,2.0*self.initial_A,5.0*self.initial_y,1]) #don't nudge ln_f ...note ln_f = -4.5 --> f ~ 0.01
pos = [np.minimum(np.maximum(initial_pos + scale * np.random.uniform(-1,1,ndim),min_pos),max_pos) for i in range(self.walkers)]
#build the sampler
self.sampler = emcee.EnsembleSampler(self.walkers, ndim, self.lnprob,
args=(self.data_x,self.data_y, self.err_y))
#args are the positional args AFTER theta for self.lnprob function
            with warnings.catch_warnings(): #ignore the occasional warnings from the walkers (NaNs, etc that reject step)
warnings.simplefilter("ignore")
self.log.debug("MCMC burn in (%d) ...." %self.burn_in)
                #the skip_initial_state_check seems to be necessary now with newer emcee versions
#it does not like setting the initial lnf positions to all zero
#but the parameter coverage is good. If you are worried, switch the pos jitter boolean in the
# if True/else block just above
pos, prob, state = self.sampler.run_mcmc(pos, self.burn_in, skip_initial_state_check=True) # burn in
self.log.debug("MCMC main run (%d) ..." %self.main_run)
pos, prob, state = self.sampler.run_mcmc(pos, self.main_run,rstate0=state,skip_initial_state_check=True) # start from end position of burn-in
                self.samples = self.sampler.flatchain # collapse the walkers and iterations (aka steps or epochs)
self.log.debug("MCMC mean acceptance fraction: %0.3f" %(np.mean(self.sampler.acceptance_fraction)))
#for each, in order
#v[0] is the 16 percentile (~ - 1sigma)
#v[1] is the 50 percentile (so the "average")
#v[2] is the 84 percentile (~ +1sigma)
#the tuple reports then as ["average", "84th - average", "average - 16th"]
#should always be positive (assuming a positive value) BUT when printed to the log, the 3rd value is made
#to be negative showing that you would subtract it from the average to get to the 16th percentile
#using 68% interval
self.mcmc_mu, self.mcmc_sigma, self.mcmc_A, self.mcmc_y, mcmc_f = \
map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),zip(*np.percentile(self.samples, UncertaintyRange,axis=0)))
try: #basic info used by multiple SNR calculations
#bin_width = self.data_x[1] - self.data_x[0]
#2.1195 = 1.8AA * 2.355 / 2.0 ... the instrumental DEX (1.8AA)
delta_wave = max(self.mcmc_sigma[0]*SNR_SIGMA_WIDTH,2.1195) #must be at least +/- 2.1195AA
#!!! Notice: if we ever change this to +/- 1sigma, need to switch out to sum over the data
#instead of the model !!!
left,*_ = getnearpos(self.data_x,self.mcmc_mu[0]-delta_wave)
right,*_ = getnearpos(self.data_x,self.mcmc_mu[0]+delta_wave)
if self.data_x[left] - (self.mcmc_mu[0]-delta_wave) < 0:
left += 1 #less than 50% overlap in the left bin, so move one bin to the red
if self.data_x[right] - (self.mcmc_mu[0]+delta_wave) > 0:
right -=1 #less than 50% overlap in the right bin, so move one bin to the blue
#lastly ... could combine, but this is easier to read
right += 1 #since the right index is not included in slice
#at 4 sigma the mcmc_A[0] is almost identical to the model_fit (as you would expect)
#note: if choose to sum over model fit, remember that this is usually over 2AA wide bins, so to
#compare to the error data, need to multiply the model_sum by the bin width (2AA)
#(or noting that the Area == integrated flux x binwidth)
model_fit = self.compute_model(self.data_x[left:right],self.mcmc_mu[0],self.mcmc_sigma[0],self.mcmc_A[0],self.mcmc_y[0])
data_err = copy.copy(self.err_y[left:right])
data_err[data_err<=0] = np.nan #Karl has 0 value meaning it is flagged and should be skipped
#self.mcmc_snr = SNR_LINEFLUX_FRACTION*abs(self.mcmc_A[0]/bin_width) / np.sqrt(np.nansum(data_err**2))
self.mcmc_snr = abs(np.sum(model_fit-self.mcmc_y[0])) / np.sqrt(np.nansum(data_err**2))
self.mcmc_snr_err = abs(0.5*(self.mcmc_A[1]+self.mcmc_A[2])/self.mcmc_A[0] * self.mcmc_snr)
self.log.info(f"MCMC SNR model Area with data error: {self.mcmc_snr} +/- {self.mcmc_snr_err}")
except:
self.log.warning("Exception calculating MCMC SNR: ", exc_info=True)
if self.mcmc_snr is None:
self.mcmc_snr = -1
#note: these are logged as ["avg", +err, -err] so the last value becomes the negative
self.log.info("MCMC mu: initial[%0.5g] mcmc(%0.5g, +%0.5g, -%0.5g)" %
(self.initial_mu, self.mcmc_mu[0],self.mcmc_mu[1],self.mcmc_mu[2]))
self.log.info("MCMC sigma: initial[%0.5g] mcmc(%0.5g, +%0.5g, -%0.5g)" %
(self.initial_sigma, self.mcmc_sigma[0],self.mcmc_sigma[1],self.mcmc_sigma[2]))
self.log.info("MCMC A: initial[%0.5g] mcmc(%0.5g, +%0.5g, -%0.5g) *usually over 2AA bins" %
(self.initial_A, self.mcmc_A[0],self.mcmc_A[1],self.mcmc_A[2] ))
self.log.info("MCMC y: initial[%0.5g] mcmc(%0.5g, +%0.5g, -%0.5g)"%
(self.initial_y, self.mcmc_y[0],self.mcmc_y[1],self.mcmc_y[2]))
self.log.info("MCMC SNR: %0.5g" % self.mcmc_snr)
self.log.info("MCMC f: initial[%0.5g] mcmc(%0.5g, +%0.5g, -%0.5g)" %
(0.0, mcmc_f[0], mcmc_f[1], mcmc_f[2]))
except:
self.log.error("Exception in mcmc_gauss::run_mcmc",exc_info=True)
result = False
return result
#need to uncomment matplotlib, corner, and io at the top if you want this function
# def visualize(self,filename=None):
# try:
# if self.samples is not None:
# warnings.simplefilter(action='ignore', category=FutureWarning)
#
# fig = corner.corner(self.samples, labels=["$mu$", "$sigma$", "$A$", "$y$","f"],
# truths=[self.initial_mu, self.initial_sigma, self.initial_A, self.initial_y,None])
# #fifth = None is for the 'f' parameter ... there is no initial for it
# if filename is not None:
# self.log.info('Writing: ' + filename)
# fig.savefig(filename)
# else:
# plt.show()
#
# buf = None
# try:
# buf = io.BytesIO()
# fig.savefig(buf, format='png', dpi=300)
# except:
# self.log.warning("Exception in mcmc_gauss::visualize",exc_info=True)
# return buf
# except:
# self.log.warning("Exception in mcmc_gauss::visualize",exc_info=True)
# return None
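# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a minimal,
# self-contained example of driving MCMC_Gauss on a synthetic emission line.
# All numbers below are assumed example values; in practice the initial
# guesses come from a preliminary least-squares fit of the spectrum.
if __name__ == "__main__":
    wave = np.arange(4800.0, 4900.0, 2.0)  # wavelength grid in AA (2AA bins, assumed)
    true_mu, true_sigma, true_A, true_y = 4850.0, 4.0, 60.0, 5.0
    flux = true_A * np.exp(-0.5 * ((wave - true_mu) / true_sigma) ** 2) \
           / np.sqrt(2.0 * np.pi * true_sigma ** 2) + true_y
    flux += np.random.normal(0.0, 0.5, len(wave))  # add Gaussian noise
    fitter = MCMC_Gauss()
    fitter.data_x = wave
    fitter.data_y = flux
    fitter.err_y = np.full(len(wave), 0.5)  # per-bin flux uncertainty (assumed)
    fitter.initial_mu = 4851.0              # initial guesses near the truth (assumed)
    fitter.initial_sigma = 3.0
    fitter.initial_A = 50.0
    fitter.initial_y = 4.0
    if fitter.run_mcmc():
        print("mu    :", fitter.mcmc_mu)     # each output is (value, +err, -err)
        print("sigma :", fitter.mcmc_sigma)
        print("A     :", fitter.mcmc_A)
        print("y     :", fitter.mcmc_y)
        print("SNR   :", fitter.mcmc_snr)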
|
HETDEXREPO_NAMEhetdex_apiPATH_START.@hetdex_api_extracted@hetdex_api-master@hetdex_tools@[email protected]_END.py
|
{
"filename": "constants.py",
"repo_name": "pcubillos/repack",
"repo_path": "repack_extracted/repack-master/repack/constants/constants.py",
"type": "Python"
}
|
# Copyright (c) 2017-2021 Patricio Cubillos and contributors.
# repack is open-source software under the MIT license (see LICENSE).
__all__ = [
"kB",
"amu",
"e",
"me",
"N0",
"nano",
"C1",
"C2",
"C3",
"ROOT",
]
import os
import scipy.constants as sc
# Boltzmann constant in erg K-1:
kB = sc.k * 1e7
# Unified atomic mass in g:
amu = sc.physical_constants["unified atomic mass unit"][0] * 1e3
# Elementary charge in statcoulomb (cm3/2 g1/2 s-1):
e = 4.803205e-10
# Electron mass in g:
me = sc.m_e * 1e3
# Amagat in molecules cm-3:
N0 = sc.physical_constants[
"Loschmidt constant (273.15 K, 101.325 kPa)"][0] * 1e-6
# One nanometer in centimeters:
nano = 1e-7
# Other constructed constants:
C1 = 4.0 * sc.epsilon_0 * sc.m_e * sc.c**2 / sc.e**2 * 0.01 # cm-1
C2 = sc.h * (sc.c * 100.0) / sc.k # cm K-1
C3 = sc.pi * e**2 / (me * (100*sc.c)**2) # cm
ROOT = os.path.realpath(os.path.dirname(__file__) + "/../..") + "/"
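# ----------------------------------------------------------------------
# Illustrative check (sketch, not part of the original module): C2 = h*c/kB
# is the second radiation constant in cm K, so exp(-C2*nu/T) is the Boltzmann
# factor for an energy level at wavenumber nu (cm-1) and temperature T (K).
# The wavenumber and temperature below are assumed example values.
if __name__ == "__main__":
    import math
    nu = 2000.0   # wavenumber in cm-1 (assumed)
    T = 296.0     # temperature in K (assumed)
    print("C2 = {:.4f} cm K".format(C2))
    print("exp(-C2*nu/T) = {:.3e}".format(math.exp(-C2 * nu / T)))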
|
pcubillosREPO_NAMErepackPATH_START.@repack_extracted@repack-master@repack@[email protected]@.PATH_END.py
|