{
"filename": "_layer.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/layout/mapbox/_layer.py",
"type": "Python"
}
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Layer(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout.mapbox"
_path_str = "layout.mapbox.layer"
_valid_props = {
"below",
"circle",
"color",
"coordinates",
"fill",
"line",
"maxzoom",
"minzoom",
"name",
"opacity",
"source",
"sourceattribution",
"sourcelayer",
"sourcetype",
"symbol",
"templateitemname",
"type",
"visible",
}
# below
# -----
@property
def below(self):
"""
Determines if the layer will be inserted before the layer with
the specified ID. If omitted or set to '', the layer will be
inserted above every existing layer.
The 'below' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["below"]
@below.setter
def below(self, val):
self["below"] = val
# circle
# ------
@property
def circle(self):
"""
The 'circle' property is an instance of Circle
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.mapbox.layer.Circle`
- A dict of string/value properties that will be passed
to the Circle constructor
Supported dict properties:
radius
Sets the circle radius
(mapbox.layer.paint.circle-radius). Has an
effect only when `type` is set to "circle".
Returns
-------
plotly.graph_objs.layout.mapbox.layer.Circle
"""
return self["circle"]
@circle.setter
def circle(self, val):
self["circle"] = val
# color
# -----
@property
def color(self):
"""
Sets the primary layer color. If `type` is "circle", color
corresponds to the circle color (mapbox.layer.paint.circle-
color) If `type` is "line", color corresponds to the line color
(mapbox.layer.paint.line-color) If `type` is "fill", color
corresponds to the fill color (mapbox.layer.paint.fill-color)
If `type` is "symbol", color corresponds to the icon color
(mapbox.layer.paint.icon-color)
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coordinates
# -----------
@property
def coordinates(self):
"""
Sets the coordinates array, which contains [longitude, latitude] pairs
for the image corners listed in clockwise order: top left, top
right, bottom right, bottom left. Only has an effect for
"image" `sourcetype`.
The 'coordinates' property accepts values of any type
Returns
-------
Any
"""
return self["coordinates"]
@coordinates.setter
def coordinates(self, val):
self["coordinates"] = val
# fill
# ----
@property
def fill(self):
"""
The 'fill' property is an instance of Fill
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.mapbox.layer.Fill`
- A dict of string/value properties that will be passed
to the Fill constructor
Supported dict properties:
outlinecolor
Sets the fill outline color
(mapbox.layer.paint.fill-outline-color). Has an
effect only when `type` is set to "fill".
Returns
-------
plotly.graph_objs.layout.mapbox.layer.Fill
"""
return self["fill"]
@fill.setter
def fill(self, val):
self["fill"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.mapbox.layer.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
dash
Sets the length of dashes and gaps
(mapbox.layer.paint.line-dasharray). Has an
effect only when `type` is set to "line".
dashsrc
Sets the source reference on Chart Studio Cloud
for `dash`.
width
Sets the line width (mapbox.layer.paint.line-
width). Has an effect only when `type` is set
to "line".
Returns
-------
plotly.graph_objs.layout.mapbox.layer.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# maxzoom
# -------
@property
def maxzoom(self):
"""
Sets the maximum zoom level (mapbox.layer.maxzoom). At zoom
levels equal to or greater than the maxzoom, the layer will be
hidden.
The 'maxzoom' property is a number and may be specified as:
- An int or float in the interval [0, 24]
Returns
-------
int|float
"""
return self["maxzoom"]
@maxzoom.setter
def maxzoom(self, val):
self["maxzoom"] = val
# minzoom
# -------
@property
def minzoom(self):
"""
Sets the minimum zoom level (mapbox.layer.minzoom). At zoom
levels less than the minzoom, the layer will be hidden.
The 'minzoom' property is a number and may be specified as:
- An int or float in the interval [0, 24]
Returns
-------
int|float
"""
return self["minzoom"]
@minzoom.setter
def minzoom(self, val):
self["minzoom"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the layer. If `type` is "circle", opacity
corresponds to the circle opacity (mapbox.layer.paint.circle-
opacity) If `type` is "line", opacity corresponds to the line
opacity (mapbox.layer.paint.line-opacity) If `type` is "fill",
opacity corresponds to the fill opacity
(mapbox.layer.paint.fill-opacity) If `type` is "symbol",
opacity corresponds to the icon/text opacity
(mapbox.layer.paint.text-opacity)
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# source
# ------
@property
def source(self):
"""
Sets the source data for this layer (mapbox.layer.source). When
`sourcetype` is set to "geojson", `source` can be a URL to a
GeoJSON or a GeoJSON object. When `sourcetype` is set to
"vector" or "raster", `source` can be a URL or an array of tile
URLs. When `sourcetype` is set to "image", `source` can be a
URL to an image.
The 'source' property accepts values of any type
Returns
-------
Any
"""
return self["source"]
@source.setter
def source(self, val):
self["source"] = val
# sourceattribution
# -----------------
@property
def sourceattribution(self):
"""
Sets the attribution for this source.
The 'sourceattribution' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["sourceattribution"]
@sourceattribution.setter
def sourceattribution(self, val):
self["sourceattribution"] = val
# sourcelayer
# -----------
@property
def sourcelayer(self):
"""
Specifies the layer to use from a vector tile source
(mapbox.layer.source-layer). Required for "vector" source type
that supports multiple layers.
The 'sourcelayer' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["sourcelayer"]
@sourcelayer.setter
def sourcelayer(self, val):
self["sourcelayer"] = val
# sourcetype
# ----------
@property
def sourcetype(self):
"""
Sets the source type for this layer, that is the type of the
layer data.
The 'sourcetype' property is an enumeration that may be specified as:
- One of the following enumeration values:
['geojson', 'vector', 'raster', 'image']
Returns
-------
Any
"""
return self["sourcetype"]
@sourcetype.setter
def sourcetype(self, val):
self["sourcetype"] = val
# symbol
# ------
@property
def symbol(self):
"""
The 'symbol' property is an instance of Symbol
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.mapbox.layer.Symbol`
- A dict of string/value properties that will be passed
to the Symbol constructor
Supported dict properties:
icon
Sets the symbol icon image
(mapbox.layer.layout.icon-image). Full list:
https://www.mapbox.com/maki-icons/
iconsize
Sets the symbol icon size
(mapbox.layer.layout.icon-size). Has an effect
only when `type` is set to "symbol".
placement
Sets the symbol and/or text placement
(mapbox.layer.layout.symbol-placement). If
`placement` is "point", the label is placed
where the geometry is located If `placement` is
"line", the label is placed along the line of
the geometry If `placement` is "line-center",
the label is placed on the center of the
geometry
text
Sets the symbol text (mapbox.layer.layout.text-
field).
textfont
Sets the icon text font
(color=mapbox.layer.paint.text-color,
size=mapbox.layer.layout.text-size). Has an
effect only when `type` is set to "symbol".
textposition
Sets the positions of the `text` elements with
respects to the (x,y) coordinates.
Returns
-------
plotly.graph_objs.layout.mapbox.layer.Symbol
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# type
# ----
@property
def type(self):
"""
Sets the layer type, that is, how the layer data set in
`source` will be rendered. With `sourcetype` set to "geojson",
the following values are allowed: "circle", "line", "fill" and
"symbol", but note that "line" and "fill" are not compatible
with Point GeoJSON geometries. With `sourcetype` set to
"vector", the following values are allowed: "circle", "line",
"fill" and "symbol". With `sourcetype` set to "raster" or
"image", only the "raster" value is allowed.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['circle', 'line', 'fill', 'symbol', 'raster']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether this layer is displayed
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
below
Determines if the layer will be inserted before the
layer with the specified ID. If omitted or set to '',
the layer will be inserted above every existing layer.
circle
:class:`plotly.graph_objects.layout.mapbox.layer.Circle
` instance or dict with compatible properties
color
Sets the primary layer color. If `type` is "circle",
color corresponds to the circle color
(mapbox.layer.paint.circle-color) If `type` is "line",
color corresponds to the line color
(mapbox.layer.paint.line-color) If `type` is "fill",
color corresponds to the fill color
(mapbox.layer.paint.fill-color) If `type` is "symbol",
color corresponds to the icon color
(mapbox.layer.paint.icon-color)
coordinates
Sets the coordinates array, which contains [longitude,
latitude] pairs for the image corners listed in
clockwise order: top left, top right, bottom right,
bottom left. Only has an effect for "image"
`sourcetype`.
fill
:class:`plotly.graph_objects.layout.mapbox.layer.Fill`
instance or dict with compatible properties
line
:class:`plotly.graph_objects.layout.mapbox.layer.Line`
instance or dict with compatible properties
maxzoom
Sets the maximum zoom level (mapbox.layer.maxzoom). At
zoom levels equal to or greater than the maxzoom, the
layer will be hidden.
minzoom
Sets the minimum zoom level (mapbox.layer.minzoom). At
zoom levels less than the minzoom, the layer will be
hidden.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the layer. If `type` is "circle",
opacity corresponds to the circle opacity
(mapbox.layer.paint.circle-opacity) If `type` is
"line", opacity corresponds to the line opacity
(mapbox.layer.paint.line-opacity) If `type` is "fill",
opacity corresponds to the fill opacity
(mapbox.layer.paint.fill-opacity) If `type` is
"symbol", opacity corresponds to the icon/text opacity
(mapbox.layer.paint.text-opacity)
source
Sets the source data for this layer
(mapbox.layer.source). When `sourcetype` is set to
"geojson", `source` can be a URL to a GeoJSON or a
GeoJSON object. When `sourcetype` is set to "vector" or
"raster", `source` can be a URL or an array of tile
URLs. When `sourcetype` is set to "image", `source` can
be a URL to an image.
sourceattribution
Sets the attribution for this source.
sourcelayer
Specifies the layer to use from a vector tile source
(mapbox.layer.source-layer). Required for "vector"
source type that supports multiple layers.
sourcetype
Sets the source type for this layer, that is the type
of the layer data.
symbol
:class:`plotly.graph_objects.layout.mapbox.layer.Symbol
` instance or dict with compatible properties
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Sets the layer type, that is, how the layer data set
in `source` will be rendered. With `sourcetype` set to
"geojson", the following values are allowed: "circle",
"line", "fill" and "symbol", but note that "line" and
"fill" are not compatible with Point GeoJSON
geometries. With `sourcetype` set to "vector", the
following values are allowed: "circle", "line", "fill"
and "symbol". With `sourcetype` set to "raster" or
"image", only the "raster" value is allowed.
visible
Determines whether this layer is displayed
"""
def __init__(
self,
arg=None,
below=None,
circle=None,
color=None,
coordinates=None,
fill=None,
line=None,
maxzoom=None,
minzoom=None,
name=None,
opacity=None,
source=None,
sourceattribution=None,
sourcelayer=None,
sourcetype=None,
symbol=None,
templateitemname=None,
type=None,
visible=None,
**kwargs
):
"""
Construct a new Layer object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.mapbox.Layer`
below
Determines if the layer will be inserted before the
layer with the specified ID. If omitted or set to '',
the layer will be inserted above every existing layer.
circle
:class:`plotly.graph_objects.layout.mapbox.layer.Circle
` instance or dict with compatible properties
color
Sets the primary layer color. If `type` is "circle",
color corresponds to the circle color
(mapbox.layer.paint.circle-color) If `type` is "line",
color corresponds to the line color
(mapbox.layer.paint.line-color) If `type` is "fill",
color corresponds to the fill color
(mapbox.layer.paint.fill-color) If `type` is "symbol",
color corresponds to the icon color
(mapbox.layer.paint.icon-color)
coordinates
Sets the coordinates array, which contains [longitude,
latitude] pairs for the image corners listed in
clockwise order: top left, top right, bottom right,
bottom left. Only has an effect for "image"
`sourcetype`.
fill
:class:`plotly.graph_objects.layout.mapbox.layer.Fill`
instance or dict with compatible properties
line
:class:`plotly.graph_objects.layout.mapbox.layer.Line`
instance or dict with compatible properties
maxzoom
Sets the maximum zoom level (mapbox.layer.maxzoom). At
zoom levels equal to or greater than the maxzoom, the
layer will be hidden.
minzoom
Sets the minimum zoom level (mapbox.layer.minzoom). At
zoom levels less than the minzoom, the layer will be
hidden.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the layer. If `type` is "circle",
opacity corresponds to the circle opacity
(mapbox.layer.paint.circle-opacity) If `type` is
"line", opacity corresponds to the line opacity
(mapbox.layer.paint.line-opacity) If `type` is "fill",
opacity corresponds to the fill opacity
(mapbox.layer.paint.fill-opacity) If `type` is
"symbol", opacity corresponds to the icon/text opacity
(mapbox.layer.paint.text-opacity)
source
Sets the source data for this layer
(mapbox.layer.source). When `sourcetype` is set to
"geojson", `source` can be a URL to a GeoJSON or a
GeoJSON object. When `sourcetype` is set to "vector" or
"raster", `source` can be a URL or an array of tile
URLs. When `sourcetype` is set to "image", `source` can
be a URL to an image.
sourceattribution
Sets the attribution for this source.
sourcelayer
Specifies the layer to use from a vector tile source
(mapbox.layer.source-layer). Required for "vector"
source type that supports multiple layers.
sourcetype
Sets the source type for this layer, that is the type
of the layer data.
symbol
:class:`plotly.graph_objects.layout.mapbox.layer.Symbol
` instance or dict with compatible properties
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Sets the layer type, that is, how the layer data set
in `source` will be rendered. With `sourcetype` set to
"geojson", the following values are allowed: "circle",
"line", "fill" and "symbol", but note that "line" and
"fill" are not compatible with Point GeoJSON
geometries. With `sourcetype` set to "vector", the
following values are allowed: "circle", "line", "fill"
and "symbol". With `sourcetype` set to "raster" or
"image", only the "raster" value is allowed.
visible
Determines whether this layer is displayed
Returns
-------
Layer
"""
super(Layer, self).__init__("layers")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.mapbox.Layer
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.mapbox.Layer`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("below", None)
_v = below if below is not None else _v
if _v is not None:
self["below"] = _v
_v = arg.pop("circle", None)
_v = circle if circle is not None else _v
if _v is not None:
self["circle"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("coordinates", None)
_v = coordinates if coordinates is not None else _v
if _v is not None:
self["coordinates"] = _v
_v = arg.pop("fill", None)
_v = fill if fill is not None else _v
if _v is not None:
self["fill"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("maxzoom", None)
_v = maxzoom if maxzoom is not None else _v
if _v is not None:
self["maxzoom"] = _v
_v = arg.pop("minzoom", None)
_v = minzoom if minzoom is not None else _v
if _v is not None:
self["minzoom"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("source", None)
_v = source if source is not None else _v
if _v is not None:
self["source"] = _v
_v = arg.pop("sourceattribution", None)
_v = sourceattribution if sourceattribution is not None else _v
if _v is not None:
self["sourceattribution"] = _v
_v = arg.pop("sourcelayer", None)
_v = sourcelayer if sourcelayer is not None else _v
if _v is not None:
self["sourcelayer"] = _v
_v = arg.pop("sourcetype", None)
_v = sourcetype if sourcetype is not None else _v
if _v is not None:
self["sourcetype"] = _v
_v = arg.pop("symbol", None)
_v = symbol if symbol is not None else _v
if _v is not None:
self["symbol"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
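# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the generated module).
# It assumes a plotly version that exposes `plotly.graph_objects` and builds a
# small inline GeoJSON polygon so the example is self-contained.
if __name__ == "__main__":
    import plotly.graph_objects as go

    # A minimal GeoJSON feature used as the layer source.
    square = {
        "type": "Feature",
        "geometry": {
            "type": "Polygon",
            "coordinates": [[[-5.0, 0.0], [5.0, 0.0], [5.0, 5.0], [-5.0, 5.0], [-5.0, 0.0]]],
        },
    }
    # Construct a fill layer from the GeoJSON source.
    layer = go.layout.mapbox.Layer(
        sourcetype="geojson",
        source=square,
        type="fill",
        color="royalblue",
        opacity=0.4,
    )
    fig = go.Figure(go.Scattermapbox(lat=[2.5], lon=[0.0], mode="markers"))
    fig.update_layout(mapbox_style="open-street-map", mapbox_layers=[layer])
    # fig.show()  # uncomment to render interactively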
{
"filename": "_variant.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/cone/colorbar/tickfont/_variant.py",
"type": "Python"
}
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="variant", parent_name="cone.colorbar.tickfont", **kwargs
):
super(VariantValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop(
"values",
[
"normal",
"small-caps",
"all-small-caps",
"all-petite-caps",
"petite-caps",
"unicase",
],
),
**kwargs,
)
{
"filename": "contrast_tools.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/sandbox/stats/contrast_tools.py",
"type": "Python"
}
'''functions to work with contrasts for multiple tests
contrast matrices for comparing all pairs, all levels to reference level, ...
extension to 2-way groups in progress
TwoWay: class for bringing two-way analysis together and try out
various helper functions
Idea for second part
- get all transformation matrices to move in between different full rank
parameterizations
- standardize to one parameterization to get all interesting effects.
- multivariate normal distribution
- exploit or expand what we have in LikelihoodResults, cov_params, f_test,
t_test, example: resols_dropf_full.cov_params(C2)
- connect to new multiple comparison for contrast matrices, based on
multivariate normal or t distribution (Hothorn, Bretz, Westfall)
'''
from numpy.testing import assert_equal
import numpy as np
import statsmodels.api as sm  # needed at module level by the TwoWay class below
#next 3 functions copied from multicomp.py
def contrast_allpairs(nm):
'''contrast or restriction matrix for all pairs of nm variables
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm*(nm-1)/2, nm)
contrast matrix for all pairwise comparisons
'''
contr = []
for i in range(nm):
for j in range(i+1, nm):
contr_row = np.zeros(nm)
contr_row[i] = 1
contr_row[j] = -1
contr.append(contr_row)
return np.array(contr)
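# Example (illustrative, doctest style as used elsewhere in this module):
# >>> contrast_allpairs(3)
# array([[ 1., -1.,  0.],
#        [ 1.,  0., -1.],
#        [ 0.,  1., -1.]])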
def contrast_all_one(nm):
'''contrast or restriction matrix for all against first comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm-1, nm)
contrast matrix for all against first comparisons
'''
contr = np.column_stack((np.ones(nm-1), -np.eye(nm-1)))
return contr
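# Example (illustrative): each row contrasts the first level with one of the
# others, e.g. contrast_all_one(3) gives rows [1, -1, 0] and [1, 0, -1]
# (as floats).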
def contrast_diff_mean(nm):
'''contrast or restriction matrix for all against mean comparison
Parameters
----------
nm : int
Returns
-------
contr : ndarray, 2d, (nm, nm)
contrast matrix for all against mean comparisons
'''
return np.eye(nm) - np.ones((nm,nm))/nm
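# Example (illustrative): deviations from the grand mean; for nm=3 the first
# row of contrast_diff_mean(3) is [2/3, -1/3, -1/3]. Note the full nm x nm
# matrix is returned, so its rows are linearly dependent (each row sums to 0).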
def signstr(x, noplus=False):
if x in [-1,0,1]:
if not noplus:
return '+' if np.sign(x)>=0 else '-'
else:
return '' if np.sign(x)>=0 else '-'
else:
return str(x)
def contrast_labels(contrasts, names, reverse=False):
if reverse:
sl = slice(None, None, -1)
else:
sl = slice(None)
labels = [''.join([f'{signstr(c, noplus=True)}{v}'
for c, v in list(zip(row, names))[sl] if c != 0])
for row in contrasts]
return labels
def contrast_product(names1, names2, intgroup1=None, intgroup2=None, pairs=False):
'''build contrast matrices for products of two categorical variables
this is an experimental script and should be converted to a class
Parameters
----------
names1, names2 : lists of strings
contains the list of level labels for each categorical variable
intgroup1, intgroup2 : ndarrays TODO: this part not tested, finished yet
categorical variable
Notes
-----
This creates a full rank matrix. It does not do all pairwise comparisons,
parameterization is using contrast_all_one to get differences with first
level.
? does contrast_all_pairs work as a plugin to get all pairs ?
'''
n1 = len(names1)
n2 = len(names2)
names_prod = [f'{i}_{j}' for i in names1 for j in names2]
ee1 = np.zeros((1,n1))
ee1[0,0] = 1
if not pairs:
dd = np.r_[ee1, -contrast_all_one(n1)]
else:
dd = np.r_[ee1, -contrast_allpairs(n1)]
contrast_prod = np.kron(dd[1:], np.eye(n2))
names_contrast_prod0 = contrast_labels(contrast_prod, names_prod, reverse=True)
names_contrast_prod = [''.join([f'{signstr(c, noplus=True)}{v}'
for c, v in list(zip(row, names_prod))[::-1] if c != 0])
for row in contrast_prod]
ee2 = np.zeros((1,n2))
ee2[0,0] = 1
#dd2 = np.r_[ee2, -contrast_all_one(n2)]
if not pairs:
dd2 = np.r_[ee2, -contrast_all_one(n2)]
else:
dd2 = np.r_[ee2, -contrast_allpairs(n2)]
contrast_prod2 = np.kron(np.eye(n1), dd2[1:])
names_contrast_prod2 = [''.join([f'{signstr(c, noplus=True)}{v}'
for c, v in list(zip(row, names_prod))[::-1] if c != 0])
for row in contrast_prod2]
if (intgroup1 is not None) and (intgroup2 is not None):
d1, _ = dummy_1d(intgroup1)
d2, _ = dummy_1d(intgroup2)
dummy = dummy_product(d1, d2)
else:
dummy = None
return (names_prod, contrast_prod, names_contrast_prod,
contrast_prod2, names_contrast_prod2, dummy)
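# Example (illustrative; see also TestContrastTools.test_contrast_product):
# >>> res = contrast_product(['a0', 'a1'], ['b0', 'b1'])
# >>> res[0]
# ['a0_b0', 'a0_b1', 'a1_b0', 'a1_b1']
# >>> res[2]
# ['a1_b0-a0_b0', 'a1_b1-a0_b1']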
def dummy_1d(x, varname=None):
'''dummy variable for id integer groups
Parameters
----------
x : ndarray, 1d
categorical variable, requires integers if varname is None
varname : str
name of the variable used in labels for category levels
Returns
-------
dummy : ndarray, 2d
array of dummy variables, one column for each level of the
category (full set)
labels : list[str]
labels for the columns, i.e. levels of each category
Notes
-----
use tools.categorical instead for more options
See Also
--------
statsmodels.tools.categorical
Examples
--------
>>> x = np.array(['F', 'F', 'M', 'M', 'F', 'F', 'M', 'M', 'F', 'F', 'M', 'M'],
dtype='|S1')
>>> dummy_1d(x, varname='gender')
(array([[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]), ['gender_F', 'gender_M'])
'''
if varname is None: #assumes integer
labels = ['level_%d' % i for i in range(x.max() + 1)]
return (x[:,None]==np.arange(x.max()+1)).astype(int), labels
else:
grouplabels = np.unique(x)
labels = [varname + '_%s' % str(i) for i in grouplabels]
return (x[:,None]==grouplabels).astype(int), labels
def dummy_product(d1, d2, method='full'):
'''dummy variable from product of two dummy variables
Parameters
----------
d1, d2 : ndarray
two dummy variables, assumes full set for methods 'drop-last'
and 'drop-first'
method : {'full', 'drop-last', 'drop-first'}
'full' returns the full product, encoding of intersection of
categories.
The drop methods provide a difference dummy encoding:
(constant, main effects, interaction effects). The first or last columns
of the dummy variable (i.e. levels) are dropped to get full rank
dummy matrix.
Returns
-------
dummy : ndarray
dummy variable for product, see method
'''
if method == 'full':
dd = (d1[:,:,None]*d2[:,None,:]).reshape(d1.shape[0],-1)
elif method == 'drop-last': #same as SAS transreg
d12rl = dummy_product(d1[:,:-1], d2[:,:-1])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,:-1], d2[:,:-1],d12rl))
#Note: dtype int should preserve dtype of d1 and d2
elif method == 'drop-first':
d12r = dummy_product(d1[:,1:], d2[:,1:])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,1:], d2[:,1:],d12r))
else:
raise ValueError('method not recognized')
return dd
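# Example (illustrative): the 'full' method gives one column per cell of the
# two-way design.
# >>> d1 = dummy_1d(np.array([0, 0, 1, 1]))[0]
# >>> d2 = dummy_1d(np.array([0, 1, 0, 1]))[0]
# >>> dummy_product(d1, d2)
# array([[1, 0, 0, 0],
#        [0, 1, 0, 0],
#        [0, 0, 1, 0],
#        [0, 0, 0, 1]])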
def dummy_limits(d):
'''start and endpoints of groups in a sorted dummy variable array
helper function for nested categories
Examples
--------
>>> d1 = np.array([[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
>>> dummy_limits(d1)
(array([0, 4, 8]), array([ 4, 8, 12]))
get group slices from an array
>>> [np.arange(d1.shape[0])[b:e] for b,e in zip(*dummy_limits(d1))]
[array([0, 1, 2, 3]), array([4, 5, 6, 7]), array([ 8, 9, 10, 11])]
>>> [np.arange(d1.shape[0])[b:e] for b,e in zip(*dummy_limits(d1))]
[array([0, 1, 2, 3]), array([4, 5, 6, 7]), array([ 8, 9, 10, 11])]
'''
nobs, nvars = d.shape
start1, col1 = np.nonzero(np.diff(d,axis=0)==1)
end1, col1_ = np.nonzero(np.diff(d,axis=0)==-1)
cc = np.arange(nvars)
#print(cc, np.r_[[0], col1], np.r_[col1_, [nvars-1]]
if ((not (np.r_[[0], col1] == cc).all())
or (not (np.r_[col1_, [nvars-1]] == cc).all())):
raise ValueError('dummy variable is not sorted')
start = np.r_[[0], start1+1]
end = np.r_[end1+1, [nobs]]
return start, end
def dummy_nested(d1, d2, method='full'):
'''unfinished and incomplete, mainly a copy-paste of dummy_product
dummy variable from product of two dummy variables
Parameters
----------
d1, d2 : ndarray
two dummy variables, d2 is assumed to be nested in d1
Assumes full set for methods 'drop-last' and 'drop-first'.
method : {'full', 'drop-last', 'drop-first'}
'full' returns the full product, which in this case is d2.
The drop methods provide an effects encoding:
(constant, main effects, subgroup effects). The first or last columns
of the dummy variable (i.e. levels) are dropped to get full rank
encoding.
Returns
-------
dummy : ndarray
dummy variable for product, see method
'''
if method == 'full':
return d2
start1, end1 = dummy_limits(d1)
start2, end2 = dummy_limits(d2)
first = np.in1d(start2, start1)
last = np.in1d(end2, end1)
equal = (first == last)
col_dropf = ~first*~equal
col_dropl = ~last*~equal
if method == 'drop-last':
d12rl = dummy_product(d1[:,:-1], d2[:,:-1])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,:-1], d2[:,col_dropl]))
#Note: dtype int should preserve dtype of d1 and d2
elif method == 'drop-first':
d12r = dummy_product(d1[:,1:], d2[:,1:])
dd = np.column_stack((np.ones(d1.shape[0], int), d1[:,1:], d2[:,col_dropf]))
else:
raise ValueError('method not recognized')
return dd, col_dropf, col_dropl
class DummyTransform:
'''Conversion between full rank dummy encodings
y = X b + u
b = C a
a = C^{-1} b
y = X C a + u
define Z = X C, then
y = Z a + u
contrasts:
R_b b = r
R_a a = R_b C a = r
where R_a = R_b C
Here C is the transform matrix, with dot_left and dot_right as the main
methods, and the same for the inverse transform matrix, C^{-1}
Note:
- The class was mainly written to keep left and right straight.
- No checking is done.
- not sure yet if method names make sense
'''
def __init__(self, d1, d2):
'''C such that d1 C = d2, with d1 = X, d2 = Z
should be (x, z) in arguments ?
'''
self.transf_matrix = np.linalg.lstsq(d1, d2, rcond=-1)[0]
self.invtransf_matrix = np.linalg.lstsq(d2, d1, rcond=-1)[0]
def dot_left(self, a):
''' b = C a
'''
return np.dot(self.transf_matrix, a)
def dot_right(self, x):
''' z = x C
'''
return np.dot(x, self.transf_matrix)
def inv_dot_left(self, b):
''' a = C^{-1} b
'''
return np.dot(self.invtransf_matrix, b)
def inv_dot_right(self, z):
''' x = z C^{-1}
'''
return np.dot(z, self.invtransf_matrix)
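# Example (illustrative): with dd_full and dd_dropf two full rank dummy
# encodings of the same design (as constructed in the __main__ section below):
# >>> tr = DummyTransform(dd_full, dd_dropf)
# >>> np.allclose(tr.dot_right(dd_full), dd_dropf)       # Z = X C
# True
# >>> np.allclose(tr.inv_dot_right(dd_dropf), dd_full)   # X = Z C^{-1}
# True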
def groupmean_d(x, d):
'''groupmeans using dummy variables
Parameters
----------
x : array_like, ndim
data array, tested for 1,2 and 3 dimensions
d : ndarray, 1d
dummy variable, needs to have the same length
as x in axis 0.
Returns
-------
groupmeans : ndarray, ndim-1
means for each group along axis 0, the levels
of the groups are the last axis
Notes
-----
This will be memory intensive if there are many levels
in the categorical variable, i.e. many columns in the
dummy variable. In this case it is recommended to use
a more efficient version.
'''
x = np.asarray(x)
## if x.ndim == 1:
## nvars = 1
## else:
nvars = x.ndim + 1
sli = (slice(None),) + (None,) * (nvars - 2) + (slice(None),)
return (x[..., None] * d[sli]).sum(0) * 1. / d.sum(0)
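# Example (illustrative):
# >>> x = np.arange(6.)
# >>> d = dummy_1d(np.array([0, 0, 0, 1, 1, 1]))[0]
# >>> groupmean_d(x, d)
# array([1., 4.])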
class TwoWay:
'''a wrapper class for two way anova type of analysis with OLS
currently mainly to bring things together
Notes
-----
unclear: adding multiple test might assume block design or orthogonality
This estimates the full dummy version with OLS.
The drop first dummy representation can be recovered through the
transform method.
TODO: add more methods, tests, pairwise, multiple, marginal effects
try out what can be added for user-friendly access.
missing: ANOVA table
'''
def __init__(self, endog, factor1, factor2, varnames=None):
self.nobs = factor1.shape[0]
if varnames is None:
vname1 = 'a'
vname2 = 'b'
else:
vname1, vname2 = varnames
self.d1, self.d1_labels = d1, d1_labels = dummy_1d(factor1, vname1)
self.d2, self.d2_labels = d2, d2_labels = dummy_1d(factor2, vname2)
self.nlevel1 = nlevel1 = d1.shape[1]
self.nlevel2 = nlevel2 = d2.shape[1]
#get product dummies
res = contrast_product(d1_labels, d2_labels)
prodlab, C1, C1lab, C2, C2lab, _ = res
self.prod_label, self.C1, self.C1_label, self.C2, self.C2_label, _ = res
dp_full = dummy_product(d1, d2, method='full')
dp_dropf = dummy_product(d1, d2, method='drop-first')
self.transform = DummyTransform(dp_full, dp_dropf)
#estimate the model
self.nvars = dp_full.shape[1]
self.exog = dp_full
self.resols = sm.OLS(endog, dp_full).fit()
self.params = self.resols.params
#get transformed parameters, (constant, main, interaction effect)
self.params_dropf = self.transform.inv_dot_left(self.params)
self.start_interaction = 1 + (nlevel1 - 1) + (nlevel2 - 1)
self.n_interaction = self.nvars - self.start_interaction
#convert to cached property
def r_nointer(self):
'''contrast/restriction matrix for no interaction
'''
nia = self.n_interaction
R_nointer = np.hstack((np.zeros((nia, self.nvars-nia)), np.eye(nia)))
#inter_direct = resols_full_dropf.tval[-nia:]
R_nointer_transf = self.transform.inv_dot_right(R_nointer)
self.R_nointer_transf = R_nointer_transf
return R_nointer_transf
def ttest_interaction(self):
'''t-tests that the interaction terms are zero
'''
#use self.r_nointer instead
nia = self.n_interaction
R_nointer = np.hstack((np.zeros((nia, self.nvars-nia)), np.eye(nia)))
#inter_direct = resols_full_dropf.tval[-nia:]
R_nointer_transf = self.transform.inv_dot_right(R_nointer)
self.R_nointer_transf = R_nointer_transf
t_res = self.resols.t_test(R_nointer_transf)
return t_res
def ftest_interaction(self):
'''F-test that the interaction terms are zero
'''
R_nointer_transf = self.r_nointer()
return self.resols.f_test(R_nointer_transf)
def ttest_conditional_effect(self, factorind):
if factorind == 1:
return self.resols.t_test(self.C1), self.C1_label
else:
return self.resols.t_test(self.C2), self.C2_label
def summary_coeff(self):
from statsmodels.iolib import SimpleTable
params_arr = self.params.reshape(self.nlevel1, self.nlevel2)
stubs = self.d1_labels
headers = self.d2_labels
title = 'Estimated Coefficients by factors'
table_fmt = dict(
data_fmts = ["%#10.4g"]*self.nlevel2)
return SimpleTable(params_arr, headers, stubs, title=title,
txt_fmt=table_fmt)
# --------------- tests
# TODO: several tests still missing, several are in the example with print
class TestContrastTools:
def __init__(self):
self.v1name = ['a0', 'a1', 'a2']
self.v2name = ['b0', 'b1']
self.d1 = np.array([[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
def test_dummy_1d(self):
x = np.array(['F', 'F', 'M', 'M', 'F', 'F', 'M', 'M', 'F', 'F', 'M', 'M'],
dtype='|S1')
d, labels = (np.array([[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1]]), ['gender_F', 'gender_M'])
res_d, res_labels = dummy_1d(x, varname='gender')
assert_equal(res_d, d)
assert_equal(res_labels, labels)
def test_contrast_product(self):
res_cp = contrast_product(self.v1name, self.v2name)
res_t = [0]*6
res_t[0] = ['a0_b0', 'a0_b1', 'a1_b0', 'a1_b1', 'a2_b0', 'a2_b1']
res_t[1] = np.array([[-1., 0., 1., 0., 0., 0.],
[ 0., -1., 0., 1., 0., 0.],
[-1., 0., 0., 0., 1., 0.],
[ 0., -1., 0., 0., 0., 1.]])
res_t[2] = ['a1_b0-a0_b0', 'a1_b1-a0_b1', 'a2_b0-a0_b0', 'a2_b1-a0_b1']
res_t[3] = np.array([[-1., 1., 0., 0., 0., 0.],
[ 0., 0., -1., 1., 0., 0.],
[ 0., 0., 0., 0., -1., 1.]])
res_t[4] = ['a0_b1-a0_b0', 'a1_b1-a1_b0', 'a2_b1-a2_b0']
for ii in range(5):
np.testing.assert_equal(res_cp[ii], res_t[ii], err_msg=str(ii))
def test_dummy_limits(self):
b,e = dummy_limits(self.d1)
assert_equal(b, np.array([0, 4, 8]))
assert_equal(e, np.array([ 4, 8, 12]))
if __name__ == '__main__':
tt = TestContrastTools()
tt.test_contrast_product()
tt.test_dummy_1d()
tt.test_dummy_limits()
import statsmodels.api as sm
examples = ['small', 'large', None][1]
v1name = ['a0', 'a1', 'a2']
v2name = ['b0', 'b1']
res_cp = contrast_product(v1name, v2name)
print(res_cp)
y = np.arange(12)
x1 = np.arange(12)//4
x2 = np.arange(12)//2 % 2
if 'small' in examples:
d1, d1_labels = dummy_1d(x1)
d2, d2_labels = dummy_1d(x2)
if 'large' in examples:
x1 = np.repeat(x1, 5, axis=0)
x2 = np.repeat(x2, 5, axis=0)
nobs = x1.shape[0]
d1, d1_labels = dummy_1d(x1)
d2, d2_labels = dummy_1d(x2)
dd_full = dummy_product(d1, d2, method='full')
dd_dropl = dummy_product(d1, d2, method='drop-last')
dd_dropf = dummy_product(d1, d2, method='drop-first')
#Note: full parameterization of dummies is orthogonal
#np.eye(6)*10 in "large" example
print((np.dot(dd_full.T, dd_full) == np.diag(dd_full.sum(0))).all())
#check that transforms work
#generate 3 data sets with the 3 different parameterizations
effect_size = [1., 0.01][1]
noise_scale = [0.001, 0.1][0]
noise = noise_scale * np.random.randn(nobs)
beta = effect_size * np.arange(1,7)
ydata_full = (dd_full * beta).sum(1) + noise
ydata_dropl = (dd_dropl * beta).sum(1) + noise
ydata_dropf = (dd_dropf * beta).sum(1) + noise
resols_full_full = sm.OLS(ydata_full, dd_full).fit()
resols_full_dropf = sm.OLS(ydata_full, dd_dropf).fit()
params_f_f = resols_full_full.params
params_f_df = resols_full_dropf.params
resols_dropf_full = sm.OLS(ydata_dropf, dd_full).fit()
resols_dropf_dropf = sm.OLS(ydata_dropf, dd_dropf).fit()
params_df_f = resols_dropf_full.params
params_df_df = resols_dropf_dropf.params
tr_of = np.linalg.lstsq(dd_dropf, dd_full, rcond=-1)[0]
tr_fo = np.linalg.lstsq(dd_full, dd_dropf, rcond=-1)[0]
print(np.dot(tr_fo, params_df_df) - params_df_f)
print(np.dot(tr_of, params_f_f) - params_f_df)
transf_f_df = DummyTransform(dd_full, dd_dropf)
print(np.max(np.abs(dd_full - transf_f_df.inv_dot_right(dd_dropf))))
print(np.max(np.abs(dd_dropf - transf_f_df.dot_right(dd_full))))
print(np.max(np.abs(params_df_df
- transf_f_df.inv_dot_left(params_df_f))))
np.max(np.abs(params_f_df
- transf_f_df.inv_dot_left(params_f_f)))
prodlab, C1, C1lab, C2, C2lab,_ = contrast_product(v1name, v2name)
print('\ntvalues for no effect of factor 1')
print('each test is conditional on a level of factor 2')
print(C1lab)
print(resols_dropf_full.t_test(C1).tvalue)
print('\ntvalues for no effect of factor 2')
print('each test is conditional on a level of factor 1')
print(C2lab)
print(resols_dropf_full.t_test(C2).tvalue)
#covariance matrix of restrictions C2, note: orthogonal
resols_dropf_full.cov_params(C2)
#testing for no interaction effect
R_noint = np.hstack((np.zeros((2,4)), np.eye(2)))
inter_direct = resols_full_dropf.tvalues[-2:]
inter_transf = resols_full_full.t_test(transf_f_df.inv_dot_right(R_noint)).tvalue
print(np.max(np.abs(inter_direct - inter_transf)))
#now with class version
tw = TwoWay(ydata_dropf, x1, x2)
print(tw.ttest_interaction().tvalue)
print(tw.ttest_interaction().pvalue)
print(tw.ftest_interaction().fvalue)
print(tw.ftest_interaction().pvalue)
print(tw.ttest_conditional_effect(1)[0].tvalue)
print(tw.ttest_conditional_effect(2)[0].tvalue)
print(tw.summary_coeff())
''' documentation for early examples while developing - some have changed already
>>> y = np.arange(12)
>>> y
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
>>> x1 = np.arange(12)//4
>>> x1
array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
>>> x2 = np.arange(12)//2%2
>>> x2
array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1])
>>> d1 = dummy_1d(x1)
>>> d1
array([[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
>>> d2 = dummy_1d(x2)
>>> d2
array([[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[0, 1]])
>>> d12 = dummy_product(d1, d2)
>>> d12
array([[1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1]])
>>> d12rl = dummy_product(d1[:,:-1], d2[:,:-1])
>>> np.column_stack((np.ones(d1.shape[0]), d1[:,:-1], d2[:,:-1],d12rl))
array([[ 1., 1., 0., 1., 1., 0.],
[ 1., 1., 0., 1., 1., 0.],
[ 1., 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0., 0.],
[ 1., 0., 1., 1., 0., 1.],
[ 1., 0., 1., 1., 0., 1.],
[ 1., 0., 1., 0., 0., 0.],
[ 1., 0., 1., 0., 0., 0.],
[ 1., 0., 0., 1., 0., 0.],
[ 1., 0., 0., 1., 0., 0.],
[ 1., 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0., 0.]])
'''
#nprod = ['%s_%s' % (i,j) for i in ['a0', 'a1', 'a2'] for j in ['b0', 'b1']]
#>>> [''.join(['%s%s' % (signstr(c),v) for c,v in zip(row, nprod) if c != 0])
# for row in np.kron(dd[1:], np.eye(2))]
'''
>>> nprod = ['%s_%s' % (i,j) for i in ['a0', 'a1', 'a2'] for j in ['b0', 'b1']]
>>> nprod
['a0_b0', 'a0_b1', 'a1_b0', 'a1_b1', 'a2_b0', 'a2_b1']
>>> [''.join(['%s%s' % (signstr(c),v) for c,v in zip(row, nprod) if c != 0]) for row in np.kron(dd[1:], np.eye(2))]
['-a0b0+a1b0', '-a0b1+a1b1', '-a0b0+a2b0', '-a0b1+a2b1']
>>> [''.join(['%s%s' % (signstr(c),v) for c,v in zip(row, nprod)[::-1] if c != 0]) for row in np.kron(dd[1:], np.eye(2))]
['+a1_b0-a0_b0', '+a1_b1-a0_b1', '+a2_b0-a0_b0', '+a2_b1-a0_b1']
>>> np.r_[[[1,0,0,0,0]],contrast_all_one(5)]
array([[ 1., 0., 0., 0., 0.],
[ 1., -1., 0., 0., 0.],
[ 1., 0., -1., 0., 0.],
[ 1., 0., 0., -1., 0.],
[ 1., 0., 0., 0., -1.]])
>>> idxprod = [(i,j) for i in range(3) for j in range(2)]
>>> idxprod
[(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
>>> np.array(idxprod).reshape(2,3,2,order='F')[:,:,0]
array([[0, 1, 2],
[0, 1, 2]])
>>> np.array(idxprod).reshape(2,3,2,order='F')[:,:,1]
array([[0, 0, 0],
[1, 1, 1]])
>>> dd3_ = np.r_[[[0,0,0]],contrast_all_one(3)]
pairwise contrasts and reparameterization
dd = np.r_[[[1,0,0,0,0]],-contrast_all_one(5)]
>>> dd
array([[ 1., 0., 0., 0., 0.],
[-1., 1., 0., 0., 0.],
[-1., 0., 1., 0., 0.],
[-1., 0., 0., 1., 0.],
[-1., 0., 0., 0., 1.]])
>>> np.dot(dd.T, np.arange(5))
array([-10., 1., 2., 3., 4.])
>>> np.round(np.linalg.inv(dd.T)).astype(int)
array([[1, 1, 1, 1, 1],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]])
>>> np.round(np.linalg.inv(dd)).astype(int)
array([[1, 0, 0, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 1, 0, 0],
[1, 0, 0, 1, 0],
[1, 0, 0, 0, 1]])
>>> dd
array([[ 1., 0., 0., 0., 0.],
[-1., 1., 0., 0., 0.],
[-1., 0., 1., 0., 0.],
[-1., 0., 0., 1., 0.],
[-1., 0., 0., 0., 1.]])
>>> ddinv=np.round(np.linalg.inv(dd.T)).astype(int)
>>> np.dot(ddinv, np.arange(5))
array([10, 1, 2, 3, 4])
>>> np.dot(dd, np.arange(5))
array([ 0., 1., 2., 3., 4.])
>>> np.dot(dd, 5+np.arange(5))
array([ 5., 1., 2., 3., 4.])
>>> ddinv2 = np.round(np.linalg.inv(dd)).astype(int)
>>> np.dot(ddinv2, np.arange(5))
array([0, 1, 2, 3, 4])
>>> np.dot(ddinv2, 5+np.arange(5))
array([ 5, 11, 12, 13, 14])
>>> np.dot(ddinv2, [5, 0, 0 , 1, 2])
array([5, 5, 5, 6, 7])
>>> np.dot(ddinv2, dd)
array([[ 1., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1.]])
>>> dd3 = -np.r_[[[1,0,0]],contrast_all_one(3)]
>>> dd2 = -np.r_[[[1,0]],contrast_all_one(2)]
>>> np.kron(np.eye(3), dd2)
array([[-1., 0., 0., 0., 0., 0.],
[-1., 1., 0., 0., 0., 0.],
[ 0., 0., -1., 0., 0., 0.],
[ 0., 0., -1., 1., 0., 0.],
[ 0., 0., 0., 0., -1., 0.],
[ 0., 0., 0., 0., -1., 1.]])
>>> dd2
array([[-1., 0.],
[-1., 1.]])
>>> np.kron(np.eye(3), dd2[1:])
array([[-1., 1., 0., 0., 0., 0.],
[ 0., 0., -1., 1., 0., 0.],
[ 0., 0., 0., 0., -1., 1.]])
>>> np.kron(dd[1:], np.eye(2))
array([[-1., 0., 1., 0., 0., 0.],
[ 0., -1., 0., 1., 0., 0.],
[-1., 0., 0., 0., 1., 0.],
[ 0., -1., 0., 0., 0., 1.]])
d_ = np.r_[[[1,0,0,0,0]],contrast_all_one(5)]
>>> d_
array([[ 1., 0., 0., 0., 0.],
[ 1., -1., 0., 0., 0.],
[ 1., 0., -1., 0., 0.],
[ 1., 0., 0., -1., 0.],
[ 1., 0., 0., 0., -1.]])
>>> np.round(np.linalg.pinv(d_)).astype(int)
array([[ 1, 0, 0, 0, 0],
[ 1, -1, 0, 0, 0],
[ 1, 0, -1, 0, 0],
[ 1, 0, 0, -1, 0],
[ 1, 0, 0, 0, -1]])
>>> np.linalg.inv(d_).astype(int)
array([[ 1, 0, 0, 0, 0],
[ 1, -1, 0, 0, 0],
[ 1, 0, -1, 0, 0],
[ 1, 0, 0, -1, 0],
[ 1, 0, 0, 0, -1]])
group means
>>> sli = [slice(None)] + [None]*(3-2) + [slice(None)]
>>> (np.column_stack((y, x1, x2))[...,None] * d1[sli]).sum(0)*1./d1.sum(0)
array([[ 1.5, 5.5, 9.5],
[ 0. , 1. , 2. ],
[ 0.5, 0.5, 0.5]])
>>> [(z[:,None] * d1).sum(0)*1./d1.sum(0) for z in np.column_stack((y, x1, x2)).T]
[array([ 1.5, 5.5, 9.5]), array([ 0., 1., 2.]), array([ 0.5, 0.5, 0.5])]
>>>
'''
{
"filename": "test_params.py",
"repo_name": "cpinte/pymcfost",
"repo_path": "pymcfost_extracted/pymcfost-master/tests/test_params.py",
"type": "Python"
}
import pytest
from pymcfost.parameters import _word_to_bool
true_strings = ["True", ".True.", "TRUE", ".TRUE.", "true", ".true.", "t", ".t.", "T", ".T."]
@pytest.mark.parametrize("string", true_strings)
def test_read_true_string(string):
assert _word_to_bool(string)
false_strings = ["False", ".False.", "FALSE", ".FALSE.", "false", ".false.", "f", ".f.", "F", ".F."]
@pytest.mark.parametrize("string", false_strings)
def test_read_false_string(string):
assert not _word_to_bool(string)
{
"filename": "_marker.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnel/_marker.py",
"type": "Python"
}
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="marker", parent_name="funnel", **kwargs):
super(MarkerValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Marker"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if `marker.color` is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax`. Has an effect
only if `marker.color` is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if `marker.color` is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.funnel.marker.Colo
rBar` instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use `marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
line
:class:`plotly.graph_objects.funnel.marker.Line
` instance or dict with compatible properties
opacity
Sets the opacity of the bars.
opacitysrc
Sets the source reference on Chart Studio Cloud
for `opacity`.
reversescale
Reverses the color mapping if true. Has an
effect only if `marker.color` is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
`marker.color` is set to a numerical array.
""",
),
**kwargs,
)
{
"filename": "dashline.md",
"repo_name": "jbroll/starbase",
"repo_path": "starbase_extracted/starbase-master/docs/dashline.md",
"type": "Markdown"
}
### `dashline` - output the dashline of a starbase data table.
SYNOPSIS
--------
```
dashline [-i input] [-o output] [-t ~template~] [column]
```
DESCRIPTION
-----------
`dashline` prints the dashed line that separates the header and data portions
of a starbase data table.
OPTIONS
-------
All of the options of the [column](column.html) program are also available with `dashline`.
`dashline` is exactly like running `column -hd`.
{% include colstd-opts.md %}
EXAMPLES
--------
```
john@panic : dashline < tab
- - -
```
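Because `dashline` is equivalent to `column -hd`, the same output can be produced directly with `column` (hypothetical session shown for illustration):
```
john@panic : column -hd < tab
- - -
```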
{% include column-seealso.md %}
{
"filename": "__init__.py",
"repo_name": "lgrcia/prose",
"repo_path": "prose_extracted/prose-main/prose/blocks/__init__.py",
"type": "Python"
}
from prose.core.block import Block
from .alignment import *
from .catalogs import *
from .centroids import *
from .detection import *
from .geometry import *
from .photometry import *
from .psf import *
from .utils import *
from .visualization import *
{
"filename": "fno.py",
"repo_name": "neuraloperator/neuraloperator",
"repo_path": "neuraloperator_extracted/neuraloperator-main/neuralop/models/fno.py",
"type": "Python"
}
from functools import partialmethod
from typing import Tuple, List, Union
Number = Union[float, int]
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..layers.embeddings import GridEmbeddingND, GridEmbedding2D
from ..layers.spectral_convolution import SpectralConv
from ..layers.padding import DomainPadding
from ..layers.fno_block import FNOBlocks
from ..layers.channel_mlp import ChannelMLP
from ..layers.complex import ComplexValued
from .base_model import BaseModel
class FNO(BaseModel, name='FNO'):
"""N-Dimensional Fourier Neural Operator. The FNO learns a mapping between
spaces of functions discretized over regular grids using Fourier convolutions,
as described in [1]_.
The key component of an FNO is its SpectralConv layer (see
``neuralop.layers.spectral_convolution``), which is similar to a standard CNN
conv layer but operates in the frequency domain.
For a deeper dive into the FNO architecture, refer to :ref:`fno_intro`.
Parameters
----------
n_modes : Tuple[int]
number of modes to keep in Fourier Layer, along each dimension
The dimensionality of the FNO is inferred from ``len(n_modes)``
in_channels : int
Number of channels in input function
out_channels : int
Number of channels in output function
hidden_channels : int
width of the FNO (i.e. number of channels), by default 256
n_layers : int, optional
Number of Fourier Layers, by default 4
Documentation for more advanced parameters is below.
Other parameters
------------------
lifting_channel_ratio : int, optional
ratio of lifting channels to hidden_channels, by default 2
The number of lifting channels in the lifting block of the FNO is
lifting_channel_ratio * hidden_channels (e.g. default 512)
projection_channel_ratio : int, optional
ratio of projection channels to hidden_channels, by default 2
The number of projection channels in the projection block of the FNO is
projection_channel_ratio * hidden_channels (e.g. default 512)
positional_embedding : Union[str, nn.Module], optional
Positional embedding to apply to last channels of raw input
before being passed through the FNO. Defaults to "grid"
* If "grid", appends a grid positional embedding with default settings to
the last channels of raw input. Assumes the inputs are discretized
over a grid with entry [0,0,...] at the origin and side lengths of 1.
* If an initialized GridEmbedding module, uses this module directly
See :mod:`neuralop.embeddings.GridEmbeddingND` for details.
* If None, does nothing
non_linearity : nn.Module, optional
Non-Linear activation function module to use, by default F.gelu
norm : str {"ada_in", "group_norm", "instance_norm"}, optional
Normalization layer to use, by default None
complex_data : bool, optional
Whether data is complex-valued (default False)
if True, initializes complex-valued modules.
channel_mlp_dropout : float, optional
dropout parameter for ChannelMLP in FNO Block, by default 0
channel_mlp_expansion : float, optional
expansion parameter for ChannelMLP in FNO Block, by default 0.5
channel_mlp_skip : str {'linear', 'identity', 'soft-gating'}, optional
Type of skip connection to use in channel-mixing mlp, by default 'soft-gating'
fno_skip : str {'linear', 'identity', 'soft-gating'}, optional
Type of skip connection to use in FNO layers, by default 'linear'
resolution_scaling_factor : Union[Number, List[Number]], optional
layer-wise factor by which to scale the domain resolution of function, by default None
* If a single number n, scales resolution by n at each layer
* if a list of numbers [n_0, n_1,...] scales layer i's resolution by n_i.
domain_padding : Union[Number, List[Number]], optional
If not None, percentage of padding to use, by default None
To vary the percentage of padding used along each input dimension,
pass in a list of percentages e.g. [p1, p2, ..., pN] such that
p1 corresponds to the percentage of padding along dim 1, etc.
domain_padding_mode : str {'symmetric', 'one-sided'}, optional
How to perform domain padding, by default 'one-sided'
fno_block_precision : str {'full', 'half', 'mixed'}, optional
precision mode in which to perform spectral convolution, by default "full"
stabilizer : str {'tanh'} | None, optional
whether to use a tanh stabilizer in FNO block, by default None
Note: stabilizer greatly improves performance in the case
`fno_block_precision='mixed'`.
max_n_modes : Tuple[int] | None, optional
* If not None, this allows incrementally increasing the number of
modes in the Fourier domain during training. Each entry of n_modes
must not exceed the corresponding entry of max_n_modes.
* If None, all the n_modes are used.
This can be updated dynamically during training.
factorization : str, optional
Tensor factorization of the FNO layer weights to use, by default None.
* If None, a dense tensor parametrizes the Spectral convolutions
* Otherwise, the specified tensor factorization is used.
rank : float, optional
tensor rank to use in above factorization, by default 1.0
fixed_rank_modes : bool, optional
Modes to not factorize, by default False
implementation : str {'factorized', 'reconstructed'}, optional
* If 'factorized', implements tensor contraction with the individual factors of the decomposition
* If 'reconstructed', implements with the reconstructed full tensorized weight.
decomposition_kwargs : dict, optional
extra kwargs for tensor decomposition (see `tltorch.FactorizedTensor`), by default dict()
separable : bool, optional (**DEACTIVATED**)
if True, use a depthwise separable spectral convolution, by default False
preactivation : bool, optional (**DEACTIVATED**)
whether to compute FNO forward pass with resnet-style preactivation, by default False
conv_module : nn.Module, optional
module to use for FNOBlock's convolutions, by default SpectralConv
Examples
---------
>>> from neuralop.models import FNO
>>> model = FNO(n_modes=(12,12), in_channels=1, out_channels=1, hidden_channels=64)
>>> model
FNO(
(positional_embedding): GridEmbeddingND()
(fno_blocks): FNOBlocks(
(convs): SpectralConv(
(weight): ModuleList(
(0-3): 4 x DenseTensor(shape=torch.Size([64, 64, 12, 7]), rank=None)
)
)
... torch.nn.Module printout truncated ...
References
-----------
.. [1] :
Li, Z. et al. "Fourier Neural Operator for Parametric Partial Differential
Equations" (2021). ICLR 2021, https://arxiv.org/pdf/2010.08895.
"""
def __init__(
self,
n_modes: Tuple[int],
in_channels: int,
out_channels: int,
hidden_channels: int,
n_layers: int=4,
lifting_channel_ratio: int=2,
projection_channel_ratio: int=2,
positional_embedding: Union[str, nn.Module]="grid",
non_linearity: nn.Module=F.gelu,
norm: str=None,
complex_data: bool=False,
channel_mlp_dropout: float=0,
channel_mlp_expansion: float=0.5,
channel_mlp_skip: str="soft-gating",
fno_skip: str="linear",
resolution_scaling_factor: Union[Number, List[Number]]=None,
domain_padding: Union[Number, List[Number]]=None,
domain_padding_mode: str="one-sided",
fno_block_precision: str="full",
stabilizer: str=None,
max_n_modes: Tuple[int]=None,
factorization: str=None,
rank: float=1.0,
fixed_rank_modes: bool=False,
implementation: str="factorized",
decomposition_kwargs: dict=dict(),
separable: bool=False,
preactivation: bool=False,
conv_module: nn.Module=SpectralConv,
**kwargs
):
super().__init__()
self.n_dim = len(n_modes)
# n_modes is a special property - see the class' property for underlying mechanism
# When updated, change should be reflected in fno blocks
self._n_modes = n_modes
self.hidden_channels = hidden_channels
self.in_channels = in_channels
self.out_channels = out_channels
self.n_layers = n_layers
# init lifting and projection channels using ratios w.r.t hidden channels
self.lifting_channel_ratio = lifting_channel_ratio
self.lifting_channels = lifting_channel_ratio * self.hidden_channels
self.projection_channel_ratio = projection_channel_ratio
self.projection_channels = projection_channel_ratio * self.hidden_channels
self.non_linearity = non_linearity
self.rank = rank
self.factorization = factorization
self.fixed_rank_modes = fixed_rank_modes
self.decomposition_kwargs = decomposition_kwargs
self.fno_skip = (fno_skip,)
self.channel_mlp_skip = (channel_mlp_skip,)
self.implementation = implementation
self.separable = separable
self.preactivation = preactivation
self.complex_data = complex_data
self.fno_block_precision = fno_block_precision
if positional_embedding == "grid":
spatial_grid_boundaries = [[0., 1.]] * self.n_dim
self.positional_embedding = GridEmbeddingND(in_channels=self.in_channels,
dim=self.n_dim,
grid_boundaries=spatial_grid_boundaries)
elif isinstance(positional_embedding, GridEmbedding2D):
if self.n_dim == 2:
self.positional_embedding = positional_embedding
else:
raise ValueError(f'Error: expected {self.n_dim}-d positional embeddings, got {positional_embedding}')
elif isinstance(positional_embedding, GridEmbeddingND):
self.positional_embedding = positional_embedding
elif positional_embedding is None:
self.positional_embedding = None
else:
raise ValueError(f"Error: tried to instantiate FNO positional embedding with {positional_embedding},\
expected one of \'grid\', GridEmbeddingND")
if domain_padding is not None and (
(isinstance(domain_padding, list) and sum(domain_padding) > 0)
or (isinstance(domain_padding, (float, int)) and domain_padding > 0)
):
self.domain_padding = DomainPadding(
domain_padding=domain_padding,
padding_mode=domain_padding_mode,
resolution_scaling_factor=resolution_scaling_factor,
)
else:
self.domain_padding = None
self.domain_padding_mode = domain_padding_mode
self.complex_data = self.complex_data
if resolution_scaling_factor is not None:
if isinstance(resolution_scaling_factor, (float, int)):
resolution_scaling_factor = [resolution_scaling_factor] * self.n_layers
self.resolution_scaling_factor = resolution_scaling_factor
self.fno_blocks = FNOBlocks(
in_channels=hidden_channels,
out_channels=hidden_channels,
n_modes=self.n_modes,
resolution_scaling_factor=resolution_scaling_factor,
channel_mlp_dropout=channel_mlp_dropout,
channel_mlp_expansion=channel_mlp_expansion,
non_linearity=non_linearity,
stabilizer=stabilizer,
norm=norm,
preactivation=preactivation,
fno_skip=fno_skip,
channel_mlp_skip=channel_mlp_skip,
complex_data=complex_data,
max_n_modes=max_n_modes,
fno_block_precision=fno_block_precision,
rank=rank,
fixed_rank_modes=fixed_rank_modes,
implementation=implementation,
separable=separable,
factorization=factorization,
decomposition_kwargs=decomposition_kwargs,
conv_module=conv_module,
n_layers=n_layers,
**kwargs
)
# if adding a positional embedding, add those channels to lifting
lifting_in_channels = self.in_channels
if self.positional_embedding is not None:
lifting_in_channels += self.n_dim
# if lifting_channels is passed, make lifting a Channel-Mixing MLP
# with a hidden layer of size lifting_channels
if self.lifting_channels:
self.lifting = ChannelMLP(
in_channels=lifting_in_channels,
out_channels=self.hidden_channels,
hidden_channels=self.lifting_channels,
n_layers=2,
n_dim=self.n_dim,
non_linearity=non_linearity
)
# otherwise, make it a linear layer
else:
self.lifting = ChannelMLP(
in_channels=lifting_in_channels,
hidden_channels=self.hidden_channels,
out_channels=self.hidden_channels,
n_layers=1,
n_dim=self.n_dim,
non_linearity=non_linearity
)
# Convert lifting to a complex ChannelMLP if self.complex_data==True
if self.complex_data:
self.lifting = ComplexValued(self.lifting)
self.projection = ChannelMLP(
in_channels=self.hidden_channels,
out_channels=out_channels,
hidden_channels=self.projection_channels,
n_layers=2,
n_dim=self.n_dim,
non_linearity=non_linearity,
)
if self.complex_data:
self.projection = ComplexValued(self.projection)
def forward(self, x, output_shape=None, **kwargs):
"""FNO's forward pass
1. Applies optional positional encoding
2. Sends inputs through a lifting layer to a high-dimensional latent space
3. Applies optional domain padding to high-dimensional intermediate function representation
4. Applies `n_layers` Fourier/FNO layers in sequence (SpectralConvolution + skip connections, nonlinearity)
5. If domain padding was applied, domain padding is removed
6. Projection of intermediate function representation to the output channels
Parameters
----------
x : tensor
input tensor
output_shape : {tuple, tuple list, None}, default is None
Gives the option of specifying the exact output shape for odd shaped inputs.
* If None, don't specify an output shape
* If tuple, specifies the output-shape of the **last** FNO Block
* If tuple list, specifies the exact output-shape of each FNO Block
"""
if output_shape is None:
output_shape = [None]*self.n_layers
elif isinstance(output_shape, tuple):
output_shape = [None]*(self.n_layers - 1) + [output_shape]
# append spatial pos embedding if set
if self.positional_embedding is not None:
x = self.positional_embedding(x)
x = self.lifting(x)
if self.domain_padding is not None:
x = self.domain_padding.pad(x)
for layer_idx in range(self.n_layers):
x = self.fno_blocks(x, layer_idx, output_shape=output_shape[layer_idx])
if self.domain_padding is not None:
x = self.domain_padding.unpad(x)
x = self.projection(x)
return x
@property
def n_modes(self):
return self._n_modes
@n_modes.setter
def n_modes(self, n_modes):
self.fno_blocks.n_modes = n_modes
self._n_modes = n_modes
class FNO1d(FNO):
"""1D Fourier Neural Operator
For the full list of parameters, see :class:`neuralop.models.FNO`.
Parameters
----------
n_modes_height : int
number of Fourier modes to keep along the height
"""
def __init__(
self,
n_modes_height,
hidden_channels,
in_channels=3,
out_channels=1,
lifting_channels=256,
projection_channels=256,
max_n_modes=None,
n_layers=4,
resolution_scaling_factor=None,
non_linearity=F.gelu,
stabilizer=None,
complex_data=False,
fno_block_precision="full",
channel_mlp_dropout=0,
channel_mlp_expansion=0.5,
norm=None,
skip="soft-gating",
separable=False,
preactivation=False,
factorization=None,
rank=1.0,
fixed_rank_modes=False,
implementation="factorized",
decomposition_kwargs=dict(),
domain_padding=None,
domain_padding_mode="one-sided",
**kwargs
):
super().__init__(
n_modes=(n_modes_height,),
hidden_channels=hidden_channels,
in_channels=in_channels,
out_channels=out_channels,
lifting_channels=lifting_channels,
projection_channels=projection_channels,
n_layers=n_layers,
resolution_scaling_factor=resolution_scaling_factor,
non_linearity=non_linearity,
stabilizer=stabilizer,
complex_data=complex_data,
fno_block_precision=fno_block_precision,
channel_mlp_dropout=channel_mlp_dropout,
channel_mlp_expansion=channel_mlp_expansion,
max_n_modes=max_n_modes,
norm=norm,
skip=skip,
separable=separable,
preactivation=preactivation,
factorization=factorization,
rank=rank,
fixed_rank_modes=fixed_rank_modes,
implementation=implementation,
decomposition_kwargs=decomposition_kwargs,
domain_padding=domain_padding,
domain_padding_mode=domain_padding_mode,
)
self.n_modes_height = n_modes_height
class FNO2d(FNO):
"""2D Fourier Neural Operator
For the full list of parameters, see :class:`neuralop.models.FNO`.
Parameters
----------
n_modes_width : int
number of modes to keep in Fourier Layer, along the width
n_modes_height : int
number of Fourier modes to keep along the height
"""
def __init__(
self,
n_modes_height,
n_modes_width,
hidden_channels,
in_channels=3,
out_channels=1,
lifting_channels=256,
projection_channels=256,
n_layers=4,
resolution_scaling_factor=None,
max_n_modes=None,
non_linearity=F.gelu,
stabilizer=None,
complex_data=False,
fno_block_precision="full",
channel_mlp_dropout=0,
channel_mlp_expansion=0.5,
norm=None,
skip="soft-gating",
separable=False,
preactivation=False,
factorization=None,
rank=1.0,
fixed_rank_modes=False,
implementation="factorized",
decomposition_kwargs=dict(),
domain_padding=None,
domain_padding_mode="one-sided",
**kwargs
):
super().__init__(
n_modes=(n_modes_height, n_modes_width),
hidden_channels=hidden_channels,
in_channels=in_channels,
out_channels=out_channels,
lifting_channels=lifting_channels,
projection_channels=projection_channels,
n_layers=n_layers,
resolution_scaling_factor=resolution_scaling_factor,
non_linearity=non_linearity,
stabilizer=stabilizer,
complex_data=complex_data,
fno_block_precision=fno_block_precision,
channel_mlp_dropout=channel_mlp_dropout,
channel_mlp_expansion=channel_mlp_expansion,
max_n_modes=max_n_modes,
norm=norm,
skip=skip,
separable=separable,
preactivation=preactivation,
factorization=factorization,
rank=rank,
fixed_rank_modes=fixed_rank_modes,
implementation=implementation,
decomposition_kwargs=decomposition_kwargs,
domain_padding=domain_padding,
domain_padding_mode=domain_padding_mode,
)
self.n_modes_height = n_modes_height
self.n_modes_width = n_modes_width
class FNO3d(FNO):
"""3D Fourier Neural Operator
For the full list of parameters, see :class:`neuralop.models.FNO`.
Parameters
----------
n_modes_width : int
number of modes to keep in Fourier Layer, along the width
n_modes_height : int
number of Fourier modes to keep along the height
n_modes_depth : int
number of Fourier modes to keep along the depth
"""
def __init__(
self,
n_modes_height,
n_modes_width,
n_modes_depth,
hidden_channels,
in_channels=3,
out_channels=1,
lifting_channels=256,
projection_channels=256,
n_layers=4,
resolution_scaling_factor=None,
max_n_modes=None,
non_linearity=F.gelu,
stabilizer=None,
complex_data=False,
fno_block_precision="full",
channel_mlp_dropout=0,
channel_mlp_expansion=0.5,
norm=None,
skip="soft-gating",
separable=False,
preactivation=False,
factorization=None,
rank=1.0,
fixed_rank_modes=False,
implementation="factorized",
decomposition_kwargs=dict(),
domain_padding=None,
domain_padding_mode="one-sided",
**kwargs
):
super().__init__(
n_modes=(n_modes_height, n_modes_width, n_modes_depth),
hidden_channels=hidden_channels,
in_channels=in_channels,
out_channels=out_channels,
lifting_channels=lifting_channels,
projection_channels=projection_channels,
n_layers=n_layers,
resolution_scaling_factor=resolution_scaling_factor,
non_linearity=non_linearity,
stabilizer=stabilizer,
complex_data=complex_data,
fno_block_precision=fno_block_precision,
max_n_modes=max_n_modes,
channel_mlp_dropout=channel_mlp_dropout,
channel_mlp_expansion=channel_mlp_expansion,
norm=norm,
skip=skip,
separable=separable,
preactivation=preactivation,
factorization=factorization,
rank=rank,
fixed_rank_modes=fixed_rank_modes,
implementation=implementation,
decomposition_kwargs=decomposition_kwargs,
domain_padding=domain_padding,
domain_padding_mode=domain_padding_mode,
)
self.n_modes_height = n_modes_height
self.n_modes_width = n_modes_width
self.n_modes_depth = n_modes_depth
def partialclass(new_name, cls, *args, **kwargs):
"""Create a new class with different default values
Notes
-----
An obvious alternative would be to use functools.partial
>>> new_class = partial(cls, **kwargs)
The issue is twofold:
1. the class doesn't have a name, so one would have to set it explicitly:
>>> new_class.__name__ = new_name
2. the new class will be a functools object and one cannot inherit from it.
Instead, here, we define dynamically a new class, inheriting from the existing one.
"""
__init__ = partialmethod(cls.__init__, *args, **kwargs)
new_class = type(
new_name,
(cls,),
{
"__init__": __init__,
"__doc__": cls.__doc__,
"forward": cls.forward,
},
)
return new_class
TFNO = partialclass("TFNO", FNO, factorization="Tucker")
TFNO1d = partialclass("TFNO1d", FNO1d, factorization="Tucker")
TFNO2d = partialclass("TFNO2d", FNO2d, factorization="Tucker")
TFNO3d = partialclass("TFNO3d", FNO3d, factorization="Tucker")
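# --- Illustrative usage sketch (editorial addition, not part of the upstream module) ---
# Builds a small 2D FNO and runs one forward pass on random data, to show the
# expected channel-first layout (batch, channels, *spatial) of inputs and outputs.
if __name__ == "__main__":
    model = FNO(n_modes=(12, 12), in_channels=1, out_channels=1, hidden_channels=32)
    x = torch.randn(4, 1, 64, 64)   # (batch, in_channels, height, width)
    y = model(x)                    # (batch, out_channels, height, width)
    print(y.shape)                  # expected: torch.Size([4, 1, 64, 64])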
|
neuraloperatorREPO_NAMEneuraloperatorPATH_START.@neuraloperator_extracted@neuraloperator-main@neuralop@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "pymc-devs/pymc",
"repo_path": "pymc_extracted/pymc-main/tests/model/transform/__init__.py",
"type": "Python"
}
|
# Copyright 2024 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
pymc-devsREPO_NAMEpymcPATH_START.@pymc_extracted@pymc-main@tests@model@transform@[email protected]_END.py
|
{
"filename": "sotl2_jax.ipynb",
"repo_name": "HajimeKawahara/sot",
"repo_path": "sot_extracted/sot-master/tutorial/sotl2_jax.ipynb",
"type": "Jupyter Notebook"
}
|
## SOT JAX using L2
This code retrieves a surface map from the integrated reflected light curve of an Earth analog.
See
- Kawahara & Fujii 2010 (http://adsabs.harvard.edu/cgi-bin/bib_query?arXiv:1004.5152)
- Kawahara & Fujii 2011 (http://adsabs.harvard.edu/cgi-bin/bib_query?arXiv:1106.0136)
- Fujii & Kawahara 2012 (http://adsabs.harvard.edu/cgi-bin/bib_query?arXiv:1204.3504)
for more details.
You need healpy, jax, and the sot package (https://github.com/HajimeKawahara/sot).
```python
%load_ext autoreload
%autoreload 2
%matplotlib inline
```
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
```python
import numpy as np
import healpy as hp
import pylab
import matplotlib.pyplot as plt
import time
import jax.numpy as jnp
from jax import random
from sot.core import jaxweight as jw
```
```python
#set geometry
inc=0.0
Thetaeq=np.pi
zeta=np.pi/3.0
Pspin=23.9344699/24.0 #Pspin: a sidereal day
wspin=2*np.pi/Pspin
#Porb=365.242190402
Porb=40.0
worb=2*np.pi/Porb
N=1024
obst=jnp.linspace(0.0,Porb,N)
```
```python
# test map
nside=16
mmap=(hp.read_map("../data/mockalbedo16.fits"))
mask=(mmap>0.0)
mmap[mask]=1.0
mmap=jnp.asarray(mmap)
hp.mollview(mmap, title="Cloud-subtracted Earth",flip="geo",cmap=plt.cm.bone,min=0,max=1)
#hp.mollview(Kvolall,title="Kvol",flip="geo",cmap=plt.cm.Spectral,min=-0.3,max=0.3)
hp.graticule(color="white");
M=len(mmap)
```
NSIDE = 16
ORDERING = RING in fits file
INDXSCHM = IMPLICIT
0.0 180.0 -180.0 180.0
The interval between parallels is 30 deg -0.00'.
The interval between meridians is 30 deg -0.00'.

```python
Thetav=worb*obst
Phiv=jnp.mod(wspin*obst,2*np.pi)
WI,WV=jw.comp_weight(nside,zeta,inc,Thetaeq,Thetav,Phiv)
```
```python
W=jnp.array(WV*WI)
lc=jnp.dot(W,mmap)
key = random.PRNGKey(0)
sigma=jnp.mean(lc)*0.0
noise=np.random.normal(0.0,sigma,len(lc))
lc=lc+noise
```
```python
fig= plt.figure(figsize=(10,7.5))
ax = fig.add_subplot(111)
ax.plot(obst/obst[-1],lc/np.max(lc),lw=2,color="gray")
plt.legend(loc="upper right",prop={'size':11},frameon=False)
plt.tick_params(labelsize=18)
plt.ylabel("light intensity",fontsize=18)
plt.xlabel("time [yr]",fontsize=18)
plt.title("cloudless Earth",fontsize=18)
plt.savefig("sotlc.png", bbox_inches="tight", pad_inches=0.0)
plt.show()
```
WARNING:matplotlib.legend:No handles with labels found to put in legend.

## Fixing a spin vector, we infer the geography using the Adam optimizer in JAX
```python
# loss or objective function
def objective(m):
lam=2.0
f=jnp.sum((lc - jnp.dot(W,m))**2) + lam*jnp.sum(m*m)
return f
```
```python
# ADAM
#see https://jax.readthedocs.io/en/latest/jax.experimental.optimizers.html
from jax.experimental import optimizers
from jax import jit
from jax import value_and_grad
m0 = jnp.array(np.random.normal(0.0,1.0,np.shape(mmap)))
opt_init, opt_update, get_params = optimizers.adam(1e0)
opt_state = opt_init(m0)
@jit
def step(t, opt_state):
value, grads = value_and_grad(objective)(get_params(opt_state))
opt_state = opt_update(t, grads, opt_state)
return value, opt_state
for t in range(10000):
value, opt_state = step(t, opt_state)
```
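Since the objective above is quadratic in `m`, the same L2 (ridge) problem also has a closed-form solution, which makes a useful cross-check on the optimizer. A minimal sketch (not part of the original notebook; it reuses `W`, `lc`, and the same `lam` as above):
```python
# Closed-form ridge solution of the same objective (illustrative cross-check):
# m_closed = argmin_m ||lc - W m||^2 + lam * ||m||^2
lam = 2.0
gram = jnp.dot(W.T, W) + lam * jnp.eye(W.shape[1])
m_closed = jnp.linalg.solve(gram, jnp.dot(W.T, lc))
```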
```python
m = get_params(opt_state)
mnp=np.array(m)
hp.mollview(mnp, title="",flip="geo",cmap=plt.cm.bone)
hp.graticule(color="white");
```
0.0 180.0 -180.0 180.0
The interval between parallels is 30 deg -0.00'.
The interval between meridians is 30 deg -0.00'.

### Then, we infer the geography and spin simultaneously
```python
def objective2(x):
m,zeta,Thetaeq=x
WI,WV=jw.comp_weight(nside,zeta,inc,Thetaeq,Thetav,Phiv)
W=jnp.array(WV*WI)
lam=2.0
f=jnp.sum((lc - jnp.dot(W,m))**2) + lam*jnp.sum(m*m)
return f
```
```python
import tqdm
m0 = jnp.array(np.random.normal(0.0,1.0,np.shape(mmap)))
zeta0=1.0
thetaeq0=3.0
opt_init, opt_update, get_params = optimizers.adam(1e-2)
opt_state = opt_init([m0,zeta0,thetaeq0])
@jit
def step(t, opt_state):
value, grads = value_and_grad(objective2)(get_params(opt_state))
opt_state = opt_update(t, grads, opt_state)
return value, opt_state
for t in tqdm.tqdm(range(10000)):
value, opt_state = step(t, opt_state)
```
100%|██████████| 10000/10000 [06:11<00:00, 26.89it/s]
```python
m,zeta_inf,thetaeq_inf = get_params(opt_state)
print(zeta_inf,thetaeq_inf)
print(zeta,thetaeq)
```
1.0465717 3.1406984
1.049021 3.1409156
```python
m,zeta,thetaeq = get_params(opt_state)
mnp=np.array(m)
hp.mollview(mnp, title="",flip="geo",cmap=plt.cm.bone)
hp.graticule(color="white");
```
0.0 180.0 -180.0 180.0
The interval between parallels is 30 deg -0.00'.
The interval between meridians is 30 deg -0.00'.

```python
# manual steepest gradient descent is very, very slow
m = jnp.array(np.random.normal(0.0,1.0,np.shape(mmap)))
tmp_val = 1e5
while True:
val, grad_val = value_and_grad(objective)(m)
m = m - 1e-5*grad_val
#print(tmp_val,jnp.abs(val - tmp_val).sum())
if jnp.abs(val - tmp_val).sum() < 0.5:
break
tmp_val = val
```
```python
mnp=np.array(m)
hp.mollview(mnp, title="",flip="geo",cmap=plt.cm.bone)
hp.graticule(color="white");
```
0.0 180.0 -180.0 180.0
The interval between parallels is 30 deg -0.00'.
The interval between meridians is 30 deg -0.00'.

```python
```
|
HajimeKawaharaREPO_NAMEsotPATH_START.@sot_extracted@sot-master@tutorial@[email protected]_END.py
|
{
"filename": "test_rss.py",
"repo_name": "sdss/marvin",
"repo_path": "marvin_extracted/marvin-main/tests/tools/test_rss.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews
# @Date: 2018-07-24
# @Filename: test_rss.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: José Sánchez-Gallego ([email protected])
# @Last modified time: 2018-08-04 13:35:39
import astropy.io.fits
import astropy.table
import numpy
import pytest
import marvin
from ..conftest import Galaxy, set_the_config
@pytest.fixture(scope='session')
def galaxy(get_params, plateifu):
"""Yield an instance of a Galaxy object for use in tests."""
release, bintype, template = get_params
set_the_config(release)
gal = Galaxy(plateifu=plateifu)
gal.set_params(bintype=bintype, template=template, release=release)
gal.set_filepaths()
gal.set_galaxy_data()
yield gal
@pytest.fixture(scope='session')
def rss_session(galaxy, mode):
# These get created only once per session.
# if mode == 'auto' or str(galaxy.bintype) != 'SPX':
# pytest.skip()
if mode == 'local':
rss = marvin.tools.RSS(filename=galaxy.rsspath, release=galaxy.release, mode='local')
else:
rss = marvin.tools.RSS(plateifu=galaxy.plateifu, release=galaxy.release, mode='remote')
rss.expdata = galaxy.rss
yield rss
@pytest.fixture(scope='function')
def rss(rss_session):
# In some of the tests we modify the RSS objects. Here we implement
# a setup procedure that "unloads" the RSSFiber objects and resets the
# autoload attribute.
for rssfiber in rss_session:
rssfiber.loaded = False
rss_session.autoload = True
yield rss_session
@pytest.fixture(scope='session')
def rssfiber(rss_session):
fiberid = 0
if rss_session[fiberid].loaded is False:
rss_session[fiberid].load()
yield rss_session[fiberid]
@pytest.mark.usefixtures('monkeyauth')
class TestRSS(object):
def test_rss_init(self, rss):
assert isinstance(rss, marvin.tools.RSS)
assert isinstance(rss, marvin.tools.mixins.NSAMixIn)
assert isinstance(rss, list)
assert isinstance(rss.obsinfo, astropy.table.Table)
if rss.mode == 'file':
assert isinstance(rss.data, astropy.io.fits.HDUList)
assert rss._wavelength is not None
assert len(rss) == rss._nfibers
rss.autoload = False # To make things faster for this test
assert all([isinstance(rss_fiber, marvin.tools.rss.RSSFiber) for rss_fiber in rss])
@pytest.mark.parametrize('autoload', [True, False])
def test_rss_autoload(self, rss, autoload):
rss.autoload = autoload
assert rss[0].loaded is autoload
def test_load(self, rss):
rss.autoload = False
assert rss[0].loaded is False
rss[0].load()
assert rss[0].loaded is True
def test_load_all(self, rss):
if rss.mode == 'remote':
pytest.skip()
rss.load_all()
assert all([rss_fiber.loaded is True for rss_fiber in rss])
def test_obsinfo_to_rssfiber(self, rss):
# We get it in this complicated way so that it is a different way of
# obtaining it than in the _populate_fibres method.
ifusize = int(str(rss.ifu)[0:-2])
exp_idx = 0
n_fiber = 1
for rssfiber in rss:
assert numpy.all(rss.obsinfo[exp_idx] == rssfiber.obsinfo)
n_fiber += 1
if n_fiber > ifusize:
n_fiber = 1
exp_idx += 1
def test_getcube(self, rss):
cube = rss.getCube()
assert isinstance(cube, marvin.tools.Cube)
assert cube.mode == rss.mode
assert cube.plateifu == rss.plateifu
assert cube.mangaid == rss.mangaid
assert cube.release == rss.release
def test_select_fibers(self, rss):
# Skipping for API or it will take forever. Should not matter since
# we have already tested slicing for API.
if rss.data_origin == 'api':
pytest.skip()
fibers_expnum = rss.select_fibers(exposure_no=rss.expdata['expnum'])
assert len(fibers_expnum) == rss.expdata['nfiber']
assert fibers_expnum[0].obsinfo['EXPNUM'][0] == rss.expdata['expnum']
fibers_mjd = rss.select_fibers(mjd=1234)
assert len(fibers_mjd) == 0
fibers_mjd = rss.select_fibers(mjd=rss.expdata['mjd'])
assert len(fibers_mjd) == (rss.expdata['nexp'] * rss.expdata['nfiber'])
assert fibers_mjd[0].obsinfo['MJD'][0] == rss.expdata['mjd']
@pytest.mark.usefixtures('monkeyauth')
class TestRSSFiber(object):
def test_rssfiber_spectra(self, rssfiber):
assert isinstance(rssfiber, marvin.tools.RSSFiber)
assert isinstance(rssfiber.rss, marvin.tools.RSS)
assert isinstance(rssfiber.obsinfo, astropy.table.Table)
assert hasattr(rssfiber, 'ivar')
assert isinstance(rssfiber.ivar, numpy.ndarray)
assert len(rssfiber.ivar) == len(rssfiber.wavelength)
assert hasattr(rssfiber, 'mask')
assert isinstance(rssfiber.mask, numpy.ndarray)
assert len(rssfiber.mask) == len(rssfiber.wavelength)
for dm_element in rssfiber.rss.datamodel.rss + rssfiber.rss.datamodel.spectra:
if dm_element.name == 'flux':
continue
spectrum = getattr(rssfiber, dm_element.name, None)
assert spectrum is not None
assert isinstance(spectrum, numpy.ndarray)
assert len(spectrum) == len(rssfiber.wavelength)
def test_rssfiber_data(self, rssfiber):
rss_filename = rssfiber.rss._getFullPath()
rss_hdu = astropy.io.fits.open(rss_filename)
numpy.testing.assert_allclose(rss_hdu['FLUX'].data[rssfiber.fiberid, :], rssfiber.value)
numpy.testing.assert_allclose(rss_hdu['IVAR'].data[rssfiber.fiberid, :], rssfiber.ivar)
numpy.testing.assert_array_equal(rss_hdu['MASK'].data[rssfiber.fiberid, :], rssfiber.mask)
for dm_element in rssfiber.rss.datamodel.rss:
if dm_element.name == 'flux':
continue
fits_data = rss_hdu[dm_element.fits_extension()].data[rssfiber.fiberid, :]
numpy.testing.assert_allclose(fits_data, getattr(rssfiber, dm_element.name).value)
for dm_element in rssfiber.rss.datamodel.spectra:
fits_data = rss_hdu[dm_element.fits_extension()].data
numpy.testing.assert_allclose(fits_data, getattr(rssfiber, dm_element.name).value)
def test_rssfiber_slice(self, rssfiber):
n_elements = 10
sliced = rssfiber[0:n_elements]
assert len(sliced.value) == n_elements
numpy.testing.assert_allclose(sliced.value, rssfiber.value[0:n_elements])
assert len(sliced.ivar) == n_elements
assert len(sliced.mask) == n_elements
for dm_element in rssfiber.rss.datamodel.rss + rssfiber.rss.datamodel.spectra:
if dm_element.name == 'flux':
continue
spectrum_sliced = getattr(sliced, dm_element.name, None)
assert len(spectrum_sliced) == n_elements
assert sliced.obsinfo is not None
def test_rssfiber_masked(self, rssfiber):
assert numpy.sum(rssfiber.masked.mask) > 0
def test_rssfiber_descale(self, rssfiber):
descaled = rssfiber.descale()
numpy.testing.assert_allclose(descaled.value, rssfiber.value * rssfiber.unit.scale)
assert descaled.obsinfo is not None
class TestPickling(object):
def test_pickling_file(self, temp_scratch, rss):
if rss.data_origin == 'file':
assert rss.data is not None
rss_file = temp_scratch / 'test_rss.mpf'
rss.save(str(rss_file))
assert rss_file.exists() is True
rss_restored = marvin.tools.RSS.restore(str(rss_file))
assert rss_restored.data_origin == rss.data_origin
assert isinstance(rss_restored, marvin.tools.RSS)
assert len(rss_restored) > 0
assert isinstance(rss_restored[0], marvin.tools.RSSFiber)
assert numpy.sum(rss_restored[0].value) > 0
if rss.data_origin == 'file':
assert rss_restored.data is not None
else:
assert rss_restored.data is None
|
sdssREPO_NAMEmarvinPATH_START.@marvin_extracted@marvin-main@tests@tools@[email protected]_END.py
|
{
"filename": "_ticktext.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter/marker/colorbar/_ticktext.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicktextValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self, plotly_name="ticktext", parent_name="scatter.marker.colorbar", **kwargs
):
super(TicktextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@scatter@marker@colorbar@[email protected]_END.py
|
{
"filename": "test_roman.py",
"repo_name": "GalSim-developers/GalSim",
"repo_path": "GalSim_extracted/GalSim-main/tests/test_roman.py",
"type": "Python"
}
|
# Copyright (c) 2012-2023 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
import logging
import os
import sys
import numpy as np
import datetime
from unittest import mock
import galsim
import galsim.roman
from galsim_test_helpers import *
@timer
def skip_roman_wcs():
"""Test the Roman WCS routines against ones provided by the Roman project office.
"""
# This test is out of date and is not run, but since it was a useful test, the code is kept here
# as a reminder to reinstate it if/when we get an updated version of the WCS software from the
# Roman project office for cycle 7+. Everything below this comment is the original code from
# GalSim v1.4.
######################################################################################3
# The standard against which we will compare is the output of some software provided by Jeff
# Kruk. The files used here were generated by Rachel on her Macbook using the script in
# roman_files/make_standards.sh, and none of the parameters below can be changed without
# modifying and rerunning that script. We use 4 sky positions and rotation angles (2 defined
# using the focal plane array, 2 using the observatory coordinates), and in each case, use a
# different SCA for our tests. We will simply read in the stored WCS and generate new ones, and
# check that they have the right value of SCA center and pixel scale at the center, and that if
# we offset by 500 pixels in some direction that gives the same sky position in each case.
ra_test = [127., 307.4, -61.52, 0.0]
dec_test = [-70., 50., 22.7, 0.0]
pa_test = [160., 79., 23.4, -3.1]
sca_test = [2, 13, 7, 18]
import datetime
ve = datetime.datetime(2025,3,20,9,2,0)
date_test = [ve, ve, ve, datetime.date(2025,6,20)]
pa_is_fpa_test = [True, False, True, False]
dist_arcsec = []
dist_2_arcsec = []
pix_area_ratio = []
for i_test in range(len(ra_test)):
# Make the WCS for this test.
world_pos = galsim.CelestialCoord(ra_test[i_test]*galsim.degrees,
dec_test[i_test]*galsim.degrees)
if i_test == 0:
# Just for this case, we want to get the WCS for all SCAs. This will enable some
# additional tests that we don't do for the other test case.
gs_wcs_dict = galsim.roman.getWCS(PA=pa_test[i_test]*galsim.degrees,
world_pos=world_pos,
PA_is_FPA=pa_is_fpa_test[i_test],
date=date_test[i_test])
np.testing.assert_equal(
len(gs_wcs_dict), galsim.roman.n_sca,
err_msg='WCS dict has wrong length: %d vs. %d'%(len(gs_wcs_dict),
galsim.roman.n_sca))
else:
# Use the SCAs keyword to just get the WCS for the SCA that we want.
gs_wcs_dict = galsim.roman.getWCS(PA=pa_test[i_test]*galsim.degrees,
world_pos=world_pos,
PA_is_FPA=pa_is_fpa_test[i_test],
SCAs=sca_test[i_test],
date=date_test[i_test])
np.testing.assert_equal(
len(gs_wcs_dict), 1,
err_msg='WCS dict has wrong length: %d vs. %d'%(len(gs_wcs_dict), 1))
# Read in reference.
test_file = 'test%d_sca_%02d.fits'%(i_test+1, sca_test[i_test])
ref_wcs = galsim.FitsWCS(os.path.join('roman_files',test_file))
gs_wcs = gs_wcs_dict[sca_test[i_test]]
# Check center position:
im_cent_pos = galsim.PositionD(galsim.roman.n_pix/2., galsim.roman.n_pix/2)
ref_cent_pos = ref_wcs.toWorld(im_cent_pos)
gs_cent_pos = gs_wcs.toWorld(im_cent_pos)
dist_arcsec.append(ref_cent_pos.distanceTo(gs_cent_pos) / galsim.arcsec)
# Check pixel area
rat = ref_wcs.pixelArea(image_pos=im_cent_pos)/gs_wcs.pixelArea(image_pos=im_cent_pos)
pix_area_ratio.append(rat-1.)
# Check another position, just in case rotations are messed up.
im_other_pos = galsim.PositionD(im_cent_pos.x+500., im_cent_pos.y-200.)
ref_other_pos = ref_wcs.toWorld(im_other_pos)
gs_other_pos = gs_wcs.toWorld(im_other_pos)
dist_2_arcsec.append(ref_other_pos.distanceTo(gs_other_pos) / galsim.arcsec)
if i_test == 0:
# For just one of our tests cases, we'll do some additional tests. These will target
# the findSCA() functionality. First, we'll choose an SCA and check that its center is
# found to be in that SCA.
found_sca = galsim.roman.findSCA(gs_wcs_dict, gs_cent_pos)
np.testing.assert_equal(found_sca, sca_test[i_test],
err_msg='Did not find SCA center position to be on that SCA!')
# Then, we go to a place that should be off the side by a tiny bit, and check that it is
# NOT on an SCA if we exclude borders, but IS on the SCA if we include borders.
im_off_edge_pos = galsim.PositionD(-2., galsim.roman.n_pix/2.)
world_off_edge_pos = gs_wcs.toWorld(im_off_edge_pos)
found_sca = galsim.roman.findSCA(gs_wcs_dict, world_off_edge_pos)
assert found_sca is None
found_sca = galsim.roman.findSCA(gs_wcs_dict, world_off_edge_pos, include_border=True)
np.testing.assert_equal(found_sca, sca_test[i_test],
err_msg='Did not find slightly off-edge position on the SCA'
' when including borders!')
np.testing.assert_array_less(
np.array(dist_arcsec),
np.ones(len(ra_test))*galsim.roman.pixel_scale/100,
err_msg='For at least one WCS, center offset from reference was > 0.01(pixel scale).')
np.testing.assert_array_less(
np.array(dist_2_arcsec),
np.ones(len(ra_test))*galsim.roman.pixel_scale/100,
err_msg='For at least one WCS, other offset from reference was > 0.01(pixel scale).')
np.testing.assert_array_less(
np.array(pix_area_ratio),
np.ones(len(ra_test))*0.0001,
err_msg='For at least one WCS, pixel areas differ from reference by >0.01%.')
@timer
def test_roman_wcs(run_slow):
"""Test the Roman WCS routines against the one produced by code from Chris Hirata.
"""
# The standard against which we will compare is the output of some software provided by Chris
# Hirata. The files used here were generated by Rachel on her Macbook using the script in
# roman_files/, with sky positions randomly selected and then stored as part of the
# comparison. We read in a list of FPA center positions and other RA/dec positions, the
# position angle for the observation, and the SCA those other positions land on (if any). Then
# we compare that with the GalSim routines for finding SCAs.
import datetime
date = datetime.datetime(2025, 1, 12)
test_data_file = os.path.join('roman_files','chris_comparison.txt')
test_data = np.loadtxt(test_data_file).transpose()
ra_cen = test_data[0,:]
dec_cen = test_data[1,:]
ra = test_data[2,:]
dec = test_data[3,:]
pa = test_data[4,:]
chris_sca = test_data[5,:]
if not run_slow:
i_start = 4
n_test = 3 # None of these 3 fail, so the nfail test is ok.
else:
i_start = 0
n_test = len(ra_cen)
n_fail = 0
for i_test in range(i_start, i_start+n_test):
print('i_test = ',i_test)
# Make the WCS for this test.
world_pos = galsim.CelestialCoord(ra_cen[i_test]*galsim.degrees,
dec_cen[i_test]*galsim.degrees)
gs_wcs_dict = galsim.roman.getWCS(PA=pa[i_test]*galsim.radians,
world_pos=world_pos,
PA_is_FPA=True,
date=date)
np.testing.assert_equal(
len(gs_wcs_dict), galsim.roman.n_sca,
err_msg='WCS dict has wrong length: %d vs. %d'%(len(gs_wcs_dict),
galsim.roman.n_sca))
found_sca = galsim.roman.findSCA(
gs_wcs_dict,
galsim.CelestialCoord(ra[i_test]*galsim.degrees,
dec[i_test]*galsim.degrees))
if found_sca is None: found_sca=0
if found_sca != chris_sca[i_test]:
n_fail += 1
print('Failed to find SCA: ',found_sca, chris_sca[i_test])
# Just cycle through the SCAs for the next bits.
sca_test = i_test % 18 + 1
gs_wcs = gs_wcs_dict[sca_test]
# Check center position:
im_cent_pos = galsim.PositionD(galsim.roman.n_pix/2., galsim.roman.n_pix/2)
gs_cent_pos = gs_wcs.toWorld(im_cent_pos)
# Check pixel area
pix_area = gs_wcs.pixelArea(image_pos=im_cent_pos)
print('pix_area = ',pix_area)
np.testing.assert_allclose(pix_area, 0.012, atol=0.001)
if i_test == 0:
# For just one of our tests cases, we'll do some additional tests. These will target
# the findSCA() functionality. First, check that the center is found in that SCA.
found_sca = galsim.roman.findSCA(gs_wcs_dict, gs_cent_pos)
np.testing.assert_equal(found_sca, sca_test,
err_msg='Did not find SCA center position to be on that SCA!')
# Then, we go to a place that should be off the side by a tiny bit, and check that it is
# NOT on an SCA if we exclude borders, but IS on the SCA if we include borders.
im_off_edge_pos = galsim.PositionD(-2., galsim.roman.n_pix/2.)
world_off_edge_pos = gs_wcs.toWorld(im_off_edge_pos)
found_sca = galsim.roman.findSCA(gs_wcs_dict, world_off_edge_pos)
assert found_sca is None
found_sca = galsim.roman.findSCA(gs_wcs_dict, world_off_edge_pos, include_border=True)
np.testing.assert_equal(found_sca, sca_test,
err_msg='Did not find slightly off-edge position on the SCA'
' when including borders!')
if i_test < 5:
# Also make sure that for a given SCA, we find positions on it that should be on it,
# without/with inclusion of borders. Just do this test a limited number of times.
for sca_ind in range(1,19):
sca_edge_test = sca_ind
tmp_wcs = gs_wcs_dict[sca_edge_test]
im_test_pos = galsim.PositionD(10.0, galsim.roman.n_pix/2)
tmp_pos = tmp_wcs.toWorld(im_test_pos)
found_sca = galsim.roman.findSCA(gs_wcs_dict, tmp_pos, include_border=False)
assert found_sca==sca_edge_test
found_sca = galsim.roman.findSCA(gs_wcs_dict, tmp_pos, include_border=True)
assert found_sca==sca_edge_test
im_test_pos = galsim.PositionD(galsim.roman.n_pix/2, galsim.roman.n_pix+3)
tmp_pos = tmp_wcs.toWorld(im_test_pos)
found_sca = galsim.roman.findSCA(gs_wcs_dict, tmp_pos, include_border=False)
assert found_sca==None
found_sca = galsim.roman.findSCA(gs_wcs_dict, tmp_pos, include_border=True)
assert found_sca==sca_edge_test
# And check that we can go from the center of that SCA and reverse-engineer the
# position of the center of the FPA.
im_test_pos = galsim.PositionD(galsim.roman.n_pix/2, galsim.roman.n_pix/2)
test_sca_pos = tmp_wcs.toWorld(im_test_pos)
test_fpa_pos = galsim.roman.convertCenter(test_sca_pos, int(sca_edge_test),
PA=pa[i_test]*galsim.radians,
date=date, PA_is_FPA=True)
# Also test that convertCenter checks inputs appropriately.
with assert_raises(TypeError):
galsim.roman.convertCenter(test_sca_pos, 3.5)
with assert_raises(TypeError):
galsim.roman.convertCenter(
test_sca_pos, int(sca_edge_test), PA=pa[i_test]*galsim.radians,
date=date, PA_is_FPA=True, tol=1.0)
delta_arcsec = test_fpa_pos.distanceTo(world_pos) / galsim.arcsec
assert delta_arcsec<0.5, "could not round-trip from FPA to SCA to FPA center"
# There were few-arcsec offsets in our WCS, so allow some fraction of failures.
print('n_fail = ',n_fail)
assert n_fail < 0.2*n_test, 'Failed in SCA-matching against reference: %d %d'%(n_fail,n_test)
# Check whether we're allowed to look at certain positions on certain dates.
# Let's choose RA=90 degrees, dec=10 degrees.
# We know that it's best to look about 90 degrees from the Sun. So on the vernal and autumnal
# equinox, this should be a great place to look, but not midway in between.
pos = galsim.CelestialCoord(90.*galsim.degrees, 10.*galsim.degrees)
import datetime
assert galsim.roman.allowedPos(pos, datetime.date(2025,3,20))
assert galsim.roman.allowedPos(pos, datetime.date(2025,9,20))
assert not galsim.roman.allowedPos(pos, datetime.date(2025,6,20))
assert galsim.roman.bestPA(pos, datetime.date(2025,6,20)) is None
# Finally make sure it does something reasonable for the observatory position angle.
# When the sun is at (0,0), and we look at (90,0), then +Z points towards the Sun and +Y points
# North, giving a PA of 0 degrees.
pos = galsim.CelestialCoord(90.*galsim.degrees, 0.*galsim.degrees)
test_date = datetime.datetime(2025,3,20,9,2)
pa = galsim.roman.bestPA(pos, test_date)
np.testing.assert_almost_equal(pa.rad, 0., decimal=3)
# Now make it look at the same RA as the sun but quite different declination. It wants +Z
# pointing North toward Sun, so we'll get a -90 degree angle for the PA.
pos = galsim.CelestialCoord(0.*galsim.degrees, -70.*galsim.degrees)
pa = galsim.roman.bestPA(pos, test_date)
np.testing.assert_almost_equal(pa.rad, -np.pi/2, decimal=3)
sun_pos= galsim.CelestialCoord(0*galsim.degrees, 0*galsim.degrees)
sun_pa = galsim.roman.bestPA(sun_pos, test_date)
assert sun_pa is None
with assert_raises(TypeError):
galsim.roman.getWCS(world_pos=galsim.PositionD(300,400))
with assert_raises(galsim.GalSimError):
galsim.roman.getWCS(world_pos=sun_pos, date=test_date)
with assert_raises(TypeError):
galsim.roman.getWCS(world_pos=pos, PA=33.)
with assert_raises(galsim.GalSimRangeError):
galsim.roman.getWCS(world_pos=pos, SCAs=[-1,1])
with assert_raises(galsim.GalSimRangeError):
galsim.roman.getWCS(world_pos=pos, SCAs=[1,23])
# Check the rather bizarre convention that LONPOLE is always 180 EXCEPT (!!) when
# observing directly at the south pole. Apparently, this convention comes from the Roman
# project office's use of the LONPOLE keyword. So we keep it, even though it's stupid.
# cf. https://github.com/GalSim-developers/GalSim/pull/651#discussion-diff-26277673
assert gs_wcs_dict[1].header['LONPOLE'] == 180.
south_pole = galsim.CelestialCoord(0*galsim.degrees, -90*galsim.degrees)
wcs = galsim.roman.getWCS(world_pos=south_pole, SCAs=1)
assert wcs[1].header['LONPOLE'] == 0
with assert_raises(TypeError):
galsim.roman.findSCA(wcs_dict=None, world_pos=pos)
with assert_raises(TypeError):
galsim.roman.findSCA(wcs_dict=wcs, world_pos=galsim.PositionD(300,400))
with mock.patch('galsim.roman.roman_wcs.sip_filename', 'sip_7_6_8.txt'):
with assert_raises(OSError):
galsim.roman.getWCS(world_pos=world_pos, date=date)
@timer
def test_roman_backgrounds():
"""Test the Roman background estimation routines for basic sanity.
"""
import datetime
# The routine should not allow us to look directly at the sun since the background there is high
# (to understate the problem). If no date is supplied, then the routine assumes RA=dec=0 means
# we are looking at the sun.
bp_dict = galsim.roman.getBandpasses()
bp = bp_dict['J129'] # one of the standard filters, doesn't really matter which
with assert_raises(ValueError):
galsim.roman.getSkyLevel(
bp, world_pos=galsim.CelestialCoord(0.*galsim.degrees, 0.*galsim.degrees))
# near autumn equinox
with assert_raises(ValueError):
galsim.roman.getSkyLevel(
bp, world_pos=galsim.CelestialCoord(180.*galsim.degrees, 5.*galsim.degrees),
date=datetime.date(2025,9,15))
# world_pos must be a CelestialCoord.
with assert_raises(TypeError):
galsim.roman.getSkyLevel(bp, world_pos=galsim.PositionD(300,400))
# No world_pos works. Produces sky level for some plausible generic location.
sky_level = galsim.roman.getSkyLevel(bp)
print('sky_level = ',sky_level)
# regression test relative to v2.5
np.testing.assert_allclose(sky_level, 6928.267815, rtol=0.01)
# But not with a non-Roman bandpass
with assert_raises(galsim.GalSimError):
galsim.roman.getSkyLevel(galsim.Bandpass('wave', 'nm', 400, 550))
# The routine should have some obvious symmetry, for example, ecliptic latitude above vs. below
# plane and ecliptic longitude positive vs. negative (or vs. 360 degrees - original value).
# Because of how equatorial and ecliptic coordinates are related on the adopted date, we can do
# this test as follows:
test_ra = 50.*galsim.degrees
test_dec = 10.*galsim.degrees
test_pos_p = galsim.CelestialCoord(test_ra, test_dec)
test_pos_m = galsim.CelestialCoord(-1.*(test_ra/galsim.degrees)*galsim.degrees,
-1.*(test_dec/galsim.degrees)*galsim.degrees)
level_p = galsim.roman.getSkyLevel(bp, world_pos=test_pos_p)
level_m = galsim.roman.getSkyLevel(bp, world_pos=test_pos_m)
np.testing.assert_almost_equal(level_m, level_p, decimal=8)
# The routine should handle an input exposure time sensibly. Our original level_p was in
# e-/arcsec^2 using the Roman exposure time. We will define another exposure time, pass it in,
# and confirm that the output is consistent with this.
level_p_2 = galsim.roman.getSkyLevel(bp, world_pos=test_pos_p,
exptime=1.7*galsim.roman.exptime)
np.testing.assert_almost_equal(1.7*level_p, level_p_2, decimal=8)
@timer
def test_roman_bandpass():
"""Test the Roman bandpasses for basic sanity.
"""
from galsim._pyfits import pyfits
# Obtain the bandpasses with AB_zeropoint set
bp = galsim.roman.getBandpasses(AB_zeropoint=True)
# Check if the zeropoints have been set correctly
AB_spec = lambda x: (3631e-23)
AB_sed = galsim.SED(spec=AB_spec, wave_type='nm', flux_type='fnu')
for filter_name, filter_ in bp.items():
mag = AB_sed.calculateMagnitude(bandpass=filter_)
np.testing.assert_almost_equal(mag,0.0,decimal=6,
err_msg="Zeropoint not set accurately enough for bandpass filter "+filter_name)
# Do a slightly less trivial check of bandpass-related calculations:
# Jeff Kruk (at Goddard) took an SED template from the Castelli-Kurucz library, normalized it to
# a particular magnitude in SDSS g band, and checked the count rates he expects for the Roman
# bands. I (RM) independently did the same calculation (downloading the templates and bandpass
myself and using GalSim for all the important bits of the calculation) and my results agree at
# the 5% level. Given that I didn't quite have the same SED, we were very happy with this level
# of agreement. The unit test below reproduces this test, and requires agreement at the 10%
# level.
# Jeff used the C-K template with solar metallicity, T=9550K, surface gravity logg=3.95. I
# downloaded a grid of templates and just used the nearest one, which has solar metallicity,
# T=9500K, surface gravity logg=4.0.
with pyfits.open(os.path.join('roman_files','ckp00_9500.fits')) as fits:
sed_data = fits[1].data
lam = sed_data.WAVELENGTH.astype(np.float64)
t = sed_data.g40.astype(np.float64)
sed_tab = galsim.LookupTable(x=lam, f=t, interpolant='linear')
sed = galsim.SED(sed_tab, wave_type='A', flux_type='flambda')
# Now take the SDSS g bandpass:
# gfile = '/Users/rmandelb/Downloads/g.dat'
bp_dat = np.loadtxt(os.path.join('roman_files','g.dat')).transpose()
bp_tab = galsim.LookupTable(x=bp_dat[0,:], f=bp_dat[1,:], interpolant='linear')
bp_ref = galsim.Bandpass(bp_tab, wave_type='A').withZeropoint('AB')
# Now get a new SED that has magnitude -0.093 in this filter, since that's the normalization
# that Jeff imposed for his tests.
sed = sed.withMagnitude(-0.093, bp_ref)
# Reference count rates, from Jeff:
reference = {}
reference['Z087'] = 1.98e10
reference['Y106'] = 1.97e10
reference['J129'] = 1.52e10
reference['H158'] = 1.11e10
reference['F184'] = 0.58e10
reference['W146'] = 4.34e10
# Only 15% accuracy required because we did not use quite the same stellar template. Fortunately,
# bugs can easily lead to orders of magnitude errors, so this unit test is still pretty
# non-trivial.
for filter_name, filter_ in bp.items():
if filter_name not in reference:
continue
flux = sed.calculateFlux(filter_) # photons / cm^2 / s
count_rate = flux * galsim.roman.collecting_area # photons / s
print(count_rate, reference[filter_name])
np.testing.assert_allclose(
count_rate, reference[filter_name], rtol=0.15,
err_msg="Count rate for stellar model not as expected for bandpass "
"{0}".format(filter_name))
# Finally, compare against some external zeropoint calculations from the Roman microlensing
# group: https://roman.ipac.caltech.edu/sims/MABuLS_sim.html
# They calculated instrumental zero points, defined such that the flux is 1 photon/sec (taking
# into account the Roman collecting area). We convert ours to their definition by adding
# `delta_zp` calculated below:
area_eff = galsim.roman.collecting_area
delta_zp = 2.5 * np.log10(area_eff)
# Define the zeropoints that they calculated:
# Note: with the new bandpass changes since their calculation, the agreement is only about
# 0.3 magnitudes.
ref_zp = {
'W146': 27.554,
'Z087': 26.163
}
for key in ref_zp.keys():
galsim_zp = bp[key].zeropoint + delta_zp
# They use slightly different versions of the bandpasses, so we only require agreement to
# 0.1 mag.
print('zp for %s: '%key, galsim_zp, ref_zp[key])
np.testing.assert_allclose(galsim_zp, ref_zp[key], atol=0.3,
err_msg="Wrong zeropoint for bandpass "+key)
# Note: the difference is not due to our default thinning. This isn't any better.
nothin_bp = galsim.roman.getBandpasses(AB_zeropoint=True, default_thin_trunc=False)
for key in ref_zp.keys():
galsim_zp = nothin_bp[key].zeropoint + delta_zp
print('nothin zp for %s: '%key, galsim_zp, ref_zp[key])
np.testing.assert_allclose(galsim_zp, ref_zp[key], atol=0.3,
err_msg="Wrong zeropoint for bandpass "+key)
# Even with fairly extreme thinning, the error is still 0.3 mag.
verythin_bp = galsim.roman.getBandpasses(AB_zeropoint=True, default_thin_trunc=False,
relative_throughput=0.05, rel_err=0.1)
for key in ref_zp.keys():
galsim_zp = verythin_bp[key].zeropoint + delta_zp
print('verythin zp for %s: '%key, galsim_zp, ref_zp[key])
np.testing.assert_allclose(galsim_zp, ref_zp[key], atol=0.3,
err_msg="Wrong zeropoint for bandpass "+key)
with assert_raises(TypeError):
galsim.roman.getBandpasses(default_thin_trunc=False, rel_tp=0.05)
with assert_warns(galsim.GalSimWarning):
galsim.roman.getBandpasses(relative_throughput=0.05, rel_err=0.1)
# Can also not bother to set the zeropoint.
nozp_bp = galsim.roman.getBandpasses(AB_zeropoint=False)
for key in nozp_bp:
assert nozp_bp[key].zeropoint is None
@timer
def test_roman_nonimaging_bandpass():
"""Test the Roman non-imaging bandpasses for basic sanity.
"""
bp_imaging = galsim.roman.getBandpasses(AB_zeropoint=True)
bp_all = galsim.roman.getBandpasses(AB_zeropoint=True, include_all_bands=True)
# Check that the imaging bandpasses are in the all bandpasses
for key in bp_imaging:
assert key in bp_all
# Check that the non-imaging bandpasses are in the all bandpasses
assert 'Grism_0thOrder' in bp_all
assert 'Grism_1stOrder' in bp_all
assert 'SNPrism' in bp_all
# Check that the non-imaging bandpasses are not in the imaging bandpasses
assert 'Grism_0thOrder' not in bp_imaging
assert 'Grism_1stOrder' not in bp_imaging
assert 'SNPrism' not in bp_imaging
@timer
def test_roman_detectors():
"""Test the Roman detector routines for consistency with standard detector routines.
"""
# This seems almost silly, but for now the Roman detector routines are defined in terms of the
# standard GalSim detector routines, and we should check that even if the routines are modified,
# they still can agree given the same inputs.
# So start by making a fairly simple image.
obj = galsim.Gaussian(sigma=3.*galsim.roman.pixel_scale, flux=1.e5)
im = obj.drawImage(scale=galsim.roman.pixel_scale)
im.replaceNegative(0.)
# Make copies that we transform using both sets of routines, and check for consistency.
# First we do nonlinearity:
im_1 = im.copy()
im_2 = im.copy()
im_1.applyNonlinearity(NLfunc=galsim.roman.NLfunc)
galsim.roman.applyNonlinearity(im_2)
assert im_2.scale == im_1.scale
assert im_2.wcs == im_1.wcs
assert im_2.dtype == im_1.dtype
assert im_2.bounds == im_1.bounds
np.testing.assert_array_equal(
im_2.array, im_1.array,
err_msg='Nonlinearity results depend on function used.')
# Then we do reciprocity failure:
im_1 = im.copy()
im_2 = im.copy()
im_1.addReciprocityFailure(exp_time=galsim.roman.exptime,
alpha=galsim.roman.reciprocity_alpha,
base_flux=1.0)
galsim.roman.addReciprocityFailure(im_2)
assert im_2.scale == im_1.scale
assert im_2.wcs == im_1.wcs
assert im_2.dtype == im_1.dtype
assert im_2.bounds == im_1.bounds
np.testing.assert_array_equal(
im_2.array, im_1.array,
err_msg='Reciprocity failure results depend on function used.')
# Then we do Persistence
im_1 = im.copy()
im_2 = im.copy()
rng = galsim.BaseDeviate(1234567)
im0 = galsim.Image(im.bounds) # create a new image for two noise images as 'ghost images'
im0_1 = im0.copy()
im0_1.addNoise(galsim.GaussianNoise(rng,sigma=10.))
im0_2 = im0_1.copy()
im0_2.addNoise(galsim.PoissonNoise(rng))
im_list = [im0_1,im0_2]*4
im_1.applyPersistence(im_list,galsim.roman.persistence_coefficients)
galsim.roman.applyPersistence(im_2, im_list, method='linear') #check the linear method
assert im_2.scale == im_1.scale
assert im_2.wcs == im_1.wcs
assert im_2.dtype == im_1.dtype
assert im_2.bounds == im_1.bounds
np.testing.assert_array_equal(
im_2.array, im_1.array,
err_msg='Persistence results depend on function used.')
im_unit = galsim.Image(np.ones((2,2)), copy=True)
im_f = im_unit*0.0
im_f1 = im_unit*0.0
illumination_list = [1.E3, 1.E4, 4.E4, 4.99E4, 5.01E4, 1.E5, 1.0E6]
im_f_list = [x*im_unit for x in illumination_list]
galsim.roman.applyPersistence(im_f, im_f_list, method='fermi') #check fermi method
#Check the functionality of the fermi method.
A, x0, dx, a, r, half_well = galsim.roman.persistence_fermi_parameters
#formula of the fermi model. See the documentation of
#galsim.roman.roman_detectors.applyPersistence for more info.
f_fermi = lambda x, t: A* (x/x0)**a * (t/1000.)**(-r)/(np.exp( -(x-x0)/dx)+1.)
ps = 0.0
for i,x in enumerate(illumination_list):
t = (0.5+i)*galsim.roman.exptime #mid-time of each exposure
if x>=0.0 and x<half_well: #linear tail below half well of saturation
ps += f_fermi(half_well, t)*x/half_well
elif x>= half_well:
ps += f_fermi(x,t)
ps *= galsim.roman.exptime
assert np.allclose( im_f.array, np.ones((2,2))*ps, rtol=1.E-06 ), 'Error in Fermi persistence model'
galsim.roman.applyPersistence(im_f1, im_f_list)
np.testing.assert_array_equal(im_f, im_f1,
err_msg='The default method of roman.applyPersistence is not fermi.')
assert_raises(TypeError, galsim.roman.applyPersistence, im_2, im0)
assert_raises(galsim.GalSimValueError, galsim.roman.applyPersistence, im_2, im_list, method='wrong method')
# Then we do IPC:
im_1 = im.copy()
im_2 = im.copy()
im_1.applyIPC(IPC_kernel=galsim.roman.ipc_kernel, kernel_normalization=True)
galsim.roman.applyIPC(im_2)
assert im_2.scale == im_1.scale
assert im_2.wcs == im_1.wcs
assert im_2.dtype == im_1.dtype
assert im_2.bounds == im_1.bounds
np.testing.assert_array_equal(
im_2.array, im_1.array,
err_msg='IPC results depend on function used.')
# Finally, just check that this runs.
# (Accuracy of component functionality is all tested elsewhere.)
ntest = 10 # number of exposures for this test
past_images = []
for i in range(ntest):
im = obj.drawImage(scale=galsim.roman.pixel_scale)
past_images = galsim.roman.allDetectorEffects(im, past_images, rng=rng)
assert len(past_images) == ntest
@timer
def test_roman_psfs(run_slow):
"""Test the Roman PSF routines for reasonable behavior.
"""
# The Roman PSF routines can take a long time under some circumstances. For example, storing
# images for interpolation can be expensive, particularly when using the full pupil plane
# functionality. To speed up our calculations, we will limit the unit tests to certain
# situations:
# - fully chromatic PSFs without interpolation. Then we just want to play with the objects in
# a fast way (e.g., evaluating at one wavelength, not integrating over a bandpass).
# - fully chromatic PSFs with interpolation, but only interpolating between two wavelengths.
# - achromatic PSFs.
#
# We also only test pupil_bin=4,8 in pytest runs. Tests of pupil_bin=1,2 are done in
# __main__ runs.
# Providing a wavelength returns achromatic PSFs
psf_5 = galsim.roman.getPSF(SCA=5, bandpass='F184', wavelength=1950., pupil_bin=8)
assert isinstance(psf_5, galsim.GSObject)
# Make sure we do the case where we add aberrations
psf_5_ab = galsim.roman.getPSF(SCA=5, bandpass='F184', wavelength=1950., pupil_bin=8,
extra_aberrations=np.zeros(23)+0.001)
# Check that we get the same answer if we specify the center of the focal plane.
psf_5_tmp = galsim.roman.getPSF(SCA=5, bandpass='F184', wavelength=1950., pupil_bin=8,
SCA_pos=galsim.PositionD(galsim.roman.n_pix/2,
galsim.roman.n_pix/2))
assert psf_5==psf_5_tmp
# Check that if we specify a particular wavelength, the PSF that is drawn is the same as if we
# had gotten chromatic PSFs and then used evaluateAtWavelength. Note that this nominally seems
# like a test of the chromatic functionality, but there are ways that getPSF() could mess up
# inputs such that there is a disagreement. That's why this unit test belongs here.
use_sca = 5
all_bp = galsim.roman.getBandpasses()
zbp = all_bp['Z087']
use_lam = zbp.effective_wavelength
psf_chrom = galsim.roman.getPSF(use_sca, None, pupil_bin=8)
psf_achrom = galsim.roman.getPSF(use_sca, None, wavelength=use_lam, pupil_bin=8)
psf_achrom2 = galsim.roman.getPSF(use_sca, 'Z087', wavelength=use_lam, pupil_bin=8)
# First, we can draw the achromatic PSF.
im_achrom = psf_achrom.drawImage(scale=galsim.roman.pixel_scale)
im_achrom2 = im_achrom.copy()
im_achrom2 = psf_achrom2.drawImage(image=im_achrom2, scale=galsim.roman.pixel_scale)
im_chrom = im_achrom.copy()
obj_chrom = psf_chrom.evaluateAtWavelength(use_lam)
im_chrom = obj_chrom.drawImage(image=im_chrom, scale=galsim.roman.pixel_scale)
# The normalization will probably not match, so rescale to the achromatic flux.
im_chrom *= im_achrom.array.sum()/im_chrom.array.sum()
# But otherwise these images should agree *extremely* well.
np.testing.assert_array_almost_equal(
im_chrom.array, im_achrom.array, decimal=8,
err_msg='PSF at a given wavelength and chromatic one evaluated at that wavelength disagree.')
np.testing.assert_array_almost_equal(
im_achrom.array, im_achrom2.array, decimal=8,
err_msg='Two PSFs at a given wavelength specified in different ways disagree.')
# Make a very limited check that interpolation works: just 2 wavelengths, 1 SCA.
# use the blue and red limits for Z087:
blue_limit = all_bp['Z087'].blue_limit
red_limit = all_bp['Z087'].red_limit
n_waves = 3
psf_int = galsim.roman.getPSF(SCA=use_sca, bandpass='Z087', pupil_bin=8, n_waves=n_waves)
# Check that evaluation at a single wavelength is consistent with previous results.
im_int = im_achrom.copy()
obj_int = psf_int.evaluateAtWavelength(use_lam)
im_int = obj_int.drawImage(image=im_int, scale=galsim.roman.pixel_scale)
# These images should agree well, but not perfectly. One of them comes from drawing an image
# from an object directly, whereas the other comes from drawing an image of that object, making
# it into an InterpolatedImage, then re-drawing it. Different accuracies are used for those
# intermediate steps than would be used when drawing directly, so that can give rise to some
# disagreement. Check for agreement at the level of 2e-3 (requiring 1e-3 gives rise to failure
# in 2 pixels!).
diff_im = 0.5*(im_int.array-im_achrom.array)
np.testing.assert_array_almost_equal(
diff_im, np.zeros_like(diff_im), decimal=3,
err_msg='PSF at a given wavelength and interpolated chromatic one evaluated at that '
'wavelength disagree.')
# Make sure the interpolated version isn't gratuitously copying the aperture. It should be
# able to use the same aperture object for each wavelength.
for obj in psf_int.objs[1:]:
assert obj._aper is psf_int.objs[0]._aper
# Check some invalid inputs.
with assert_raises(TypeError):
galsim.roman.getPSF(SCA=use_sca, bandpass='Z087', n_waves=2, wavelength='Z087')
with assert_raises(TypeError):
galsim.roman.getPSF(SCA=use_sca, bandpass=None, n_waves=2, wavelength_limits=red_limit)
with assert_raises(TypeError):
galsim.roman.getPSF(SCA=use_sca, bandpass='Z087', wavelength='Z087')
with assert_raises(ValueError):
galsim.roman.getPSF(SCA=use_sca, bandpass='Z099', n_waves=2, wavelength='Z099')
with assert_raises(ValueError):
galsim.roman.getPSF(SCA=use_sca, bandpass='Z099', n_waves=2, wavelength='Z099')
with assert_raises(TypeError):
galsim.roman.getPSF(SCA=use_sca, bandpass='Z087', n_waves=2, wavelength='Z087')
with assert_raises(TypeError):
galsim.roman.getPSF(SCA=use_sca, bandpass='F184', n_waves=2, wavelength='F184')
with assert_raises(galsim.GalSimValueError):
galsim.roman.getPSF(SCA=use_sca, bandpass=3)
# Make sure we can instantiate a PSF with bandpass='short'/'long' and get an equivalent object
# when we're not using interpolation.
use_sca = 3
bp_type = 'long'
bp = galsim.roman.longwave_bands[0]
psf1 = galsim.roman.getPSF(use_sca, bp, pupil_bin=8)
psf2 = galsim.roman.getPSF(use_sca, 'long', pupil_bin=8)
assert psf1==psf2
# Test some variation in the accuracy settings.
kwargs_list = [
{ 'pupil_bin':4 },
{ 'pupil_bin':8 },
]
if run_slow:
# A few more that are too slow to run in regular pytest
kwargs_list.extend([
{ 'pupil_bin':1 },
{ 'pupil_bin':2 },
{ 'pupil_bin':1, 'gsparams':galsim.GSParams(folding_threshold=2.e-3) },
{ 'pupil_bin':2, 'gsparams':galsim.GSParams(folding_threshold=2.e-3) },
{ 'pupil_bin':4, 'gsparams':galsim.GSParams(folding_threshold=2.e-3) },
])
for kwargs in kwargs_list:
psf = galsim.roman.getPSF(use_sca, 'Z087', **kwargs)
psf_achrom = galsim.roman.getPSF(use_sca, 'Z087', wavelength=zbp, **kwargs)
psf_chrom = psf.evaluateAtWavelength(use_lam)
im_achrom = psf_achrom.drawImage(scale=galsim.roman.pixel_scale)
im_chrom = psf_chrom.drawImage(image=im_achrom.copy())
#im_achrom.write('im_achrom.fits')
#im_chrom.write('im_chrom.fits')
print("chrom, achrom fluxes = ", im_chrom.array.sum(), im_achrom.array.sum())
im_chrom *= im_achrom.array.sum()/im_chrom.array.sum()
print("max diff = ",np.max(np.abs(im_chrom.array - im_achrom.array)))
np.testing.assert_array_almost_equal(
im_chrom.array, im_achrom.array, decimal=8,
err_msg='getPSF with %s has discrepancy for chrom/achrom'%kwargs)
# Check the stated method for recovering memory used in aperture caches.
# Despite using a leading-underscore, sub-module-level function, the fact that we
# document it makes it officially part of the API. It requires a proper deprecation
# if we change the syntax of this.
galsim.roman.roman_psfs._make_aperture.clear()
# Check for exceptions if we:
# (1) Include optional aberrations in an unacceptable form.
# (2) Invalid SCA numbers.
# (3) Invalid kwarg combination.
assert_raises(ValueError, galsim.roman.getPSF, 3, None, extra_aberrations=[0.03, -0.06]*20)
assert_raises(ValueError, galsim.roman.getPSF, 30, None)
assert_raises(ValueError, galsim.roman.getPSF, 0, None)
assert_raises(ValueError, galsim.roman.getPSF, 3, 'short', n_waves=10)
@timer
def test_roman_basic_numbers():
"""Trivial test of basic numbers stored in Roman module.
"""
# Would be better to have a non-trivial test, but this will do for now.
ref_gain = 1.0
ref_pixel_scale = 0.11 # arcsec / pixel
ref_diameter = 2.36 # meters
ref_obscuration = 0.32
ref_exptime = 139.8 # s
ref_dark_current = 0.015 # e-/pix/s
ref_nonlinearity_beta = -6.e-7
ref_reciprocity_alpha = 0.0065
ref_read_noise = 8.5 # e-
ref_n_dithers = 6
ref_thermal_backgrounds = {'R062': 0.00, # e-/pix/s
'Z087': 0.00,
'Y106': 0.00,
'J129': 0.00,
'H158': 0.04,
'F184': 0.17,
'K213': 4.52,
'W146': 0.98,
'SNPrism': 0.00,
'Grism_0thOrder': 0.00,
'Grism_1stOrder': 0.00,
}
ref_pupil_plane_file = os.path.join(
galsim.meta_data.share_dir, 'roman', 'SCA2_rim_mask.fits.gz')
ref_stray_light_fraction = 0.1
ref_ipc_kernel = np.array([ [0.001269938, 0.015399776, 0.001199862],
[0.013800177, 1.0, 0.015600367],
[0.001270391, 0.016129619, 0.001200137] ])
ref_ipc_kernel /= np.sum(ref_ipc_kernel)
ref_ipc_kernel = galsim.Image(ref_ipc_kernel)
ref_persistence_coefficients = np.array(
[0.045707683,0.014959818,0.009115737,0.00656769,0.005135571,
0.004217028,0.003577534,0.003106601])/100.
ref_persistence_fermi_parameters = np.array(
[0.017, 60000., 50000., 0.045, 1., 50000.])
ref_n_sca = 18
ref_n_pix_tot = 4096
ref_n_pix = 4088
ref_jitter_rms = 0.014
ref_charge_diffusion = 0.1
assert galsim.roman.gain==ref_gain
assert galsim.roman.pixel_scale==ref_pixel_scale
assert galsim.roman.diameter==ref_diameter
assert galsim.roman.obscuration==ref_obscuration
assert galsim.roman.exptime==ref_exptime
assert galsim.roman.dark_current==ref_dark_current
assert galsim.roman.nonlinearity_beta==ref_nonlinearity_beta
assert galsim.roman.reciprocity_alpha==ref_reciprocity_alpha
assert galsim.roman.read_noise==ref_read_noise
assert galsim.roman.n_dithers==ref_n_dithers
assert galsim.roman.thermal_backgrounds.keys()==ref_thermal_backgrounds.keys()
for key in ref_thermal_backgrounds.keys():
assert galsim.roman.thermal_backgrounds[key]==ref_thermal_backgrounds[key]
assert galsim.roman.pupil_plane_file==ref_pupil_plane_file
assert galsim.roman.stray_light_fraction==ref_stray_light_fraction
np.testing.assert_array_equal(ref_ipc_kernel, galsim.roman.ipc_kernel)
np.testing.assert_array_equal(ref_persistence_coefficients,
galsim.roman.persistence_coefficients)
np.testing.assert_array_equal(ref_persistence_fermi_parameters,
galsim.roman.persistence_fermi_parameters)
assert galsim.roman.n_sca==ref_n_sca
assert galsim.roman.n_pix_tot==ref_n_pix_tot
assert galsim.roman.n_pix==ref_n_pix
assert galsim.roman.jitter_rms==ref_jitter_rms
assert galsim.roman.charge_diffusion==ref_charge_diffusion
@timer
def test_roman_psf_wcs():
"""Test drawing the PSF with a provided WCS."""
# Make a PSF without giving a wcs
image_pos = galsim.PositionD(153, 921)
psf = galsim.roman.getPSF(SCA=5, bandpass='F184', wavelength=1950., pupil_bin=8,
SCA_pos=image_pos)
# Draw it on an image with pixel_scale wcs
im_scale = psf.drawImage(scale=galsim.roman.pixel_scale, center=image_pos)
# Get a plausible commemorative observation for Roman's 100th birthday.
world_pos = galsim.CelestialCoord(
ra = galsim.Angle.from_hms('16:01:41.01257'), # AG Draconis
dec = galsim.Angle.from_dms('66:48:10.1312')
)
PA = 112*galsim.degrees # Random.
date = datetime.datetime(2025, 5, 16) # NGR's 100th birthday.
wcs_dict = galsim.roman.getWCS(PA=PA, world_pos=world_pos, date=date)
wcs = wcs_dict[5]
# Get the PSF in real world coordinates with this wcs
psf = galsim.roman.getPSF(SCA=5, bandpass='F184', wavelength=1950., pupil_bin=8,
SCA_pos=image_pos, wcs=wcs)
# Draw on an image with this wcs.
im_wcs = psf.drawImage(bounds=im_scale.bounds, wcs=wcs, center=image_pos)
np.testing.assert_allclose(im_wcs.array, im_scale.array)
@timer
def test_config_psf():
"""Test RomanPSF config type"""
# Start with default everything
config = {
'modules' : ['galsim.roman'],
'psf' : { 'type' : 'RomanPSF', 'SCA': 4, 'bandpass': 'H158' }
}
galsim.config.ImportModules(config)
psf1 = galsim.config.BuildGSObject(config, 'psf')[0]
psf2 = galsim.roman.getPSF(SCA=4, bandpass='H158')
print('psf1 = ',str(psf1))
print('psf2 = ',str(psf2))
assert psf1 == psf2
# Now check some non-default options
config = galsim.config.CleanConfig(config)
config['psf']['pupil_bin'] = 8
config['psf']['n_waves'] = 4
config['psf']['extra_aberrations'] = [0.01, 0, 0, 0.03, -0.05]
config['psf']['gsparams'] = {'folding_threshold' : 1.e-2}
psf1 = galsim.config.BuildGSObject(config, 'psf')[0]
psf2 = galsim.roman.getPSF(SCA=4, bandpass='H158', pupil_bin=8, n_waves=4,
extra_aberrations=[0,0,0,0, 0.01, 0, 0, 0.03, -0.05],
gsparams=galsim.GSParams(folding_threshold=1.e-2))
print('psf1 = ',str(psf1))
print('psf2 = ',str(psf2))
assert psf1 == psf2
# Check using some values that may already be loaded into the base config dict.
config = galsim.config.CleanConfig(config)
del config['psf']['SCA']
del config['psf']['bandpass']
del config['psf']['n_waves']
config['bandpass'] = galsim.roman.getBandpasses(AB_zeropoint=True)['Z087']
config['SCA'] = 9
config['image_pos'] = galsim.PositionD(123,456)
config['psf']['use_SCA_pos'] = True
config['psf']['wavelength'] = 985.
psf1 = galsim.config.BuildGSObject(config, 'psf')[0]
psf2 = galsim.roman.getPSF(SCA=9, bandpass='Z087', pupil_bin=8, wavelength=985.,
SCA_pos=galsim.PositionD(123,456),
extra_aberrations=[0,0,0,0, 0.01, 0, 0, 0.03, -0.05],
gsparams=galsim.GSParams(folding_threshold=1.e-2))
print('psf1 = ',str(psf1))
print('psf2 = ',str(psf2))
assert psf1 == psf2
# Let bandpass be built by RomanBandpass type
config = galsim.config.CleanConfig(config)
config['image'] = {
'bandpass' : { 'type' : 'RomanBandpass', 'name' : 'J129' }
}
config['bandpass'] = galsim.config.BuildBandpass(config['image'], 'bandpass', config)[0]
psf1 = galsim.config.BuildGSObject(config, 'psf')[0]
psf2 = galsim.roman.getPSF(SCA=9, bandpass='J129', pupil_bin=8, wavelength=985.,
SCA_pos=galsim.PositionD(123,456),
extra_aberrations=[0,0,0,0, 0.01, 0, 0, 0.03, -0.05],
gsparams=galsim.GSParams(folding_threshold=1.e-2))
print('psf1 = ',str(psf1))
print('psf2 = ',str(psf2))
assert psf1 == psf2
@timer
def test_config_sca():
"""Test RomanSCA config type"""
# The standard size of a Roman SCA is a bit large for an efficient unit test,
# so we use mock to reduce the size of the image being constructed here.
logger = logging.getLogger('test_config_sca')
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
with mock.patch('galsim.roman.roman_config.n_pix', 64):
config = {
'modules': ['galsim.roman'],
'image': {
'type': 'RomanSCA',
# These are required:
'nobjects' : 1,
'SCA': 5,
'ra': '16 hours',
'dec': '66 degrees',
'filter': 'H158',
'date': datetime.datetime(2025, 5, 16),
# Set the rng seed:
'random_seed': 1234,
# Start with all the extra effects turned off.
'stray_light': False,
'thermal_background': False,
'reciprocity_failure': False,
'dark_current': False,
'nonlinearity': False,
'ipc': False,
'read_noise': False,
'sky_subtract': False,
# image_pos can be either here or in stamp.
'image_pos': (23,17),
},
# Nothing complicated for the object to draw.
'gal': {
'type': 'Exponential',
'half_light_radius': 1.2,
'flux': 177,
},
}
galsim.config.ImportModules(config)
im1 = galsim.config.BuildImage(config, obj_num=0)
# Compare to manually constructed image
pointing = galsim.CelestialCoord(ra=16*galsim.hours, dec=66*galsim.degrees)
date = datetime.datetime(2025, 5, 16)
wcs = galsim.roman.getWCS(world_pos=pointing, SCAs=[5], date=date)[5]
im2 = galsim.Image(64,64, wcs=wcs)
bp = galsim.roman.getBandpasses()['H158']
sky_level = galsim.roman.getSkyLevel(bp, world_pos=wcs.toWorld(im2.true_center))
wcs.makeSkyImage(im2, sky_level)
gal = galsim.Exponential(half_light_radius=1.2, flux=177)
gal.drawImage(im2, center=(23,17), add_to_image=True)
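# Mirror the config layer's seed handling: the user-supplied random_seed is converted
# into the first actual rng seed via BaseDeviate(seed).raw().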
first_seed = galsim.BaseDeviate(1234).raw()
poisson_noise = galsim.PoissonNoise(galsim.BaseDeviate(first_seed))
im2.addNoise(poisson_noise)
im2 /= galsim.roman.gain
im2.quantize()
assert im1 == im2
# Repeat with all of the detector effects
config = galsim.config.CleanConfig(config)
config['image']['stray_light'] = True
config['image']['thermal_background'] = True
config['image']['reciprocity_failure'] = True
config['image']['dark_current'] = True
config['image']['nonlinearity'] = True
config['image']['ipc'] = True
config['image']['read_noise'] = True
config['image']['sky_subtract'] = True
im1 = galsim.config.BuildImage(config, obj_num=0, logger=logger)
sky_level *= (1.0 + galsim.roman.stray_light_fraction)
wcs.makeSkyImage(im2, sky_level)
im2 += galsim.roman.thermal_backgrounds['H158'] * galsim.roman.exptime
sky_image = im2.copy()
gal.drawImage(im2, center=(23,17), add_to_image=True)
rng = galsim.BaseDeviate(first_seed)
poisson_noise = galsim.PoissonNoise(rng)
im2.addNoise(poisson_noise)
galsim.roman.addReciprocityFailure(im2)
dc = galsim.roman.dark_current * galsim.roman.exptime
sky_image += dc
im2.addNoise(galsim.DeviateNoise(galsim.PoissonDeviate(rng, dc)))
galsim.roman.applyNonlinearity(im2)
galsim.roman.applyIPC(im2)
im2.addNoise(galsim.GaussianNoise(rng, sigma=galsim.roman.read_noise))
im2 /= galsim.roman.gain
im2.quantize()
sky_image /= galsim.roman.gain
sky_image.quantize()
im2 -= sky_image
assert im1 == im2
# If photon shooting, objects already have Poisson noise.
# Also, having all detector effects on is the default, so we can remove these items from the config.
# And use explicit Bandpass, which is allowed (possibly to override the default bp's),
# but in this case is equivalent to the default behavior.
config = galsim.config.CleanConfig(config)
del config['image']['stray_light']
del config['image']['thermal_background']
del config['image']['reciprocity_failure']
del config['image']['dark_current']
del config['image']['nonlinearity']
del config['image']['ipc']
del config['image']['read_noise']
del config['image']['sky_subtract']
config['stamp'] = { 'draw_method' : 'phot' }
config['image']['bandpass'] = { 'type' : 'RomanBandpass', 'name' : 'H158' }
im1 = galsim.config.BuildImage(config, obj_num=0, logger=logger)
wcs.makeSkyImage(im2, sky_level)
im2 += galsim.roman.thermal_backgrounds['H158'] * galsim.roman.exptime
sky_image = im2.copy()
rng = galsim.BaseDeviate(first_seed)
poisson_noise = galsim.PoissonNoise(rng)
im2.addNoise(poisson_noise)
gal_rng = galsim.BaseDeviate(first_seed+1)
gal.drawImage(im2, center=(23,17), add_to_image=True, method='phot', rng=gal_rng)
galsim.roman.addReciprocityFailure(im2)
dc = galsim.roman.dark_current * galsim.roman.exptime
sky_image += dc
im2.addNoise(galsim.DeviateNoise(galsim.PoissonDeviate(rng, dc)))
galsim.roman.applyNonlinearity(im2)
galsim.roman.applyIPC(im2)
im2.addNoise(galsim.GaussianNoise(rng, sigma=galsim.roman.read_noise))
im2 /= galsim.roman.gain
im2.quantize()
sky_image /= galsim.roman.gain
sky_image.quantize()
im2 -= sky_image
assert im1 == im2
@timer
def test_aberration_interpolation(run_slow):
"""Test the Roman aberration interpolation method inside roman.roman_psfs
"""
# We read in pairs of conjunction points, they are on different SCAs but are physically
# adjacent on the FPA. The optical aberration between the two points in a pair should
# be much less than the aberration range in the FPA. The maximum and minimum of aberration
# in the FPA are pre-calculated using a 20x20 grid of positions on each SCA by Tianqing
# Zhang. The conjunction pairs between the first row and the second row are considered
# 'far', because they are further separated. The tolerance on z_diff depends on the
# Zernike order (see the frac values in the loop below), since the higher-order
# coefficients are noticeably noisier in the data.
print("Start continuity test for aberration interpolation")
# Make an arbitrary WCS. We don't care about the sky positions, but we'll check
# which SCAs have close neighbors on each side by round tripping through the sky.
world_pos = galsim.CelestialCoord(127.*galsim.degrees, -70*galsim.degrees)
date = datetime.datetime(2025,3,20,9,2,0)
wcs = galsim.roman.getWCS(PA=0*galsim.degrees, world_pos=world_pos, date=date)
# World position of the SCA centers.
centers = { isca: wcs[isca].toWorld(galsim.PositionD(2048,2048)) for isca in range(1,19) }
# List of (x, y, dx, dy) for possible pairings.
trial_positions = [(0,4046,-500,0), (2048,4096,0,500), (4096,4046,500,0),
(0,2048,-500,0), (4096,2048,500,0),
(0,50,-500,0), (2048,0,0,-500), (4096,50,500,0) ]
# Pairs will have tuples that look like:
# (sca1, x1, y1, sca2, x2, y2, far)
# corresponding to points on two different SCAs that are physically close to each other.
# far means we found a pair at 1000 pixels separation, but not 500.
pairs = []
for sca1 in range(1,19):
for sca2 in range(sca1+1,19):
# Nominal size of one SCA is 4096 * 0.11 arcsec/pix ~= 450 arcsec
# If two SCAs are more than sqrt(2) times this apart, they don't share any edges.
if centers[sca1].distanceTo(centers[sca2]) > 650 * galsim.arcsec: continue
#print('Consider pair ', sca1, sca2)
for x, y, dx, dy in trial_positions:
pos2 = wcs[sca2].toImage(wcs[sca1].toWorld(galsim.PositionD(x+dx, y+dy)))
#print('Position on sc2 = ',pos2)
if 0 < pos2.x < 4096 and 0 < pos2.y < 4096:
#print('Valid')
pairs.append((sca1, x, y, sca2, pos2.x, pos2.y, False))
elif x == 2048:
# For vertical offsets, also try doubling dy.
pos2 = wcs[sca2].toImage(wcs[sca1].toWorld(galsim.PositionD(x+dx, y+2*dy)))
#print('Far position on sc2 = ',pos2)
if 0 < pos2.x < 4096 and 0 < pos2.y < 4096:
#print('Valid for far')
pairs.append((sca1, x, y, sca2, pos2.x, pos2.y, True))
for pair in pairs:
print(pair)
#Read the aberration and sca_pos for the interpolation *reference* points from the roman files.
abers = { isca: galsim.roman.roman_psfs._read_aberrations(isca) for isca in range(1,19) }
# Calculate the min/max zernike values across the FPA.
aberration_array = np.concatenate([a[0] for a in abers.values()])
print('all aberrations = ',aberration_array)
print('max = ',np.max(aberration_array, axis=0))
print('min = ',np.min(aberration_array, axis=0))
Z_min = np.min(aberration_array, axis=0)
Z_max = np.max(aberration_array, axis=0)
Z_range = Z_max - Z_min
if run_slow:
from matplotlib import pyplot as plt
world_pos = galsim.CelestialCoord(0.*galsim.degrees, 0*galsim.degrees)
date = datetime.datetime(2025,5,20)
wcs = galsim.roman.getWCS(PA=0*galsim.degrees, world_pos=world_pos, date=date)
# Plot the value of each zernike coefficient across the fov.
for i in range(1,23):
fig = plt.figure(figsize=(6,6))
ax = fig.subplots()
ax.set_xlim(0.6, -0.6) # +RA is left
ax.set_ylim(-0.6, 0.6)
ax.set_xlabel('RA')
ax.set_ylabel('Dec')
ax.set_title('Zernike i={}'.format(i))
ra = []
dec = []
aber = []
for sca in range(1,19):
x,y = np.meshgrid(np.arange(0,4096,500), np.arange(0,4096,500))
for xx,yy in zip(x.ravel(),y.ravel()):
ab = galsim.roman.roman_psfs._interp_aberrations_bilinear(
abers[sca][0], abers[sca][1], abers[sca][2],
SCA_pos=galsim.PositionD(xx,yy))
coord = wcs[sca].toWorld(galsim.PositionD(xx,yy))
print(i,sca,xx,yy,coord,ab[i])
ra.append(coord.ra.deg)
dec.append(coord.dec.deg)
aber.append(ab[i])
ax.scatter(ra, dec, c=aber)
plt.savefig('output/z{}.png'.format(i))
#plt.show()
plt.close()
for sca1, x1, y1, sca2, x2, y2, far in pairs:
print(sca1, x1, y1, sca2, x2, y2, far)
#For each pair of conjunction points, calculate their aberration by calling
# _interp_aberrations_bilinear
point_1_abe = galsim.roman.roman_psfs._interp_aberrations_bilinear(
abers[sca1][0], abers[sca1][1], abers[sca1][2],
SCA_pos=galsim.PositionD(x1,y1))
point_2_abe = galsim.roman.roman_psfs._interp_aberrations_bilinear(
abers[sca2][0], abers[sca2][1], abers[sca2][2],
SCA_pos=galsim.PositionD(x2,y2))
for i in range(1,23):
z_diff = np.abs(point_2_abe[i] - point_1_abe[i])
print(' ',i,z_diff,point_1_abe[i],point_2_abe[i],Z_range[i],far,z_diff/Z_range[i])
# We used to have different tolerances for far vs near separations, but now
# the main thing is that different zernike coefficients are differently smooth.
# 2-10 are all very smooth and most pass with frac=0.10.
# 1 is worse, but 1 is weird that it is even in the data.
# 11-14 are also a little choppy.
# 15+ all look really bad in the plots (made above when run from main).
# I think our code is ok here, but the data seem to be very noisy for the
# higher order Zernikes.
if i < 2:
frac = 0.25
elif i < 11:
frac = 0.15
elif i < 15:
frac = 0.25
else:
frac = 0.5
assert z_diff <= frac*(Z_range[i]),\
'z_diff > {}(z_max - z_min), failed aberration continuity test.\
\n Fail for Z{}, z_diff = {}, sca1 = {}, sca2 = {}'.format(frac,i,z_diff,sca1,sca2)
print("Continuity test passes.")
@timer
def test_roman_focal_plane(run_slow):
"""Test that a full focal plane has everything oriented as shown in mapping_v210503.pdf
"""
# For this test, we mostly try to plot points on each SCA that visually reproduce
# the appearance of the red and blue arrows on Chris's plot of the focal plane.
# We'll put in some asserts, but the real test is to open the produced figure and
# compare the two diagrams visually.
# Boresight at 0,0, so RA, Dec are essentially observatory coordinates.
world_pos = galsim.CelestialCoord(0.*galsim.degrees, 0*galsim.degrees)
date = datetime.datetime(2025,5,20)
# Zero PA, so +Dec is up. (Remember that +RA is left.)
wcs = galsim.roman.getWCS(PA=0*galsim.degrees, world_pos=world_pos, date=date)
red_arrows = {}
blue_arrows = {}
border = {}
numbers = {}
for sca in range(1,19):
# Make the red arrow for this SCA.
# x = 200, y = 200..3900, dotted.
red_arrows[sca] = []
x = 200
for y in range(200,3901,10):
if (y // 500) % 2 == 1: continue
coord = wcs[sca].toWorld(galsim.PositionD(x,y))
red_arrows[sca].append( (coord.ra.deg, coord.dec.deg) )
# Make the blue arrow
# y = 200, x = 200..3900, solid
blue_arrows[sca] = []
y = 200
for x in range(200,3901,10):
coord = wcs[sca].toWorld(galsim.PositionD(x,y))
blue_arrows[sca].append( (coord.ra.deg, coord.dec.deg) )
# Make a grey border around the edge
border[sca] = []
for xy in range(0,4096,10):
coord = wcs[sca].toWorld(galsim.PositionD(xy,0))
border[sca].append( (coord.ra.deg, coord.dec.deg) )
coord = wcs[sca].toWorld(galsim.PositionD(0,xy))
border[sca].append( (coord.ra.deg, coord.dec.deg) )
coord = wcs[sca].toWorld(galsim.PositionD(xy,4096))
border[sca].append( (coord.ra.deg, coord.dec.deg) )
coord = wcs[sca].toWorld(galsim.PositionD(4096,xy))
border[sca].append( (coord.ra.deg, coord.dec.deg) )
# Make crude numbers for each SCA
# Fit each number into rectangle from 1200<x<2800, 1000<y<3400
numbers[1] = []
x = 2000
for y in range(1000,3401,10):
numbers[1].append( (x,y) )
for d in range(10,401,10):
numbers[1].append( (2000-d, 3400-d) )
numbers[2] = []
for t in range(180,-91,-1):
theta = t * galsim.degrees
numbers[2].append( (2000+800*theta.cos(), 2600+800*theta.sin()) )
for d in range(10,801,10):
numbers[2].append( (2000-d, 1800-d) )
y = 1000
for x in range(1200,2801,10):
numbers[2].append( (x, y) )
numbers[3] = []
for t in range(180,-91,-1):
theta = t * galsim.degrees
numbers[3].append( (2000+600*theta.cos(), 2800+600*theta.sin()) )
for t in range(90,-181,-1):
theta = t * galsim.degrees
numbers[3].append( (2000+600*theta.cos(), 1600+600*theta.sin()) )
numbers[4] = []
x = 2400
for y in range(1000,3401,10):
numbers[4].append( (x,y) )
for d in range(10,1201,10):
numbers[4].append( (2400-d,3400-d) )
y = 2200
for x in range(1200,2801,10):
numbers[4].append( (x,y) )
numbers[5] = []
for t in range(-160,161):
theta = t * galsim.degrees
numbers[5].append( (2000+800*theta.cos(), 1800+800*theta.sin()) )
x = 1250
for y in range(2080,3401,10):
numbers[5].append( (x,y) )
y = 3400
for x in range(1250,2751,10):
numbers[5].append( (x,y) )
numbers[6] = []
for t in range(30,180):
theta = t * galsim.degrees
numbers[6].append( (2000+800*theta.cos(), 2600+800*theta.sin()) )
x = 1200
for y in range(1800,2600,10):
numbers[6].append( (x,y) )
for t in range(-180,180):
theta = t * galsim.degrees
numbers[6].append( (2000+800*theta.cos(), 1800+800*theta.sin()) )
numbers[7] = []
for x in range(1200,2801,10):
y = 1000 + 3*(x-1200)//2
numbers[7].append( (x,y) )
for x in range(1200,2801,10):
numbers[7].append( (x,3400) )
numbers[8] = []
for t in range(0,360):
theta = t * galsim.degrees
numbers[8].append( (2000+600*theta.cos(), 2800+600*theta.sin()) )
for t in range(0,360):
theta = t * galsim.degrees
numbers[8].append( (2000+600*theta.cos(), 1600+600*theta.sin()) )
numbers[9] = []
for t in range(-150,0):
theta = t * galsim.degrees
numbers[9].append( (2000+800*theta.cos(), 1800+800*theta.sin()) )
x = 2800
for y in range(1800,2600,10):
numbers[9].append( (x,y) )
for t in range(0,360):
theta = t * galsim.degrees
numbers[9].append( (2000+800*theta.cos(), 2600+800*theta.sin()) )
numbers[10] = []
for t in range(0,360):
theta = t * galsim.degrees
numbers[10].append( (2400+800*theta.cos(), 2200+1200*theta.sin()) )
x = 1200
for y in range(1000,3401,10):
numbers[10].append( (x,y) )
for d in range(10,401,10):
numbers[10].append( (1200-d, 3400-d) )
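# SCAs 11-18 reuse the digit shapes of SCAs 1-8, shifted 400 pixels to the right, and
# add a vertical stroke plus a short diagonal serif on the left to form the leading '1'.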
for sca in range(11,19):
numbers[sca] = [(x+400,y) for x,y in numbers[sca-10]]
x = 1200
for y in range(1000,3401,10):
numbers[sca].append( (x,y) )
for d in range(10,401,10):
numbers[sca].append( (1200-d, 3400-d) )
# OK, now convert all the x,y in numbers to ra, dec
for sca in range(1,19):
coords = [wcs[sca].toWorld(galsim.PositionD(x,y)) for x,y in numbers[sca]]
numbers[sca] = [(c.ra.deg, c.dec.deg) for c in coords]
if run_slow:
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
# Make a plot of all these in the observatory coordinate system.
fig = Figure(figsize=(6,6))
ax = fig.subplots()
ax.set_xlim(0.6, -0.6) # +RA is left
ax.set_ylim(-0.6, 0.6)
ax.set_xlabel('RA')
ax.set_ylabel('Dec')
for sca in range(1,19):
r,d = zip(*red_arrows[sca])
ax.scatter(r, d, color='red', marker='.', s=2)
r,d = zip(*blue_arrows[sca])
ax.scatter(r, d, color='blue', marker='.', s=2)
r,d = zip(*numbers[sca])
ax.scatter(r, d, color='black', marker='.', s=2)
r,d = zip(*border[sca])
ax.scatter(r, d, color='grey', marker='.', s=1)
canvas = FigureCanvasAgg(fig)
fig.set_layout_engine('tight')
file_name = os.path.join('output', 'test_roman_focal_plane.pdf')
canvas.print_figure(file_name, dpi=300)
print('Made ',file_name)
print('Compare this to ../devel/roman/mapping_v210503.pdf.')
# The real test is that the images look similar. But turn this into some
# assert statements.
for sca in range(1,19):
if sca%3 == 0:
# 3,6,9,... have blue moving NE and red moving NW
assert blue_arrows[sca][-1][0] > blue_arrows[sca][0][0] # RA increases
assert blue_arrows[sca][-1][1] > blue_arrows[sca][0][1] # Dec increases
assert red_arrows[sca][-1][0] < red_arrows[sca][0][0] # RA decreases
assert red_arrows[sca][-1][1] > red_arrows[sca][0][1] # Dec increases
else:
# others have blue moving SW and red moving SE
assert blue_arrows[sca][-1][0] < blue_arrows[sca][0][0] # RA decreases
assert blue_arrows[sca][-1][1] < blue_arrows[sca][0][1] # Dec decreases
assert red_arrows[sca][-1][0] > red_arrows[sca][0][0] # RA increases
assert red_arrows[sca][-1][1] < red_arrows[sca][0][1] # Dec decreases
if __name__ == "__main__":
runtests(__file__)
|
GalSim-developersREPO_NAMEGalSimPATH_START.@GalSim_extracted@GalSim-main@tests@[email protected]_END.py
|
{
"filename": "how_to_run_TRES_from_AMUSE.py",
"repo_name": "amusecode/TRES",
"repo_path": "TRES_extracted/TRES-main/developer/how_to_run_TRES_from_AMUSE.py",
"type": "Python"
}
|
# the default method to run TRES is described in README.md
# in some cases you may want to run TRES from within AMUSE
# e.g. to run TRES as a sort of community code or to allow for more flexibility
# this document provides a few examples of how to do this
import numpy as np
import matplotlib.pyplot as plt
from amuse.datamodel import Particles
from amuse.units import units
from amuse.community.seba.interface import SeBa
import sys, os
sys.path.append(os.path.dirname(os.getcwd()))
import TRES as TRES
from seculartriple_TPS.interface import SecularTriple
#simplest way of running TRES
def example_1():
print('TRES example 1')
tr = TRES.main()
print(tr.triple.eccentricity, tr.triple.child2.eccentricity)
# the stellar and secular codes are stopped inside main(),
# so no need to do it here:
# tr.stellar_code.stop()
# tr.secular_code.stop()
#simple way of running TRES with adjusting input parameters
#possible parameters:
#inner_primary_mass, inner_secondary_mass, outer_mass,
#inner_semimajor_axis, outer_semimajor_axis, inner_eccentricity, outer_eccentricity,
#relative_inclination, inner_argument_of_pericenter, outer_argument_of_pericenter, inner_longitude_of_ascending_node,
#metallicity,
#tend, tinit,
#number #id number of (first) triple
#
#possible stopping conditions:
#stop_at_mass_transfer, stop_at_init_mass_transfer,stop_at_outer_mass_transfer,
#stop_at_stable_mass_transfer, stop_at_eccentric_stable_mass_transfer,
#stop_at_unstable_mass_transfer, stop_at_eccentric_unstable_mass_transfer,
#stop_at_no_CHE, include_CHE,
#stop_at_merger, stop_at_disintegrated,
#stop_at_inner_collision, stop_at_outer_collision,
#stop_at_dynamical_instability,
#stop_at_semisecular_regime,
#stop_at_SN, SN_kick_distr,
#
#possible settings:
#impulse_kick_for_black_holes,
#fallback_kick_for_black_holes,
#which_common_envelope,
#stop_at_CPU_time,
#max_CPU_time,
#file_name, file_type, dir_plots
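# A minimal sketch (added for illustration, not one of the original TRES examples):
# the stopping conditions and settings listed above are passed to TRES.main() as ordinary
# keyword arguments, just like the physical parameters. The particular values below are
# assumptions chosen only to show the calling pattern.
def example_stopping_conditions():
    print('TRES example with stopping conditions')
    M1 = 2.0|units.MSun
    tend = 100|units.Myr
    # stop as soon as mass transfer starts in the system
    tr = TRES.main(inner_primary_mass = M1, tend = tend,
                   stop_at_mass_transfer = True)
    print(tr.triple.eccentricity, tr.triple.child2.eccentricity)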
def example_2():
print('TRES example 2')
M1 = 1.5|units.MSun
M2 = 0.6|units.MSun
M3 = 0.6|units.MSun
ain = 150|units.RSun
aout = 15000|units.RSun
incl = 81.0*np.pi/180.0
tr = TRES.main(inner_primary_mass = M1, inner_secondary_mass = M2, outer_mass = M3,
inner_semimajor_axis = ain, outer_semimajor_axis = aout, relative_inclination = incl)
print(tr.triple.eccentricity, tr.triple.child2.eccentricity)
# tr.stellar_code.stop()
# tr.secular_code.stop()
#simple way of running TRES with adjusting input parameters
# evolves just the stars to 625Myr (without changing the triple), and continues the triple simulation until 630Myr
# useful in case of starting the simulation with evolved stars
def example_3():
print('TRES example 3')
M1 = 2.5|units.MSun
tinit = 625|units.Myr
tend = 630|units.Myr
tr = TRES.main(inner_primary_mass = M1, tinit = tinit, tend = tend)
print(tr.triple.eccentricity, tr.triple.child2.eccentricity)
# tr.stellar_code.stop()
# tr.secular_code.stop()
# example of Kozai-Lidov cycles in a triple with plotting
# advanced level
# uses TRES.main_developer() instead of TRES.main()
# evolve the triple to multiple timestamps
def example_4():
print('TRES example 4')
M1 = 1.3|units.MSun
M2 = 0.5|units.MSun
M3 = 0.5|units.MSun
Ain = 200|units.RSun
Aout = 20000|units.RSun
Ein = 0.1
Eout = 0.5
incl = 80.0*np.pi/180.0
Gin = 0.1
Gout = 0.5
Oin = 0.
metallicity = 0.02
stars, bins, correct_params = TRES.make_particle_sets(M1,M2,M3, Ain, Aout, Ein, Eout, incl, Gin, Gout, Oin)
stellar_code = SeBa()
stellar_code.parameters.metallicity = metallicity
secular_code = SecularTriple()
inner_eccentricity_array = []
outer_eccentricity_array = []
n_steps = 150
time_array = (np.arange(n_steps)+1)*5|units.Myr/n_steps
#make triple object and evolve for small timestep
#needs to be bigger than 1e-4|units.Myr for the secular code
tr = TRES.main_developer(stars, bins, correct_params, stellar_code, secular_code,
incl, tend=1e-4|units.Myr)
for i in range(len(time_array)):
tr.evolve_model(time_array[i])
inner_eccentricity_array.append(tr.triple.child2.eccentricity)
outer_eccentricity_array.append(tr.triple.eccentricity)
if tr.check_stopping_conditions()==False or tr.check_stopping_conditions_stellar()==False or tr.check_stopping_conditions_stellar_interaction()==False:
print('stopping conditions reached')
time_array = time_array[:len(inner_eccentricity_array)]
time_array[-1] = tr.triple.time
break
plt.plot(time_array.value_in(units.Myr), inner_eccentricity_array, label='inner eccentricity')
plt.plot(time_array.value_in(units.Myr), outer_eccentricity_array, label='outer eccentricity')
plt.plot(time_array.value_in(units.Myr), inner_eccentricity_array, 'k.')
plt.xlabel('time (Myr)')
plt.ylabel('eccentricity')
plt.legend(loc=0)
plt.show()
tr.stellar_code.stop()
tr.secular_code.stop()
# example of triple with wind mass loss & calculating through mass transfer, with plotting
# advanced level
# uses TRES.main_developer() instead of TRES.main()
def example_5():
print('TRES example 5')
M1 = 3.|units.MSun
M2 = 0.5|units.MSun
M3 = 0.5|units.MSun
Ain = 825|units.RSun
Aout = 80000|units.RSun
Ein = 0.0
Eout = 0.5
incl = 0.0
Gin = 0.1
Gout = 0.5
Oin = 0.
metallicity = 0.02
stars, bins, correct_params = TRES.make_particle_sets(M1,M2,M3, Ain, Aout, Ein, Eout, incl, Gin, Gout, Oin)
stellar_code = SeBa()
stellar_code.parameters.metallicity = metallicity
secular_code = SecularTriple()
inner_semimajor_axis_array = np.array([])
outer_semimajor_axis_array = np.array([])
radius_primary_array = np.array([])
stellar_type_primary_array = np.array([])
time_array = (np.arange(25)+1)/25.*370
time_array = np.append(time_array, (np.arange(100)+1)/100.*100 + 370)
time_array = np.append(time_array, (np.arange(100)+1)/100.*10 + 470)
time_array = time_array|units.Myr
#make triple object and evolve for small timestep
#needs to be bigger than 1e-4|units.Myr for the secular code
tr = TRES.main_developer(stars, bins, correct_params, stellar_code, secular_code, incl, tend=1e-4|units.Myr, stop_at_mass_transfer=False)
for i in range(len(time_array)):
tr.evolve_model(time_array[i])
# print(time_array[i], tr.triple.child2.bin_type, tr.instantaneous_evolution,tr.triple.child2.child1.stellar_type)
inner_semimajor_axis_array = np.append(inner_semimajor_axis_array, tr.triple.child2.semimajor_axis.value_in(units.RSun))
outer_semimajor_axis_array = np.append(outer_semimajor_axis_array, tr.triple.semimajor_axis.value_in(units.RSun))
radius_primary_array = np.append(radius_primary_array, tr.triple.child2.child1.radius.value_in(units.RSun))
stellar_type_primary_array = np.append(stellar_type_primary_array, tr.triple.child2.child1.stellar_type.value_in(units.stellar_type))
if tr.check_stopping_conditions()==False or tr.check_stopping_conditions_stellar()==False or tr.check_stopping_conditions_stellar_interaction()==False:
print('stopping conditions reached')
time_array = time_array[:len(inner_semimajor_axis_array)]
time_array[-1] = tr.triple.time
break
plt.semilogy(time_array.value_in(units.Myr), inner_semimajor_axis_array, label='inner semimajor axis')
plt.semilogy(time_array.value_in(units.Myr), outer_semimajor_axis_array, label='outer semimajor axis')
plt.xlabel('time (Myr)')
plt.ylabel('semimajor axis (RSun)')
plt.legend(loc=0)
plt.show()
w_ms = np.arange(len(stellar_type_primary_array))[stellar_type_primary_array<=1]
w_hg = np.arange(len(stellar_type_primary_array))[stellar_type_primary_array==2]
w_gb = np.arange(len(stellar_type_primary_array))[stellar_type_primary_array==3]
w_cheb = np.arange(len(stellar_type_primary_array))[stellar_type_primary_array==4]
w_agb = np.arange(len(stellar_type_primary_array))[stellar_type_primary_array==5]
w_wd = np.arange(len(stellar_type_primary_array))[(stellar_type_primary_array>=10) & (stellar_type_primary_array<=12)]
plt.semilogy(time_array.value_in(units.Myr), radius_primary_array, 'k')
plt.semilogy(time_array.value_in(units.Myr)[w_ms], radius_primary_array[w_ms], '.', label='MS')
plt.semilogy(time_array.value_in(units.Myr)[w_hg], radius_primary_array[w_hg], '.', label='HG')
plt.semilogy(time_array.value_in(units.Myr)[w_gb], radius_primary_array[w_gb], '.', label='GB')
plt.semilogy(time_array.value_in(units.Myr)[w_cheb], radius_primary_array[w_cheb], '.', label='CHeB')
plt.semilogy(time_array.value_in(units.Myr)[w_agb], radius_primary_array[w_agb], '.', label='AGB')
plt.semilogy(time_array.value_in(units.Myr)[w_wd], radius_primary_array[w_wd], '.', label='WD')
plt.xlabel('time (Myr)')
plt.ylabel('primary radius (RSun)')
plt.legend(loc=0)
plt.show()
tr.stellar_code.stop()
tr.secular_code.stop()
# advanced level
# uses TRES.main_developer() instead of TRES.main()
# evolve the triple to 2Myr, 3Myr, 5Myr, then evolves just the stars to 8Myr (without changing the triple), and continues the triple simulation until 9Myr
def example_6():
print('TRES example 6')
M1 = 1.3|units.MSun
M2 = 0.5|units.MSun
M3 = 0.5|units.MSun
Ain = 200|units.RSun
Aout = 20000|units.RSun
Ein = 0.1
Eout = 0.5
incl = 80.0*np.pi/180.0
Gin = 0.1
Gout = 0.5
Oin = 0.
metallicity = 0.02
stars, bins, correct_params = TRES.make_particle_sets(M1,M2,M3, Ain, Aout, Ein, Eout, incl, Gin, Gout, Oin)
stellar_code = SeBa()
stellar_code.parameters.metallicity = metallicity
secular_code = SecularTriple()
#make triple object and evolve until 2Myr
tr = TRES.main_developer(stars, bins, correct_params, stellar_code, secular_code, incl, tend=2|units.Myr)
#continue evolution until 3 myr
tr.evolve_model(3|units.Myr)
#continue evolution until 5 myr
tr.evolve_model(5|units.Myr)
#only evolve the stars until 8Myr
stellar_code.evolve_model(8|units.Myr)
channel_from_stellar = stellar_code.particles.new_channel_to(stars)
channel_from_stellar.copy()
#pickup triple evolution at 8Myr
tr.triple.time = 8|units.Myr
tr.secular_code.model_time = 8|units.Myr #not redundant!
# continue triple evolution until 9Myr
tr.evolve_model(9|units.Myr)
print(tr.triple.eccentricity, tr.triple.child2.eccentricity)
tr.stellar_code.stop()
tr.secular_code.stop()
# advanced level
# uses TRES.main_developer() instead of TRES.main()
# evolve the triple to 2Myr, 3Myr, 5Myr, then evolves just the stars to 8Myr (without changing the triple), and continues the triple simulation until 9Myr
#at 9Myr, some mass is removed from one star (without changing the triple), then the triple is evolved until 10 Myr
def example_7():
print('TRES example 7')
M1 = 1.3|units.MSun
M2 = 0.5|units.MSun
M3 = 0.5|units.MSun
Ain = 200|units.RSun
Aout = 20000|units.RSun
Ein = 0.1
Eout = 0.5
incl = 80.0*np.pi/180.0
Gin = 0.1
Gout = 0.5
Oin = 0.
metallicity = 0.02
stars, bins, correct_params = TRES.make_particle_sets(M1,M2,M3, Ain, Aout, Ein, Eout, incl, Gin, Gout, Oin)
stellar_code = SeBa()
stellar_code.parameters.metallicity = metallicity
secular_code = SecularTriple()
#make triple object and evolve until 2Myr
tr = TRES.main_developer(stars, bins, correct_params, stellar_code, secular_code, incl, tend=2|units.Myr)
#continue evolution until 3 myr
tr.evolve_model(3|units.Myr)
#continue evolution until 5 myr
tr.evolve_model(5|units.Myr)
#only evolve the stars until 8Myr
stellar_code.evolve_model(8|units.Myr)
channel_from_stellar = stellar_code.particles.new_channel_to(stars)
channel_from_stellar.copy()
#pickup triple evolution at 8Myr
tr.triple.time = 8|units.Myr
tr.secular_code.model_time = 8|units.Myr #not redundant!
# continue triple evolution until 9Myr
tr.evolve_model(9|units.Myr)
# make modifications to stellar object
# in this case remove some mass of the envelope (without effect on triple)
donor_in_stellar_code = stars[0].as_set().get_intersecting_subset_in(stellar_code.particles)[0]
donor_in_stellar_code.change_mass(-0.2|units.MSun, 0.|units.yr)
minimum_time_step = 1.e-9 |units.Myr
stellar_code.evolve_model(tr.triple.time+minimum_time_step) #to get updated radii
channel_from_stellar.copy()
# continue triple evolution until 10Myr
tr.evolve_model(10|units.Myr)
print(tr.triple.eccentricity, tr.triple.child2.eccentricity)
tr.stellar_code.stop()
tr.secular_code.stop()
# most advanced level
# uses TRES.main_developer() instead of TRES.main()
# make triple, but don't evolve it. evolves just the stars to 8Myr (without changing the triple), and continues the triple simulation until 9Myr
# useful in case of starting the simulation with evolved stars, stripped stars, or compact objects
# note that the same can be achieved very simply with tr = TRES.main(tinit = 8|units.Myr, tend = 9|units.Myr) - see example_3.
# the option below provides more flexibility
def example_8():
print('TRES example 8')
M1 = 1.3|units.MSun
M2 = 0.5|units.MSun
M3 = 0.5|units.MSun
Ain = 200|units.RSun
Aout = 20000|units.RSun
Ein = 0.1
Eout = 0.5
incl = 80.0*np.pi/180.0
Gin = 0.1
Gout = 0.5
Oin = 0.
metallicity = 0.02
stars, bins, correct_params = TRES.make_particle_sets(M1,M2,M3, Ain, Aout, Ein, Eout, incl, Gin, Gout, Oin)
stellar_code = SeBa()
stellar_code.parameters.metallicity = metallicity
secular_code = SecularTriple()
#make triple object (evolve for small timestep)
#needs to be bigger than 1e-4|units.Myr for the secular code
tr = TRES.main_developer(stars, bins, correct_params, stellar_code, secular_code, incl, tend=1e-4|units.Myr)
#only evolve the stars until 8Myr
stellar_code.evolve_model(8|units.Myr)
channel_from_stellar = stellar_code.particles.new_channel_to(stars)
channel_from_stellar.copy()
#pickup triple evolution at 8Myr
tr.triple.time = 8|units.Myr
tr.secular_code.model_time = 8|units.Myr #not redundant!
# continue triple evolution until 9Myr
tr.evolve_model(9|units.Myr)
print(tr.triple.eccentricity, tr.triple.child2.eccentricity)
tr.stellar_code.stop()
tr.secular_code.stop()
# most advanced level
# uses TRES.main_developer() instead of TRES.main()
# make triple, but don't evolve it. evolves just the stars to 8Myr (without changing the triple), and continues the triple simulation until 9Myr
# useful in case of starting the simulation with evolved stars, stripped stars, or compact objects
#at 9Myr, some mass is removed from one star (without changing the triple), then the triple is evolved until 10 Myr
def example_9():
print('TRES example 9')
M1 = 1.3|units.MSun
M2 = 0.5|units.MSun
M3 = 0.5|units.MSun
Ain = 200|units.RSun
Aout = 20000|units.RSun
Ein = 0.1
Eout = 0.5
incl = 80.0*np.pi/180.0
Gin = 0.1
Gout = 0.5
Oin = 0.
metallicity = 0.02
stars, bins, correct_params = TRES.make_particle_sets(M1,M2,M3, Ain, Aout, Ein, Eout, incl, Gin, Gout, Oin)
stellar_code = SeBa()
stellar_code.parameters.metallicity = metallicity
secular_code = SecularTriple()
#make triple object (evolve for small timestep)
#needs to be bigger than 1e-4|units.Myr for the secular code
tr = TRES.main_developer(stars, bins, correct_params, stellar_code, secular_code, incl, tend=1e-4|units.Myr)
#only evolve the stars until 8Myr
stellar_code.evolve_model(8|units.Myr)
channel_from_stellar = stellar_code.particles.new_channel_to(stars)
channel_from_stellar.copy()
#pickup triple evolution at 8Myr
tr.triple.time = 8|units.Myr
tr.secular_code.model_time = 8|units.Myr #not redundant!
# continue triple evolution until 9Myr
tr.evolve_model(9|units.Myr)
# make modifications to stellar object
# in this case remove some mass of the envelope (without effect on triple)
donor_in_stellar_code = stars[0].as_set().get_intersecting_subset_in(stellar_code.particles)[0]
donor_in_stellar_code.change_mass(-0.2|units.MSun, 0.|units.yr)
minimum_time_step = 1.e-9 |units.Myr
stellar_code.evolve_model(tr.triple.time+minimum_time_step) #to get updated radii
channel_from_stellar.copy()
# continue triple evolution until 10Myr
tr.evolve_model(10|units.Myr)
print(tr.triple.eccentricity, tr.triple.child2.eccentricity)
tr.stellar_code.stop()
tr.secular_code.stop()
example_1()
#example_2()
#example_3()
#example_4()
#example_5()
#example_6()
#example_7()
#example_8()
#example_9()
|
amusecodeREPO_NAMETRESPATH_START.@TRES_extracted@TRES-main@developer@[email protected]_END.py
|
{
"filename": "model.py",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/qa/python_models/model_init_del/model.py",
"type": "Python"
}
|
# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import time
import triton_python_backend_utils as pb_utils
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from util import get_delay, inc_count
class TritonPythonModel:
def initialize(self, args):
inc_count("initialize")
self._sleep("initialize")
def execute(self, requests):
responses = []
for request in requests:
input_tensor = pb_utils.get_input_tensor_by_name(request, "INPUT0")
out_tensor = pb_utils.Tensor("OUTPUT0", input_tensor.as_numpy())
responses.append(pb_utils.InferenceResponse([out_tensor]))
self._sleep("infer")
return responses
def finalize(self):
inc_count("finalize")
def _sleep(self, kind):
delay = get_delay(kind)
if delay > 0:
time.sleep(delay)
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@qa@python_models@[email protected]@.PATH_END.py
|
{
"filename": "_labelfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/histogram2dcontour/contours/_labelfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Labelfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "histogram2dcontour.contours"
_path_str = "histogram2dcontour.contours.labelfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
"Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
"Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# lineposition
# ------------
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
# shadow
# ------
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# style
# -----
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
# textcase
# --------
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
# variant
# -------
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
# weight
# ------
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
        The 'weight' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Labelfont object
Sets the font used for labeling the contour levels. The default
color comes from the lines, if shown. The default family and
size come from `layout.font`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.histogram2dcon
tour.contours.Labelfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Labelfont
"""
super(Labelfont, self).__init__("labelfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.histogram2dcontour.contours.Labelfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2dcontour.contours.Labelfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("lineposition", None)
_v = lineposition if lineposition is not None else _v
if _v is not None:
self["lineposition"] = _v
_v = arg.pop("shadow", None)
_v = shadow if shadow is not None else _v
if _v is not None:
self["shadow"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("style", None)
_v = style if style is not None else _v
if _v is not None:
self["style"] = _v
_v = arg.pop("textcase", None)
_v = textcase if textcase is not None else _v
if _v is not None:
self["textcase"] = _v
_v = arg.pop("variant", None)
_v = variant if variant is not None else _v
if _v is not None:
self["variant"] = _v
_v = arg.pop("weight", None)
_v = weight if weight is not None else _v
if _v is not None:
self["weight"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
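# A minimal, hypothetical usage sketch (not part of the original file): label
# fonts are normally supplied through the figure API rather than by
# instantiating Labelfont directly, e.g.
#
# >>> import plotly.graph_objects as go
# >>> fig = go.Figure(go.Histogram2dContour(
# ...     x=[1, 2, 2, 3], y=[1, 1, 2, 3],
# ...     contours=dict(showlabels=True,
# ...                   labelfont=dict(family="Arial", size=12, color="white"))))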
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@histogram2dcontour@contours@[email protected]_END.py
|
{
"filename": "conf.py",
"repo_name": "samb8s/PsrPopPy",
"repo_path": "PsrPopPy_extracted/PsrPopPy-master/docs/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# PsrPopPy documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 22 16:36:12 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode', 'sphinx.ext.mathbase']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PsrPopPy'
copyright = u'2012, S Bates'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PsrPopPydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PsrPopPy.tex', u'PsrPopPy Documentation',
u'S Bates', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'psrpoppy', u'PsrPopPy Documentation',
[u'S Bates'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PsrPopPy', u'PsrPopPy Documentation',
u'S Bates', 'PsrPopPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
samb8sREPO_NAMEPsrPopPyPATH_START.@PsrPopPy_extracted@PsrPopPy-master@[email protected]@.PATH_END.py
|
{
"filename": "micro_lensing.py",
"repo_name": "lenstronomy/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/Cosmo/micro_lensing.py",
"type": "Python"
}
|
import numpy as np
from lenstronomy.Util import constants
# routines to conveniently compute micro-lensing events
def einstein_radius(mass, d_l, d_s):
"""Einstein radius for a given point mass and distances to lens and source.
:param mass: point source mass [M_sun]
:param d_l: distance to lens [pc]
:param d_s: distance to source [pc]
:return: Einstein radius [arc seconds]
"""
mass_kg = mass * constants.M_sun
dl_m = d_l * constants.pc
ds_m = d_s * constants.pc
# Einstein radius in radian
theta_e = np.sqrt(
4 * constants.G * mass_kg / constants.c**2 * (ds_m - dl_m) / (ds_m * dl_m)
)
theta_e /= constants.arcsec # arc seconds
return theta_e
def source_size(diameter, d_s):
"""
:param diameter: diameter of the source in units of the solar diameter
:param d_s: distance to the source in [pc]
:return: diameter in [arc seconds]
"""
diameter_m = diameter * constants.r_sun * 2 # diameter in [m]
diameter_arcsec = diameter_m / (d_s * constants.pc) # diameter in [radian]
diameter_arcsec /= constants.arcsec # diameter in [arc seconds]
return diameter_arcsec
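# A minimal usage sketch (the numbers below are illustrative, not from the
# original file): Einstein radius and angular source diameter, both in arc
# seconds, for a solar-mass, solar-sized lens/source pair with the lens
# halfway to a source at 8 kpc.
#
# >>> theta_e = einstein_radius(mass=1.0, d_l=4000.0, d_s=8000.0)
# >>> diameter = source_size(diameter=1.0, d_s=8000.0)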
|
lenstronomyREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@Cosmo@[email protected]_END.py
|
{
"filename": "lens_reconstruction.py",
"repo_name": "toshiyan/cmblensplus",
"repo_path": "cmblensplus_extracted/cmblensplus-master/example/flatlens/lens_reconstruction.py",
"type": "Python"
}
|
#!/usr/bin/env python
# coding: utf-8
# ### A simple lensing reconstruction in flat sky
# In[1]:
import numpy as np
import tqdm
import basic
import flatsky
import cmb
import binning
from matplotlib.pyplot import *
# In[2]:
# parameters
Tcmb = 2.72e6
lmax = 3000
rL = [100,2500]
oL = [2,3000]
nx, ny = 512, 512
D = np.array([nx,ny]) / 60.*np.pi/180.
bn = 50
mc = 10
#qlist = ['TT','TE','TB','EE','EB']
qlist = ['TT']
# In[3]:
# binned multipoles
bp, bc = binning.binned_ells(bn,oL[0],oL[1])
# In[4]:
# multipoles on grids
lx, ly, el, il = flatsky.utils.elarrays(nx,ny,D)
kl = el*(el+1.)/2.
# In[5]:
# load unlensed and lensed Cls
lcl = cmb.read_camb_cls('../data/lensedcls.dat',ftype='lens',output='array')[:,:lmax+1]
itt = np.zeros(lmax+1)
iee = np.zeros(lmax+1)
ibb = np.zeros(lmax+1)
itt[2:] = 1./lcl[0,2:]
iee[2:] = 1./lcl[1,2:]
ibb[2:] = 1./lcl[2,2:]
# In[6]:
# assign 1d cl on 2d grid
cltt = flatsky.utils.cl2c2d(nx,ny,D,2,lmax,lcl[0,:])
clee = flatsky.utils.cl2c2d(nx,ny,D,2,lmax,lcl[1,:])
clte = flatsky.utils.cl2c2d(nx,ny,D,2,lmax,lcl[3,:])
iltt = flatsky.utils.cl2c2d(nx,ny,D,2,lmax,itt)
ilee = flatsky.utils.cl2c2d(nx,ny,D,2,lmax,iee)
ilbb = flatsky.utils.cl2c2d(nx,ny,D,2,lmax,ibb)
# In[7]:
# compute analytic normalization with 2d filtering
Ag, Ac = {}, {}
Ag['TT'], Ac['TT'] = flatsky.norm_lens.qtt(nx,ny,D,rL,iltt,cltt,oL)
Ag['TE'], Ac['TE'] = flatsky.norm_lens.qte(nx,ny,D,rL,iltt,ilee,clte,oL)
Ag['TB'], Ac['TB'] = flatsky.norm_lens.qtb(nx,ny,D,iltt,ilbb,clte,rL,oL)
Ag['EE'], Ac['EE'] = flatsky.norm_lens.qee(nx,ny,D,ilee,clee,rL,oL)
Ag['EB'], Ac['EB'] = flatsky.norm_lens.qeb(nx,ny,D,ilee,ilbb,clee,rL,oL)
# In[8]:
# kappa binned spectrum
Alg = {q: flatsky.utils.c2d2bcl(nx,ny,D,kl**2*Ag[q],bn,oL) for q in qlist}
Alc = {q: flatsky.utils.c2d2bcl(nx,ny,D,kl**2*Ac[q],bn,oL) for q in qlist}
# In[9]:
# save
#np.savetxt('al_grad.dat',np.array((bc,Alg['TT'],Alg['TE'],Alg['TB'],Alg['EE'],Alg['EB'])).T,fmt='%8.6e')
#np.savetxt('al_curl.dat',np.array((bc,Alc['TT'],Alc['TE'],Alc['TB'],Alc['EE'],Alc['EB'])).T,fmt='%8.6e')
# loop over MC realizations
cks = {q: np.zeros((mc,bn)) for q in qlist}
for i in tqdm.tqdm(range(mc)):
# generate Fourier mode on 2d grids
tlm, elm = flatsky.utils.gauss2alm(nx,ny,D,2,lmax,lcl[0,:],lcl[3,:],lcl[1,:])
blm = flatsky.utils.gauss1alm(nx,ny,D,2,lmax,lcl[2,:])
#cls[i,0,:] = flatsky.utils.alm2bcl(bn,oL,nx,ny,D,tlm)
#cls[i,1,:] = flatsky.utils.alm2bcl(bn,oL,nx,ny,D,tlm,elm)
#cls[i,2,:] = flatsky.utils.alm2bcl(bn,oL,nx,ny,D,elm)
# filtering
tlm *= iltt
elm *= ilee
blm *= ilbb
# reconstruction
klm = {}
klm['TT'], clm = flatsky.rec_lens.qtt(nx,ny,D,rL,cltt,tlm,tlm,gtype='k')
klm['TE'], clm = flatsky.rec_lens.qte(nx,ny,D,rL,clte,tlm,elm,gtype='k')
klm['TB'], clm = flatsky.rec_lens.qtb(nx,ny,D,rL,clte,tlm,blm,gtype='k')
klm['EE'], clm = flatsky.rec_lens.qee(nx,ny,D,rL,clee,elm,elm,gtype='k')
klm['EB'], clm = flatsky.rec_lens.qeb(nx,ny,D,rL,clee,elm,blm,gtype='k')
for q in qlist:
klm[q] *= Ag[q]*kl**2
cks[q][i,:] = flatsky.utils.alm2bcl(bn,oL,nx,ny,D,klm[q])
# In[11]:
for q in qlist:
mcks = np.mean(cks[q],axis=0)
plot(bc,mcks,label='sim,'+q)
plot(bc,Alg[q],label='norm,'+q)
legend()
show()
clf()
|
toshiyanREPO_NAMEcmblensplusPATH_START.@cmblensplus_extracted@cmblensplus-master@example@flatlens@[email protected]_END.py
|
{
"filename": "graph.py",
"repo_name": "jiayindong/obliquity",
"repo_path": "obliquity_extracted/obliquity-main/src/scripts/graph.py",
"type": "Python"
}
|
import os
from pathlib import Path
os.environ["PATH"] += os.pathsep + str(Path.home() / "bin")
import paths
import sys
import subprocess
from matplotlib import rc
rc('font', **{'family':'sans-serif'})
rc('text', usetex=True)
rc('text.latex', preamble=r'\usepackage{physics}')
import daft
pgm = daft.PGM(observed_style="shaded",node_unit=1, node_ec='k',dpi=100,line_width=0.8)
# Hierarchical parameters.
pgm.add_node("beta", r"$\vb*{\beta}$", 2, 4.)
# Latent variable.
pgm.add_node("psi", r"$\psi_n$", 2, 3.)
pgm.add_node("theta", r"$\theta_n$", 3, 3.)
pgm.add_node("iorb", r"$i_{{\rm orb},n}$", 4, 3.)
pgm.add_node("lambda", r"$\lambda_n$", 2., 1.5)
pgm.add_node("istar", r"$i_{\star,n}$", 3, 1.5)
pgm.add_node("pstar", r"$\gamma_{\star,n}$", 4, 1.5)
# Observed variable.
pgm.add_node("obs_iorb", r"$\hat{i}_{{\rm orb}, n}$", 4, 2.2, observed=True)
pgm.add_node("obs_lambda", r"$\hat{\lambda}_n$", 2., 0.7, observed=True)
pgm.add_node("Obs", r"Obs$_{\star, n}$", 3, 0.7, observed=True, aspect=1., fontsize=8)
# Add edges
pgm.add_edge("beta", "psi")
pgm.add_edge("psi", "lambda")
pgm.add_edge("theta", "lambda")
pgm.add_edge("iorb", "lambda")
pgm.add_edge("psi", "istar")
pgm.add_edge("theta", "istar")
pgm.add_edge("iorb", "istar")
pgm.add_edge("iorb", "obs_iorb")
pgm.add_edge("lambda", "obs_lambda")
pgm.add_edge("pstar", "Obs", plot_params=dict(ls=(0, (2, 2)),head_width=0,head_length=0))
pgm.add_edge("pstar", "Obs", plot_params={'ls':''})
pgm.add_edge("istar", "Obs", plot_params=dict(ls=(0, (2, 2)),head_width=0,head_length=0))
pgm.add_edge("istar", "Obs", plot_params={'ls':''})
# And a plate.
pgm.add_plate([1.5, 0.2, 3, 3.4], label=r"$n = 1, \ldots, N$", shift=-0.1, fontsize=8)
# Render and save.
pgm.render();
pgm.savefig(paths.figures / "graph.pdf", bbox_inches="tight", dpi=600)
|
jiayindongREPO_NAMEobliquityPATH_START.@obliquity_extracted@obliquity-main@src@[email protected]@.PATH_END.py
|
{
"filename": "_error_x.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/scatter3d/_error_x.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ErrorX(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatter3d"
_path_str = "scatter3d.error_x"
_valid_props = {
"array",
"arrayminus",
"arrayminussrc",
"arraysrc",
"color",
"copy_zstyle",
"symmetric",
"thickness",
"traceref",
"tracerefminus",
"type",
"value",
"valueminus",
"visible",
"width",
}
# array
# -----
@property
def array(self):
"""
        Sets the data corresponding to the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
# arrayminus
# ----------
@property
def arrayminus(self):
"""
        Sets the data corresponding to the length of each error bar in
        the bottom (left) direction for vertical (horizontal) bars.
        Values are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
# arrayminussrc
# -------------
@property
def arrayminussrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
# arraysrc
# --------
@property
def arraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `array`.
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
# color
# -----
@property
def color(self):
"""
        Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# copy_zstyle
# -----------
@property
def copy_zstyle(self):
"""
The 'copy_zstyle' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["copy_zstyle"]
@copy_zstyle.setter
def copy_zstyle(self, val):
self["copy_zstyle"] = val
# symmetric
# ---------
@property
def symmetric(self):
"""
        Determines whether or not the error bars have the same length
        in both directions (top/bottom for vertical bars, left/right for
        horizontal bars).
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# traceref
# --------
@property
def traceref(self):
"""
        The 'traceref' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
# tracerefminus
# -------------
@property
def tracerefminus(self):
"""
        The 'tracerefminus' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
# type
# ----
@property
def type(self):
"""
Determines the rule used to generate the error bars. If
        "constant", the bar lengths are of a constant value. Set this
constant in `value`. If "percent", the bar lengths correspond
to a percentage of underlying data. Set this percentage in
`value`. If "sqrt", the bar lengths correspond to the square of
the underlying data. If "data", the bar lengths are set with
data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# value
# -----
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# valueminus
# ----------
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
array
            Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
            Sets the data corresponding to the length of each error
            bar in the bottom (left) direction for vertical
            (horizontal) bars. Values are plotted relative to the
            underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
`array`.
color
            Sets the stroke color of the error bars.
        copy_zstyle
        symmetric
            Determines whether or not the error bars have the same
            length in both directions (top/bottom for vertical bars,
            left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
            "constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
copy_zstyle=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs,
):
"""
Construct a new ErrorX object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.ErrorX`
array
            Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
            Sets the data corresponding to the length of each error
            bar in the bottom (left) direction for vertical
            (horizontal) bars. Values are plotted relative to the
            underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
`arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
`array`.
color
            Sets the stroke color of the error bars.
        copy_zstyle
        symmetric
            Determines whether or not the error bars have the same
            length in both directions (top/bottom for vertical bars,
            left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
            "constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorX
"""
super(ErrorX, self).__init__("error_x")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.ErrorX
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.ErrorX`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("array", None)
_v = array if array is not None else _v
if _v is not None:
self["array"] = _v
_v = arg.pop("arrayminus", None)
_v = arrayminus if arrayminus is not None else _v
if _v is not None:
self["arrayminus"] = _v
_v = arg.pop("arrayminussrc", None)
_v = arrayminussrc if arrayminussrc is not None else _v
if _v is not None:
self["arrayminussrc"] = _v
_v = arg.pop("arraysrc", None)
_v = arraysrc if arraysrc is not None else _v
if _v is not None:
self["arraysrc"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("copy_zstyle", None)
_v = copy_zstyle if copy_zstyle is not None else _v
if _v is not None:
self["copy_zstyle"] = _v
_v = arg.pop("symmetric", None)
_v = symmetric if symmetric is not None else _v
if _v is not None:
self["symmetric"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("traceref", None)
_v = traceref if traceref is not None else _v
if _v is not None:
self["traceref"] = _v
_v = arg.pop("tracerefminus", None)
_v = tracerefminus if tracerefminus is not None else _v
if _v is not None:
self["tracerefminus"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
_v = arg.pop("valueminus", None)
_v = valueminus if valueminus is not None else _v
if _v is not None:
self["valueminus"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
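# A minimal, hypothetical usage sketch (not part of the original file): error
# bars are normally configured through the trace constructor rather than by
# instantiating ErrorX directly, e.g.
#
# >>> import plotly.graph_objects as go
# >>> fig = go.Figure(go.Scatter3d(
# ...     x=[1, 2, 3], y=[4, 5, 6], z=[7, 8, 9],
# ...     error_x=dict(type="data", array=[0.1, 0.2, 0.1], visible=True)))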
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@scatter3d@[email protected]_END.py
|
{
"filename": "environment.py",
"repo_name": "lucabaldini/ixpeobssim",
"repo_path": "ixpeobssim_extracted/ixpeobssim-main/ixpeobssim/utils/environment.py",
"type": "Python"
}
|
# Copyright (C) 2022, the ixpeobssim team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Versioning of various third-party packages.
"""
import sys
import numpy
import scipy
import astropy
import matplotlib
import regions
import skyfield
from ixpeobssim.utils.logging_ import logger
from ixpeobssim.utils.packaging_ import retrieve_version, xPackageVersion
# pylint: disable=unused-import
# Since the Python wrapper to xspec is not always trivial to set up, we put
# this guard, here, to signal downstream whether pyxspec is indeed installed
# or not.
#
# You can check whether the Python bindings for xspec are installed by
#
# >>> from ixpeobssim.utils.environment import PYXSPEC_INSTALLED
# >>> if PYXSPEC_INSTALLED:
# >>> import ixpeobssim.evt.xspec_ as xspec_
#
# If PYXSPEC_INSTALLED is false you should refrain from touching anything in
# ixpeobssim.evt.xspec_, or even trying to import anything from in there.
try:
import xspec
PYXSPEC_INSTALLED = True
except ImportError:
    logger.warning('PyXSPEC is not installed, you will not be able to use it.')
PYXSPEC_INSTALLED = False
# Retrieve the Python version.
PYTHON_VERSION = xPackageVersion(sys.version_info.major, sys.version_info.minor,
sys.version_info.micro)
# Retrieve the version numbers for some of the most important third-party packages.
NUMPY_VERSION = retrieve_version(numpy)
SCIPY_VERSION = retrieve_version(scipy)
ASTROPY_VERSION = retrieve_version(astropy)
MATPLOTLIB_VERSION = retrieve_version(matplotlib)
SKYFIELD_VERSION = retrieve_version(skyfield)
REGIONS_VERSION = retrieve_version(regions)
|
lucabaldiniREPO_NAMEixpeobssimPATH_START.@ixpeobssim_extracted@ixpeobssim-main@ixpeobssim@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "barentsen/dave",
"repo_path": "dave_extracted/dave-master/vetting/__init__.py",
"type": "Python"
}
|
barentsenREPO_NAMEdavePATH_START.@dave_extracted@dave-master@vetting@[email protected]_END.py
|
|
{
"filename": "test_json.py",
"repo_name": "grahambell/pymoc",
"repo_path": "pymoc_extracted/pymoc-main/test/test_json.py",
"type": "Python"
}
|
# Copyright (C) 2014 Science and Technology Facilities Council.
# Copyright (C) 2017-2024 East Asian Observatory.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from io import BytesIO
from unittest import TestCase
from pymoc import MOC
from pymoc.io.json import read_moc_json, write_moc_json
class JSONTestCase(TestCase):
def test_json(self):
test_json = b'{"1":[1,2,4],"2":[12,13,14,21,23,25]}'
in_ = BytesIO(test_json)
moc = MOC()
read_moc_json(moc, file=in_)
self.assertEqual(moc.order, 2)
self.assertEqual(moc[0], frozenset())
self.assertEqual(moc[1], frozenset([1, 2, 4]))
self.assertEqual(moc[2], frozenset([12, 13, 14, 21, 23, 25]))
out = BytesIO()
write_moc_json(moc, file=out)
self.assertEqual(out.getvalue(), test_json)
def test_json_trailing(self):
# Check MOC 1.1 addition of trailing section to
# signify the MOCORDER.
in_ = BytesIO(b'{"13":[5,6,7],"14":[]}')
moc = MOC()
read_moc_json(moc, file=in_)
self.assertEqual(moc[13], frozenset([5, 6, 7]))
self.assertEqual(moc[14], frozenset())
def test_json_large(self):
orig = MOC()
orig.add(29, [
3458700000000000000, 3458700000000000007,
3458700000000000008, 3458700000000000009,
])
out = BytesIO()
write_moc_json(orig, file=out)
json = out.getvalue()
self.assertEqual(
json, b'{"29":[3458700000000000000,3458700000000000007,'
b'3458700000000000008,3458700000000000009]}')
copy = MOC()
in_ = BytesIO(json)
read_moc_json(copy, file=in_)
self.assertEqual(copy.order, 29)
self.assertEqual(copy.cells, 4)
self.assertEqual(copy[29], frozenset([
3458700000000000000, 3458700000000000007,
3458700000000000008, 3458700000000000009]))
|
grahambellREPO_NAMEpymocPATH_START.@pymoc_extracted@pymoc-main@test@[email protected]_END.py
|
{
"filename": "configurations_test.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/tests/configurations_test.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from absl.testing import absltest
from flax.configurations import bool_flag, config
class MyTestCase(absltest.TestCase):
def setUp(self):
super().setUp()
self.enter_context(mock.patch.object(config, '_values', {}))
self._flag = bool_flag('test', default=False, help='Just a test flag.')
def test_duplicate_flag(self):
with self.assertRaisesRegex(RuntimeError, 'already defined'):
bool_flag(self._flag.name, default=False, help='Another test flag.')
def test_default(self):
self.assertFalse(self._flag.value)
self.assertFalse(config.test)
def test_typed_update(self):
config.update(self._flag, True)
self.assertTrue(self._flag.value)
self.assertTrue(config.test)
def test_untyped_update(self):
config.update(self._flag.name, True)
self.assertTrue(self._flag.value)
self.assertTrue(config.test)
def test_update_unknown_flag(self):
with self.assertRaisesRegex(LookupError, 'Unrecognized config option'):
config.update('unknown', True)
if __name__ == '__main__':
absltest.main()
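# A minimal usage sketch based on the API exercised above (the flag name is
# hypothetical):
#
# >>> from flax.configurations import bool_flag, config
# >>> my_flag = bool_flag('my_feature', default=False, help='Enable my feature.')
# >>> config.update(my_flag, True)
# >>> assert config.my_feature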
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@tests@[email protected]_END.py
|
{
"filename": "TT_unbinned.py",
"repo_name": "ggalloni/cobaya",
"repo_path": "cobaya_extracted/cobaya-master/cobaya/likelihoods/planck_2018_highl_plik/TT_unbinned.py",
"type": "Python"
}
|
from cobaya.likelihoods.base_classes import Planck2018Clik
class TT_unbinned(Planck2018Clik):
r"""
Unbinned high-$\ell$ temperature-only \textsc{plik} likelihood of Planck's 2018 data
release \cite{Aghanim:2019ame}.
"""
pass
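# A minimal, hypothetical usage sketch (not part of the original file): in a
# cobaya input dictionary this likelihood is requested by its qualified name,
# e.g.
#
# >>> info = {"likelihood": {"planck_2018_highl_plik.TT_unbinned": None}}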
|
ggalloniREPO_NAMEcobayaPATH_START.@cobaya_extracted@cobaya-master@cobaya@likelihoods@planck_2018_highl_plik@[email protected]_END.py
|
{
"filename": "header.py",
"repo_name": "n-claes/legolas",
"repo_path": "legolas_extracted/legolas-master/post_processing/pylbo/utilities/datfiles/header.py",
"type": "Python"
}
|
from __future__ import annotations
from typing import Any, BinaryIO
import numpy as np
from pylbo._version import VersionHandler
from pylbo.utilities.datfiles.istream_reader import (
SIZE_COMPLEX,
SIZE_DOUBLE,
SIZE_INT,
read_boolean_from_istream,
read_complex_from_istream,
read_float_from_istream,
read_int_from_istream,
read_string_from_istream,
)
from pylbo.utilities.toolbox import transform_to_numpy
from pylbo.visualisation.utils import ensure_attr_set
class LegolasHeader:
"""Baseclass for a Legolas header"""
def __init__(self, istream: BinaryIO, version: VersionHandler):
self.legolas_version = version
self.data = {}
self._str_len = None
self._str_len_array = None
self._set_str_lengths(istream)
[ensure_attr_set(self, attr) for attr in ("_str_len", "_str_len_array")]
self.read_header_data(istream)
self.read_data_offsets(istream)
def __str__(self) -> str:
keys_to_avoid = ["ef_written_flags", "ef_written_idxs", "offsets"]
return "".join(
[
f"{key}: {self.data.get(key)}\n"
for key in self.data.keys()
if key not in keys_to_avoid
]
)
def __getitem__(self, key: str) -> Any:
return self.data[key]
def _set_str_lengths(self, istream: BinaryIO) -> None:
self._str_len, self._str_len_array = read_int_from_istream(istream, amount=2)
def get(self, key: str, default: Any = None) -> Any:
return self.data.get(key, default)
def read_header_data(self, istream: BinaryIO) -> None:
data = {}
data.update(self._read_physics_type_info(istream))
data.update(self._read_grid_info(istream))
data.update(self._read_io_info(istream))
data.update(self._read_solver_info(istream))
data.update(self._read_equilibrium_info(istream))
data.update(self._read_units_info(istream))
data.update(self._read_physics_info(istream))
data.update(self._read_parameters(istream))
data.update(self._read_equilibrium_names(istream))
self.data.update(data)
def read_data_offsets(self, istream: BinaryIO) -> None:
offsets = {}
# eigenvalues
nb_eigenvals = read_int_from_istream(istream)
self.data["nb_eigenvalues"] = nb_eigenvals
offsets["eigenvalues"] = istream.tell()
bytesize = nb_eigenvals * SIZE_COMPLEX
istream.seek(istream.tell() + bytesize)
# grid
offsets["grid"] = istream.tell()
bytesize = self.data["gridpoints"] * SIZE_DOUBLE
istream.seek(istream.tell() + bytesize)
# Gaussian grid
offsets["grid_gauss"] = istream.tell()
bytesize = self.data["gauss_gridpoints"] * SIZE_DOUBLE
istream.seek(istream.tell() + bytesize)
# equilibrium arrays
offsets["equilibrium_arrays"] = istream.tell()
bytesize = (
self.data["gauss_gridpoints"]
* len(self.data["equilibrium_names"])
* SIZE_DOUBLE
)
istream.seek(istream.tell() + bytesize)
offsets.update(self._get_eigenfunction_offsets(istream))
offsets.update(self._get_derived_eigenfunction_offsets(istream))
offsets.update(self._get_eigenvector_offsets(istream))
offsets.update(self._get_residual_offsets(istream))
offsets.update(self._get_matrices_offsets(istream))
self.data["offsets"] = offsets
def _read_physics_type_info(self, istream: BinaryIO) -> dict:
data = {}
data["nb_eqs"] = read_int_from_istream(istream)
len_type = read_int_from_istream(istream)
data["physics_type"] = read_string_from_istream(istream, length=len_type)
len_name, size_vector = read_int_from_istream(istream, amount=2)
data["state_vector"] = np.asarray(
read_string_from_istream(istream, length=len_name, amount=size_vector),
dtype=str,
)
data["dims"] = {}
for key in ("integralblock", "subblock", "quadblock", "matrix"):
data["dims"][f"dim_{key}"] = read_int_from_istream(istream)
return data
def _read_grid_info(self, istream: BinaryIO) -> dict:
data = {}
len_geom = read_int_from_istream(istream)
data["geometry"] = read_string_from_istream(istream, length=len_geom)
for key in ("", "gauss_", "ef_"):
data[f"{key}gridpoints"] = read_int_from_istream(istream)
data["gauss"] = {}
n_gauss = read_int_from_istream(istream)
data["gauss"]["number_of_nodes"] = n_gauss
data["gauss"]["nodes"] = np.asarray(
read_float_from_istream(istream, amount=n_gauss), dtype=float
)
data["gauss"]["weights"] = np.asarray(
read_float_from_istream(istream, amount=n_gauss), dtype=float
)
data["x_start"], data["x_end"] = read_float_from_istream(istream, amount=2)
return data
def _read_io_info(self, istream: BinaryIO) -> dict:
data = {}
data["has_matrices"] = read_boolean_from_istream(istream)
data["has_eigenvectors"] = read_boolean_from_istream(istream)
data["has_residuals"] = read_boolean_from_istream(istream)
data["has_efs"] = read_boolean_from_istream(istream)
data["has_derived_efs"] = read_boolean_from_istream(istream)
data["ef_subset_used"] = read_boolean_from_istream(istream)
data["ef_subset_radius"] = read_float_from_istream(istream)
data["ef_subset_center"] = read_complex_from_istream(istream)
return data
def _read_solver_info(self, istream: BinaryIO) -> dict:
data = {}
len_solver = read_int_from_istream(istream)
data["solver"] = read_string_from_istream(istream, length=len_solver)
arnoldi_data = {}
len_mode = read_int_from_istream(istream)
arnoldi_data["arpack_mode"] = read_string_from_istream(istream, length=len_mode)
arnoldi_data["number_of_eigenvalues"] = read_int_from_istream(istream)
len_which = read_int_from_istream(istream)
arnoldi_data["which_eigenvalues"] = read_string_from_istream(
istream, length=len_which
)
arnoldi_data["ncv"] = read_int_from_istream(istream)
data["maxiter"] = read_int_from_istream(istream)
data["sigma"] = read_complex_from_istream(istream)
data["tolerance"] = read_float_from_istream(istream)
data["arnoldi"] = arnoldi_data if data["solver"] == "arnoldi" else None
return data
def _read_equilibrium_info(self, istream: BinaryIO) -> dict:
data = {}
len_equil_type = read_int_from_istream(istream)
data["eq_type"] = read_string_from_istream(istream, length=len_equil_type)
len_boundary_type = read_int_from_istream(istream)
data["boundary_type"] = read_string_from_istream(
istream, length=len_boundary_type
)
return data
def _read_units_info(self, istream: BinaryIO) -> dict:
n_units = read_int_from_istream(istream)
units = {"cgs": read_boolean_from_istream(istream)}
for _ in range(n_units):
len_name = read_int_from_istream(istream)
name = read_string_from_istream(istream, length=len_name)
value = read_float_from_istream(istream)
units[name] = value
return {"units": units}
def _read_physics_info(self, istream: BinaryIO) -> dict:
data = {}
data["gamma"] = read_float_from_istream(istream)
data["is_incompressible"] = read_boolean_from_istream(istream)
physics = {}
physics["flow"] = read_boolean_from_istream(istream)
physics["cooling"] = read_boolean_from_istream(istream)
len_curve = read_int_from_istream(istream)
physics["cooling_curve"] = read_string_from_istream(istream, length=len_curve)
physics["interpolation_points"] = read_int_from_istream(istream)
physics["external_gravity"] = read_boolean_from_istream(istream)
physics["resistivity"] = read_boolean_from_istream(istream)
physics["has_fixed_resistivity"] = read_boolean_from_istream(istream)
physics["viscosity"] = read_boolean_from_istream(istream)
physics["has_viscous_heating"] = read_boolean_from_istream(istream)
physics["conduction"] = read_boolean_from_istream(istream)
physics["has_parallel_conduction"] = read_boolean_from_istream(istream)
physics["has_fixed_tc_para"] = read_boolean_from_istream(istream)
physics["has_perpendicular_conduction"] = read_boolean_from_istream(istream)
physics["has_fixed_tc_perp"] = read_boolean_from_istream(istream)
physics["Hall"] = read_boolean_from_istream(istream)
physics["Hall_uses_substitution"] = read_boolean_from_istream(istream)
physics["has_electron_inertia"] = read_boolean_from_istream(istream)
data["physics"] = physics
return data
def _read_parameters(self, istream: BinaryIO) -> dict:
parameters = {}
nb_params, len_name = read_int_from_istream(istream, amount=2)
for _ in range(nb_params):
name = read_string_from_istream(istream, length=len_name)
parameters[name] = read_float_from_istream(istream)
parameters = {k: v for k, v in parameters.items() if not np.isnan(v)}
return {"parameters": parameters}
def _read_equilibrium_names(self, istream: BinaryIO) -> dict:
nb_names, len_name = read_int_from_istream(istream, amount=2)
self.data["has_background"] = nb_names > 0
names = (
read_string_from_istream(istream, length=len_name, amount=nb_names)
if self.data["has_background"]
else []
)
# due to a typo older datfiles contain 'db03' instead of 'dB03'
names = [name.replace("db03", "dB03") for name in names]
return {"equilibrium_names": names}
def _get_eigenfunction_offsets(self, istream: BinaryIO) -> dict:
if not self.data["has_efs"]:
return {}
self.data["ef_names"] = self.data["state_vector"]
# eigenfunction grid
ef_gridsize = read_int_from_istream(istream)
offsets = self._get_ef_grid_offset(ef_gridsize, istream)
# flags
self._get_ef_written_flags(istream)
# eigenfunction block offsets
offsets.update(self._get_ef_block_offsets(istream))
return offsets
def _get_ef_grid_offset(self, ef_gridsize: int, istream: BinaryIO) -> dict:
offsets = {"ef_grid": istream.tell()}
bytesize = ef_gridsize * SIZE_DOUBLE
istream.seek(istream.tell() + bytesize)
return offsets
def _get_ef_written_flags(self, istream: BinaryIO) -> None:
# eigenfunction flags
ef_flags_size = read_int_from_istream(istream)
self.data["ef_written_flags"] = np.asarray(
read_int_from_istream(istream, amount=ef_flags_size), dtype=bool
)
ef_idxs_size = read_int_from_istream(istream)
self.data["ef_written_idxs"] = transform_to_numpy(
np.asarray(read_int_from_istream(istream, amount=ef_idxs_size), dtype=int)
- 1
) # -1 corrects for Fortran 1-based indexing
# do a sanity check
assert np.all(
self.data["ef_written_idxs"] == np.where(self.data["ef_written_flags"])[0]
)
def _get_ef_block_offsets(self, istream: BinaryIO) -> dict:
# eigenfunction offsets
offsets = {"ef_arrays": istream.tell()}
# bytesize of a single eigenfunction block (all efs for 1 state vector variable)
bytesize_block = (
self.data["ef_gridpoints"]
* len(self.data["ef_written_idxs"])
* SIZE_COMPLEX
)
offsets["ef_block_bytesize"] = bytesize_block
offsets["ef_bytesize"] = self.data["ef_gridpoints"] * SIZE_COMPLEX
istream.seek(istream.tell() + bytesize_block * self.data["nb_eqs"])
return offsets
def _get_derived_eigenfunction_offsets(self, istream: BinaryIO) -> dict:
if not self.data["has_derived_efs"]:
return {}
nb_names, size_names = read_int_from_istream(istream, amount=2)
return self._get_derived_ef_names_and_offsets(nb_names, size_names, istream)
def _get_derived_ef_names_and_offsets(
self, nb_names, size_names, istream: BinaryIO
) -> dict:
self.data["derived_ef_names"] = np.asarray(
read_string_from_istream(istream, length=size_names, amount=nb_names),
dtype=str,
)
offsets = {"derived_ef_arrays": istream.tell()}
bytesize_block = (
self.data["ef_gridpoints"]
* len(self.data["ef_written_idxs"])
* nb_names
* SIZE_COMPLEX
)
istream.seek(istream.tell() + bytesize_block)
return offsets
def _get_eigenvector_offsets(self, istream: BinaryIO) -> dict:
if not self.data["has_eigenvectors"]:
return {}
len_eigvecs, nb_eigvecs = read_int_from_istream(istream, amount=2)
offsets = {
"eigenvectors": istream.tell(),
"eigenvector_length": len_eigvecs,
"nb_eigenvectors": nb_eigvecs,
}
bytesize = len_eigvecs * nb_eigvecs * SIZE_COMPLEX
istream.seek(istream.tell() + bytesize)
return offsets
def _get_residual_offsets(self, istream: BinaryIO) -> dict:
if not self.data["has_residuals"]:
return {}
nb_residuals = read_int_from_istream(istream)
offsets = {"residuals": istream.tell(), "nb_residuals": nb_residuals}
bytesize = nb_residuals * SIZE_DOUBLE
istream.seek(istream.tell() + bytesize)
return offsets
def _get_matrices_offsets(self, istream: BinaryIO) -> dict:
if not self.data["has_matrices"]:
return {}
nonzero_B_elements, nonzero_A_elements = read_int_from_istream(
istream, amount=2
)
# B matrix is written as (row, column, real value)
byte_size = (2 * SIZE_INT + SIZE_DOUBLE) * nonzero_B_elements
offsets = {"matrix_B": istream.tell()}
istream.seek(istream.tell() + byte_size)
self.data["nonzero_B_elements"] = nonzero_B_elements
# A matrix is written as (row, column, complex value)
offsets["matrix_A"] = istream.tell()
self.data["nonzero_A_elements"] = nonzero_A_elements
return offsets
|
n-claesREPO_NAMElegolasPATH_START.@legolas_extracted@legolas-master@post_processing@pylbo@utilities@[email protected]@.PATH_END.py
|
{
"filename": "texture.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/skimage/feature/texture.py",
"type": "Python"
}
|
"""
Methods to characterize image textures.
"""
import warnings
import numpy as np
from .._shared.utils import check_nD
from ..color import gray2rgb
from ..util import img_as_float
from ._texture import _glcm_loop, _local_binary_pattern, _multiblock_lbp
def graycomatrix(image, distances, angles, levels=None, symmetric=False, normed=False):
"""Calculate the gray-level co-occurrence matrix.
A gray level co-occurrence matrix is a histogram of co-occurring
grayscale values at a given offset over an image.
.. versionchanged:: 0.19
           `greycomatrix` was renamed to `graycomatrix` in 0.19.
Parameters
----------
image : array_like
Integer typed input image. Only positive valued images are supported.
If type is other than uint8, the argument `levels` needs to be set.
distances : array_like
List of pixel pair distance offsets.
angles : array_like
List of pixel pair angles in radians.
levels : int, optional
The input image should contain integers in [0, `levels`-1],
where levels indicate the number of gray-levels counted
(typically 256 for an 8-bit image). This argument is required for
16-bit images or higher and is typically the maximum of the image.
As the output matrix is at least `levels` x `levels`, it might
be preferable to use binning of the input image rather than
large values for `levels`.
symmetric : bool, optional
If True, the output matrix `P[:, :, d, theta]` is symmetric. This
is accomplished by ignoring the order of value pairs, so both
(i, j) and (j, i) are accumulated when (i, j) is encountered
for a given offset. The default is False.
normed : bool, optional
If True, normalize each matrix `P[:, :, d, theta]` by dividing
by the total number of accumulated co-occurrences for the given
offset. The elements of the resulting matrix sum to 1. The
default is False.
Returns
-------
P : 4-D ndarray
The gray-level co-occurrence histogram. The value
`P[i,j,d,theta]` is the number of times that gray-level `j`
occurs at a distance `d` and at an angle `theta` from
gray-level `i`. If `normed` is `False`, the output is of
type uint32, otherwise it is float64. The dimensions are:
levels x levels x number of distances x number of angles.
References
----------
.. [1] M. Hall-Beyer, 2007. GLCM Texture: A Tutorial
https://prism.ucalgary.ca/handle/1880/51900
DOI:`10.11575/PRISM/33280`
.. [2] R.M. Haralick, K. Shanmugam, and I. Dinstein, "Textural features for
image classification", IEEE Transactions on Systems, Man, and
Cybernetics, vol. SMC-3, no. 6, pp. 610-621, Nov. 1973.
:DOI:`10.1109/TSMC.1973.4309314`
.. [3] M. Nadler and E.P. Smith, Pattern Recognition Engineering,
Wiley-Interscience, 1993.
.. [4] Wikipedia, https://en.wikipedia.org/wiki/Co-occurrence_matrix
Examples
--------
Compute 4 GLCMs using 1-pixel distance and 4 different angles. For example,
an angle of 0 radians refers to the neighboring pixel to the right;
pi/4 radians to the top-right diagonal neighbor; pi/2 radians to the pixel
above, and so forth.
>>> image = np.array([[0, 0, 1, 1],
... [0, 0, 1, 1],
... [0, 2, 2, 2],
... [2, 2, 3, 3]], dtype=np.uint8)
>>> result = graycomatrix(image, [1], [0, np.pi/4, np.pi/2, 3*np.pi/4],
... levels=4)
>>> result[:, :, 0, 0]
array([[2, 2, 1, 0],
[0, 2, 0, 0],
[0, 0, 3, 1],
[0, 0, 0, 1]], dtype=uint32)
>>> result[:, :, 0, 1]
array([[1, 1, 3, 0],
[0, 1, 1, 0],
[0, 0, 0, 2],
[0, 0, 0, 0]], dtype=uint32)
>>> result[:, :, 0, 2]
array([[3, 0, 2, 0],
[0, 2, 2, 0],
[0, 0, 1, 2],
[0, 0, 0, 0]], dtype=uint32)
>>> result[:, :, 0, 3]
array([[2, 0, 0, 0],
[1, 1, 2, 0],
[0, 0, 2, 1],
[0, 0, 0, 0]], dtype=uint32)
"""
check_nD(image, 2)
check_nD(distances, 1, 'distances')
check_nD(angles, 1, 'angles')
image = np.ascontiguousarray(image)
image_max = image.max()
if np.issubdtype(image.dtype, np.floating):
raise ValueError(
"Float images are not supported by graycomatrix. "
"Convert the image to an unsigned integer type."
)
# for image type > 8bit, levels must be set.
if image.dtype not in (np.uint8, np.int8) and levels is None:
raise ValueError(
"The levels argument is required for data types "
"other than uint8. The resulting matrix will be at "
"least levels ** 2 in size."
)
if np.issubdtype(image.dtype, np.signedinteger) and np.any(image < 0):
raise ValueError("Negative-valued images are not supported.")
if levels is None:
levels = 256
if image_max >= levels:
raise ValueError(
"The maximum grayscale value in the image should be "
"smaller than the number of levels."
)
distances = np.ascontiguousarray(distances, dtype=np.float64)
angles = np.ascontiguousarray(angles, dtype=np.float64)
P = np.zeros(
(levels, levels, len(distances), len(angles)), dtype=np.uint32, order='C'
)
    # count co-occurrences
    _glcm_loop(image, distances, angles, levels, P)
    # make each GLCM symmetric
if symmetric:
Pt = np.transpose(P, (1, 0, 2, 3))
P = P + Pt
# normalize each GLCM
if normed:
P = P.astype(np.float64)
glcm_sums = np.sum(P, axis=(0, 1), keepdims=True)
glcm_sums[glcm_sums == 0] = 1
P /= glcm_sums
return P
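# --- Hedged usage sketch (added for illustration; not part of scikit-image) ---
# With `symmetric=True` and `normed=True` every slice P[:, :, d, a] of the
# returned GLCM sums to 1 and can be read as a joint probability of
# co-occurring gray levels.
def _demo_graycomatrix_normed():
    image = np.array([[0, 0, 1, 1],
                      [0, 0, 1, 1],
                      [0, 2, 2, 2],
                      [2, 2, 3, 3]], dtype=np.uint8)
    glcm = graycomatrix(image, distances=[1], angles=[0, np.pi / 2],
                        levels=4, symmetric=True, normed=True)
    # each distance/angle slice is a normalized joint histogram
    assert np.allclose(glcm.sum(axis=(0, 1)), 1.0)
    return glcm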
def graycoprops(P, prop='contrast'):
"""Calculate texture properties of a GLCM.
Compute a feature of a gray level co-occurrence matrix to serve as
a compact summary of the matrix. The properties are computed as
follows:
- 'contrast': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}(i-j)^2`
- 'dissimilarity': :math:`\\sum_{i,j=0}^{levels-1}P_{i,j}|i-j|`
- 'homogeneity': :math:`\\sum_{i,j=0}^{levels-1}\\frac{P_{i,j}}{1+(i-j)^2}`
- 'ASM': :math:`\\sum_{i,j=0}^{levels-1} P_{i,j}^2`
- 'energy': :math:`\\sqrt{ASM}`
- 'correlation':
.. math:: \\sum_{i,j=0}^{levels-1} P_{i,j}\\left[\\frac{(i-\\mu_i) \\
(j-\\mu_j)}{\\sqrt{(\\sigma_i^2)(\\sigma_j^2)}}\\right]
- 'mean': :math:`\\sum_{i=0}^{levels-1} i*P_{i}`
- 'variance': :math:`\\sum_{i=0}^{levels-1} P_{i}*(i-mean)^2`
- 'std': :math:`\\sqrt{variance}`
- 'entropy': :math:`\\sum_{i,j=0}^{levels-1} -P_{i,j}*log(P_{i,j})`
Each GLCM is normalized to have a sum of 1 before the computation of
texture properties.
.. versionchanged:: 0.19
`greycoprops` was renamed to `graycoprops` in 0.19.
Parameters
----------
P : ndarray
Input array. `P` is the gray-level co-occurrence histogram
for which to compute the specified property. The value
`P[i,j,d,theta]` is the number of times that gray-level j
occurs at a distance d and at an angle theta from
gray-level i.
prop : {'contrast', 'dissimilarity', 'homogeneity', 'energy', \
'correlation', 'ASM', 'mean', 'variance', 'std', 'entropy'}, optional
The property of the GLCM to compute. The default is 'contrast'.
Returns
-------
results : 2-D ndarray
2-dimensional array. `results[d, a]` is the property 'prop' for
the d'th distance and the a'th angle.
References
----------
.. [1] M. Hall-Beyer, 2007. GLCM Texture: A Tutorial v. 1.0 through 3.0.
The GLCM Tutorial Home Page,
https://prism.ucalgary.ca/handle/1880/51900
DOI:`10.11575/PRISM/33280`
Examples
--------
Compute the contrast for GLCMs with distances [1, 2] and angles
[0 degrees, 90 degrees]
>>> image = np.array([[0, 0, 1, 1],
... [0, 0, 1, 1],
... [0, 2, 2, 2],
... [2, 2, 3, 3]], dtype=np.uint8)
>>> g = graycomatrix(image, [1, 2], [0, np.pi/2], levels=4,
... normed=True, symmetric=True)
>>> contrast = graycoprops(g, 'contrast')
>>> contrast
array([[0.58333333, 1. ],
[1.25 , 2.75 ]])
"""
def glcm_mean():
I = np.arange(num_level).reshape((num_level, 1, 1, 1))
mean = np.sum(I * P, axis=(0, 1))
return I, mean
check_nD(P, 4, 'P')
(num_level, num_level2, num_dist, num_angle) = P.shape
if num_level != num_level2:
raise ValueError('num_level and num_level2 must be equal.')
if num_dist <= 0:
raise ValueError('num_dist must be positive.')
if num_angle <= 0:
raise ValueError('num_angle must be positive.')
# normalize each GLCM
P = P.astype(np.float64)
glcm_sums = np.sum(P, axis=(0, 1), keepdims=True)
glcm_sums[glcm_sums == 0] = 1
P /= glcm_sums
# create weights for specified property
I, J = np.ogrid[0:num_level, 0:num_level]
if prop == 'contrast':
weights = (I - J) ** 2
elif prop == 'dissimilarity':
weights = np.abs(I - J)
elif prop == 'homogeneity':
weights = 1.0 / (1.0 + (I - J) ** 2)
elif prop in ['ASM', 'energy', 'correlation', 'entropy', 'variance', 'mean', 'std']:
pass
else:
raise ValueError(f'{prop} is an invalid property')
# compute property for each GLCM
if prop == 'energy':
asm = np.sum(P**2, axis=(0, 1))
results = np.sqrt(asm)
elif prop == 'ASM':
results = np.sum(P**2, axis=(0, 1))
elif prop == 'mean':
_, results = glcm_mean()
elif prop == 'variance':
I, mean = glcm_mean()
results = np.sum(P * ((I - mean) ** 2), axis=(0, 1))
elif prop == 'std':
I, mean = glcm_mean()
var = np.sum(P * ((I - mean) ** 2), axis=(0, 1))
results = np.sqrt(var)
elif prop == 'entropy':
ln = -np.log(P, where=(P != 0), out=np.zeros_like(P))
results = np.sum(P * ln, axis=(0, 1))
elif prop == 'correlation':
results = np.zeros((num_dist, num_angle), dtype=np.float64)
I = np.array(range(num_level)).reshape((num_level, 1, 1, 1))
J = np.array(range(num_level)).reshape((1, num_level, 1, 1))
diff_i = I - np.sum(I * P, axis=(0, 1))
diff_j = J - np.sum(J * P, axis=(0, 1))
std_i = np.sqrt(np.sum(P * (diff_i) ** 2, axis=(0, 1)))
std_j = np.sqrt(np.sum(P * (diff_j) ** 2, axis=(0, 1)))
cov = np.sum(P * (diff_i * diff_j), axis=(0, 1))
# handle the special case of standard deviations near zero
mask_0 = std_i < 1e-15
mask_0[std_j < 1e-15] = True
results[mask_0] = 1
# handle the standard case
mask_1 = ~mask_0
results[mask_1] = cov[mask_1] / (std_i[mask_1] * std_j[mask_1])
elif prop in ['contrast', 'dissimilarity', 'homogeneity']:
weights = weights.reshape((num_level, num_level, 1, 1))
results = np.sum(P * weights, axis=(0, 1))
return results
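# --- Hedged usage sketch (added for illustration; not part of scikit-image) ---
# Summarize one GLCM with several scalar texture features; each call to
# graycoprops returns an array of shape (num_distances, num_angles).
def _demo_graycoprops():
    image = np.array([[0, 0, 1, 1],
                      [0, 0, 1, 1],
                      [0, 2, 2, 2],
                      [2, 2, 3, 3]], dtype=np.uint8)
    glcm = graycomatrix(image, distances=[1, 2], angles=[0, np.pi / 2],
                        levels=4, symmetric=True, normed=True)
    return {prop: graycoprops(glcm, prop)
            for prop in ('contrast', 'dissimilarity', 'homogeneity',
                         'energy', 'correlation')}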
def local_binary_pattern(image, P, R, method='default'):
"""Compute the local binary patterns (LBP) of an image.
LBP is a visual descriptor often used in texture classification.
Parameters
----------
image : (M, N) array
2D grayscale image.
P : int
Number of circularly symmetric neighbor set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
method : str {'default', 'ror', 'uniform', 'nri_uniform', 'var'}, optional
Method to determine the pattern:
``default``
Original local binary pattern which is grayscale invariant but not
rotation invariant.
``ror``
Extension of default pattern which is grayscale invariant and
rotation invariant.
``uniform``
Uniform pattern which is grayscale invariant and rotation
invariant, offering finer quantization of the angular space.
For details, see [1]_.
``nri_uniform``
Variant of uniform pattern which is grayscale invariant but not
rotation invariant. For details, see [2]_ and [3]_.
``var``
Variance of local image texture (related to contrast)
which is rotation invariant but not grayscale invariant.
Returns
-------
output : (M, N) array
LBP image.
References
----------
.. [1] T. Ojala, M. Pietikainen, T. Maenpaa, "Multiresolution gray-scale
and rotation invariant texture classification with local binary
patterns", IEEE Transactions on Pattern Analysis and Machine
Intelligence, vol. 24, no. 7, pp. 971-987, July 2002
:DOI:`10.1109/TPAMI.2002.1017623`
.. [2] T. Ahonen, A. Hadid and M. Pietikainen. "Face recognition with
local binary patterns", in Proc. Eighth European Conf. Computer
Vision, Prague, Czech Republic, May 11-14, 2004, pp. 469-481, 2004.
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851
:DOI:`10.1007/978-3-540-24670-1_36`
.. [3] T. Ahonen, A. Hadid and M. Pietikainen, "Face Description with
Local Binary Patterns: Application to Face Recognition",
IEEE Transactions on Pattern Analysis and Machine Intelligence,
vol. 28, no. 12, pp. 2037-2041, Dec. 2006
:DOI:`10.1109/TPAMI.2006.244`
"""
check_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V'),
}
if np.issubdtype(image.dtype, np.floating):
warnings.warn(
"Applying `local_binary_pattern` to floating-point images may "
"give unexpected results when small numerical differences between "
"adjacent pixels are present. It is recommended to use this "
"function with images of integer dtype."
)
image = np.ascontiguousarray(image, dtype=np.float64)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
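# --- Hedged usage sketch (added for illustration; not part of scikit-image) ---
# A common way to use LBP codes is as a fixed-length, normalized histogram.
# For method='uniform' the codes lie in [0, P + 1], hence P + 2 bins.
# Assumes `image` is a 2D integer-typed array.
def _demo_lbp_histogram(image, P=8, R=1.0):
    codes = local_binary_pattern(image, P, R, method='uniform')
    hist, _ = np.histogram(codes, bins=np.arange(P + 3), density=True)
    return hist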
def multiblock_lbp(int_image, r, c, width, height):
"""Multi-block local binary pattern (MB-LBP).
The features are calculated similarly to local binary patterns (LBPs),
(See :py:meth:`local_binary_pattern`) except that summed blocks are
used instead of individual pixel values.
MB-LBP is an extension of LBP that can be computed on multiple scales
in constant time using the integral image. Nine equally-sized rectangles
are used to compute a feature. For each rectangle, the sum of the pixel
intensities is computed. Comparisons of these sums to that of the central
rectangle determine the feature, similarly to LBP.
Parameters
----------
int_image : (N, M) array
Integral image.
r : int
Row-coordinate of top left corner of a rectangle containing feature.
c : int
Column-coordinate of top left corner of a rectangle containing feature.
width : int
Width of one of the 9 equal rectangles that will be used to compute
a feature.
height : int
Height of one of the 9 equal rectangles that will be used to compute
a feature.
Returns
-------
output : int
8-bit MB-LBP feature descriptor.
References
----------
.. [1] L. Zhang, R. Chu, S. Xiang, S. Liao, S.Z. Li. "Face Detection Based
on Multi-Block LBP Representation", In Proceedings: Advances in
Biometrics, International Conference, ICB 2007, Seoul, Korea.
http://www.cbsr.ia.ac.cn/users/scliao/papers/Zhang-ICB07-MBLBP.pdf
:DOI:`10.1007/978-3-540-74549-5_2`
"""
int_image = np.ascontiguousarray(int_image, dtype=np.float32)
lbp_code = _multiblock_lbp(int_image, r, c, width, height)
return lbp_code
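# --- Hedged usage sketch (added for illustration; not part of scikit-image) ---
# multiblock_lbp expects an *integral* image; one can be built with
# skimage.transform.integral_image. (r, c) is the top-left corner of the
# 3x3 block grid and each block is `width` x `height` pixels.
def _demo_multiblock_lbp(image, r=0, c=0, width=3, height=3):
    from ..transform import integral_image
    int_image = integral_image(image)
    return multiblock_lbp(int_image, r, c, width, height)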
def draw_multiblock_lbp(
image,
r,
c,
width,
height,
lbp_code=0,
color_greater_block=(1, 1, 1),
color_less_block=(0, 0.69, 0.96),
alpha=0.5,
):
"""Multi-block local binary pattern visualization.
Blocks with higher sums are colored with alpha-blended white rectangles,
whereas blocks with lower sums are colored alpha-blended cyan. Colors
and the `alpha` parameter can be changed.
Parameters
----------
image : ndarray of float or uint
Image on which to visualize the pattern.
r : int
Row-coordinate of top left corner of a rectangle containing feature.
c : int
Column-coordinate of top left corner of a rectangle containing feature.
width : int
Width of one of 9 equal rectangles that will be used to compute
a feature.
height : int
Height of one of 9 equal rectangles that will be used to compute
a feature.
lbp_code : int
The descriptor of feature to visualize. If not provided, the
descriptor with 0 value will be used.
color_greater_block : tuple of 3 floats
Floats specifying the color for the block that has greater
intensity value. They should be in the range [0, 1].
Corresponding values define (R, G, B) values. Default value
is white (1, 1, 1).
    color_less_block : tuple of 3 floats
        Floats specifying the color for the block that has less intensity
value. They should be in the range [0, 1]. Corresponding values define
(R, G, B) values. Default value is cyan (0, 0.69, 0.96).
alpha : float
Value in the range [0, 1] that specifies opacity of visualization.
        0 - fully transparent, 1 - opaque.
Returns
-------
output : ndarray of float
Image with MB-LBP visualization.
References
----------
.. [1] L. Zhang, R. Chu, S. Xiang, S. Liao, S.Z. Li. "Face Detection Based
on Multi-Block LBP Representation", In Proceedings: Advances in
Biometrics, International Conference, ICB 2007, Seoul, Korea.
http://www.cbsr.ia.ac.cn/users/scliao/papers/Zhang-ICB07-MBLBP.pdf
:DOI:`10.1007/978-3-540-74549-5_2`
"""
# Default colors for regions.
# White is for the blocks that are brighter.
    # Cyan is for the blocks that have less intensity.
color_greater_block = np.asarray(color_greater_block, dtype=np.float64)
color_less_block = np.asarray(color_less_block, dtype=np.float64)
# Copy array to avoid the changes to the original one.
output = np.copy(image)
# As the visualization uses RGB color we need 3 bands.
if len(image.shape) < 3:
output = gray2rgb(image)
# Colors are specified in floats.
output = img_as_float(output)
# Offsets of neighbor rectangles relative to central one.
# It has order starting from top left and going clockwise.
neighbor_rect_offsets = (
(-1, -1),
(-1, 0),
(-1, 1),
(0, 1),
(1, 1),
(1, 0),
(1, -1),
(0, -1),
)
# Pre-multiply the offsets with width and height.
neighbor_rect_offsets = np.array(neighbor_rect_offsets)
neighbor_rect_offsets[:, 0] *= height
neighbor_rect_offsets[:, 1] *= width
# Top-left coordinates of central rectangle.
central_rect_r = r + height
central_rect_c = c + width
for element_num, offset in enumerate(neighbor_rect_offsets):
offset_r, offset_c = offset
curr_r = central_rect_r + offset_r
curr_c = central_rect_c + offset_c
has_greater_value = lbp_code & (1 << (7 - element_num))
# Mix-in the visualization colors.
if has_greater_value:
new_value = (1 - alpha) * output[
curr_r : curr_r + height, curr_c : curr_c + width
] + alpha * color_greater_block
output[curr_r : curr_r + height, curr_c : curr_c + width] = new_value
else:
new_value = (1 - alpha) * output[
curr_r : curr_r + height, curr_c : curr_c + width
] + alpha * color_less_block
output[curr_r : curr_r + height, curr_c : curr_c + width] = new_value
return output
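# --- Hedged usage sketch (added for illustration; not part of scikit-image) ---
# Compute an MB-LBP code from an integral image and overlay its visualization
# on the original image.
def _demo_draw_multiblock_lbp(image, r=0, c=0, width=8, height=8):
    from ..transform import integral_image
    int_image = integral_image(image)
    code = multiblock_lbp(int_image, r, c, width, height)
    return draw_multiblock_lbp(image, r, c, width, height, lbp_code=code)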
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@skimage@[email protected]@.PATH_END.py
|
{
"filename": "_tickwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/parcoords/line/colorbar/_tickwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="tickwidth", parent_name="parcoords.line.colorbar", **kwargs
):
super(TickwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@parcoords@line@colorbar@[email protected]_END.py
|
{
"filename": "wshow.py",
"repo_name": "msiebert1/UCSC_spectral_pipeline",
"repo_path": "UCSC_spectral_pipeline_extracted/UCSC_spectral_pipeline-master/spectral_reduction/tmath/wombat/wshow.py",
"type": "Python"
}
|
def wshow():
    """Raise the current matplotlib figure window above all other windows.

    Briefly toggles the Tk window's '-topmost' attribute, so this requires a
    Tk-based matplotlib backend (e.g. TkAgg).
    """
    import matplotlib.pyplot as plt
    # plt.ioff()
    wm = plt.get_current_fig_manager()
    # raise the window, then release '-topmost' so it does not stay pinned
    blah = wm.window.attributes('-topmost', 1)
    blah = wm.window.attributes('-topmost', 0)
    # plt.ion()
    return
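# --- Hedged usage sketch (added for illustration; not part of the pipeline) ---
# Call wshow() right after drawing so the interactive figure window pops to
# the front. Only works with a Tk-based matplotlib backend.
def _demo_wshow():
    import matplotlib.pyplot as plt
    plt.plot([0, 1], [0, 1])
    wshow()
    plt.show()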
|
msiebert1REPO_NAMEUCSC_spectral_pipelinePATH_START.@UCSC_spectral_pipeline_extracted@UCSC_spectral_pipeline-master@spectral_reduction@tmath@[email protected]@.PATH_END.py
|
{
"filename": "WriteRatesToCSV_EM.py",
"repo_name": "FloorBroekgaarden/Double-Compact-Object-Mergers",
"repo_path": "Double-Compact-Object-Mergers_extracted/Double-Compact-Object-Mergers-main/otherCode/WriteRatesToCSV_EM.py",
"type": "Python"
}
|
# from __future__ import print_function
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import h5py as h5
import time
import sys
import copy
#Quick fudge to make import from ../Scripts work
import sys
sys.path.append('../Scripts')
import string
# import ClassCosmicIntegrator as CI #Given settings and redshifts returns rates (2D arrays) Loads the data
# import coencodeVarious as CV
from PostProcessingScripts import *
from ClassFormationChannels_5mainchannels import *
import pandas as pd
import seaborn as sns  # used below (color palettes in writeToRatesFile_GW190814)
from astropy import units as u
from astropy import constants as const
# MSSFRnameslist = []
# MSSFRnameslist.append('000') # add phenomenological
# for ind_SFR, SFR in enumerate(SFRs):
# ind_x = ind_SFR+1
# for ind_GSMF, GSMF in enumerate(GSMFs):
# ind_y = ind_GSMF + 1
# for ind_MZ, MZ in enumerate(MZs):
# ind_z = ind_MZ +1
# MSSFRnameslist.append('%s%s%s'%(ind_x, ind_y, ind_z))
# MSSFRnameslistCSV = ['.0.0.0', '.1.1.1', '.1.1.2', '.1.1.3', '.1.2.1', '.1.2.2', '.1.2.3', '.1.3.1', '.1.3.2', '.1.3.3', '.2.1.1', '.2.1.2', '.2.1.3', '.2.2.1', '.2.2.2', '.2.2.3', '.2.3.1', '.2.3.2', '.2.3.3', '.3.1.1', '.3.1.2', '.3.1.3', '.3.2.1', '.3.2.2', '.3.2.3', '.3.3.1', '.3.3.2', '.3.3.3']
# OLD:
GSMFs = [1, 2, 3]
SFRs = [1, 2, 3]
MZs = [1, 2, 3]
MSSFRnameslist = []
MSSFRnameslist.append('000')  # add phenomenological
for ind_GSMF, GSMF in enumerate(GSMFs):
    ind_y = ind_GSMF + 1
    for ind_MZ, MZ in enumerate(MZs):
        ind_z = ind_MZ + 1
        for ind_SFR, SFR in enumerate(SFRs):
            ind_x = ind_SFR + 1
            MSSFRnameslist.append('%s%s%s' % (ind_x, ind_y, ind_z))
MSSFRnameslistCSV = []
MSSFRnameslistCSV.append('.0.0.0') # add phenomenological
for ind_GSMF, GSMF in enumerate(GSMFs):
ind_y = ind_GSMF + 1
for ind_MZ, MZ in enumerate(MZs):
ind_z = ind_MZ +1
for ind_SFR, SFR in enumerate(SFRs):
ind_x = ind_SFR+1
MSSFRnameslistCSV.append('.%s.%s.%s'%(ind_x, ind_y, ind_z))
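# Quick sanity check (added for illustration): one phenomenological entry plus
# all 3x3x3 SFR/GSMF/MZ combinations, with the CSV labels dotting the same
# x.y.z indices.
assert len(MSSFRnameslist) == 1 + len(SFRs) * len(GSMFs) * len(MZs)
assert len(MSSFRnameslist) == len(MSSFRnameslistCSV)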
# def calculateRisco(m_bhtemp, Xefftemp):
# # this is prograde orbit
# # see also https://duetosymmetry.com/tool/kerr-isco-calculator/
# # everything in cgs
# c = 2.99792458E10 #[cm s-1]
# G = 6.67259E-8
# Msun = 1.99E33 # gr
# Rsun = 6.96E10 # cm
# factorFront = ((G*m_bhtemp)/c**2) #m_bhtemp #s
# Z1 = 1 + (1 - Xefftemp**2)**(1/3) * ((1 + Xefftemp)**(1/3) + (1 - Xefftemp)**(1/3) )
# Z2 = np.sqrt((3* Xefftemp**2 + Z1**2))
# Risco = factorFront * (3 + Z2 - np.sqrt((3-Z1)*(3+Z1 +2*Z2)))
# return Risco
# def calculateEjectedMassMerger(m_ns, r_ns, m_bh, Xeff ):
# # from 1807.00011, Eq 4
# # returns M_rem in solar masses
# # input r and m in solar masses and R sun. Xeff in [0,1] (symmetric)
# # RNS in km
# # everything in cgs
# c = 2.99792458E10 #[cm s-1]
# G = 6.67259E-8
# Msun = 1.99E33 # gr
# Rsun = 6.96E10 # cm
# # convert to cgs
# r_ns = r_ns*0.1*10**6 #np.asarray([1.E6]* len(m_ns)) # to cm
# m_ns_cgs = Msun * m_ns
# m_bh_cgs = Msun * m_bh
# alpha, beta, gamma, delta = 0.406, 0.139, 0.255, 1.761
# C_NS = G * m_ns_cgs / (r_ns * c**2)
# R_isco = calculateRisco(m_bh_cgs, Xeff)
# R_isco_norm = R_isco / (m_bh_cgs * (G/c**2))
# Q = m_bh_cgs / m_ns_cgs
# eta = Q / (1 + Q)**2
# FirstTerm = alpha*(1 - 2*C_NS) / eta**(1/3)
# SecondTerm = beta* R_isco_norm * C_NS / eta
# A = np.asarray(FirstTerm - SecondTerm + gamma)
# B = np.zeros_like(m_ns_cgs)
# Mrem_model = np.maximum(A,B)**(delta)
# Mrem_model /= Msun # in solar masses
# # and the true M remnant mass (not normalized and in solar masses =)
# Mrem_solar = Mrem_model * m_ns_cgs
# return Mrem_solar # in [Msun]
# def QinBHspinmodel(separationPreSN2, M1, M2, maskNSBH):
# # returns spins from Qin et al + 2018 model
# # start with all zeseparationDCOFormationarationDCOFormationBH spins
# BHspins = np.zeros_like(separationPreSN2)
# # now add spins for NS-BH following Qin et al 2018:
# # this is based on the separation prior to the second SN
# PeriodPreSN2 = convert_a_to_P_circular(separation=separationPreSN2*u.Rsun, M1=M1*u.Msun, M2=M2*u.Msun)
# PeriodPreSN2 = PeriodPreSN2.to(u.d).value # in days
# # only do non zero spins
# # first mask super tight NSBH that will get spin 1
# maskNSBHChi1 = (np.log10(PeriodPreSN2) < -0.3) & (maskNSBH ==1)
# BHspins[maskNSBHChi1] = np.ones(np.sum(maskNSBHChi1)) # fill with ones
# # print('#total, = ', len(maskNSBHChi1))
# # print('# with Chi = 1, = ', np.sum(maskNSBHChi1))
# # now the variable spin
# maskNSBHChi_var = (np.log10(PeriodPreSN2) > -0.3) & (np.log10(PeriodPreSN2) < 0.3) &(maskNSBH ==1)
# m_, c_ = -5./3, 0.5 # from Qin + 2018
# spins_var = m_ * np.log10(PeriodPreSN2[maskNSBHChi_var]) + c_
# BHspins[maskNSBHChi_var] = spins_var
# # print('# with Chi var = ', np.sum(maskNSBHChi_var))
# return BHspins
# Rns = 11.5 # in km
# r_ns = np.asarray([Rns]*len(m1bh))
# for ind_chi, chi_bh in enumerate(listXbh):
# Mej = calculateEjectedMassMerger(m_ns=1.4, r_ns=Rns, m_bh=10, Xeff=0)
# print(Mej)
# NSmasses = np.linspace(1,2.5,10000)
# Niter = 1000
# # BH_chi = 0
# Arrays_minNSmassEjecta = [] # _Rns11chi0 _Rns13chi0 Rns11chi1 Rns13chi1
# for ind_chi, chi in enumerate([0.0, 0.5]):
# BH_chi = chi * np.ones_like(NSmasses)
# for ind_Rns, NSradii in enumerate([11.5,13.0]):
# Rns = NSradii
# # BH_chi=chi
# minNSmassEjecta = []
# for ind_bh, BHmass in enumerate(np.linspace(2.5, 20, Niter)):
# BHmasses = BHmass*np.ones_like(NSmasses)
# NS_radii = Rns * np.ones_like(NSmasses)
# Mej = calculateEjectedMassMerger(m_ns=NSmasses, r_ns=NS_radii, m_bh=BHmasses, Xeff=BH_chi)
# maskEjecta = (Mej > 0)
# # if there are solutions with Mej >0, append the first solution (with min BH mass)
# if len(NSmasses[maskEjecta]):
# minNSmassEjecta.append(NSmasses[maskEjecta][-1])
# # print(minNSmassEjecta[-1])
# else:
# minNSmassEjecta.append(-1) # just append a non physical value that should not show up on plot
# print('R_ns, chi =', Rns, BH_chi )
# Arrays_minNSmassEjecta.append(minNSmassEjecta)
####################################
#path to the data
def writeToRatesFile_Mejecta(BPSmodelName='Z'):
DCOtype='BHNS'
if DCOtype=='BHNS':
DCOname='BHNS'
elif DCOtype=='BBH':
DCOname='BHBH'
elif DCOtype=='BNS':
DCOname='NSNS'
# path for files
path_dir = '/Volumes/Andromeda/DATA/AllDCO_bugfix/'
nModels=15
BPSnameslist = list(string.ascii_uppercase)[0:nModels]
modelDirList = ['fiducial', 'massTransferEfficiencyFixed_0_25', 'massTransferEfficiencyFixed_0_5', 'massTransferEfficiencyFixed_0_75', \
'unstableCaseBB', 'alpha0_5', 'alpha2_0', 'fiducial', 'rapid', 'maxNSmass2_0', 'maxNSmass3_0', 'noPISN', 'ccSNkick_100km_s', 'ccSNkick_30km_s', 'noBHkick' ]
alphabetDirDict = {BPSnameslist[i]: modelDirList[i] for i in range(len(BPSnameslist))}
#####
path_ = path_dir
path_ = path_ + alphabetDirDict[BPSmodelName] +'/'
path = path_ + 'COMPASCompactOutput_'+ DCOtype +'_' + BPSmodelName + '.h5'
fdata = h5.File(path)
# obtain BH and NS masses
M1 = fdata['doubleCompactObjects']['M1'][...].squeeze()
M2 = fdata['doubleCompactObjects']['M2'][...].squeeze()
MBH, MNS = obtainM1BHandM2BHassymetric(M1, M2)
# del M1
# del M2
seedsSN = fdata['supernovae']['randomSeed'][...].squeeze()
# get only SN seeds for DCOs
# maskSNdco = np.in1d(seedsSN, Data.seeds[mask][maskZ])
whichSN = fdata['supernovae']['whichStar'][...].squeeze()
whichSN1 = whichSN[::2] # get whichStar for first SN
separationPreSN = fdata['supernovae']['separationBefore'][...].squeeze()
separationPreSN2 = separationPreSN[1::2] # in Rsun.
maskNSBH = ((whichSN1==2) & (M1>M2) ) | ((whichSN1==1) & (M1<M2) )
# get intrinsic weights
fparam_intrinsic = 'weights_intrinsic'
# get detected weights
fparam_detected = 'weights_detected'
####################################################
######### ITERATE OVER MSSFR MODELS #############
####################################################
intrinsicRates = np.zeros((6, len(MSSFRnameslist)))
detectedRates = np.zeros((6, len(MSSFRnameslist)))
namesEMlist = []
for ind_mssfr, mssfr in enumerate(MSSFRnameslist):
# print('mssfr =',ind_mssfr)
weightheader = 'w_' + mssfr
w_int = fdata[fparam_intrinsic][weightheader][...].squeeze()
w_det = fdata[fparam_detected][weightheader][...].squeeze()
iii=0
# labelMej = []
# for ind_chi, chi in enumerate([0.0, .5, 'Qin']):
# if chi=='Qin':
# # Qin 2018 spin model
# BH_chi = QinBHspinmodel(separationPreSN2, M1, M2, maskNSBH)
# else:
# BH_chi = chi * np.ones_like(MNS)
# for ind_Rns, NSradii in enumerate([11.5,13.0]):
# Rns = NSradii
# NS_radii = Rns * np.ones_like(MNS)
# Mej = calculateEjectedMassMerger(m_ns=MNS, r_ns=NS_radii, m_bh=MBH, Xeff=BH_chi)
# maskEjecta = (Mej > 0)
# rate_Z[ii]+= np.sum(weightsALL[maskEjecta])
# labelMej.append('chi = ' + str(chi) + ' Rns = ' + str(NSradii) +' km')
# ii+=1
# needed for Qin spin model
for ind_chi, chi in enumerate([0.0, .5, 'Qin']):
if chi=='Qin':
# Qin 2018 spin model
BH_chi = QinBHspinmodel(separationPreSN2, M1, M2, maskNSBH)
else:
BH_chi = chi * np.ones_like(w_int)
for ind_Rns, NSradii in enumerate([11.5,13.0]):
Rns = NSradii
if ind_mssfr ==0:
stringg = 'Rns_'+ str(NSradii) + 'km_' + 'spinBH_' + str(chi)
namesEMlist.append(stringg)
NS_radii = Rns * np.ones_like(w_int)
Mej = calculateEjectedMassMerger(m_ns=MNS, r_ns=NS_radii, m_bh=MBH, Xeff=BH_chi)
mask_EM = (Mej>0)
intrinsicRates[iii][ind_mssfr] = np.sum(w_int[mask_EM])
detectedRates[iii][ind_mssfr] = np.sum(w_det[mask_EM])
iii+=1
# rates0 = df[name0]
for iii in range(6):
# print(iii)
# print(shape(intrinsicRates))
df = pd.read_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + namesEMlist[iii] + '.csv', index_col=0)
namez0 = BPSmodelName + ' intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs = BPSmodelName + ' observed (design LVK) [yr^{-1}]'
df[namez0] = intrinsicRates[iii]
df[nameObs] = detectedRates[iii]
df.to_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + namesEMlist[iii] + '.csv')
fdata.close()
return
def writeToRatesFile_GENERAL(BPSmodelName='Z', DCOtype='BHNS'):
# DCOtype='BHNS'
if DCOtype=='BHNS':
DCOname='BHNS'
elif DCOtype=='BBH':
DCOname='BHBH'
elif DCOtype=='BNS':
DCOname='NSNS'
# path for files
path_dir = '/Volumes/Andromeda/DATA/AllDCO_bugfix/'
nModels=15
BPSnameslist = list(string.ascii_uppercase)[0:nModels]
modelDirList = ['fiducial', 'massTransferEfficiencyFixed_0_25', 'massTransferEfficiencyFixed_0_5', 'massTransferEfficiencyFixed_0_75', \
'unstableCaseBB', 'alpha0_5', 'alpha2_0', 'fiducial', 'rapid', 'maxNSmass2_0', 'maxNSmass3_0', 'noPISN', 'ccSNkick_100km_s', 'ccSNkick_30km_s', 'noBHkick' ]
alphabetDirDict = {BPSnameslist[i]: modelDirList[i] for i in range(len(BPSnameslist))}
#####
path_ = path_dir
path_ = path_ + alphabetDirDict[BPSmodelName] +'/'
path = path_ + 'COMPASCompactOutput_'+ DCOtype +'_' + BPSmodelName + '.h5'
# read in data
fdata = h5.File(path)
# # obtain BH and NS masses
# M1 = fdata['doubleCompactObjects']['M1'][...].squeeze()
# M2 = fdata['doubleCompactObjects']['M2'][...].squeeze()
# MBH, MNS = obtainM1BHandM2BHassymetric(M1, M2)
# del M1
# del M2
# get intrinsic weights
fparam_intrinsic = 'weights_intrinsic'
# get detected weights
fparam_detected = 'weights_detected'
####################################################
######### ITERATE OVER MSSFR MODELS #############
####################################################
intrinsicRates = np.zeros(len(MSSFRnameslist))
detectedRates = np.zeros(len(MSSFRnameslist))
namesEMlist = []
print(MSSFRnameslist)
for ind_mssfr, mssfr in enumerate(MSSFRnameslist):
# print('mssfr =',ind_mssfr)
weightheader = 'w_' + mssfr
print(ind_mssfr, weightheader)
w_int = fdata[fparam_intrinsic][weightheader][...].squeeze()
w_det = fdata[fparam_detected][weightheader][...].squeeze()
intrinsicRates[ind_mssfr] = np.sum(w_int)
detectedRates[ind_mssfr] = np.sum(w_det)
stringgg = 'AllDCOsimulation'
df = pd.read_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv', index_col=0)
namez0 = BPSmodelName + ' intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs = BPSmodelName + ' observed (design LVK) [yr^{-1}]'
df[namez0] = intrinsicRates
df[nameObs] = detectedRates
df.to_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv')
fdata.close()
return
def writeToRatesFile_FormationChannels(BPSmodelName='Z', DCOtype='BHNS'):
if DCOtype=='BHNS':
DCOname='BHNS'
elif DCOtype=='BBH':
DCOname='BHBH'
elif DCOtype=='BNS':
DCOname='NSNS'
# path for files
path_dir = '/Volumes/Andromeda/DATA/AllDCO_bugfix/'
nModels=15
BPSnameslist = list(string.ascii_uppercase)[0:nModels]
modelDirList = ['fiducial', 'massTransferEfficiencyFixed_0_25', 'massTransferEfficiencyFixed_0_5', 'massTransferEfficiencyFixed_0_75', \
'unstableCaseBB', 'alpha0_5', 'alpha2_0', 'fiducial', 'rapid', 'maxNSmass2_0', 'maxNSmass3_0', 'noPISN', 'ccSNkick_100km_s', 'ccSNkick_30km_s', 'noBHkick' ]
alphabetDirDict = {BPSnameslist[i]: modelDirList[i] for i in range(len(BPSnameslist))}
#####
path_ = path_dir
path_ = path_ + alphabetDirDict[BPSmodelName] +'/'
path = path_ + 'COMPASCompactOutput_'+ DCOtype +'_' + BPSmodelName + '.h5'
# read in data
fdata = h5.File(path)
# set optimistic true if that is the variation (H)
OPTIMISTIC=False
if BPSmodelName=='H':
OPTIMISTIC=True
# get formation channel Seeds!
seedsPercentageClassic, seedsPercentageOnlyStableMT = returnSeedsPercentageClassicAndOnlyStableMT(pathCOMPASOutput=path,\
types=DCOtype, withinHubbleTime=True, optimistic=OPTIMISTIC, \
binaryFraction=1)
seedsClassic, percentageClassic = seedsPercentageClassic
seedsOnlyStableMT, percentageOnlyStableMT = seedsPercentageOnlyStableMT
seedsDoubleCE, percentageDoubleCE = returnSeedsPercentageDoubleCoreCEE(pathCOMPASOutput=path,\
types=DCOtype, withinHubbleTime=True, optimistic=OPTIMISTIC, \
binaryFraction=1)
seedsSingleCE, percentageSingleCE = returnSeedsPercentageSingleCoreCEE(pathCOMPASOutput=path,\
types=DCOtype, withinHubbleTime=True, optimistic=OPTIMISTIC, \
binaryFraction=1)
seedschannels = [seedsClassic, seedsOnlyStableMT, seedsSingleCE, seedsDoubleCE]
seedsOther, percentageOther = returnSeedsPercentageOther(pathCOMPASOutput=path,\
types=DCOtype, withinHubbleTime=True, optimistic=OPTIMISTIC, \
binaryFraction=1, channelsSeedsList=seedschannels)
dictChannelsBHNS = { 'classic':seedsClassic, \
'immediate CE':seedsSingleCE,\
'stable B no CEE':seedsOnlyStableMT, \
r'double-core CE':seedsDoubleCE, \
'other':seedsOther\
}
dictPercentages = { 'classic':percentageClassic, \
'immediate CE':percentageSingleCE,\
'stable B no CEE':percentageOnlyStableMT, \
r'double-core CE':percentageDoubleCE, \
'other':percentageOther\
}
# # obtain BH and NS masses
# M1 = fdata['doubleCompactObjects']['M1'][...].squeeze()
# M2 = fdata['doubleCompactObjects']['M2'][...].squeeze()
# MBH, MNS = obtainM1BHandM2BHassymetric(M1, M2)
# del M1
# del M2
# get intrinsic weights
fparam_intrinsic = 'weights_intrinsic'
# get detected weights
fparam_detected = 'weights_detected'
####################################################
######### ITERATE OVER MSSFR MODELS #############
####################################################
intrinsicRates = np.zeros(len(MSSFRnameslist))
detectedRates = np.zeros(len(MSSFRnameslist))
namesEMlist = []
intrinsicRates_I = np.zeros(len(MSSFRnameslist))
detectedRates_I = np.zeros(len(MSSFRnameslist))
intrinsicRates_II = np.zeros(len(MSSFRnameslist))
detectedRates_II = np.zeros(len(MSSFRnameslist))
intrinsicRates_III = np.zeros(len(MSSFRnameslist))
detectedRates_III = np.zeros(len(MSSFRnameslist))
intrinsicRates_IV = np.zeros(len(MSSFRnameslist))
detectedRates_IV = np.zeros(len(MSSFRnameslist))
intrinsicRates_V = np.zeros(len(MSSFRnameslist))
detectedRates_V = np.zeros(len(MSSFRnameslist))
DCOSeeds = fdata['doubleCompactObjects']['seed'][...].squeeze()
for ind_mssfr, mssfr in enumerate(MSSFRnameslist):
# print('mssfr =',ind_mssfr, 'mssfr= ', mssfr)
weightheader = 'w_' + mssfr
# print(ind_mssfr, weightheader)
w_int = fdata[fparam_intrinsic][weightheader][...].squeeze()
w_det = fdata[fparam_detected][weightheader][...].squeeze()
intrinsicRates[ind_mssfr] = np.sum(w_int)
detectedRates[ind_mssfr] = np.sum(w_det)
for nrC, Channel in enumerate(dictChannelsBHNSList):
# #Get the seeds that relate to sorted indices
seedsInterest = dictChannelsBHNS[Channel]
mask_C = np.in1d(DCOSeeds, np.array(seedsInterest))
if Channel=='classic':
intrinsicRates_I[ind_mssfr] = np.sum(w_int[mask_C])
detectedRates_I[ind_mssfr] = np.sum(w_det[mask_C])
elif Channel=='stable B no CEE':
intrinsicRates_II[ind_mssfr] = np.sum(w_int[mask_C])
detectedRates_II[ind_mssfr] = np.sum(w_det[mask_C])
elif Channel=='immediate CE':
intrinsicRates_III[ind_mssfr] = np.sum(w_int[mask_C])
detectedRates_III[ind_mssfr] = np.sum(w_det[mask_C])
elif Channel=='double-core CE':
intrinsicRates_IV[ind_mssfr] = np.sum(w_int[mask_C])
detectedRates_IV[ind_mssfr] = np.sum(w_det[mask_C])
elif Channel=='other':
intrinsicRates_V[ind_mssfr] = np.sum(w_int[mask_C])
detectedRates_V[ind_mssfr] = np.sum(w_det[mask_C])
stringgg = 'AllDCOsimulation_formation_channels'
df = pd.read_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv', index_col=0)
namez0 = BPSmodelName + 'All intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs = BPSmodelName + 'All observed (design LVK) [yr^{-1}]'
namez0_I = BPSmodelName + 'channel I intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs_I = BPSmodelName + 'channel I observed (design LVK) [yr^{-1}]'
namez0_II = BPSmodelName + 'channel II intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs_II = BPSmodelName + 'channel II observed (design LVK) [yr^{-1}]'
namez0_III = BPSmodelName + 'channel III intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs_III = BPSmodelName + 'channel III observed (design LVK) [yr^{-1}]'
namez0_IV = BPSmodelName + 'channel IV intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs_IV = BPSmodelName + 'channel IV observed (design LVK) [yr^{-1}]'
namez0_V = BPSmodelName + 'channel V intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs_V = BPSmodelName + 'channel V observed (design LVK) [yr^{-1}]'
df[namez0] = intrinsicRates
df[nameObs] = detectedRates
df[namez0_I] = intrinsicRates_I
df[nameObs_I] = detectedRates_I
df[namez0_II] = intrinsicRates_II
df[nameObs_II] = detectedRates_II
df[namez0_III] = intrinsicRates_III
df[nameObs_III] = detectedRates_III
df[namez0_IV] = intrinsicRates_IV
df[nameObs_IV] = detectedRates_IV
df[namez0_V] = intrinsicRates_V
df[nameObs_V] = detectedRates_V
df.to_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv')
fdata.close()
return
def writeToRatesFile_NSBH(BPSmodelName='Z'):
"""writes NS-BH rate to CSV file for all models"""
    DCOtype = 'BHNS'
    DCOname = 'BHNS'
# path for files
path_dir = '/Volumes/Andromeda/DATA/AllDCO_bugfix/'
nModels=15
BPSnameslist = list(string.ascii_uppercase)[0:nModels]
modelDirList = ['fiducial', 'massTransferEfficiencyFixed_0_25', 'massTransferEfficiencyFixed_0_5', 'massTransferEfficiencyFixed_0_75', \
'unstableCaseBB', 'alpha0_5', 'alpha2_0', 'fiducial', 'rapid', 'maxNSmass2_0', 'maxNSmass3_0', 'noPISN', 'ccSNkick_100km_s', 'ccSNkick_30km_s', 'noBHkick' ]
alphabetDirDict = {BPSnameslist[i]: modelDirList[i] for i in range(len(BPSnameslist))}
path_ = path_dir
path_ = path_ + alphabetDirDict[BPSmodelName] +'/'
path = path_ + 'COMPASCompactOutput_'+ DCOtype +'_' + BPSmodelName + '.h5'
fdata = h5.File(path)
# obtain BH and NS masses
M1 = fdata['doubleCompactObjects']['M1'][...].squeeze()
M2 = fdata['doubleCompactObjects']['M2'][...].squeeze()
MBH, MNS = obtainM1BHandM2BHassymetric(M1, M2)
whichSN = fdata['supernovae']['whichStar'][...].squeeze()[::2] # get whichStar for first SN
maskNSBH = ((whichSN==2) & (M1>M2) ) | ((whichSN==1) & (M1<M2) )
del M1
del M2
# get intrinsic weights
fparam_intrinsic = 'weights_intrinsic'
# get detected weights
fparam_detected = 'weights_detected'
####################################################
######### ITERATE OVER MSSFR MODELS #############
####################################################
intrinsicRates = np.zeros(len(MSSFRnameslist))
detectedRates = np.zeros(len(MSSFRnameslist))
namesEMlist = []
for ind_mssfr, mssfr in enumerate(MSSFRnameslist):
# print('mssfr =',ind_mssfr)
weightheader = 'w_' + mssfr
w_int = fdata[fparam_intrinsic][weightheader][...].squeeze()[maskNSBH]
w_det = fdata[fparam_detected][weightheader][...].squeeze()[maskNSBH]
intrinsicRates[ind_mssfr] = np.sum(w_int)
detectedRates[ind_mssfr] = np.sum(w_det)
stringgg = 'NSBH'
df = pd.read_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv', index_col=0)
namez0 = BPSmodelName + ' intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs = BPSmodelName + ' observed (design LVK) [yr^{-1}]'
df[namez0] = intrinsicRates
df[nameObs] = detectedRates
df.to_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv')
fdata.close()
return
def writeToRatesFile_lightestFormsFirst(BPSmodelName='Z', DCOtype='BHNS'):
"""writes NS-BH rate to CSV file for all models"""
if DCOtype=='BHNS':
DCOname='BHNS'
elif DCOtype=='BBH':
DCOname='BHBH'
elif DCOtype=='BNS':
DCOname='NSNS'
    # path for files
    path_dir = '/Volumes/Andromeda/DATA/AllDCO_bugfix/'
    nModels = 15
    BPSnameslist = list(string.ascii_uppercase)[0:nModels]
    modelDirList = ['fiducial', 'massTransferEfficiencyFixed_0_25', 'massTransferEfficiencyFixed_0_5', 'massTransferEfficiencyFixed_0_75',
                    'unstableCaseBB', 'alpha0_5', 'alpha2_0', 'fiducial', 'rapid', 'maxNSmass2_0', 'maxNSmass3_0', 'noPISN', 'ccSNkick_100km_s', 'ccSNkick_30km_s', 'noBHkick']
    alphabetDirDict = {BPSnameslist[i]: modelDirList[i] for i in range(len(BPSnameslist))}
    print('nModels =', nModels)
    path_ = path_dir + alphabetDirDict[BPSmodelName] + '/'
path = path_ + 'COMPASCompactOutput_'+ DCOtype +'_' + BPSmodelName + '.h5'
fdata = h5.File(path)
# obtain BH and NS masses
M1 = fdata['doubleCompactObjects']['M1'][...].squeeze()
M2 = fdata['doubleCompactObjects']['M2'][...].squeeze()
MBH, MNS = obtainM1BHandM2BHassymetric(M1, M2)
whichSN = fdata['supernovae']['whichStar'][...].squeeze()[::2] # get whichStar for first SN
maskNSBH = ((whichSN==2) & (M1>M2) ) | ((whichSN==1) & (M1<M2) )
del M1
del M2
# get intrinsic weights
fparam_intrinsic = 'weights_intrinsic'
# get detected weights
fparam_detected = 'weights_detected'
####################################################
######### ITERATE OVER MSSFR MODELS #############
####################################################
intrinsicRates = np.zeros(len(MSSFRnameslist))
detectedRates = np.zeros(len(MSSFRnameslist))
namesEMlist = []
for ind_mssfr, mssfr in enumerate(MSSFRnameslist):
# print('mssfr =',ind_mssfr)
weightheader = 'w_' + mssfr
w_int = fdata[fparam_intrinsic][weightheader][...].squeeze()[maskNSBH]
w_det = fdata[fparam_detected][weightheader][...].squeeze()[maskNSBH]
intrinsicRates[ind_mssfr] = np.sum(w_int)
detectedRates[ind_mssfr] = np.sum(w_det)
stringgg = 'lightestFormsFirst'
df = pd.read_csv('/Users/floorbroekgaarden/Projects/GitHub/Double-Compact-Object-Mergers/dataFiles/lightestBHformsFirst/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv', index_col=0)
namez0 = BPSmodelName + ' intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs = BPSmodelName + ' observed (design LVK) [yr^{-1}]'
df[namez0] = intrinsicRates
df[nameObs] = detectedRates
df.to_csv('/Users/floorbroekgaarden/Projects/GitHub/Double-Compact-Object-Mergers/dataFiles/lightestBHformsFirst/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv')
fdata.close()
return
def writeToRatesFile_GW190814(BPSmodelName='Z', DCOtype='BNS'):
print('NEED TO UPDATE THIS FUNCTION')
if DCOtype=='BHNS':
DCOname='BHNS'
elif DCOtype=='BBH':
DCOname='BHBH'
elif DCOtype=='BNS':
DCOname='NSNS'
# constants
Zsolar=0.0142
nModels = 12
BPScolors = sns.color_palette("husl", nModels)
lw = 3.5
Virgo = '/Volumes/Virgo/DATA/BHNS/'
VirgoAllDCO = '/Volumes/Virgo/DATA/AllDCO/'
AndromedaBHNS = '/Volumes/Andromeda/DATA/BHNS/'
AndromedaAllDCO = '/Volumes/Andromeda/DATA/AllDCO/'
alphabet = list(string.ascii_uppercase)
BPSnameslist = alphabet[:nModels]
BPSdir = ['fiducial/', 'fiducial/', 'alpha0_5/', 'alpha2_0/', 'unstableCaseBB/', 'rapid/', 'zeroBHkick/', 'massTransferEfficiencyFixed_0_25/', 'massTransferEfficiencyFixed_0_5/', 'massTransferEfficiencyFixed_0_75/', 'ccSNkick_100km_s/', 'ccSNkick_30km_s/']
dictBPSnameToDir = dict(zip(BPSnameslist, BPSdir))
dictBPSnameToColor = dict(zip(BPSnameslist, BPScolors))
# READ IN DATA
if BPSmodelName in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I' ,'J', 'K', 'L']:
path1 = AndromedaAllDCO
path1 = path1 + dictBPSnameToDir[BPSmodelName]
path = path1 + 'COMPASCompactOutput_'+ DCOtype +'_' + BPSmodelName + '.h5'
# print(path)
else:
print('error: path does not exist')
# print('given path:', path)
fdata = h5.File(path)
# obtain BH and NS masses
M1 = fdata['doubleCompactObjects']['M1'][...].squeeze()
M2 = fdata['doubleCompactObjects']['M2'][...].squeeze()
MBH, MNS = obtainM1BHandM2BHassymetric(M1, M2)
# 90% confidence intervals:
    maskGW190814 = (((MBH <= (23.2 + 1.1)) & (MBH >= 23.2 - 1.0)) & ((MNS <= 2.85) & (MNS >= 2.35)))
del M1
del M2
# get intrinsic weights
fparam_intrinsic = 'weights_intrinsic'
# get detected weights
fparam_detected = 'weights_detected'
####################################################
######### ITERATE OVER MSSFR MODELS #############
####################################################
intrinsicRates = np.zeros(len(MSSFRnameslist))
detectedRates = np.zeros(len(MSSFRnameslist))
namesEMlist = []
for ind_mssfr, mssfr in enumerate(MSSFRnameslist):
# print('mssfr =',ind_mssfr)
weightheader = 'w_' + mssfr
        w_int = fdata[fparam_intrinsic][weightheader][...].squeeze()[maskGW190814]
        w_det = fdata[fparam_detected][weightheader][...].squeeze()[maskGW190814]
intrinsicRates[ind_mssfr] = np.sum(w_int)
detectedRates[ind_mssfr] = np.sum(w_det)
stringgg = 'GW190814rate'
df = pd.read_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv', index_col=0)
namez0 = BPSmodelName + ' intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs = BPSmodelName + ' observed (design LVK) [yr^{-1}]'
df[namez0] = intrinsicRates
df[nameObs] = detectedRates
df.to_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/ratesCSVfiles/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv')
fdata.close()
return
#### RUN different simulation summaries :
runMejecta = False
runFormationChannels =False
runNSBH = False
runGeneralBHNS = False
runGeneralBHBH = False
runGeneralNSNS = False
runLightestFormsFirst = True
if runLightestFormsFirst==True:
# INITIALIZE FILE
namesEMlist=[]
DCOname ='BHBH'
iii=0
# CREATE PANDAS FILE
nModels=26
BPSnameslist = list(string.ascii_uppercase)[0:nModels]
NAMES = []
stringgg = 'lightestFormsFirst'
for ind_l, L in enumerate(BPSnameslist):
str_z0 = str(L + ' intrinsic (z=0) [Gpc^{-3} yr^{-1}]')
str_obs = str(L + ' observed (design LVK) [yr^{-1}]')
NAMES.append(str_z0)
NAMES.append(str_obs)
datas=[]
for i in range(len(BPSnameslist)):
datas.append(np.zeros_like(MSSFRnameslist))
datas.append(np.zeros_like(MSSFRnameslist))
df = pd.DataFrame(data=datas, index=NAMES, columns=MSSFRnameslistCSV).T
df.columns = df.columns.map(str)
df.index.names = ['.x.y.z']
df.columns.names = ['m']
# print(df)
df.to_csv('/Users/floorbroekgaarden/Projects/GitHub/Double-Compact-Object-Mergers/dataFiles/lightestBHformsFirst/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv')
# run calculation
for BPS in BPSnameslist:
print(BPS)
        for DCOtype in ['BBH']:
print('at DCOtype =', DCOtype)
writeToRatesFile_lightestFormsFirst(BPSmodelName=BPS, DCOtype=DCOtype)
print('done with ', BPS)
if runMejecta ==True:
for BPS in ['A','B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O' ]:
print(BPS)
for DCOtype in ['BHNS']:
print('at DCOtype =', DCOtype)
writeToRatesFile_Mejecta(BPSmodelName=BPS)
print('done with ', BPS)
if runFormationChannels ==True:
for BPS in ['A','B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O' ]:
print(BPS)
for DCOtype in ['BHNS']:
print('at DCOtype =', DCOtype)
writeToRatesFile_FormationChannels(BPSmodelName=BPS, DCOtype=DCOtype)
print('done with ', BPS)
# for BPS in [ 'O' ]:
# print(BPS)
# for DCOtype in ['BBH']:
# print('at DCOtype =', DCOtype)
# writeToRatesFile_FormationChannels(BPSmodelName=BPS, DCOtype='BBH')
# print('done with ', BPS)
if runNSBH ==True:
for BPS in ['A','B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O' ]:
print(BPS)
for DCOtype in ['BHNS']:
print('at DCOtype =', DCOtype)
writeToRatesFile_NSBH(BPSmodelName=BPS)
print('done with ', BPS)
if runGeneralBHNS ==True:
for BPS in ['A','B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O' ]:
print(BPS)
for DCOtype in ['BHNS']:
print('at DCOtype =', DCOtype)
writeToRatesFile_GENERAL(BPSmodelName=BPS, DCOtype=DCOtype)
print('done with ', BPS)
if runGeneralNSNS ==True:
for BPS in ['A','B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O' ]:
print(BPS)
        for DCOtype in ['BNS']:
print('at DCOtype =', DCOtype)
writeToRatesFile_GENERAL(BPSmodelName=BPS, DCOtype=DCOtype)
print('done with ', BPS)
if runGeneralBHBH ==True:
for BPS in ['A','B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O' ]:
print(BPS)
        for DCOtype in ['BBH']:
print('at DCOtype =', DCOtype)
writeToRatesFile_GENERAL(BPSmodelName=BPS, DCOtype=DCOtype)
print('done with ', BPS)
# Models to RUN
# May 20: I am updating my data with the AllDCO focused runs :-)
# this is an overwrite with better data (old ones are in BHNS copy)
# for DCOtype in ['BHNS', 'BBH', 'BNS']:
# print('at DCOtype =', DCOtype)
# pathCOMPASOutput = '/Volumes/Andromeda/DATA/AllDCO/fiducial/'
# modelname = 'A'
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=False)
# pathCOMPASOutput = '/Volumes/Andromeda/DATA/AllDCO/fiducial/'
# modelname = 'B'
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=True)
# pathCOMPASOutput = '/Volumes/Andromeda/DATA/AllDCO/zeroBHkick/'
# modelname = 'G'
# writeToRatesFile(modelname=modelname, pa thCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=True)
# INITIALIZE_FormationChannels = True
# INITIALIZE_NSBH= True #False #False#True
# INITIALIZE=True #False #False #True
# INITIALIZE_GENERAL = True #False #False #True #False#True #False
# INITIALIZE_GW190814 = True #False
# INITIALIZE_EM =True #False
INITIALIZE_FormationChannels = False
INITIALIZE_NSBH= False #False #False#True
INITIALIZE=False #False #False #True
INITIALIZE_GENERAL = False #False #False #False #True #False#True #False
INITIALIZE_GW190814 = False #False
INITIALIZE_EM =False #False
# ['.0.0.0', '.1.1.1', '.2.1.1', '.3.1.1', '.1.1.2', '.2.1.2', '.3.1.2', '.1.1.3', '.2.1.3', '.3.1.3', '.1.2.1', '.2.2.1', '.3.2.1', '.1.2.2', '.2.2.2', '.3.2.2', '.1.2.3', '.2.2.3', '.3.2.3', '.1.3.1', '.2.3.1', '.3.3.1', '.1.3.2', '.2.3.2', '.3.3.2', '.1.3.3', '.2.3.3', '.3.3.3']
if INITIALIZE_FormationChannels==True:
# namesEMlist=[]
DCOname ='BHNS'
iii=0
# CREATE PANDAS FILE
nModels=26
BPSnameslist = list(string.ascii_uppercase)[0:nModels]
NAMES = []
stringgg = 'AllDCOsimulation_formation_channels'
for ind_l, BPSmodelName in enumerate(BPSnameslist):
# str_z0 = str(L + ' intrinsic (z=0) [Gpc^{-3} yr^{-1}]')
# str_obs = str(L + ' observed (design LVK) [yr^{-1}]')
namez0 = BPSmodelName + 'All intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs = BPSmodelName + 'All observed (design LVK) [yr^{-1}]'
namez0_I = BPSmodelName + 'channel I intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs_I = BPSmodelName + 'channel I observed (design LVK) [yr^{-1}]'
namez0_II = BPSmodelName + 'channel II intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs_II = BPSmodelName + 'channel II observed (design LVK) [yr^{-1}]'
namez0_III = BPSmodelName + 'channel III intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs_III = BPSmodelName + 'channel III observed (design LVK) [yr^{-1}]'
namez0_IV = BPSmodelName + 'channel IV intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs_IV = BPSmodelName + 'channel IV observed (design LVK) [yr^{-1}]'
namez0_V = BPSmodelName + 'channel V intrinsic (z=0) [Gpc^{-3} yr^{-1}]'
nameObs_V = BPSmodelName + 'channel V observed (design LVK) [yr^{-1}]'
NAMES.append(namez0)
NAMES.append(nameObs)
NAMES.append(namez0_I)
NAMES.append(nameObs_I)
NAMES.append(namez0_II)
NAMES.append(nameObs_II)
NAMES.append(namez0_III)
NAMES.append(nameObs_III)
NAMES.append(namez0_IV)
NAMES.append(nameObs_IV)
NAMES.append(namez0_V)
NAMES.append(nameObs_V)
datas=[]
for i in range(len(BPSnameslist)):
for ii in range(6):
datas.append(np.zeros_like(MSSFRnameslist))
datas.append(np.zeros_like(MSSFRnameslist))
df = pd.DataFrame(data=datas, index=NAMES, columns=MSSFRnameslistCSV).T
df.columns = df.columns.map(str)
df.index.names = ['xyz']
df.columns.names = ['m']
# print(df)
df.to_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv')
if INITIALIZE_GW190814==True:
for dcotype in ['NSNS', 'BHBH', 'BHNS']:
namesEMlist=[]
DCOname=dcotype
iii=0
# CREATE PANDAS FILE
nModels=26
BPSnameslist = list(string.ascii_uppercase)[0:nModels]
NAMES = []
stringgg = 'GW190814rate'
for ind_l, L in enumerate(BPSnameslist):
str_z0 = str(L + ' intrinsic (z=0) [Gpc^{-3} yr^{-1}]')
str_obs = str(L + ' observed (design LVK) [yr^{-1}]')
NAMES.append(str_z0)
NAMES.append(str_obs)
datas=[]
for i in range(len(BPSnameslist)):
datas.append(np.zeros_like(MSSFRnameslist))
datas.append(np.zeros_like(MSSFRnameslist))
df = pd.DataFrame(data=datas, index=NAMES, columns=MSSFRnameslistCSV).T
df.columns = df.columns.map(str)
df.index.names = ['xyz']
df.columns.names = ['m']
# print(df)
df.to_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv')
if INITIALIZE_NSBH==True:
namesEMlist=[]
DCOname ='BHNS'
iii=0
# CREATE PANDAS FILE
nModels=26
BPSnameslist = list(string.ascii_uppercase)[0:nModels]
NAMES = []
stringgg = 'NSBH'
for ind_l, L in enumerate(BPSnameslist):
str_z0 = str(L + ' intrinsic (z=0) [Gpc^{-3} yr^{-1}]')
str_obs = str(L + ' observed (design LVK) [yr^{-1}]')
NAMES.append(str_z0)
NAMES.append(str_obs)
datas=[]
for i in range(len(BPSnameslist)):
datas.append(np.zeros_like(MSSFRnameslist))
datas.append(np.zeros_like(MSSFRnameslist))
df = pd.DataFrame(data=datas, index=NAMES, columns=MSSFRnameslistCSV).T
df.columns = df.columns.map(str)
df.index.names = ['.x.y.z']
df.columns.names = ['m']
# print(df)
df.to_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv')
if INITIALIZE_GENERAL==True:
namesEMlist=[]
DCOname ='BHNS'
iii=0
# CREATE PANDAS FILE
nModels=26
BPSnameslist = list(string.ascii_uppercase)[0:nModels]
NAMES = []
stringgg = 'AllDCOsimulation'
for ind_l, L in enumerate(BPSnameslist):
str_z0 = str(L + ' intrinsic (z=0) [Gpc^{-3} yr^{-1}]')
str_obs = str(L + ' observed (design LVK) [yr^{-1}]')
NAMES.append(str_z0)
NAMES.append(str_obs)
datas=[]
for i in range(len(BPSnameslist)):
datas.append(np.zeros_like(MSSFRnameslist))
datas.append(np.zeros_like(MSSFRnameslist))
df = pd.DataFrame(data=datas, index=NAMES, columns=MSSFRnameslistCSV).T
df.columns = df.columns.map(str)
df.index.names = ['xyz']
df.columns.names = ['m']
# print(df)
df.to_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + stringgg + '.csv')
# #### INITIALIZE:::
if INITIALIZE_EM==True:
namesEMlist=[]
DCOname ='BHNS'
iii=0
for ind_chi, chi in enumerate([0.0, .5, 'Qin']):
# print(chi)
iii+=1
BH_chi = chi
for ind_Rns, NSradii in enumerate([11.5,13.0]):
iii+=1
Rns = NSradii
# if ind_mssfr ==0:
# # print(chi)
stringg = 'Rns_'+ str(NSradii) + 'km_' + 'spinBH_' + str(chi)
namesEMlist.append(stringg)
# CREATE PANDAS FILE
nModels=26
BPSnameslist = list(string.ascii_uppercase)[0:nModels]
NAMES = []
for ind_l, L in enumerate(BPSnameslist):
str_z0 = str(L + ' intrinsic (z=0) [Gpc^{-3} yr^{-1}]')
str_obs = str(L + ' observed (design LVK) [yr^{-1}]')
NAMES.append(str_z0)
NAMES.append(str_obs)
datas=[]
for i in range(len(BPSnameslist)):
datas.append(np.zeros_like(MSSFRnameslist))
datas.append(np.zeros_like(MSSFRnameslist))
print(MSSFRnameslist)
df = pd.DataFrame(data=datas, index=NAMES, columns=MSSFRnameslistCSV).T
df.columns = df.columns.map(str)
df.index.names = ['xyz']
df.columns.names = ['m']
# print(df)
df.to_csv('/Users/floorbroekgaarden/Projects/BlackHole-NeutronStar/csvFiles/rates_MSSFR_Models_'+DCOname+ '_' + stringg + '.csv')
# print(namesEMlist)
# for DCOtype in ['BHNS']:
# for Rns in enumerate()
# pathCOMPASOutput = '/Volumes/Andromeda/DATA/AllDCO/zeroBHkick/'
# modelname = 'G'
# print('modelname')
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=True)
# print('at DCOtype =', DCOtype)
# pathCOMPASOutput = '/Volumes/Andromeda/DATA/AllDCO/fiducial/'
# modelname = 'A'
# print('modelname')
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=False)
# pathCOMPASOutput = '/Volumes/Andromeda/DATA/AllDCO/fiducial/'
# modelname = 'B'
# print('modelname')
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=True)
# for DCOtype in ['BHNS']:
# for Rns in enumerate()
# pathCOMPASOutput = '/Volumes/Andromeda/DATA/AllDCO/zeroBHkick/'
# modelname = 'G'
# print('modelname')
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=True)
# print('at DCOtype =', DCOtype)
# pathCOMPASOutput = '/Volumes/Andromeda/DATA/AllDCO/fiducial/'
# modelname = 'A'
# print('modelname')
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=False)
# pathCOMPASOutput = '/Volumes/Andromeda/DATA/AllDCO/fiducial/'
# modelname = 'B'
# print('modelname')
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=True)
# pathCOMPASOutput = '/Volumes/Andromeda/DATA/AllDCO/alpha0_5/'
# modelname, DCOtype = 'M', 'BNS'
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=False)
# DCOtype='BHNS'
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=False)
# DCOtype='BBH'
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=False)
# pathCOMPASOutput = '/Volumes/Andromeda/DATA/AllDCO/alpha2_0/'
# modelname, DCOtype = 'N', 'BNS'
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=False)
# DCOtype='BHNS'
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=False)
# DCOtype='BBH'
# writeToRatesFile(modelname=modelname, pathCOMPASOutput=pathCOMPASOutput, DCOtype=DCOtype, Optmistic=False)
|
FloorBroekgaardenREPO_NAMEDouble-Compact-Object-MergersPATH_START.@Double-Compact-Object-Mergers_extracted@Double-Compact-Object-Mergers-main@otherCode@[email protected]_END.py
|
{
"filename": "feature_request.md",
"repo_name": "PrincetonUniversity/athena",
"repo_path": "athena_extracted/athena-master/.github/ISSUE_TEMPLATE/feature_request.md",
"type": "Markdown"
}
|
---
name: Feature request
about: Suggest an idea for Athena++
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
|
PrincetonUniversityREPO_NAMEathenaPATH_START.@athena_extracted@[email protected]@ISSUE_TEMPLATE@[email protected]_END.py
|
{
"filename": "_sizesrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymapbox/hoverlabel/font/_sizesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="sizesrc",
parent_name="densitymapbox.hoverlabel.font",
**kwargs,
):
super(SizesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@densitymapbox@hoverlabel@font@[email protected]_END.py
|
{
"filename": "spawn.py",
"repo_name": "jdswinbank/Comet",
"repo_path": "Comet_extracted/Comet-master/comet/handler/spawn.py",
"type": "Python"
}
|
# Comet VOEvent Broker.
# Event handler to spawn an external command & supply a VOEvent on stdin.
import os
from twisted.internet import reactor
from twisted.internet import defer
from twisted.internet.protocol import ProcessProtocol
from zope.interface import implementer
from comet.icomet import IHandler
import comet.log as log
__all__ = ["SpawnCommand"]
class SpawnCommandProtocol(ProcessProtocol):
# Assume that all external processes write a UTF-8 bytestream to STDOUT.
# This is obviously a questionable assumption, but it's not clear what a
# better alternative would be (probably trying to auto-detect, but that's
# error prone).
STDOUT_ENCODING = "UTF-8"
def __init__(self, deferred, raw_bytes):
self.deferred = deferred
self.raw_bytes = raw_bytes
def connectionMade(self):
        # Note that we're sending raw_bytes to the process in whatever
        # encoding it happens to be in.
self.transport.write(self.raw_bytes)
self.transport.closeStdin()
def outReceived(self, data):
log.debug("External process said: %s" % (data.decode(self.STDOUT_ENCODING),))
def errReceived(self, data):
self.outReceived(data)
def processEnded(self, reason):
if reason.value.exitCode:
self.deferred.errback(reason)
else:
self.deferred.callback(True)
@implementer(IHandler)
class SpawnCommand(object):
"""
Send a VOEvent to standard input of an external command.
"""
name = "spawn-command"
def __init__(self, cmd, *args):
self.cmd = cmd
self.args = [cmd]
self.args.extend(args)
def __call__(self, event):
log.info("Running external command: %s" % (self.cmd,))
d = defer.Deferred()
if not os.access(self.cmd, os.X_OK):
msg = "%s is not executable" % (self.cmd,)
log.warn(msg)
d.errback(Exception(msg))
else:
def log_reason(reason):
"""
Catch a Failure returned from an unsuccessful process execution
and log the return value, then re-raise the error.
"""
msg = "%s returned non-zero (%d)" % (self.cmd, reason.value.exitCode)
log.warn(msg)
return reason
d.addErrback(log_reason)
reactor.spawnProcess(
SpawnCommandProtocol(d, event.raw_bytes),
self.cmd,
args=self.args,
env=os.environ,
)
return d
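# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It shows how a
# SpawnCommand handler is invoked with an event object exposing `raw_bytes`,
# which is the only attribute this handler reads. `DemoEvent` and the
# `/bin/cat` command are assumptions made purely for this example.
if __name__ == "__main__":
    class DemoEvent(object):
        raw_bytes = b"<voe:VOEvent role='test'/>"
    handler = SpawnCommand("/bin/cat")
    d = handler(DemoEvent())
    d.addCallbacks(
        lambda result: log.info("External command finished cleanly"),
        lambda failure: log.warn("External command failed: %s" % (failure,)),
    )
    # Give the spawned process a moment to finish, then stop the reactor.
    reactor.callLater(2, reactor.stop)
    reactor.run()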
|
jdswinbankREPO_NAMECometPATH_START.@Comet_extracted@Comet-master@comet@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/core/__init__.py",
"type": "Python"
}
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@core@[email protected]_END.py
|
|
{
"filename": "_opensrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/candlestick/_opensrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpensrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="opensrc", parent_name="candlestick", **kwargs):
super(OpensrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@candlestick@[email protected]_END.py
|
{
"filename": "pickle_test.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/tests/pickle_test.py",
"type": "Python"
}
|
import pytest
import pickle
import numpy as np
import vaex
N_rows = 1024*4
def test_pickle_roundtrip(df_local):
df = df_local
data = pickle.dumps(df)
df2 = pickle.loads(data)
if 'obj' in df:
# comparison fails for obj
df = df.drop('obj')
df2 = df2.drop('obj')
    assert df['x'].tolist() == df2['x'].tolist()
    assert df.x.tolist() == df2.x.tolist()
result = list(df.compare(df2))
result[2] = [] # ignore dtype mismatches, it seems that pickling a big endian numpy arrays will make it little endian
result = tuple(result)
assert result == ([], [], [], [])
def test_pick_file(tmpdir, file_extension):
x = np.arange(N_rows, dtype='i8')
df = vaex.from_arrays(x=x, x2=-x)
df['y'] = df.x**2
data = pickle.dumps(df)
# if the data is in memory, pickle will be large
assert len(data) > len(x) * x.itemsize
xsum = df.x.sum()
ysum = df.y.sum()
# but on disk, it should just pickle the file path
# TODO: arrow is not supported yet
for ext in 'hdf5 parquet'.split():
path = tmpdir / f'test.{ext}'
df.export(path)
df = vaex.open(path)
data = pickle.dumps(df)
assert len(data) < 1000
assert df.x.sum() == xsum
assert df.y.sum() == ysum
@pytest.fixture(params=['hdf5', 'parquet', 'arrow'])
def file_extension(request):
return request.param
@pytest.fixture()
def df_file(file_extension, tmpdir):
x = np.arange(N_rows, dtype='i8')
df = vaex.from_arrays(x=x, x2=-x)
df['y'] = df.x**2
path = tmpdir / f'test.{file_extension}'
df.export(path)
df = vaex.open(path)
yield df
def test_slice(df_file):
df = df_file[:len(df_file)-2]
assert len(pickle.dumps(df)) < 2000
df2 = pickle.loads(pickle.dumps(df))
assert df.compare(df2) == ([], [], [], [])
def test_rename(df_file):
df = df_file[:len(df_file)-2]
df.rename('x', 'a')
assert len(pickle.dumps(df)) < 2000
df2 = pickle.loads(pickle.dumps(df))
assert df.compare(df2) == ([], [], [], [])
def test_drop(df_file):
df = df_file.drop('x2')
assert len(pickle.dumps(df)) < 2000
df2 = pickle.loads(pickle.dumps(df))
assert df.compare(df2) == ([], [], [], [])
def test_merge_files(df_file, tmpdir):
path = tmpdir / 'test2.hdf5'
df_file[['x']].export(path)
df_join = vaex.open(path)
df_join.rename('x', 'z')
df = df_file.join(df_join)
assert len(pickle.dumps(df)) < 2300
df2 = pickle.loads(pickle.dumps(df))
assert df.compare(df2) == ([], [], [], [])
assert df2.sum('x-z') == 0
def test_merge_data(df_file, tmpdir):
df_join = vaex.from_arrays(z=df_file.x.values)
df = df_file.join(df_join)
assert len(pickle.dumps(df)) > N_rows * 4, 'transport all'
df2 = pickle.loads(pickle.dumps(df))
assert df.compare(df2) == ([], [], [], [])
assert (df2.x - df2.z).sum() == 0
def test_take(df_file, tmpdir):
df = df_file.shuffle()
assert len(pickle.dumps(df)) > N_rows * 4, 'indices take space'
df2 = pickle.loads(pickle.dumps(df))
assert df.compare(df2) == ([], [], [], [])
assert df2.x.sum() == df_file.x.sum()
def test_concat(df_file, tmpdir):
path = tmpdir / 'test2.hdf5'
df_file[['x']].export(path)
df_concat = vaex.open(path)
df = vaex.concat([df_file, df_concat])
assert len(pickle.dumps(df)) < 2000
df2 = pickle.loads(pickle.dumps(df))
assert len(df) == len(df_file) * 2
assert len(df2) == len(df_file) * 2
# assert df.compare(df2) == ([], [], [], [])
assert df2.x.count() == len(df_file) * 2, 'x is repeated'
assert df2.x.sum() == df_file.x.sum() * 2, 'x is repeated'
assert df2.y.sum() == df_file.y.sum(), 'y is not repeated'
def test_state_with_set():
df = vaex.from_arrays(x=[1,2,3])
df['test'] = df.x.isin([1,2])
df2 = pickle.loads(pickle.dumps(df))
assert df2.x.tolist() == df.x.tolist()
# make sure the state itself can be pickled, not just the dataframe
pickle.dumps(df.state_get())
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@tests@[email protected]_END.py
|
{
"filename": "_legend.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/barpolar/_legend.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(self, plotly_name="legend", parent_name="barpolar", **kwargs):
super(LegendValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop("dflt", "legend"),
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@barpolar@[email protected]_END.py
|
{
"filename": "minimax.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/llms/minimax.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.llms import Minimax
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"Minimax": "langchain_community.llms"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Minimax",
]
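# Illustrative note (not part of the original module): because of the module
# level ``__getattr__`` above, ``Minimax`` is resolved lazily at attribute
# access time rather than at import time, e.g.
#     from langchain.llms import minimax
#     llm_cls = minimax.Minimax   # looked up in langchain_community.llms,
#                                 # emitting a deprecation warning
# The exact warning text depends on ``create_importer`` and is not shown here.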
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/api/ops/numpy/__init__.py",
"type": "Python"
}
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.numpy import abs
from keras.src.ops.numpy import absolute
from keras.src.ops.numpy import add
from keras.src.ops.numpy import all
from keras.src.ops.numpy import amax
from keras.src.ops.numpy import amin
from keras.src.ops.numpy import any
from keras.src.ops.numpy import append
from keras.src.ops.numpy import arange
from keras.src.ops.numpy import arccos
from keras.src.ops.numpy import arccosh
from keras.src.ops.numpy import arcsin
from keras.src.ops.numpy import arcsinh
from keras.src.ops.numpy import arctan
from keras.src.ops.numpy import arctan2
from keras.src.ops.numpy import arctanh
from keras.src.ops.numpy import argmax
from keras.src.ops.numpy import argmin
from keras.src.ops.numpy import argpartition
from keras.src.ops.numpy import argsort
from keras.src.ops.numpy import array
from keras.src.ops.numpy import average
from keras.src.ops.numpy import bincount
from keras.src.ops.numpy import bitwise_and
from keras.src.ops.numpy import bitwise_invert
from keras.src.ops.numpy import bitwise_left_shift
from keras.src.ops.numpy import bitwise_not
from keras.src.ops.numpy import bitwise_or
from keras.src.ops.numpy import bitwise_right_shift
from keras.src.ops.numpy import bitwise_xor
from keras.src.ops.numpy import broadcast_to
from keras.src.ops.numpy import ceil
from keras.src.ops.numpy import clip
from keras.src.ops.numpy import concatenate
from keras.src.ops.numpy import conj
from keras.src.ops.numpy import conjugate
from keras.src.ops.numpy import copy
from keras.src.ops.numpy import correlate
from keras.src.ops.numpy import cos
from keras.src.ops.numpy import cosh
from keras.src.ops.numpy import count_nonzero
from keras.src.ops.numpy import cross
from keras.src.ops.numpy import cumprod
from keras.src.ops.numpy import cumsum
from keras.src.ops.numpy import diag
from keras.src.ops.numpy import diagflat
from keras.src.ops.numpy import diagonal
from keras.src.ops.numpy import diff
from keras.src.ops.numpy import digitize
from keras.src.ops.numpy import divide
from keras.src.ops.numpy import divide_no_nan
from keras.src.ops.numpy import dot
from keras.src.ops.numpy import einsum
from keras.src.ops.numpy import empty
from keras.src.ops.numpy import equal
from keras.src.ops.numpy import exp
from keras.src.ops.numpy import exp2
from keras.src.ops.numpy import expand_dims
from keras.src.ops.numpy import expm1
from keras.src.ops.numpy import eye
from keras.src.ops.numpy import flip
from keras.src.ops.numpy import floor
from keras.src.ops.numpy import floor_divide
from keras.src.ops.numpy import full
from keras.src.ops.numpy import full_like
from keras.src.ops.numpy import get_item
from keras.src.ops.numpy import greater
from keras.src.ops.numpy import greater_equal
from keras.src.ops.numpy import histogram
from keras.src.ops.numpy import hstack
from keras.src.ops.numpy import identity
from keras.src.ops.numpy import imag
from keras.src.ops.numpy import inner
from keras.src.ops.numpy import isclose
from keras.src.ops.numpy import isfinite
from keras.src.ops.numpy import isinf
from keras.src.ops.numpy import isnan
from keras.src.ops.numpy import left_shift
from keras.src.ops.numpy import less
from keras.src.ops.numpy import less_equal
from keras.src.ops.numpy import linspace
from keras.src.ops.numpy import log
from keras.src.ops.numpy import log1p
from keras.src.ops.numpy import log2
from keras.src.ops.numpy import log10
from keras.src.ops.numpy import logaddexp
from keras.src.ops.numpy import logical_and
from keras.src.ops.numpy import logical_not
from keras.src.ops.numpy import logical_or
from keras.src.ops.numpy import logical_xor
from keras.src.ops.numpy import logspace
from keras.src.ops.numpy import matmul
from keras.src.ops.numpy import max
from keras.src.ops.numpy import maximum
from keras.src.ops.numpy import mean
from keras.src.ops.numpy import median
from keras.src.ops.numpy import meshgrid
from keras.src.ops.numpy import min
from keras.src.ops.numpy import minimum
from keras.src.ops.numpy import mod
from keras.src.ops.numpy import moveaxis
from keras.src.ops.numpy import multiply
from keras.src.ops.numpy import nan_to_num
from keras.src.ops.numpy import ndim
from keras.src.ops.numpy import negative
from keras.src.ops.numpy import nonzero
from keras.src.ops.numpy import not_equal
from keras.src.ops.numpy import ones
from keras.src.ops.numpy import ones_like
from keras.src.ops.numpy import outer
from keras.src.ops.numpy import pad
from keras.src.ops.numpy import power
from keras.src.ops.numpy import prod
from keras.src.ops.numpy import quantile
from keras.src.ops.numpy import ravel
from keras.src.ops.numpy import real
from keras.src.ops.numpy import reciprocal
from keras.src.ops.numpy import repeat
from keras.src.ops.numpy import reshape
from keras.src.ops.numpy import right_shift
from keras.src.ops.numpy import roll
from keras.src.ops.numpy import round
from keras.src.ops.numpy import select
from keras.src.ops.numpy import sign
from keras.src.ops.numpy import sin
from keras.src.ops.numpy import sinh
from keras.src.ops.numpy import size
from keras.src.ops.numpy import slogdet
from keras.src.ops.numpy import sort
from keras.src.ops.numpy import split
from keras.src.ops.numpy import sqrt
from keras.src.ops.numpy import square
from keras.src.ops.numpy import squeeze
from keras.src.ops.numpy import stack
from keras.src.ops.numpy import std
from keras.src.ops.numpy import subtract
from keras.src.ops.numpy import sum
from keras.src.ops.numpy import swapaxes
from keras.src.ops.numpy import take
from keras.src.ops.numpy import take_along_axis
from keras.src.ops.numpy import tan
from keras.src.ops.numpy import tanh
from keras.src.ops.numpy import tensordot
from keras.src.ops.numpy import tile
from keras.src.ops.numpy import trace
from keras.src.ops.numpy import transpose
from keras.src.ops.numpy import tri
from keras.src.ops.numpy import tril
from keras.src.ops.numpy import triu
from keras.src.ops.numpy import true_divide
from keras.src.ops.numpy import trunc
from keras.src.ops.numpy import unravel_index
from keras.src.ops.numpy import var
from keras.src.ops.numpy import vdot
from keras.src.ops.numpy import vectorize
from keras.src.ops.numpy import vstack
from keras.src.ops.numpy import where
from keras.src.ops.numpy import zeros
from keras.src.ops.numpy import zeros_like
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@api@ops@numpy@[email protected]_END.py
|
{
"filename": "_font.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter/hoverlabel/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="font", parent_name="scatter.hoverlabel", **kwargs):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
lineposition
Sets the kind of decoration line(s) with text,
such as an "under", "over" or "through" as well
as combinations e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud
for `lineposition`.
shadow
Sets the shape and color of the shadow behind
text. "auto" places minimal shadow and applies
contrast text font color. See
https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional
options.
shadowsrc
Sets the source reference on Chart Studio Cloud
for `shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
style
Sets whether a font should be styled with a
normal or italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud
for `style`.
textcase
Sets capitalization of text. It can be used to
make text appear in all-uppercase or all-
lowercase, or with each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud
for `textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud
for `variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud
for `weight`.
""",
),
**kwargs,
)
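# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated validator): the properties
# documented in ``data_docs`` above correspond to ``scatter.hoverlabel.font``
# settings on a figure. A minimal example, assuming the standard
# ``plotly.graph_objects`` API:
if __name__ == "__main__":
    import plotly.graph_objects as go
    fig = go.Figure(
        go.Scatter(
            x=[1, 2, 3],
            y=[4, 1, 2],
            hoverlabel=dict(
                font=dict(family="Courier New", size=14, color="crimson")
            ),
        )
    )
    fig.show()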
|
[email protected][email protected]@packages@python@plotly@plotly@validators@scatter@hoverlabel@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/io/harwell_boeing/tests/__init__.py",
"type": "Python"
}
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@[email protected]@site-packages@scipy@io@harwell_boeing@tests@[email protected]_END.py
|
|
{
"filename": "compare_results.py",
"repo_name": "spacetelescope/hst_cosmic_rays",
"repo_path": "hst_cosmic_rays_extracted/hst_cosmic_rays-master/analyzing_cr_rejection/compare_results.py",
"type": "Python"
}
|
import argparse
from collections import defaultdict
import glob
import json
import logging
import os
_MOD_DIR = os.path.dirname(os.path.abspath(__file__))
_BASE = os.path.join('/', *_MOD_DIR.split('/')[:-1])
import shutil
import sys
sys.path.append(os.path.join(_BASE, 'pipeline'))
import warnings
warnings.simplefilter("ignore")
from astropy.io import fits
from astropy.table import Table
from astropy.time import Time
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
plt.style.use('ggplot')
import numpy as np
import pandas as pd
import scipy.ndimage as ndimage
from utils import initialize
from utils import datahandler as dh
_PLOT_DIR = os.path.join(_BASE, 'analyzing_cr_rejection', 'plots')
_RESULTS_DIR = os.path.join(_BASE,
'analyzing_cr_rejection',
'results',
'STIS'
)
logging.basicConfig(format='%(levelname)-4s '
'[%(module)s.%(funcName)s:%(lineno)d]'
' %(message)s',
)
LOG = logging.getLogger('compare_results')
LOG.setLevel(logging.INFO)
def create_data_objects(flist):
"""
Parameters
----------
flist : TYPE
Description
Returns
-------
TYPE
Description
"""
obj = dh.DataReader(instr='stis_ccd', statistic='incident_cr_rate')
obj.hdf5_files = flist
obj.read_cr_rate()
return obj
def make_MEF(fname, hdf5file1=None, hdf5file2=None, params1=None, params2=None):
"""
Parameters
----------
fname : TYPE
Description
hdf5file1 : None, optional
Description
hdf5file2 : None, optional
Description
"""
LOG.info(f'Creating a MEF for {fname}')
dset = os.path.basename(fname)
hdu_list = fits.HDUList()
with fits.open(fname) as hdu:
prhdr = hdu[0].header
scihdr = hdu[1].header
sci = hdu[1].data
dqhdr = hdu[3].header
dq = hdu[3].data
prhdu = fits.PrimaryHDU(header=prhdr)
hdu_list.append(fits.ImageHDU(header=scihdr, data=sci))
hdu_list.append(fits.ImageHDU(header=dqhdr, data=dq))
if hdf5file1 is not None:
label1, metadata1 = label_from_file(
hdf5file=hdf5file1,
dset_name=dset,
shape=sci.shape
)
hdr1 = fits.Header(cards=[], copy=False)
params = '_'.join(os.path.basename(hdf5file1).split('.hdf5')[0].split('_')[-3:])
        hdr1.update(metadata1)  # copy the dataset metadata into the header
hdr1['EXTNAME'] = 'CRLABEL'
hdr1['PARAMS'] = params1
hdu_list.append(fits.ImageHDU(header=hdr1, data=label1))
if hdf5file2 is not None:
label2, metadata2 = label_from_file(
hdf5file=hdf5file2,
dset_name=dset,
shape=sci.shape
)
hdr2 = fits.Header(cards=[], copy=False)
params = '_'.join(os.path.basename(hdf5file2).split('.hdf5')[0].split('_')[-3:])
        hdr2.update(metadata2)  # copy the dataset metadata into the header
hdr2['EXTNAME'] = 'CRLABEL'
hdr2['PARAMS'] = params2
hdu_list.append(fits.ImageHDU(header=hdr2, data=label2))
LOG.info(f"{hdu_list.info()}")
hdu_list.writeto(f"{fname.replace('_flt.fits', '_all.fits')}", overwrite=True)
def label_from_file(hdf5file, dset_name, shape=None):
"""
Parameters
----------
hdf5file : TYPE
Description
dset_name : TYPE
Description
shape : None, optional
Description
Returns
-------
TYPE
Description
"""
dh1 = dh.DataReader(instr='stis_ccd', statistic='cr_affected_pixels')
cr_affected_pixels, metadata = dh1.read_single_dst(hdf5file, dset_name)
template = np.zeros(shape)
for (y,x) in cr_affected_pixels:
template[int(y)][int(x)] +=1
label, num_feat = ndimage.label(template,
structure=np.ones((3,3)))
return label, metadata
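# Illustrative sketch of the labeling step above (comment only; the demo
# values are assumptions): marking two adjacent pixels and one isolated pixel
# in a blank template yields two connected components when an 8-connected
# (3x3) structuring element is used.
#     demo = np.zeros((5, 5))
#     demo[1, 1] = demo[1, 2] = 1      # touching -> one feature
#     demo[3, 4] = 1                   # isolated -> second feature
#     demo_labels, n_feat = ndimage.label(demo, structure=np.ones((3, 3)))
#     assert n_feat == 2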
def examine_label(dirname=_RESULTS_DIR, exptime=60.0):
"""
Parameters
----------
dirname : TYPE, optional
Description
exptime : float, optional
Description
"""
flist = glob.glob(f"{dirname}/stis*cr_affected_pixels*hdf5")
file1 = display_menu(flist)
params1 = '_'.join(os.path.basename(file1).split('.hdf5')[0].split('_')[-2:])
dir1 = os.path.join(
_BASE,'analyzing_cr_rejection',
f"{exptime}_{params1}"
)
print(dir1)
dataset1 = glob.glob(dir1+'/*flt.fits')
file2 = display_menu(flist)
params2 = '_'.join(os.path.basename(file2).split('.hdf5')[0].split('_')[-2:])
dir2 = os.path.join(
_BASE, 'analyzing_cr_rejection', f"{exptime}_{params2}")
print(dir2)
dataset2 = glob.glob(dir2+'/*flt.fits')
print(len(dataset1), len(dataset2))
for f1, f2 in zip(dataset1, dataset2):
make_MEF(fname=f1, hdf5file1=file1, params1=params1)
make_MEF(fname=f2, hdf5file2=file2, params2=params2)
def exptime_summary(dh, title=''):
"""
Parameters
----------
dh : TYPE
Description
title : str, optional
Description
"""
counts = dh.data_df.integration_time.value_counts()
fig, ax = plt.subplots(nrows=1, ncols=1)
counts.plot.barh(ax=ax)
ax.set_title(title)
ax.set_ylabel('Integration Time [seconds]')
plt.show()
def compare_by_exptime(dh, title=''):
"""
Parameters
----------
dh : TYPE
Description
title : str, optional
Description
"""
longexp = dh.data_df.integration_time.gt(1000)
shortexp = dh.data_df.integration_time.lt(100)
fig, ax = plt.subplots(nrows=1, ncols=1)
fig.autofmt_xdate()
ax.scatter(dh.data_df[longexp].index,
dh.data_df[longexp].incident_cr_rate, label='longexp')
ax.scatter(dh.data_df[shortexp].index,
dh.data_df[shortexp].incident_cr_rate, label='shortexp')
ax.legend(loc='best')
ax.set_title(title)
plt.show()
def compare_by_rej_params(
dh1,
dh2,
label1='',
label2='',
title='',
fout=None,
figsize=(6,5)
):
"""
Parameters
----------
dh1 : TYPE
Description
dh2 : TYPE
Description
label1 : str, optional
Description
label2 : str, optional
Description
title : str, optional
Description
fout : None, optional
Description
figsize : tuple, optional
Description
"""
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
days_major = mdates.DayLocator(interval=5)
days_minor = mdates.DayLocator(interval=1)
years_fmt = mdates.DateFormatter('%Y-%m-%d')
expcut = dh1.data_df.integration_time.gt(1000)
shortexp = dh1.data_df.integration_time.lt(100)
fig, (ax1, ax2) = plt.subplots(
nrows=1,
ncols=2,
figsize=figsize,
sharex=True
)
diff = dh2.data_df[expcut].incident_cr_rate - \
dh1.data_df[expcut].incident_cr_rate
dates = dh1.data_df[expcut].index.values
rate1 = dh1.data_df[expcut].incident_cr_rate
rate2 = dh2.data_df[expcut].incident_cr_rate
fig.autofmt_xdate()
# ax1.xaxis.set_major_locator(plt.MaxNLocator(10))
# ax2.xaxis.set_major_locator(plt.MaxNLocator(10))
ax1.scatter(dh1.data_df[expcut].index.values,
rate1, label=label1)
ax1.scatter(dh2.data_df[expcut].index.values,
rate2, label=label2)
ax1.set_ylabel('CR Rate [CR/cm$^2$/second]')
ax1.legend(loc='best')
ax2.scatter(diff.index.values, diff, c='k')
ax2.set_title(f'{label2} - {label1}')
ax1.set_xlim(
(Time('2019-05-25', format='iso').to_datetime(),
Time('2019-07-01', format='iso').to_datetime())
)
# ax1.fmt_xdata = mdates.DateFormatter('%m-%d')
# ax2.fmt_xdata = mdates.DateFormatter('%m-%d')
ax1.xaxis.set_major_locator(days_major)
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax1.yaxis.set_minor_locator(AutoMinorLocator(5))
ax1.xaxis.set_minor_locator(days_minor)
ax2.xaxis.set_major_locator(days_major)
ax2.yaxis.set_minor_locator(AutoMinorLocator(5))
ax2.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
ax2.xaxis.set_minor_locator(days_minor)
fig.suptitle(title)
if fout is not None:
fout = os.path.join(_PLOT_DIR, fout)
fig.savefig(fout, format='png', dpi=300, bbox_inches='tight')
plt.show()
def get_default_parameters(dh):
"""
Parameters
----------
dh : TYPE
Description
"""
expsum = dh.data_df.integration_time.sum()
n_images = len(dh.data_df)
tb = Table.read('/Users/nmiles/hst_cosmic_rays/j3m1403io_crr.fits')
def display_menu(flist):
# Generate a list of options for the user to choose from
out_str = 'Choose a dataset to analyze:\n'
for i, f in enumerate(flist):
out_str += f"{i}) {os.path.basename(f)}\n"
LOG.info(f"{out_str}\n{'-'*79}")
idx = int(input('Enter selection: '))
file = flist[idx]
LOG.info(f"Selected option: {file}")
return file
def run_comparison(fout=None):
"""
Parameters
----------
fout : None, optional
Description
"""
results_dir = os.path.join(_BASE,
'analyzing_cr_rejection',
'results',
'STIS'
)
# Get a list of all the files generated with median combination
medflist = glob.glob(
os.path.join(_RESULTS_DIR,'stis*cr_rate*med*hdf5')
)
# Get a list of all the files generated with minimum combination
minflist = glob.glob(
os.path.join(_RESULTS_DIR, 'stis*cr_rate*min*hdf5')
)
flist = medflist + minflist
file1 = display_menu(flist)
file2 = display_menu(flist)
# Extract the CR parameters from the filename
file1_rej_param = file1.replace('.hdf5','')
file1_rej_param = ' '.join(file1_rej_param.split('_')[-2:])
# Extract the CR parameters from the filename
file2_rej_param = file2.replace('.hdf5','')
file2_rej_param = ' '.join(file2_rej_param.split('_')[-2:])
dh_1 = create_data_objects([file1])
dh_2 = create_data_objects([file2])
# exptime_summary(dh_min, title='Exposure Times')
# compare_by_exptime(dh_min, title=min_rej_param)
# compare_by_exptime(dh_med, title=med_rej_param)
compare_by_rej_params(dh_1, dh_2,
label1=file1_rej_param, label2=file2_rej_param,
title=f'{file1_rej_param} vs. {file2_rej_param}',
fout=f'{file1_rej_param}_{file2_rej_param}.png', figsize=(6.5,4.5))
examine_label()
|
spacetelescopeREPO_NAMEhst_cosmic_raysPATH_START.@hst_cosmic_rays_extracted@hst_cosmic_rays-master@analyzing_cr_rejection@[email protected]_END.py
|
{
"filename": "plot.py",
"repo_name": "lgrcia/paper-nuance",
"repo_path": "paper-nuance_extracted/paper-nuance-main/workflows/benchmark/scripts/plot.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
import numpy as np
import yaml
plt.figure(figsize=(8, 2.5))
nuance = np.array([yaml.safe_load(open(f))["linear"] for f in snakemake.input.nuance])
bls = np.array([yaml.safe_load(open(f))["biweight"] for f in snakemake.input.bls])
points = np.array(snakemake.params.points)
plt.subplot(121)
plt.plot(points, nuance, ".-", c="k", label="linear search")
i = np.flatnonzero(points == 10000)[0]
plt.plot(
points[i:],
nuance[i] * (points[i:] / 10000),
".-",
label="N x linear search",
c="0.5",
zorder=-1,
)
plt.plot(points, bls, ".-", c="C0", label="biweight")
plt.xlabel("number of points")
plt.ylabel("processing time (s)")
plt.yscale("log")
plt.xscale("log")
ylim = (1e-2, 3e3)
plt.ylim(ylim)
plt.legend()
nuance = [yaml.safe_load(open(f)) for f in snakemake.input.nuance]
nuance = [n["all"] - n["linear"] for n in nuance]
bls = [yaml.safe_load(open(f)) for f in snakemake.input.bls]
bls = [b["bls"] - b["biweight"] for b in bls]
points = np.array(snakemake.params.points)
plt.subplot(122)
plt.plot(points, nuance, ".-", c="k", label="periodic search")
plt.plot(points, bls, ".-", c="C0", label="BLS")
plt.xlabel("number of points")
plt.ylabel("processing time (s)")
plt.yscale("log")
plt.xscale("log")
plt.ylim(ylim)
plt.legend()
plt.tight_layout()
plt.savefig(snakemake.output[0])
|
lgrciaREPO_NAMEpaper-nuancePATH_START.@paper-nuance_extracted@paper-nuance-main@workflows@benchmark@[email protected]@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnel/marker/colorbar/tickfont/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="variant",
parent_name="funnel.marker.colorbar.tickfont",
**kwargs,
):
super(VariantValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop(
"values",
[
"normal",
"small-caps",
"all-small-caps",
"all-petite-caps",
"petite-caps",
"unicase",
],
),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnel@marker@colorbar@tickfont@[email protected]_END.py
|
{
"filename": "test_representation.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/coordinates/tests/test_representation.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.coordinates import Angle, Latitude, Longitude
from astropy.coordinates.distances import Distance
from astropy.coordinates.matrix_utilities import rotation_matrix
from astropy.coordinates.representation import (
DIFFERENTIAL_CLASSES,
DUPLICATE_REPRESENTATIONS,
REPRESENTATION_CLASSES,
BaseRepresentation,
CartesianDifferential,
CartesianRepresentation,
CylindricalDifferential,
CylindricalRepresentation,
PhysicsSphericalDifferential,
PhysicsSphericalRepresentation,
RadialDifferential,
RadialRepresentation,
SphericalCosLatDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from astropy.tests.helper import assert_quantity_allclose as assert_allclose_quantity
from astropy.utils import isiterable
from astropy.utils.compat import COPY_IF_NEEDED
from astropy.utils.exceptions import DuplicateRepresentationWarning
# create matrices for use in testing ``.transform()`` methods
matrices = {
"rotation": rotation_matrix(-10, "z", u.deg),
"general": np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
}
# Preserve the original REPRESENTATION_CLASSES dict so that importing
# the test file doesn't add a persistent test subclass (LogDRepresentation)
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
func.DUPLICATE_REPRESENTATIONS_ORIG = deepcopy(DUPLICATE_REPRESENTATIONS)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
DUPLICATE_REPRESENTATIONS.clear()
DUPLICATE_REPRESENTATIONS.update(func.DUPLICATE_REPRESENTATIONS_ORIG)
def components_equal(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
for component in rep1.components:
result &= getattr(rep1, component) == getattr(rep2, component)
return result
def components_allclose(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
for component in rep1.components:
result &= u.allclose(getattr(rep1, component), getattr(rep2, component))
return result
def representation_equal(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
if getattr(rep1, "_differentials", False):
if rep1._differentials.keys() != rep2._differentials.keys():
return False
for key, diff1 in rep1._differentials.items():
result &= components_equal(diff1, rep2._differentials[key])
elif getattr(rep2, "_differentials", False):
return False
return result & components_equal(rep1, rep2)
def representation_equal_up_to_angular_type(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
if getattr(rep1, "_differentials", False):
if rep1._differentials.keys() != rep2._differentials.keys():
return False
for key, diff1 in rep1._differentials.items():
result &= components_allclose(diff1, rep2._differentials[key])
elif getattr(rep2, "_differentials", False):
return False
return result & components_allclose(rep1, rep2)
class TestRadialRepresentation:
def test_transform(self):
"""Test the ``transform`` method. Only multiplication matrices pass."""
rep = RadialRepresentation(distance=10 * u.kpc)
# a rotation matrix does not work
matrix = rotation_matrix(10 * u.deg)
with pytest.raises(ValueError, match="scaled identity matrix"):
rep.transform(matrix)
# only a scaled identity matrix
matrix = 3 * np.identity(3)
newrep = rep.transform(matrix)
assert newrep.distance == 30 * u.kpc
# let's also check with differentials
dif = RadialDifferential(d_distance=-3 * u.km / u.s)
rep = rep.with_differentials({"s": dif})
newrep = rep.transform(matrix)
assert newrep.distance == 30 * u.kpc
assert newrep.differentials["s"].d_distance == -9 * u.km / u.s
class TestSphericalRepresentation:
def test_name(self):
assert SphericalRepresentation.name == "spherical"
assert SphericalRepresentation.name in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = SphericalRepresentation()
def test_init_quantity(self):
s3 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc
)
assert s3.lon == 8.0 * u.hourangle
assert s3.lat == 5.0 * u.deg
assert s3.distance == 10 * u.kpc
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
assert isinstance(s3.distance, Distance)
def test_init_no_mutate_input(self):
lon = -1 * u.hourangle
s = SphericalRepresentation(
lon=lon, lat=-1 * u.deg, distance=1 * u.kpc, copy=True
)
# The longitude component should be wrapped at 24 hours
assert_allclose_quantity(s.lon, 23 * u.hourangle)
# The input should not have been mutated by the constructor
assert_allclose_quantity(lon, -1 * u.hourangle)
def test_init_lonlat(self):
s2 = SphericalRepresentation(
Longitude(8, u.hour), Latitude(5, u.deg), Distance(10, u.kpc)
)
assert s2.lon == 8.0 * u.hourangle
assert s2.lat == 5.0 * u.deg
assert s2.distance == 10.0 * u.kpc
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
assert isinstance(s2.distance, Distance)
# also test that wrap_angle is preserved
s3 = SphericalRepresentation(
Longitude(-90, u.degree, wrap_angle=180 * u.degree),
Latitude(-45, u.degree),
Distance(1.0, u.Rsun),
)
assert s3.lon == -90.0 * u.degree
assert s3.lon.wrap_angle == 180 * u.degree
def test_init_subclass(self):
class Longitude180(Longitude):
_default_wrap_angle = 180 * u.degree
s = SphericalRepresentation(
Longitude180(-90, u.degree), Latitude(-45, u.degree), Distance(1.0, u.Rsun)
)
assert isinstance(s.lon, Longitude180)
assert s.lon == -90.0 * u.degree
assert s.lon.wrap_angle == 180 * u.degree
def test_init_array(self):
s1 = SphericalRepresentation(
lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, distance=[1, 2] * u.kpc
)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert_allclose(s1.distance.kpc, [1, 2])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
assert isinstance(s1.distance, Distance)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
distance = Distance([1, 2] * u.kpc)
s1 = SphericalRepresentation(lon=lon, lat=lat, distance=distance, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
distance[:] = [8, 9] * u.Mpc
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
assert_allclose_quantity(distance, s1.distance)
def test_init_float32_array(self):
"""Regression test against #2983"""
lon = Longitude(np.float32([1.0, 2.0]), u.degree)
lat = Latitude(np.float32([3.0, 4.0]), u.degree)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
assert s1.lon.dtype == np.float32
assert s1.lat.dtype == np.float32
assert s1._values["lon"].dtype == np.float32
assert s1._values["lat"].dtype == np.float32
def test_reprobj(self):
s1 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc
)
s2 = SphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8.0 * u.hourangle)
assert_allclose_quantity(s2.lat, 5.0 * u.deg)
assert_allclose_quantity(s2.distance, 10 * u.kpc)
s3 = SphericalRepresentation(s1)
assert representation_equal(s1, s3)
def test_broadcasting(self):
s1 = SphericalRepresentation(
lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg, distance=10 * u.kpc
)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
assert_allclose_quantity(s1.distance, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = SphericalRepresentation(
lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc,
)
assert (
exc.value.args[0]
== "Input parameters lon, lat, and distance cannot be broadcast"
)
def test_broadcasting_and_nocopy(self):
s1 = SphericalRepresentation(
lon=[200] * u.deg, lat=[0] * u.deg, distance=[0] * u.kpc, copy=False
)
# With no copying, we should be able to modify the wrap angle of the longitude component
s1.lon.wrap_angle = 180 * u.deg
s2 = SphericalRepresentation(
lon=[200] * u.deg, lat=0 * u.deg, distance=0 * u.kpc, copy=False
)
# We should be able to modify the wrap angle of the longitude component even if other
# components need to be broadcasted
s2.lon.wrap_angle = 180 * u.deg
def test_readonly(self):
s1 = SphericalRepresentation(
lon=8 * u.hourangle, lat=5 * u.deg, distance=1.0 * u.kpc
)
with pytest.raises(AttributeError):
s1.lon = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.lat = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.distance = 1.0 * u.kpc
def test_getitem_len_iterable(self):
s = SphericalRepresentation(
lon=np.arange(10) * u.deg, lat=-np.arange(10) * u.deg, distance=1 * u.kpc
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.distance, [1, 1, 1] * u.kpc)
assert len(s) == 10
assert isiterable(s)
def test_getitem_len_iterable_scalar(self):
s = SphericalRepresentation(lon=1 * u.deg, lat=-2 * u.deg, distance=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
with pytest.raises(TypeError):
len(s)
assert not isiterable(s)
def test_setitem(self):
s = SphericalRepresentation(
lon=np.arange(5) * u.deg, lat=-np.arange(5) * u.deg, distance=1 * u.kpc
)
s[:2] = SphericalRepresentation(
lon=10.0 * u.deg, lat=2.0 * u.deg, distance=5.0 * u.kpc
)
assert_allclose_quantity(s.lon, [10, 10, 2, 3, 4] * u.deg)
assert_allclose_quantity(s.lat, [2, 2, -2, -3, -4] * u.deg)
assert_allclose_quantity(s.distance, [5, 5, 1, 1, 1] * u.kpc)
def test_negative_distance(self):
"""Only allowed if explicitly passed on."""
with pytest.raises(ValueError, match="allow_negative"):
SphericalRepresentation(10 * u.deg, 20 * u.deg, -10 * u.m)
s1 = SphericalRepresentation(
10 * u.deg, 20 * u.deg, Distance(-10 * u.m, allow_negative=True)
)
assert s1.distance == -10.0 * u.m
def test_nan_distance(self):
"""This is a regression test: calling represent_as() and passing in the
same class as the object shouldn't round-trip through cartesian.
"""
sph = SphericalRepresentation(1 * u.deg, 2 * u.deg, np.nan * u.kpc)
new_sph = sph.represent_as(SphericalRepresentation)
assert_allclose_quantity(new_sph.lon, sph.lon)
assert_allclose_quantity(new_sph.lat, sph.lat)
dif = SphericalCosLatDifferential(
1 * u.mas / u.yr, 2 * u.mas / u.yr, 3 * u.km / u.s
)
sph = sph.with_differentials(dif)
new_sph = sph.represent_as(SphericalRepresentation)
assert_allclose_quantity(new_sph.lon, sph.lon)
assert_allclose_quantity(new_sph.lat, sph.lat)
def test_raise_on_extra_arguments(self):
with pytest.raises(TypeError, match="got multiple values"):
SphericalRepresentation(1 * u.deg, 2 * u.deg, 1.0 * u.kpc, lat=10)
with pytest.raises(TypeError, match="unexpected keyword.*parrot"):
SphericalRepresentation(1 * u.deg, 2 * u.deg, 1.0 * u.kpc, parrot=10)
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = SphericalCosLatDifferential(
4 * u.mas / u.yr, 5 * u.mas / u.yr, 6 * u.km / u.s
)
sph = SphericalRepresentation(
1 * u.deg, 2 * u.deg, 3 * u.kpc, differentials={"s": difs}
)
got = sph.represent_as(
PhysicsSphericalRepresentation, PhysicsSphericalDifferential
)
assert np.may_share_memory(sph.lon, got.phi)
assert np.may_share_memory(sph.distance, got.r)
expected = BaseRepresentation.represent_as(
sph, PhysicsSphericalRepresentation, PhysicsSphericalDifferential
)
# equal up to angular type
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(UnitSphericalRepresentation, UnitSphericalDifferential)
assert np.may_share_memory(sph.lon, got.lon)
assert np.may_share_memory(sph.lat, got.lat)
expected = BaseRepresentation.represent_as(
sph, UnitSphericalRepresentation, UnitSphericalDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(RadialRepresentation, RadialDifferential)
assert np.may_share_memory(sph.distance, got.distance)
expected = BaseRepresentation.represent_as(
sph, RadialRepresentation, RadialDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
def test_transform(self):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = SphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
d_distance=[-5, 6] * u.km / u.s,
)
s1 = SphericalRepresentation(
lon=[1, 2] * u.deg,
lat=[3, 4] * u.deg,
distance=[5, 6] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = SphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
assert_allclose_quantity(s2.distance, s1.distance)
# check differentials. they shouldn't have changed.
assert_allclose_quantity(ds2.d_lon, ds1.d_lon)
assert_allclose_quantity(ds2.d_lat, ds1.d_lat)
assert_allclose_quantity(ds2.d_distance, ds1.d_distance)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds2.d_distance, dexpected.d_distance)
# now with a non rotation matrix
# transform representation & get comparison (thru CartesianRep)
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(SphericalRepresentation, SphericalDifferential)
)
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.lon, expected.lon)
assert_allclose_quantity(s3.lat, expected.lat)
assert_allclose_quantity(s3.distance, expected.distance)
assert_allclose_quantity(ds3.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds3.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds3.d_distance, dexpected.d_distance)
def test_transform_with_NaN(self):
# all over again, but with a NaN in the distance
ds1 = SphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
d_distance=[-5, 6] * u.km / u.s,
)
s1 = SphericalRepresentation(
lon=[1, 2] * u.deg,
lat=[3, 4] * u.deg,
distance=[5, np.nan] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = SphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
assert_allclose_quantity(s2.distance, s1.distance)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds2.d_distance, dexpected.d_distance)
# the 2nd component is NaN since the 2nd distance is NaN
# TODO! this will change when ``.transform`` skips Cartesian
assert_array_equal(np.isnan(ds2.d_lon), (False, True))
assert_array_equal(np.isnan(ds2.d_lat), (False, True))
assert_array_equal(np.isnan(ds2.d_distance), (False, True))
# now with a non rotation matrix
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
thruC = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(
SphericalRepresentation, differential_class=SphericalDifferential
)
)
dthruC = thruC.differentials["s"]
# s3 should not propagate Nan.
assert_array_equal(np.isnan(s3.lon), (False, False))
assert_array_equal(np.isnan(s3.lat), (False, False))
assert_array_equal(np.isnan(s3.distance), (False, True))
# ds3 does b/c currently aren't any shortcuts on the transform
assert_array_equal(np.isnan(ds3.d_lon), (False, True))
assert_array_equal(np.isnan(ds3.d_lat), (False, True))
assert_array_equal(np.isnan(ds3.d_distance), (False, True))
# through Cartesian should
assert_array_equal(np.isnan(thruC.lon), (False, True))
assert_array_equal(np.isnan(thruC.lat), (False, True))
assert_array_equal(np.isnan(thruC.distance), (False, True))
assert_array_equal(np.isnan(dthruC.d_lon), (False, True))
assert_array_equal(np.isnan(dthruC.d_lat), (False, True))
assert_array_equal(np.isnan(dthruC.d_distance), (False, True))
# test that they are close on the first value
assert_allclose_quantity(s3.lon[0], thruC.lon[0])
assert_allclose_quantity(s3.lat[0], thruC.lat[0])
assert_allclose_quantity(ds3.d_lon[0], dthruC.d_lon[0])
assert_allclose_quantity(ds3.d_lat[0], dthruC.d_lat[0])
class TestUnitSphericalRepresentation:
def test_name(self):
assert UnitSphericalRepresentation.name == "unitspherical"
assert UnitSphericalRepresentation.name in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = UnitSphericalRepresentation()
def test_init_quantity(self):
s3 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
assert s3.lon == 8.0 * u.hourangle
assert s3.lat == 5.0 * u.deg
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
def test_init_lonlat(self):
s2 = UnitSphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg))
assert s2.lon == 8.0 * u.hourangle
assert s2.lat == 5.0 * u.deg
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
def test_init_array(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
def test_reprobj(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
s2 = UnitSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8.0 * u.hourangle)
assert_allclose_quantity(s2.lat, 5.0 * u.deg)
s3 = UnitSphericalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle, lat=[5, 6] * u.deg)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = UnitSphericalRepresentation(
lon=[8, 9, 10] * u.hourangle, lat=[5, 6] * u.deg
)
assert exc.value.args[0] == "Input parameters lon and lat cannot be broadcast"
def test_readonly(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
with pytest.raises(AttributeError):
s1.lon = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.lat = 1.0 * u.deg
def test_getitem(self):
s = UnitSphericalRepresentation(
lon=np.arange(10) * u.deg, lat=-np.arange(10) * u.deg
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
def test_getitem_scalar(self):
s = UnitSphericalRepresentation(lon=1 * u.deg, lat=-2 * u.deg)
with pytest.raises(TypeError):
s_slc = s[0]
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
# TODO! representation transformations with differentials cannot
# (currently) be implemented due to a mismatch between the UnitSpherical
# expected keys (e.g. "s") and that expected in the other class
# (here "s / m"). For more info, see PR #11467
# We leave the test code commented out for future use.
# diffs = UnitSphericalCosLatDifferential(4*u.mas/u.yr, 5*u.mas/u.yr,
# 6*u.km/u.s)
sph = UnitSphericalRepresentation(1 * u.deg, 2 * u.deg)
# , differentials={'s': diffs}
got = sph.represent_as(PhysicsSphericalRepresentation)
# , PhysicsSphericalDifferential)
assert np.may_share_memory(sph.lon, got.phi)
expected = BaseRepresentation.represent_as(
sph, PhysicsSphericalRepresentation
) # PhysicsSphericalDifferential
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(SphericalRepresentation)
# , SphericalDifferential)
assert np.may_share_memory(sph.lon, got.lon)
assert np.may_share_memory(sph.lat, got.lat)
expected = BaseRepresentation.represent_as(
sph, SphericalRepresentation
) # , SphericalDifferential)
assert representation_equal_up_to_angular_type(got, expected)
def test_transform(self):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = UnitSphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
)
s1 = UnitSphericalRepresentation(
lon=[1, 2] * u.deg, lat=[3, 4] * u.deg, differentials=ds1
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = UnitSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
# compare differentials. they should be unchanged (ds1).
assert_allclose_quantity(ds2.d_lon, ds1.d_lon)
assert_allclose_quantity(ds2.d_lat, ds1.d_lat)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert not hasattr(ds2, "d_distance")
# now with a non rotation matrix
# note that the result will be a Spherical, not UnitSpherical
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(
SphericalRepresentation, differential_class=SphericalDifferential
)
)
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.lon, expected.lon)
assert_allclose_quantity(s3.lat, expected.lat)
assert_allclose_quantity(s3.distance, expected.distance)
assert_allclose_quantity(ds3.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds3.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds3.d_distance, dexpected.d_distance)
class TestPhysicsSphericalRepresentation:
def test_name(self):
assert PhysicsSphericalRepresentation.name == "physicsspherical"
assert PhysicsSphericalRepresentation.name in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = PhysicsSphericalRepresentation()
def test_init_quantity(self):
s3 = PhysicsSphericalRepresentation(
phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc
)
assert s3.phi == 8.0 * u.hourangle
assert s3.theta == 5.0 * u.deg
assert s3.r == 10 * u.kpc
assert isinstance(s3.phi, Angle)
assert isinstance(s3.theta, Angle)
assert isinstance(s3.r, Distance)
def test_init_phitheta(self):
s2 = PhysicsSphericalRepresentation(
Angle(8, u.hour), Angle(5, u.deg), Distance(10, u.kpc)
)
assert s2.phi == 8.0 * u.hourangle
assert s2.theta == 5.0 * u.deg
assert s2.r == 10.0 * u.kpc
assert isinstance(s2.phi, Angle)
assert isinstance(s2.theta, Angle)
assert isinstance(s2.r, Distance)
def test_init_array(self):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=[1, 2] * u.kpc
)
assert_allclose(s1.phi.degree, [120, 135])
assert_allclose(s1.theta.degree, [5, 6])
assert_allclose(s1.r.kpc, [1, 2])
assert isinstance(s1.phi, Angle)
assert isinstance(s1.theta, Angle)
assert isinstance(s1.r, Distance)
def test_init_array_nocopy(self):
phi = Angle([8, 9] * u.hourangle)
theta = Angle([5, 6] * u.deg)
r = Distance([1, 2] * u.kpc)
s1 = PhysicsSphericalRepresentation(phi=phi, theta=theta, r=r, copy=False)
phi[:] = [1, 2] * u.rad
theta[:] = [3, 4] * u.arcmin
r[:] = [8, 9] * u.Mpc
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(theta, s1.theta)
assert_allclose_quantity(r, s1.r)
def test_reprobj(self):
s1 = PhysicsSphericalRepresentation(
phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc
)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.phi, 8.0 * u.hourangle)
assert_allclose_quantity(s2.theta, 5.0 * u.deg)
assert_allclose_quantity(s2.r, 10 * u.kpc)
s3 = PhysicsSphericalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=10 * u.kpc
)
assert_allclose_quantity(s1.phi, [120, 135] * u.degree)
assert_allclose_quantity(s1.theta, [5, 6] * u.degree)
assert_allclose_quantity(s1.r, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(
ValueError, match="Input parameters phi, theta, and r cannot be broadcast"
):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9, 10] * u.hourangle, theta=[5, 6] * u.deg, r=[1, 2] * u.kpc
)
def test_readonly(self):
s1 = PhysicsSphericalRepresentation(
phi=[8, 9] * u.hourangle, theta=[5, 6] * u.deg, r=[10, 20] * u.kpc
)
with pytest.raises(AttributeError):
s1.phi = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.theta = 1.0 * u.deg
with pytest.raises(AttributeError):
s1.r = 1.0 * u.kpc
def test_getitem(self):
s = PhysicsSphericalRepresentation(
phi=np.arange(10) * u.deg, theta=np.arange(5, 15) * u.deg, r=1 * u.kpc
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.phi, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.theta, [7, 9, 11] * u.deg)
assert_allclose_quantity(s_slc.r, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = PhysicsSphericalRepresentation(phi=1 * u.deg, theta=2 * u.deg, r=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = PhysicsSphericalDifferential(
4 * u.mas / u.yr, 5 * u.mas / u.yr, 6 * u.km / u.s
)
sph = PhysicsSphericalRepresentation(
1 * u.deg, 2 * u.deg, 3 * u.kpc, differentials={"s": difs}
)
got = sph.represent_as(SphericalRepresentation, SphericalDifferential)
assert np.may_share_memory(sph.phi, got.lon)
assert np.may_share_memory(sph.r, got.distance)
expected = BaseRepresentation.represent_as(
sph, SphericalRepresentation, SphericalDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(UnitSphericalRepresentation, UnitSphericalDifferential)
assert np.may_share_memory(sph.phi, got.lon)
expected = BaseRepresentation.represent_as(
sph, UnitSphericalRepresentation, UnitSphericalDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(CylindricalRepresentation, CylindricalDifferential)
assert np.may_share_memory(sph.phi, got.phi)
expected = BaseRepresentation.represent_as(
sph, CylindricalRepresentation, CylindricalDifferential
)
assert_allclose_quantity(got.rho, expected.rho, atol=5e-17 * u.kpc)
assert_allclose_quantity(got.phi, expected.phi, atol=3e-16 * u.deg)
assert_array_equal(got.z, expected.z)
got = sph.represent_as(RadialRepresentation, RadialDifferential)
assert np.may_share_memory(sph.r, got.distance)
expected = BaseRepresentation.represent_as(
sph, RadialRepresentation, RadialDifferential
)
assert representation_equal_up_to_angular_type(got, expected)
def test_to_cylindrical_at_the_origin(self):
"""Test that the transformation to cylindrical at the origin preserves phi."""
sph = PhysicsSphericalRepresentation(
phi=270 * u.deg, theta=45 * u.deg, r=0 * u.kpc
)
cyl = sph.represent_as(CylindricalRepresentation)
assert cyl.rho == 0.0 * u.kpc
assert cyl.z == 0.0 * u.kpc
assert cyl.phi == 270 * u.deg # phi is preserved exactly
def test_initialize_with_nan(self):
# Regression test for gh-11558: initialization used to fail.
psr = PhysicsSphericalRepresentation(
[1.0, np.nan] * u.deg, [np.nan, 2.0] * u.deg, [3.0, np.nan] * u.m
)
assert_array_equal(np.isnan(psr.phi), [False, True])
assert_array_equal(np.isnan(psr.theta), [True, False])
assert_array_equal(np.isnan(psr.r), [False, True])
def test_transform(self):
"""Test ``.transform()`` on rotation and general transform matrices."""
# set up representation
ds1 = PhysicsSphericalDifferential(
d_phi=[1, 2] * u.mas / u.yr,
d_theta=[3, 4] * u.mas / u.yr,
d_r=[-5, 6] * u.km / u.s,
)
s1 = PhysicsSphericalRepresentation(
phi=[1, 2] * u.deg,
theta=[3, 4] * u.deg,
r=[5, 6] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = PhysicsSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.theta, s1.theta)
assert_allclose_quantity(s2.r, s1.r)
# compare differentials. should be unchanged (ds1).
assert_allclose_quantity(ds2.d_phi, ds1.d_phi)
assert_allclose_quantity(ds2.d_theta, ds1.d_theta)
assert_allclose_quantity(ds2.d_r, ds1.d_r)
assert_allclose_quantity(ds2.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds2.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds2.d_r, dexpected.d_r)
# now with a non rotation matrix
# transform representation & get comparison (thru CartesianRep)
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(PhysicsSphericalRepresentation, PhysicsSphericalDifferential)
)
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.phi, expected.phi)
assert_allclose_quantity(s3.theta, expected.theta)
assert_allclose_quantity(s3.r, expected.r)
assert_allclose_quantity(ds3.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds3.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds3.d_r, dexpected.d_r)
def test_transform_with_NaN(self):
# all over again, but with a NaN in the distance
ds1 = PhysicsSphericalDifferential(
d_phi=[1, 2] * u.mas / u.yr,
d_theta=[3, 4] * u.mas / u.yr,
d_r=[-5, 6] * u.km / u.s,
)
s1 = PhysicsSphericalRepresentation(
phi=[1, 2] * u.deg,
theta=[3, 4] * u.deg,
r=[5, np.nan] * u.kpc,
differentials=ds1,
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = PhysicsSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2
)
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.theta, s1.theta)
assert_allclose_quantity(s2.r, s1.r)
assert_allclose_quantity(ds2.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds2.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds2.d_r, dexpected.d_r)
# now with a non rotation matrix
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
thruC = (
s1.represent_as(CartesianRepresentation, CartesianDifferential)
.transform(matrices["general"])
.represent_as(PhysicsSphericalRepresentation, PhysicsSphericalDifferential)
)
dthruC = thruC.differentials["s"]
        # s3 should not propagate NaN.
assert_array_equal(np.isnan(s3.phi), (False, False))
assert_array_equal(np.isnan(s3.theta), (False, False))
assert_array_equal(np.isnan(s3.r), (False, True))
        # ds3 does propagate NaN, because there currently aren't any shortcuts on the transform
assert_array_equal(np.isnan(ds3.d_phi), (False, True))
assert_array_equal(np.isnan(ds3.d_theta), (False, True))
assert_array_equal(np.isnan(ds3.d_r), (False, True))
# through Cartesian does
assert_array_equal(np.isnan(thruC.phi), (False, True))
assert_array_equal(np.isnan(thruC.theta), (False, True))
assert_array_equal(np.isnan(thruC.r), (False, True))
# so only test on the first value
assert_allclose_quantity(s3.phi[0], thruC.phi[0])
assert_allclose_quantity(s3.theta[0], thruC.theta[0])
assert_allclose_quantity(ds3.d_phi[0], dthruC.d_phi[0])
assert_allclose_quantity(ds3.d_theta[0], dthruC.d_theta[0])
class TestCartesianRepresentation:
def test_name(self):
assert CartesianRepresentation.name == "cartesian"
assert CartesianRepresentation.name in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CartesianRepresentation()
def test_init_quantity(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_singleunit(self):
s1 = CartesianRepresentation(x=1, y=2, z=3, unit=u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CartesianRepresentation(
x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.Mpc, z=[3, 4, 5] * u.kpc
)
assert s1.x.unit is u.pc
assert s1.y.unit is u.Mpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, [1, 2, 3])
assert_allclose(s1.y.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_one_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.pc
assert s1.z.unit is u.pc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
r = np.arange(27.0).reshape(3, 3, 3) * u.kpc
s2 = CartesianRepresentation(r, xyz_axis=0)
assert s2.shape == (3, 3)
assert s2.x.unit == u.kpc
assert np.all(s2.x == r[0])
assert np.all(s2.xyz == r)
assert np.all(s2.get_xyz(xyz_axis=0) == r)
s3 = CartesianRepresentation(r, xyz_axis=1)
assert s3.shape == (3, 3)
assert np.all(s3.x == r[:, 0])
assert np.all(s3.y == r[:, 1])
assert np.all(s3.z == r[:, 2])
assert np.all(s3.get_xyz(xyz_axis=1) == r)
s4 = CartesianRepresentation(r, xyz_axis=2)
assert s4.shape == (3, 3)
assert np.all(s4.x == r[:, :, 0])
assert np.all(s4.get_xyz(xyz_axis=2) == r)
s5 = CartesianRepresentation(r, unit=u.pc)
assert s5.x.unit == u.pc
assert np.all(s5.xyz == r)
s6 = CartesianRepresentation(r.value, unit=u.pc, xyz_axis=2)
assert s6.x.unit == u.pc
assert np.all(s6.get_xyz(xyz_axis=2).value == r.value)
def test_init_one_array_size_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc)
assert exc.value.args[0].startswith("too many values to unpack")
def test_init_xyz_but_more_than_one_array_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(
x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.pc, z=[3, 4, 5] * u.pc, xyz_axis=0
)
assert "xyz_axis should only be set" in str(exc.value)
def test_init_one_array_yz_fail(self):
with pytest.raises(
ValueError,
match="x, y, and z are required to instantiate CartesianRepresentation",
):
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc, y=[1, 2] * u.pc)
def test_init_array_nocopy(self):
x = [8, 9, 10] * u.pc
y = [5, 6, 7] * u.Mpc
z = [2, 3, 4] * u.kpc
s1 = CartesianRepresentation(x=x, y=y, z=z, copy=False)
x[:] = [1, 2, 3] * u.kpc
y[:] = [9, 9, 8] * u.kpc
z[:] = [1, 2, 1] * u.kpc
assert_allclose_quantity(x, s1.x)
assert_allclose_quantity(y, s1.y)
assert_allclose_quantity(z, s1.z)
def test_xyz_is_view_if_possible(self):
xyz = np.arange(1.0, 10.0).reshape(3, 3)
s1 = CartesianRepresentation(xyz, unit=u.kpc, copy=False)
s1_xyz = s1.xyz
assert s1_xyz.value[0, 0] == 1.0
xyz[0, 0] = 0.0
assert s1.x[0] == 0.0
assert s1_xyz.value[0, 0] == 0.0
# Not possible: we don't check that tuples are from the same array
xyz = np.arange(1.0, 10.0).reshape(3, 3)
s2 = CartesianRepresentation(*xyz, unit=u.kpc, copy=False)
s2_xyz = s2.xyz
assert s2_xyz.value[0, 0] == 1.0
xyz[0, 0] = 0.0
assert s2.x[0] == 0.0
assert s2_xyz.value[0, 0] == 1.0
def test_reprobj(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
s2 = CartesianRepresentation.from_representation(s1)
assert s2.x == 1 * u.kpc
assert s2.y == 2 * u.kpc
assert s2.z == 3 * u.kpc
s3 = CartesianRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=5 * u.kpc)
assert s1.x.unit == u.kpc
assert s1.y.unit == u.kpc
assert s1.z.unit == u.kpc
assert_allclose(s1.x.value, [1, 2])
assert_allclose(s1.y.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CartesianRepresentation(
x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6, 7] * u.kpc
)
assert exc.value.args[0] == "Input parameters x, y, and z cannot be broadcast"
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.x = 1.0 * u.kpc
with pytest.raises(AttributeError):
s1.y = 1.0 * u.kpc
with pytest.raises(AttributeError):
s1.z = 1.0 * u.kpc
def test_xyz(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert isinstance(s1.xyz, u.Quantity)
assert s1.xyz.unit is u.kpc
assert_allclose(s1.xyz.value, [1, 2, 3])
def test_unit_mismatch(self):
q_len = u.Quantity([1], u.km)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_nonlen, y=q_len, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_nonlen, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_len, z=q_nonlen)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
def test_unit_non_length(self):
s1 = CartesianRepresentation(x=1 * u.kg, y=2 * u.kg, z=3 * u.kg)
s2 = CartesianRepresentation(
x=1 * u.km / u.s, y=2 * u.km / u.s, z=3 * u.km / u.s
)
banana = u.def_unit("banana")
s3 = CartesianRepresentation(x=1 * banana, y=2 * banana, z=3 * banana)
def test_getitem(self):
s = CartesianRepresentation(
x=np.arange(10) * u.m, y=-np.arange(10) * u.m, z=3 * u.km
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
def test_getitem_scalar(self):
s = CartesianRepresentation(x=1 * u.m, y=-2 * u.m, z=3 * u.km)
with pytest.raises(TypeError):
s_slc = s[0]
def test_transform(self):
ds1 = CartesianDifferential(
d_x=[1, 2] * u.km / u.s, d_y=[3, 4] * u.km / u.s, d_z=[5, 6] * u.km / u.s
)
s1 = CartesianRepresentation(
x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6] * u.kpc, differentials=ds1
)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["general"])
ds2 = s2.differentials["s"]
dexpected = CartesianDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["general"]), base=s2
)
assert_allclose_quantity(ds2.d_x, dexpected.d_x)
assert_allclose_quantity(ds2.d_y, dexpected.d_y)
assert_allclose_quantity(ds2.d_z, dexpected.d_z)
# also explicitly calculate, since we can
assert_allclose(s2.x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6])
assert_allclose(s2.y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6])
assert_allclose(s2.z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6])
assert_allclose(ds2.d_x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6])
assert_allclose(ds2.d_y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6])
assert_allclose(ds2.d_z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6])
assert s2.x.unit is u.kpc
assert s2.y.unit is u.kpc
assert s2.z.unit is u.kpc
assert ds2.d_x.unit == u.km / u.s
assert ds2.d_y.unit == u.km / u.s
assert ds2.d_z.unit == u.km / u.s
def test_transform_non_contiguous_matrix(self):
# Regression test for gh-15503 (due to pyerfa gh-123)
r = CartesianRepresentation([1, 2, 3] * u.kpc)
m = np.array([[1, 0, 0, 5], [0, 1, 0, 6], [0, 0, 1, 7]], dtype="f8")[:, :3]
assert_array_equal(m, np.eye(3))
assert representation_equal(r.transform(m), r)
class TestCylindricalRepresentation:
def test_name(self):
assert CylindricalRepresentation.name == "cylindrical"
assert CylindricalRepresentation.name in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CylindricalRepresentation()
def test_init_quantity(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
assert s1.rho.unit is u.kpc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, 1)
assert_allclose(s1.phi.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CylindricalRepresentation(
rho=[1, 2, 3] * u.pc, phi=[2, 3, 4] * u.deg, z=[3, 4, 5] * u.kpc
)
assert s1.rho.unit is u.pc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, [1, 2, 3])
assert_allclose(s1.phi.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_array_nocopy(self):
rho = [8, 9, 10] * u.pc
phi = [5, 6, 7] * u.deg
z = [2, 3, 4] * u.kpc
s1 = CylindricalRepresentation(rho=rho, phi=phi, z=z, copy=False)
rho[:] = [9, 2, 3] * u.kpc
phi[:] = [1, 2, 3] * u.arcmin
z[:] = [-2, 3, 8] * u.kpc
assert_allclose_quantity(rho, s1.rho)
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(z, s1.z)
def test_reprobj(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
s2 = CylindricalRepresentation.from_representation(s1)
assert s2.rho == 1 * u.kpc
assert s2.phi == 2 * u.deg
assert s2.z == 3 * u.kpc
s3 = CylindricalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = CylindricalRepresentation(
rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=5 * u.kpc
)
assert s1.rho.unit == u.kpc
assert s1.phi.unit == u.deg
assert s1.z.unit == u.kpc
assert_allclose(s1.rho.value, [1, 2])
assert_allclose(s1.phi.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(
ValueError, match="Input parameters rho, phi, and z cannot be broadcast"
):
s1 = CylindricalRepresentation(
rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=[5, 6, 7] * u.kpc
)
def test_readonly(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=20 * u.deg, z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.rho = 1.0 * u.kpc
with pytest.raises(AttributeError):
s1.phi = 20 * u.deg
with pytest.raises(AttributeError):
s1.z = 1.0 * u.kpc
    def test_unit_mismatch(self):
q_len = u.Quantity([1], u.kpc)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_nonlen, phi=10 * u.deg, z=q_len)
assert exc.value.args[0] == "rho and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_len, phi=10 * u.deg, z=q_nonlen)
assert exc.value.args[0] == "rho and z should have matching physical types"
def test_getitem(self):
s = CylindricalRepresentation(
rho=np.arange(10) * u.pc, phi=-np.arange(10) * u.deg, z=1 * u.kpc
)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.rho, [2, 4, 6] * u.pc)
assert_allclose_quantity(s_slc.phi, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.z, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = CylindricalRepresentation(rho=1 * u.pc, phi=-2 * u.deg, z=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_transform(self):
s1 = CylindricalRepresentation(
phi=[1, 2] * u.deg, z=[3, 4] * u.pc, rho=[5, 6] * u.kpc
)
s2 = s1.transform(matrices["rotation"])
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.z, s1.z)
assert_allclose_quantity(s2.rho, s1.rho)
assert s2.phi.unit is u.rad
assert s2.z.unit is u.kpc
assert s2.rho.unit is u.kpc
# now with a non rotation matrix
s3 = s1.transform(matrices["general"])
expected = (s1.to_cartesian().transform(matrices["general"])).represent_as(
CylindricalRepresentation
)
assert_allclose_quantity(s3.phi, expected.phi)
assert_allclose_quantity(s3.z, expected.z)
assert_allclose_quantity(s3.rho, expected.rho)
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = CylindricalDifferential(
d_rho=4 * u.km / u.s, d_phi=5 * u.mas / u.yr, d_z=6 * u.km / u.s
)
cyl = CylindricalRepresentation(
rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc, differentials={"s": difs}
)
# PhysicsSpherical Representation
got = cyl.represent_as(
PhysicsSphericalRepresentation, PhysicsSphericalDifferential
)
expected = BaseRepresentation.represent_as(
cyl, PhysicsSphericalRepresentation, PhysicsSphericalDifferential
)
assert_allclose_quantity(got.r, expected.r)
assert_allclose_quantity(got.phi, expected.phi)
assert_allclose_quantity(got.theta, expected.theta)
assert representation_equal_up_to_angular_type(got, expected)
def test_to_physicsspherical_at_the_origin(self):
"""Test that the transformation to physicsspherical at the origin preserves phi."""
cyl = CylindricalRepresentation(
rho=0 * u.kpc,
phi=23.5 * u.deg,
z=3 * u.kpc,
)
sph = cyl.represent_as(PhysicsSphericalRepresentation)
assert_allclose(sph.r, 3 * u.kpc)
assert_allclose(sph.theta, 0 * u.deg)
assert cyl.phi == 23.5 * u.deg # phi is preserved exactly
class TestUnitSphericalCosLatDifferential:
@pytest.mark.parametrize("matrix", list(matrices.values()))
def test_transform(self, matrix):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = UnitSphericalCosLatDifferential(
d_lon_coslat=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,
)
s1 = UnitSphericalRepresentation(lon=[1, 2] * u.deg, lat=[3, 4] * u.deg)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrix)
ds2 = ds1.transform(matrix, s1, s2)
dexpected = UnitSphericalCosLatDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrix), base=s2
)
assert_allclose_quantity(ds2.d_lon_coslat, dexpected.d_lon_coslat)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
def test_cartesian_spherical_roundtrip():
s1 = CartesianRepresentation(
x=[1, 2000.0] * u.kpc, y=[3000.0, 4.0] * u.pc, z=[5.0, 6000.0] * u.pc
)
s2 = SphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = SphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.lon, s4.lon)
assert_allclose_quantity(s2.lat, s4.lat)
assert_allclose_quantity(s2.distance, s4.distance)
def test_cartesian_setting_with_other():
s1 = CartesianRepresentation(
x=[1, 2000.0] * u.kpc, y=[3000.0, 4.0] * u.pc, z=[5.0, 6000.0] * u.pc
)
s1[0] = SphericalRepresentation(0.0 * u.deg, 0.0 * u.deg, 1 * u.kpc)
assert_allclose_quantity(s1.x, [1.0, 2000.0] * u.kpc)
assert_allclose_quantity(s1.y, [0.0, 4.0] * u.pc)
assert_allclose_quantity(s1.z, [0.0, 6000.0] * u.pc)
with pytest.raises(ValueError, match="loss of information"):
s1[1] = UnitSphericalRepresentation(0.0 * u.deg, 10.0 * u.deg)
def test_cartesian_physics_spherical_roundtrip():
s1 = CartesianRepresentation(
x=[1, 2000.0] * u.kpc, y=[3000.0, 4.0] * u.pc, z=[5.0, 6000.0] * u.pc
)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
def test_spherical_physics_spherical_roundtrip():
s1 = SphericalRepresentation(lon=3 * u.deg, lat=4 * u.deg, distance=3 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s3.lon)
assert_allclose_quantity(s1.lat, s3.lat)
assert_allclose_quantity(s1.distance, s3.distance)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
assert_allclose_quantity(s1.lon, s4.phi)
assert_allclose_quantity(s1.lat, 90.0 * u.deg - s4.theta)
assert_allclose_quantity(s1.distance, s4.r)
def test_cartesian_cylindrical_roundtrip():
s1 = CartesianRepresentation(
x=np.array([1.0, 2000.0]) * u.kpc,
y=np.array([3000.0, 4.0]) * u.pc,
z=np.array([5.0, 600.0]) * u.cm,
)
s2 = CylindricalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = CylindricalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.rho, s4.rho)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.z, s4.z)
def test_unit_spherical_roundtrip():
s1 = UnitSphericalRepresentation(
lon=[10.0, 30.0] * u.deg, lat=[5.0, 6.0] * u.arcmin
)
s2 = CartesianRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = UnitSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s4.lon)
assert_allclose_quantity(s1.lat, s4.lat)
def test_no_unnecessary_copies():
s1 = UnitSphericalRepresentation(
lon=[10.0, 30.0] * u.deg, lat=[5.0, 6.0] * u.arcmin
)
s2 = s1.represent_as(UnitSphericalRepresentation)
assert s2 is s1
assert np.may_share_memory(s1.lon, s2.lon)
assert np.may_share_memory(s1.lat, s2.lat)
s3 = s1.represent_as(SphericalRepresentation)
assert np.may_share_memory(s1.lon, s3.lon)
assert np.may_share_memory(s1.lat, s3.lat)
s4 = s1.represent_as(CartesianRepresentation)
s5 = s4.represent_as(CylindricalRepresentation)
assert np.may_share_memory(s5.z, s4.z)
def test_representation_repr():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert (
repr(r1) == "<SphericalRepresentation (lon, lat, distance) in (deg, deg, kpc)\n"
" (1., 2.5, 1.)>"
)
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert repr(r2) == "<CartesianRepresentation (x, y, z) in kpc\n (1., 2., 3.)>"
r3 = CartesianRepresentation(
x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc
)
assert (
repr(r3) == "<CartesianRepresentation (x, y, z) in kpc\n"
" [(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)]>"
)
def test_representation_repr_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit="m")
assert (
repr(cr) == "<CartesianRepresentation (x, y, z) in m\n"
" [[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n"
" [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n"
" [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]]>"
)
# This was broken before.
assert (
repr(cr.T) == "<CartesianRepresentation (x, y, z) in m\n"
" [[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n"
" [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n"
" [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]]>"
)
def test_representation_str():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert str(r1) == "(1., 2.5, 1.) (deg, deg, kpc)"
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert str(r2) == "(1., 2., 3.) kpc"
r3 = CartesianRepresentation(
x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc
)
assert str(r3) == "[(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)] kpc"
def test_representation_str_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit="m")
assert (
str(cr) == "[[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n"
" [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n"
" [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]] m"
)
# This was broken before.
assert (
str(cr.T) == "[[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n"
" [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n"
" [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]] m"
)
def test_subclass_representation():
from astropy.coordinates.builtin_frames import ICRS
class Longitude180(Longitude):
def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs):
self = super().__new__(
cls, angle, unit=unit, wrap_angle=wrap_angle, **kwargs
)
return self
class SphericalWrap180Representation(SphericalRepresentation):
attr_classes = {"lon": Longitude180, "lat": Latitude, "distance": u.Quantity}
class ICRSWrap180(ICRS):
frame_specific_representation_info = (
ICRS._frame_specific_representation_info.copy()
)
frame_specific_representation_info[SphericalWrap180Representation] = (
frame_specific_representation_info[SphericalRepresentation]
)
default_representation = SphericalWrap180Representation
c = ICRSWrap180(ra=-1 * u.deg, dec=-2 * u.deg, distance=1 * u.m)
assert c.ra.value == -1
assert c.ra.unit is u.deg
assert c.dec.value == -2
assert c.dec.unit is u.deg
def test_minimal_subclass():
    # Basically to check that what we document works;
# see doc/coordinates/representations.rst
class LogDRepresentation(BaseRepresentation):
attr_classes = {"lon": Longitude, "lat": Latitude, "logd": u.Dex}
def to_cartesian(self):
d = self.logd.physical
x = d * np.cos(self.lat) * np.cos(self.lon)
y = d * np.cos(self.lat) * np.sin(self.lon)
z = d * np.sin(self.lat)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
lon = np.arctan2(cart.y, cart.x)
lat = np.arctan2(cart.z, s)
return cls(lon=lon, lat=lat, logd=u.Dex(r), copy=False)
ld1 = LogDRepresentation(90.0 * u.deg, 0.0 * u.deg, 1.0 * u.dex(u.kpc))
ld2 = LogDRepresentation(lon=90.0 * u.deg, lat=0.0 * u.deg, logd=1.0 * u.dex(u.kpc))
assert np.all(ld1.lon == ld2.lon)
assert np.all(ld1.lat == ld2.lat)
assert np.all(ld1.logd == ld2.logd)
c = ld1.to_cartesian()
assert_allclose_quantity(c.xyz, [0.0, 10.0, 0.0] * u.kpc, atol=1.0 * u.npc)
ld3 = LogDRepresentation.from_cartesian(c)
assert np.all(ld3.lon == ld2.lon)
assert np.all(ld3.lat == ld2.lat)
assert np.all(ld3.logd == ld2.logd)
s = ld1.represent_as(SphericalRepresentation)
assert_allclose_quantity(s.lon, ld1.lon)
assert_allclose_quantity(s.distance, 10.0 * u.kpc)
assert_allclose_quantity(s.lat, ld1.lat)
with pytest.raises(TypeError):
LogDRepresentation(0.0 * u.deg, 1.0 * u.deg)
with pytest.raises(TypeError):
LogDRepresentation(
0.0 * u.deg, 1.0 * u.deg, 1.0 * u.dex(u.kpc), lon=1.0 * u.deg
)
with pytest.raises(TypeError):
LogDRepresentation(0.0 * u.deg, 1.0 * u.deg, 1.0 * u.dex(u.kpc), True, False)
with pytest.raises(TypeError):
LogDRepresentation(0.0 * u.deg, 1.0 * u.deg, 1.0 * u.dex(u.kpc), foo="bar")
# if we define it a second time, even the qualnames are the same,
# so we raise
with pytest.raises(ValueError):
class LogDRepresentation(BaseRepresentation):
attr_classes = {"lon": Longitude, "lat": Latitude, "logr": u.Dex}
def test_duplicate_warning():
from astropy.coordinates.representation import (
DUPLICATE_REPRESENTATIONS,
REPRESENTATION_CLASSES,
)
with pytest.warns(DuplicateRepresentationWarning):
class UnitSphericalRepresentation(BaseRepresentation):
attr_classes = {"lon": Longitude, "lat": Latitude}
assert "unitspherical" in DUPLICATE_REPRESENTATIONS
assert "unitspherical" not in REPRESENTATION_CLASSES
assert (
"astropy.coordinates.representation.spherical.UnitSphericalRepresentation"
in REPRESENTATION_CLASSES
)
assert (
__name__ + ".test_duplicate_warning.<locals>.UnitSphericalRepresentation"
in REPRESENTATION_CLASSES
)
class TestCartesianRepresentationWithDifferential:
def test_init_differential(self):
diff = CartesianDifferential(
d_x=1 * u.km / u.s, d_y=2 * u.km / u.s, d_z=3 * u.km / u.s
)
# Check that a single differential gets turned into a 1-item dict.
s1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff
)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert len(s1.differentials) == 1
assert s1.differentials["s"] is diff
# can also pass in an explicit dictionary
s1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials={"s": diff}
)
assert len(s1.differentials) == 1
assert s1.differentials["s"] is diff
# using the wrong key will cause it to fail
with pytest.raises(ValueError):
s1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials={"1 / s2": diff}
)
# make sure other kwargs are handled properly
s1 = CartesianRepresentation(
x=1, y=2, z=3, differentials=diff, copy=COPY_IF_NEEDED, unit=u.kpc
)
assert len(s1.differentials) == 1
assert s1.differentials["s"] is diff
with pytest.raises(TypeError): # invalid type passed to differentials
CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials="garmonbozia"
)
# And that one can add it to another representation.
s1 = CartesianRepresentation(
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc),
differentials=diff,
)
assert len(s1.differentials) == 1
assert s1.differentials["s"] is diff
# make sure differentials can't accept differentials
with pytest.raises(TypeError):
CartesianDifferential(
d_x=1 * u.km / u.s,
d_y=2 * u.km / u.s,
d_z=3 * u.km / u.s,
differentials=diff,
)
def test_init_differential_compatible(self):
# TODO: more extensive checking of this
# should fail - representation and differential not compatible
diff = SphericalDifferential(
d_lon=1 * u.mas / u.yr, d_lat=2 * u.mas / u.yr, d_distance=3 * u.km / u.s
)
with pytest.raises(TypeError):
CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff
)
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(
d_lon_coslat=1 * u.mas / u.yr,
d_lat=2 * u.mas / u.yr,
d_distance=3 * u.km / u.s,
)
r1 = SphericalRepresentation(
lon=15 * u.deg, lat=21 * u.deg, distance=1 * u.pc, differentials=diff
)
def test_init_differential_multiple_equivalent_keys(self):
d1 = CartesianDifferential(*[1, 2, 3] * u.km / u.s)
d2 = CartesianDifferential(*[4, 5, 6] * u.km / u.s)
# verify that the check against expected_unit validates against passing
# in two different but equivalent keys
with pytest.raises(ValueError):
r1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials={"s": d1, "yr": d2}
)
def test_init_array_broadcasting(self):
arr1 = np.arange(8).reshape(4, 2) * u.km / u.s
diff = CartesianDifferential(d_x=arr1, d_y=arr1, d_z=arr1)
# shapes aren't compatible
arr2 = np.arange(27).reshape(3, 9) * u.kpc
with pytest.raises(ValueError):
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2, differentials=diff)
arr2 = np.arange(8).reshape(4, 2) * u.kpc
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2, differentials=diff)
assert rep.x.unit is u.kpc
assert rep.y.unit is u.kpc
assert rep.z.unit is u.kpc
assert len(rep.differentials) == 1
assert rep.differentials["s"] is diff
assert rep.xyz.shape == rep.differentials["s"].d_xyz.shape
def test_reprobj(self):
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(
d_lon_coslat=1 * u.mas / u.yr,
d_lat=2 * u.mas / u.yr,
d_distance=3 * u.km / u.s,
)
r1 = SphericalRepresentation(
lon=15 * u.deg, lat=21 * u.deg, distance=1 * u.pc, differentials=diff
)
r2 = CartesianRepresentation.from_representation(r1)
assert r2.name == "cartesian"
assert not r2.differentials
r3 = SphericalRepresentation(r1)
assert r3.differentials
assert representation_equal(r3, r1)
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError): # attribute is not settable
s1.differentials = "thing"
def test_represent_as(self):
diff = CartesianDifferential(
d_x=1 * u.km / u.s, d_y=2 * u.km / u.s, d_z=3 * u.km / u.s
)
rep1 = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff
)
# Only change the representation, drop the differential
new_rep = rep1.represent_as(SphericalRepresentation)
assert new_rep.name == "spherical"
assert not new_rep.differentials # dropped
# Pass in separate classes for representation, differential
new_rep = rep1.represent_as(
SphericalRepresentation, SphericalCosLatDifferential
)
assert new_rep.name == "spherical"
assert new_rep.differentials["s"].name == "sphericalcoslat"
# Pass in a dictionary for the differential classes
new_rep = rep1.represent_as(
SphericalRepresentation, {"s": SphericalCosLatDifferential}
)
assert new_rep.name == "spherical"
assert new_rep.differentials["s"].name == "sphericalcoslat"
# make sure represent_as() passes through the differentials
for name in REPRESENTATION_CLASSES:
if name == "radial":
# TODO: Converting a CartesianDifferential to a
# RadialDifferential fails, even on `main`
continue
elif "geodetic" in name or "bodycentric" in name:
# TODO: spheroidal representations (geodetic or bodycentric)
# do not have differentials yet
continue
new_rep = rep1.represent_as(
REPRESENTATION_CLASSES[name], DIFFERENTIAL_CLASSES[name]
)
assert new_rep.name == name
assert len(new_rep.differentials) == 1
assert new_rep.differentials["s"].name == name
with pytest.raises(ValueError) as excinfo:
rep1.represent_as("name")
assert "use frame object" in str(excinfo.value)
@pytest.mark.parametrize(
"sph_diff,usph_diff",
[
(SphericalDifferential, UnitSphericalDifferential),
(SphericalCosLatDifferential, UnitSphericalCosLatDifferential),
],
)
def test_represent_as_unit_spherical_with_diff(self, sph_diff, usph_diff):
"""Test that differential angles are correctly reduced."""
diff = CartesianDifferential(
d_x=1 * u.km / u.s, d_y=2 * u.km / u.s, d_z=3 * u.km / u.s
)
rep = CartesianRepresentation(
x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc, differentials=diff
)
sph = rep.represent_as(SphericalRepresentation, sph_diff)
usph = rep.represent_as(UnitSphericalRepresentation, usph_diff)
assert components_equal(usph, sph.represent_as(UnitSphericalRepresentation))
assert components_equal(
usph.differentials["s"], sph.differentials["s"].represent_as(usph_diff)
)
# Just to be sure components_equal and the represent_as work as advertised,
# a sanity check: d_lat is always defined and should be the same.
assert_array_equal(sph.differentials["s"].d_lat, usph.differentials["s"].d_lat)
def test_getitem(self):
d = CartesianDifferential(
d_x=np.arange(10) * u.m / u.s,
d_y=-np.arange(10) * u.m / u.s,
d_z=1.0 * u.m / u.s,
)
s = CartesianRepresentation(
x=np.arange(10) * u.m, y=-np.arange(10) * u.m, z=3 * u.km, differentials=d
)
s_slc = s[2:8:2]
s_dif = s_slc.differentials["s"]
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
assert_allclose_quantity(s_dif.d_x, [2, 4, 6] * u.m / u.s)
assert_allclose_quantity(s_dif.d_y, [-2, -4, -6] * u.m / u.s)
assert_allclose_quantity(s_dif.d_z, [1, 1, 1] * u.m / u.s)
def test_setitem(self):
d = CartesianDifferential(
d_x=np.arange(5) * u.m / u.s,
d_y=-np.arange(5) * u.m / u.s,
d_z=1.0 * u.m / u.s,
)
s = CartesianRepresentation(
x=np.arange(5) * u.m, y=-np.arange(5) * u.m, z=3 * u.km, differentials=d
)
s[:2] = s[2]
assert_array_equal(s.x, [2, 2, 2, 3, 4] * u.m)
assert_array_equal(s.y, [-2, -2, -2, -3, -4] * u.m)
assert_array_equal(s.z, [3, 3, 3, 3, 3] * u.km)
assert_array_equal(s.differentials["s"].d_x, [2, 2, 2, 3, 4] * u.m / u.s)
assert_array_equal(s.differentials["s"].d_y, [-2, -2, -2, -3, -4] * u.m / u.s)
assert_array_equal(s.differentials["s"].d_z, [1, 1, 1, 1, 1] * u.m / u.s)
s2 = s.represent_as(SphericalRepresentation, SphericalDifferential)
s[0] = s2[3]
assert_allclose_quantity(s.x, [3, 2, 2, 3, 4] * u.m)
assert_allclose_quantity(s.y, [-3, -2, -2, -3, -4] * u.m)
assert_allclose_quantity(s.z, [3, 3, 3, 3, 3] * u.km)
assert_allclose_quantity(s.differentials["s"].d_x, [3, 2, 2, 3, 4] * u.m / u.s)
assert_allclose_quantity(
s.differentials["s"].d_y, [-3, -2, -2, -3, -4] * u.m / u.s
)
assert_allclose_quantity(s.differentials["s"].d_z, [1, 1, 1, 1, 1] * u.m / u.s)
s3 = CartesianRepresentation(
s.xyz,
differentials={
"s": d,
"s2": CartesianDifferential(np.ones((3, 5)) * u.m / u.s**2),
},
)
with pytest.raises(ValueError, match="same differentials"):
s[0] = s3[2]
s4 = SphericalRepresentation(
0.0 * u.deg,
0.0 * u.deg,
1.0 * u.kpc,
differentials=RadialDifferential(10 * u.km / u.s),
)
with pytest.raises(ValueError, match="loss of information"):
s[0] = s4
def test_transform(self):
d1 = CartesianDifferential(
d_x=[1, 2] * u.km / u.s, d_y=[3, 4] * u.km / u.s, d_z=[5, 6] * u.km / u.s
)
r1 = CartesianRepresentation(
x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6] * u.kpc, differentials=d1
)
r2 = r1.transform(matrices["general"])
d2 = r2.differentials["s"]
assert_allclose_quantity(d2.d_x, [22.0, 28] * u.km / u.s)
assert_allclose_quantity(d2.d_y, [49, 64] * u.km / u.s)
assert_allclose_quantity(d2.d_z, [76, 100.0] * u.km / u.s)
def test_with_differentials(self):
# make sure with_differential correctly creates a new copy with the same
# differential
cr = CartesianRepresentation([1, 2, 3] * u.kpc)
diff = CartesianDifferential([0.1, 0.2, 0.3] * u.km / u.s)
cr2 = cr.with_differentials(diff)
assert cr.differentials != cr2.differentials
assert cr2.differentials["s"] is diff
# make sure it works even if a differential is present already
diff2 = CartesianDifferential([0.1, 0.2, 0.3] * u.m / u.s)
cr3 = CartesianRepresentation([1, 2, 3] * u.kpc, differentials=diff)
cr4 = cr3.with_differentials(diff2)
assert cr4.differentials["s"] != cr3.differentials["s"]
assert cr4.differentials["s"] == diff2
        # also ensure a *scalar* differential works
cr5 = cr.with_differentials(diff)
assert len(cr5.differentials) == 1
assert cr5.differentials["s"] == diff
# make sure we don't update the original representation's dict
d1 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km / u.s)
d2 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km / u.s**2)
r1 = CartesianRepresentation(
*np.random.random((3, 5)), unit=u.pc, differentials=d1
)
r2 = r1.with_differentials(d2)
assert r1.differentials["s"] is r2.differentials["s"]
assert "s2" not in r1.differentials
assert "s2" in r2.differentials
def test_repr_with_differentials():
diff = CartesianDifferential([0.1, 0.2, 0.3] * u.km / u.s)
cr = CartesianRepresentation([1, 2, 3] * u.kpc, differentials=diff)
assert "has differentials w.r.t.: 's'" in repr(cr)
def test_to_cartesian():
"""
Test that to_cartesian drops the differential.
"""
sd = SphericalDifferential(d_lat=1 * u.deg, d_lon=2 * u.deg, d_distance=10 * u.m)
sr = SphericalRepresentation(
lat=1 * u.deg, lon=2 * u.deg, distance=10 * u.m, differentials=sd
)
cart = sr.to_cartesian()
assert cart.name == "cartesian"
assert not cart.differentials
@pytest.fixture
def unitphysics():
"""
    This fixture is used to provide a temporary unit-norm counterpart of
    PhysicsSphericalRepresentation for the test below.
"""
had_unit = False
if hasattr(PhysicsSphericalRepresentation, "_unit_representation"):
orig = PhysicsSphericalRepresentation._unit_representation
had_unit = True
class UnitPhysicsSphericalRepresentation(BaseRepresentation):
attr_classes = {"phi": Angle, "theta": Angle}
def __init__(self, *args, copy=True, **kwargs):
super().__init__(*args, copy=copy, **kwargs)
# Wrap/validate phi/theta
if copy:
self._phi = self._phi.wrap_at(360 * u.deg)
else:
# necessary because the above version of `wrap_at` has to be a copy
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.0 * u.deg) or np.any(self._theta > 180.0 * u.deg):
raise ValueError(
"Inclination angle(s) must be within 0 deg <= angle <= 180 deg, "
f"got {self._theta.to(u.degree)}"
)
@property
def phi(self):
return self._phi
@property
def theta(self):
return self._theta
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return {
"phi": CartesianRepresentation(-sinphi, cosphi, 0.0, copy=False),
"theta": CartesianRepresentation(
costheta * cosphi, costheta * sinphi, -sintheta, copy=False
),
}
def scale_factors(self):
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"phi", sintheta, "theta", l}
def to_cartesian(self):
x = np.sin(self.theta) * np.cos(self.phi)
y = np.sin(self.theta) * np.sin(self.phi)
z = np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, copy=False)
def norm(self):
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled, copy=False)
PhysicsSphericalRepresentation._unit_representation = (
UnitPhysicsSphericalRepresentation
)
yield UnitPhysicsSphericalRepresentation
if had_unit:
PhysicsSphericalRepresentation._unit_representation = orig
else:
del PhysicsSphericalRepresentation._unit_representation
# remove from the module-level representations, if present
REPRESENTATION_CLASSES.pop(UnitPhysicsSphericalRepresentation.name, None)
def test_unitphysics(unitphysics):
obj = unitphysics(phi=0 * u.deg, theta=10 * u.deg)
objkw = unitphysics(phi=0 * u.deg, theta=10 * u.deg)
assert objkw.phi == obj.phi
assert objkw.theta == obj.theta
asphys = obj.represent_as(PhysicsSphericalRepresentation)
assert asphys.phi == obj.phi
assert_allclose(asphys.theta, obj.theta)
assert_allclose_quantity(asphys.r, 1 * u.dimensionless_unscaled)
assph = obj.represent_as(SphericalRepresentation)
assert assph.lon == obj.phi
assert_allclose_quantity(assph.lat, 80 * u.deg)
assert_allclose_quantity(assph.distance, 1 * u.dimensionless_unscaled)
with pytest.raises(TypeError, match="got multiple values"):
unitphysics(1 * u.deg, 2 * u.deg, theta=10)
with pytest.raises(TypeError, match="unexpected keyword.*parrot"):
unitphysics(1 * u.deg, 2 * u.deg, parrot=10)
def test_distance_warning(recwarn):
SphericalRepresentation(1 * u.deg, 2 * u.deg, 1 * u.kpc)
with pytest.raises(ValueError) as excinfo:
SphericalRepresentation(1 * u.deg, 2 * u.deg, -1 * u.kpc)
assert "Distance must be >= 0" in str(excinfo.value)
# second check is because the "originating" ValueError says the above,
# while the representation one includes the below
assert "you must explicitly pass" in str(excinfo.value)
def test_dtype_preservation_in_indexing():
# Regression test for issue #8614 (fixed in #8876)
xyz = np.array([[1, 0, 0], [0.9, 0.1, 0]], dtype="f4")
cr = CartesianRepresentation(xyz, xyz_axis=-1, unit="km")
assert cr.xyz.dtype == xyz.dtype
cr0 = cr[0]
# This used to fail.
assert cr0.xyz.dtype == xyz.dtype
class TestInfo:
def setup_class(cls):
cls.rep = SphericalRepresentation([0, 1] * u.deg, [2, 3] * u.deg, 10 * u.pc)
cls.diff = SphericalDifferential(
[10, 20] * u.mas / u.yr, [30, 40] * u.mas / u.yr, [50, 60] * u.km / u.s
)
cls.rep_w_diff = SphericalRepresentation(cls.rep, differentials=cls.diff)
def test_info_unit(self):
assert self.rep.info.unit == "deg, deg, pc"
assert self.diff.info.unit == "mas / yr, mas / yr, km / s"
assert self.rep_w_diff.info.unit == "deg, deg, pc"
@pytest.mark.parametrize("item", ["rep", "diff", "rep_w_diff"])
def test_roundtrip(self, item):
rep_or_diff = getattr(self, item)
as_dict = rep_or_diff.info._represent_as_dict()
new = rep_or_diff.__class__.info._construct_from_dict(as_dict)
assert np.all(representation_equal(new, rep_or_diff))
@pytest.mark.parametrize(
"cls",
[
SphericalDifferential,
SphericalCosLatDifferential,
CylindricalDifferential,
PhysicsSphericalDifferential,
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
],
)
def test_differential_norm_noncartesian(cls):
# The norm of a non-Cartesian differential without specifying `base` should error
args = (0,) * len(cls.attr_classes)
rep = cls(*args)
with pytest.raises(ValueError, match=r"`base` must be provided .* " + cls.__name__):
rep.norm()
def test_differential_norm_radial():
# Unlike most non-Cartesian differentials, the norm of a radial differential does not require `base`
rep = RadialDifferential(1 * u.km / u.s)
assert_allclose_quantity(rep.norm(), 1 * u.km / u.s)
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@coordinates@tests@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/frontends/fits/__init__.py",
"type": "Python"
}
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@frontends@fits@[email protected]_END.py
|
|
{
"filename": "test_parser_asdf.py",
"repo_name": "spacetelescope/jdaviz",
"repo_path": "jdaviz_extracted/jdaviz-main/jdaviz/configs/imviz/tests/test_parser_asdf.py",
"type": "Python"
}
|
import asdf
import numpy as np
import astropy.units as u
from jdaviz.configs.imviz.tests.utils import create_example_gwcs
def test_asdf_not_rdm(imviz_helper):
# test support for ASDF files that look like Roman files
# for users with or without roman_datamodels:
in_unit = u.Jy
in_data = np.arange(16, dtype=np.float32).reshape((4, 4)) * in_unit
tree = {
'roman': {
'data': in_data,
'meta': {
'wcs': create_example_gwcs((4, 4))
},
},
}
af = asdf.AsdfFile(tree=tree)
imviz_helper.load_data(af)
out_component = imviz_helper.app.data_collection[0].get_component('DATA')
np.testing.assert_array_equal(in_data.value, out_component.data)
assert str(in_unit) == out_component.units
|
spacetelescopeREPO_NAMEjdavizPATH_START.@jdaviz_extracted@jdaviz-main@jdaviz@configs@imviz@tests@[email protected]_END.py
|
{
"filename": "mpfit.py",
"repo_name": "PaulKuin/uvotpy",
"repo_path": "uvotpy_extracted/uvotpy-master/uvotpy/mpfit.py",
"type": "Python"
}
|
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by XXX.
Craig Markwardt converted the FORTRAN code to IDL. The information for the
IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
[email protected]
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
[email protected]
Updated versions can be found at http://cars.uchicago.edu/software
Sergey Koposov converted Mark's Python version from Numeric to numpy
Sergey Koposov, University of Cambridge, Institute of Astronomy,
Madingley road, CB3 0HA, Cambridge, UK
[email protected]
Updated versions can be found at http://code.google.com/p/astrolibpy/source/browse/trunk/
Bug Fixes:
2011-08-26 NPMKuin (MSSL/UCL) some clarification in the documentation.
2013-11-19 NPMKuin (MSSL/UCL) changed import scipy.lib.blas[deprecated] to scipy.linalg.blas
changed trace of array in qrsolve() to a copy since it needs to be writeable.
Known bugs:
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
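As a concrete, purely illustrative sketch (not part of the original
documentation; the names line_resid, fa and p0 are invented here, numpy is
assumed to be available, and the user function object itself is passed as
the first argument), fitting a straight line could look like:

    import numpy as np
    import mpfit

    def line_resid(p, fjac=None, x=None, y=None, err=None):
        # model: straight line with intercept p[0] and slope p[1]
        model = p[0] + p[1] * x
        status = 0                       # non-negative: keep iterating
        return [status, (y - model) / err]

    x = np.linspace(0.0, 10.0, 25)
    y = 2.0 + 0.5 * x                    # in practice: the measured data
    err = np.full_like(x, 0.1)           # 1-sigma uncertainties in y

    fa = {'x': x, 'y': y, 'err': err}    # forwarded to line_resid via functkw
    p0 = [1.0, 1.0]                      # starting guesses for the parameters
    m = mpfit.mpfit(line_resid, p0, functkw=fa, quiet=True)
    # on success m.status > 0; best-fit values in m.params, 1-sigma errors in m.perror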
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It should also return a status
flag and an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None):
    # Parameter values are passed in "p"
    # If fjac==None then partial derivatives should not be
    # computed.  It will always be None if MPFIT is called with the default
    # flag.
    model = F(x, p)  # put here the function for the model.
    #
    # Non-negative status value means MPFIT should continue, negative means
    # stop the calculation.
    status = 0
    # y(x) are the measured values, and err(x) are the errors in y.
    #
    return [status, (y - model) / err]
See below for applications with analytical derivatives.
Here 'x', 'y' and 'err' are the variables of the problem in the example above.
Their names can be changed as a passed parameter to mpfit. So they are
suggestive but not required. Any set of variables can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Parameters of the problem which
need optimization are then passed using the parameter list 'p'.
Use MPFITFUN and MPFITEXPR if you need ideas on how to do that.
The function *must* accept a parameter list, 'p'.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional Numeric array of type Float.
User functions may also indicate a fatal error condition using the
status return described above. If status is set to a number between
-15 and -1 then MPFIT will stop the calculation and return to the caller.
To call the user function, you will need something like:
import mpfit
#import numpy.oldnumeric as Numeric
#
#... define your parameters
par = (p1,p2,p3,...)
#
#... get your data to define
xx = (independent variable values)
yy = (measurements for each x)
e_yy = (errors in each y)
f = {'x': xx, 'y': yy, 'err': e_yy}
#
Z = mpfit.mpfit(myfunct, par, functkw=f, quiet=True)
results returned in Z.status, Z.params, Z.perror, etc.
And if you want to limit the parameters, add a list of dictionaries
in the parinfo keyword with the limits, etcetera.
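As a purely illustrative sketch (not part of the original documentation; it
reuses the myfunct and f names from the example above and assumes a
two-parameter model), holding the first parameter fixed and bounding the
second could look like:

    p0 = [2.0, 0.5]                      # one starting value per parameter
    parinfo = [{'value': 2.0, 'fixed': 1, 'limited': [0, 0], 'limits': [0., 0.]},
               {'value': 0.5, 'fixed': 0, 'limited': [1, 1], 'limits': [0., 1.]}]
    Z = mpfit.mpfit(myfunct, p0, functkw=f, parinfo=parinfo, quiet=True)
    # parameter 0 is held at 2.0; parameter 1 must stay within [0, 1]

The meaning of each PARINFO key is described in detail further below.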
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None):
    # Parameter values are passed in "p"
    # If fjac is not None then partial derivatives must be computed.
    # fjac contains an array of len(p), where each entry
    # is 1 if that parameter is free and 0 if it is fixed.
    model = F(x, p)
    # Non-negative status value means MPFIT should continue, negative means
    # stop the calculation.
    status = 0
    if fjac is not None:
        pderiv = numpy.zeros([len(x), len(p)])
        for j in range(len(p)):
            pderiv[:, j] = FGRAD(x, p, j)
    else:
        pderiv = None
    return [status, (y - model) / err, pderiv]
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (i.e., when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives, the parameter FJAC=None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each parameter.
Each parameter is associated with one element of the array, in
numerical order. The dictionary can have the following keys
(none are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by
MPFIT, but are passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second
element is set, then the parameter is bounded on the
lower/upper side. A parameter can be bounded on both
sides. Both LIMITED and LIMITS must be given
together.
'limits' - a two-element float array. Gives the
parameter limits on the lower and upper sides,
respectively. Zero, one or two of these values can be
set, depending on the values of LIMITED. Both LIMITED
and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The
fitting code of MPFIT does not use this tag in any
way. However, the default iterfunct will print the
parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is
computed automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four
values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x) )/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The
"automatic" one-sided derivative method will chose a
direction for the finite difference which does not
violate any constraints. The other methods do not
perform this check. The two-sided method is in
principle more precise, but requires twice as many
function evaluations. Default: 0.
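In code terms the explicit settings correspond to the following stencils
(a sketch only; model, p, h and j are placeholders, not names used by
MPFIT):
p_hi = p.copy(); p_hi[j] = p[j] + h
p_lo = p.copy(); p_lo[j] = p[j] - h
dfdp_j = (model(x, p_hi) - model(x, p)) / h              # mpside =  1
dfdp_j = (model(x, p) - model(x, p_lo)) / h              # mpside = -1
dfdp_j = (model(x, p_hi) - model(x, p_lo)) / (2.*h)      # mpside =  2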
'mpmaxstep' - the maximum change to be made in the parameter
value. During the fitting process, the parameter
will never be changed by more than this value in
one iteration.
A value of 0 indicates no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving
constants and the parameter array P are permitted.
Example: if parameter 2 is always to be twice parameter
1 then use the following: parinfo[2]['tied'] = '2 * p[1]'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
[ NOTE: the PARNAME can't be used in expressions. ]
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value
will not be printed. This tag can be used to
selectively print only a few parameter values out of
many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP".
Therefore programmers are urged to avoid using tags starting with
the same letters; otherwise they are free to include their own
fields within the PARINFO structure, and they will be ignored.
PARINFO Example with 5 parameters :
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]},\
{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]},\
{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]},\
{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]},\
{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}]
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5): parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7,
2.2, 500, 1.5, and 2000 are given. The first parameter
is fixed at a value of 5.7, and the last parameter is
constrained to be above 50.
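A related sketch for the 'tied' mechanism, using this module's Python
expression syntax (the values and the call are illustrative only):
parinfo = [{'value': 1.0}, {'value': 2.0},
           {'value': 0.0, 'tied': '2 * p[1]'}]
# Parameter 2 always evaluates to twice parameter 1; being fully
# determined by the tie expression, it is treated as fixed and gets
# no error estimate.
m = mpfit.mpfit(myfunct, parinfo=parinfo, functkw=fa)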
EXAMPLE
import mpfit
x = numpy.arange(100, dtype=float)
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = ( p0[0] + p0[1]*x + p0[2]*(x**2) + p0[3]*numpy.sqrt(x) +
      p0[4]*numpy.log(x) )
fa = {'x':x, 'y':y, 'err':err}
m = mpfit.mpfit(myfunct, p0, functkw=fa)
print('status = ', m.status)
if (m.status <= 0): print('error message = ', m.errmsg)
print('parameters = ', m.params)
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,
Y, and ERR keyword parameters that are given by FUNCTKW. The
results can be obtained from the returned object m.
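For completeness, the following is a self-contained sketch of a full fit
from start to finish (the quadratic model, the function name quadfunct and
the synthetic data are invented purely for illustration and are not part
of the MPFIT interface):
import numpy
import mpfit
def quadfunct(p, fjac=None, x=None, y=None, err=None):
    model = p[0] + p[1]*x + p[2]*x**2
    status = 0
    return [status, (y-model)/err]
x = numpy.arange(100, dtype=float)
p_true = [5.7, 2.2, 0.5]
err = numpy.ones(len(x))
y = p_true[0] + p_true[1]*x + p_true[2]*x**2 + numpy.random.normal(0., 1., len(x))
p0 = [1., 1., 1.]
fa = {'x': x, 'y': y, 'err': err}
m = mpfit.mpfit(quadfunct, p0, functkw=fa, quiet=1)
print('status =', m.status)
print('params =', m.params)
print('perror =', m.perror)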
THEORY OF OPERATION
There are many specific strategies for function minimization. One
very popular technique is to use function gradient information to
realize the local structure of the function. Near a local minimum
the function value can be taylor expanded about x0 as follows:
f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)    (1)
       (0th order)   (1st order)          (2nd order)
Here f'(x) is the gradient vector of f at x, and f''(x) is the
Hessian matrix of second derivatives of f at x. The vector x is
the set of function parameters, not the measured data vector. One
can find the minimum of f, f(xm), using Newton's method, arriving
at the following linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for
(xm-x0), the step vector from the current position x0 to the new
projected minimum. Here the problem has been linearized (ie, the
gradient information is known to first order). f''(x0) is
a symmetric n x n matrix, and should be positive definite.
The Levenberg - Marquardt technique is a variation on this theme.
It adds an additional diagonal term to the equation which may aid the
convergence properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall
matrix is diagonally dominant, and the iterations follow steepest
descent. When nu is small, the iterations are quadratically
convergent.
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
determined. However the Hessian matrix is often difficult or
impossible to compute. The gradient f'(x0) may be easier to
compute, if even by finite difference techniques. So-called
quasi-Newton techniques attempt to successively estimate f''(x0)
by building up gradient information as the iterations proceed.
In the least squares problem there are further simplifications
which assist in solving eqn (2). The function to be minimized is
a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described
above. This can be substituted back into eqn (2) after computing
the derivatives:
f'  = 2 Sum(hi hi')
f'' = 2 Sum(hi' hi') + 2 Sum(hi hi'')              (4)
If one assumes that the parameters are already close enough to a
minimum, then one typically finds that the second term in f'' is
negligible [or, in any case, is too difficult to compute]. Thus,
equation (2) can be solved, at least approximately, using only
gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h'
is the Jacobian matrix (dimensions m x n), and dx is (xm-x0). The
user function supplies the residual vector h, and in some cases h'
when it is not found by finite differences (see MPFIT_FDJAC2,
which finds h and hT'). Even if dx is not the best absolute step
to take, it does provide a good estimate of the best *direction*,
so often a line minimization will occur along the dx vector
direction.
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT .
Q = I, and R is upper right triangular. Using h' = Q . R and the
orthogonality of Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular.
Here, R, QT and h are known so this is a matter of solving for dx.
The routine MPFIT_QRFAC provides the QR factorization of h', with
pivoting, and MPFIT_QRSOLV provides the solution for dx.
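As an illustration of eqns (5) and (6), a short numpy sketch (J stands for
the Jacobian h', and the array shapes are arbitrary) shows that the QR
route reproduces the normal-equation step:
import numpy
J = numpy.random.rand(20, 3)       # m x n Jacobian h'
h = numpy.random.rand(20)          # length-m residual vector
# Normal equations, eqn (5):  (J^T J) dx = -J^T h
dx_normal = numpy.linalg.solve(J.T.dot(J), -J.T.dot(h))
# QR factorization, eqn (6):  R dx = -Q^T h
Q, R = numpy.linalg.qr(J)
dx_qr = numpy.linalg.solve(R, -Q.T.dot(h))
assert numpy.allclose(dx_normal, dx_qr)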
REFERENCES
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm:
Implementation and Theory," in *Numerical Analysis*, ed. Watson,
G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or
unmodified copies is granted, provided this copyright and disclaimer
are included unchanged.
Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
Converted from Numeric to numpy (Sergey Koposov, July 2008)
"""
from __future__ import division
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
import numpy
import types
import scipy.linalg.blas
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
# minpack-supplied ... dpmpar,enorm,fdjac2,lmpar,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
class mpfit(object):
blas_enorm32, = scipy.linalg.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float32))
blas_enorm64, = scipy.linalg.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float64))
def __init__(self, fcn, xall=None, functkw={}, parinfo=None,
ftol=1.e-10, xtol=1.e-10, gtol=1.e-10,
damp=0., maxiter=200, factor=100., nprint=1,
iterfunct='default', iterkw={}, nocovar=0,
rescale=0, autoderivative=1, quiet=0,
diag=None, epsfcn=None, debug=0):
"""
Inputs:
fcn:
The function to be minimized. The function should return the weighted
deviations between the model and the data, as described above.
xall:
An array of starting values for each of the parameters of the model.
The number of parameters should be fewer than the number of measurements.
This parameter is optional if the parinfo keyword is used (but see
parinfo). The parinfo keyword provides a mechanism to fix or constrain
individual parameters.
Keywords:
autoderivative:
If this is set, derivatives of the function will be computed
automatically via a finite differencing procedure. If not set, then
fcn must provide the (analytical) derivatives.
Default: set (=1)
NOTE: to supply your own analytical derivatives,
explicitly pass autoderivative=0
ftol:
A nonnegative input variable. Termination occurs when both the actual
and predicted relative reductions in the sum of squares are at most
ftol (and status is accordingly set to 1 or 3). Therefore, ftol
measures the relative error desired in the sum of squares.
Default: 1E-10
functkw:
A dictionary which contains the parameters to be passed to the
user-supplied function specified by fcn via the standard Python
keyword dictionary mechanism. This is the way you can pass additional
data to your user-supplied function without using global variables.
Consider the following example:
if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
'errval':[1.,1.,1.] }
then the user supplied function should be declared like this:
def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
Default: {} No extra parameters are passed to the user-supplied
function.
gtol:
A nonnegative input variable. Termination occurs when the cosine of
the angle between fvec and any column of the jacobian is at most gtol
in absolute value (and status is accordingly set to 4). Therefore,
gtol measures the orthogonality desired between the function vector
and the columns of the jacobian.
Default: 1e-10
iterkw:
The keyword arguments to be passed to iterfunct via the dictionary
keyword mechanism. This should be a dictionary and is similar in
operation to FUNCTKW.
Default: {} No arguments are passed.
iterfunct:
The name of a function to be called upon each NPRINT iteration of the
MPFIT routine. It should be declared in the following way:
def iterfunct(myfunct, p, iter, fnorm, functkw=None,
parinfo=None, quiet=0, dof=None, [iterkw keywords here])
# perform custom iteration update
iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
and QUIET).
myfunct: The user-supplied function to be minimized,
p: The current set of model parameters
iter: The iteration number
functkw: The arguments to be passed to myfunct.
fnorm: The chi-squared value.
quiet: Set when no textual output should be printed.
dof: The number of degrees of freedom, normally the number of points
less the number of free parameters.
See below for documentation of parinfo.
In implementation, iterfunct can perform updates to the terminal or
graphical user interface, to provide feedback while the fit proceeds.
If the fit is to be stopped for any reason, then iterfunct should return
a status value between -15 and -1. Otherwise it should return None
(e.g. no return statement) or 0.
In principle, iterfunct should probably not modify the parameter values,
because it may interfere with the algorithm's stability. In practice it
is allowed.
Default: an internal routine is used to print the parameter values.
Set iterfunct=None if there is no user-defined routine and you don't
want the internal default routine to be called.
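For example, a minimal custom iterfunct might look like the following
sketch (the report format is arbitrary):
def my_iterfunct(myfunct, p, iter, fnorm, functkw=None,
                 parinfo=None, quiet=0, dof=None):
    # Print a one-line progress report; return None (or 0) to continue
    if not quiet:
        print('iter %d: chi-square = %.6g, params = %s' % (iter, fnorm, p))
# and is passed to MPFIT via: m = mpfit.mpfit(myfunct, p0, iterfunct=my_iterfunct)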
maxiter:
The maximum number of iterations to perform. If the number is exceeded,
then the status value is set to 5 and MPFIT returns.
Default: 200 iterations
nocovar:
Set this keyword to prevent the calculation of the covariance matrix
before returning (see COVAR)
Default: clear (=0) The covariance matrix is returned
nprint:
The frequency with which iterfunct is called. A value of 1 indicates
that iterfunct is called with every iteration, while 2 indicates every
other iteration, etc. Note that several Levenberg-Marquardt attempts
can be made in a single iteration.
Default value: 1
parinfo:
Provides a mechanism for more sophisticated constraints to be placed on
parameter values. When parinfo is not passed, then it is assumed that
all parameters are free and unconstrained. Values in parinfo are never
modified during a call to MPFIT.
See description above for the structure of PARINFO.
Default value: None All parameters are free and unconstrained.
quiet:
Set this keyword when no textual output should be printed by MPFIT
damp:
A scalar number, indicating the cut-off value of residuals where
"damping" will occur. Residuals with magnitudes greater than this
number will be replaced by their hyperbolic tangent. This partially
mitigates the so-called large residual problem inherent in
least-squares solvers (as for the test problem CURVI,
http://www.maxthis.com/curviex.htm).
A value of 0 indicates no damping.
Default: 0
Note: DAMP doesn't work with autoderivative=0
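In code terms the damping amounts to a soft clipping of the residual
vector (a sketch of the transformation applied internally):
resid = numpy.tanh(resid / damp)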
xtol:
A nonnegative input variable. Termination occurs when the relative error
between two consecutive iterates is at most xtol (and status is
accordingly set to 2 or 3). Therefore, xtol measures the relative error
desired in the approximate solution.
Default: 1E-10
Outputs:
Returns an object of type mpfit. The results are attributes of this class,
e.g. mpfit.status, mpfit.errmsg, mpfit.params, mpfit.niter, mpfit.covar.
.status
An integer status code is returned. All values greater than zero can
represent success (however .status == 5 may indicate failure to
converge). It can have one of the following values:
-16
A parameter or function value has become infinite or an undefined
number. This is usually a consequence of numerical overflow in the
user's model function, which must be avoided.
-15 to -1
These are error codes that either MYFUNCT or iterfunct may return to
terminate the fitting process. Values from -15 to -1 are reserved
for the user functions and will not clash with MPFIT.
0 Improper input parameters.
1 Both actual and predicted relative reductions in the sum of squares
are at most ftol.
2 Relative error between two consecutive iterates is at most xtol
3 Conditions for status = 1 and status = 2 both hold.
4 The cosine of the angle between fvec and any column of the jacobian
is at most gtol in absolute value.
5 The maximum number of iterations has been reached.
6 ftol is too small. No further reduction in the sum of squares is
possible.
7 xtol is too small. No further improvement in the approximate solution
x is possible.
8 gtol is too small. fvec is orthogonal to the columns of the jacobian
to machine precision.
.fnorm
The value of the summed squared residuals for the returned parameter
values. (chi-square)
.covar
The covariance matrix for the set of parameters returned by MPFIT.
The matrix is NxN where N is the number of parameters. The square root
of the diagonal elements gives the formal 1-sigma statistical errors on
the parameters if errors were treated "properly" in fcn.
Parameter errors are also returned in .perror.
To compute the correlation matrix, pcor, use this example:
cov = mpfit.covar
pcor = cov * 0.
for i in range(n):
    for j in range(n):
        pcor[i,j] = cov[i,j]/numpy.sqrt(cov[i,i]*cov[j,j])
If nocovar is set or MPFIT terminated abnormally, then .covar is set to
a scalar with value None.
.errmsg
A string error or warning message is returned.
.nfev
The number of calls to MYFUNCT performed.
.niter
The number of iterations completed.
.perror
The formal 1-sigma errors in each parameter, computed from the
covariance matrix. If a parameter is held fixed, or if it touches a
boundary, then the error is reported as zero.
If the fit is unweighted (i.e. no errors were given, or the weights
were uniformly set to unity), then .perror will probably not represent
the true parameter uncertainties.
*If* you can assume that the true reduced chi-squared value is unity --
meaning that the fit is implicitly assumed to be of good quality --
then the estimated parameter uncertainties can be computed by scaling
.perror by the square root of the reduced chi-squared value:
dof = len(x) - len(mpfit.params) # deg of freedom
# scaled uncertainties
pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)
"""
self.niter = 0
self.params = None
self.covar = None
self.perror = None
self.status = 0 # Invalid input flag set while we check inputs
self.debug = debug
self.errmsg = ''
self.nfev = 0
self.damp = damp
self.dof=0
if fcn is None:
self.errmsg = "Usage: parms = mpfit('myfunct', ... )"
return
if iterfunct == 'default':
iterfunct = self.defiter
# Parameter damping doesn't work when user is providing their own
# gradients.
if (self.damp != 0) and (autoderivative == 0):
self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
return
# Parameters can either be stored in parinfo, or x. x takes precedence if it exists
if (xall is None) and (parinfo is None):
self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
return
# Be sure that PARINFO is of the right type
if parinfo is not None:
if type(parinfo) != list:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
else:
if type(parinfo[0]) != dict:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
if ((xall is not None) and (len(xall) != len(parinfo))):
self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
return
# If the parameters were not specified at the command line, then
# extract them from PARINFO
if xall is None:
xall = self.parinfo(parinfo, 'value')
if xall is None:
self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
return
# Make sure parameters are numpy arrays
xall = numpy.asarray(xall)
# If xall is not floating point, or is single precision (fewer than
# 64 bits), convert it to double precision
if xall.dtype.kind != 'f' or xall.dtype.itemsize <= 4:
xall = xall.astype(numpy.float64)
npar = len(xall)
self.fnorm = -1.
fnorm1 = -1.
# TIED parameters?
ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
self.qanytied = 0
for i in range(npar):
ptied[i] = ptied[i].strip()
if ptied[i] != '':
self.qanytied = 1
self.ptied = ptied
# FIXED parameters ?
pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
pfixed = (pfixed == 1)
for i in range(npar):
pfixed[i] = pfixed[i] or (ptied[i] != '') # Tied parameters are also effectively fixed
# Finite differencing step, absolute and relative, and sidedness of deriv.
step = self.parinfo(parinfo, 'step', default=0., n=npar)
dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
# Maximum and minimum steps allowed to be taken in one iteration
maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
qmin = minstep != 0
qmin[:] = False # Remove minstep for now!!
qmax = maxstep != 0
if numpy.any(qmin & qmax & (maxstep<minstep)):
self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
return
wh = (numpy.nonzero((qmin!=0.) | (qmax!=0.)))[0]
qminmax = len(wh) > 0
# Finish up the free parameters
ifree = (numpy.nonzero(pfixed != 1))[0]
nfree = len(ifree)
if nfree == 0:
self.errmsg = 'ERROR: no free parameters'
return
# Compose only VARYING parameters
self.params = xall.copy() # self.params is the set of parameters to be returned
x = self.params[ifree] # x is the set of free parameters
# LIMITED parameters ?
limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar)
limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar)
if (limited is not None) and (limits is not None):
# Error checking on limits in parinfo
if numpy.any((limited[:,0] & (xall < limits[:,0])) |
(limited[:,1] & (xall > limits[:,1]))):
self.errmsg = 'ERROR: parameters are not within PARINFO limits'
return
if numpy.any((limited[:,0] & limited[:,1]) &
(limits[:,0] >= limits[:,1]) &
(pfixed == 0)):
self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
return
# Transfer structure values to local variables
qulim = (limited[:,1])[ifree]
ulim = (limits [:,1])[ifree]
qllim = (limited[:,0])[ifree]
llim = (limits [:,0])[ifree]
if numpy.any((qulim!=0.) | (qllim!=0.)):
qanylim = 1
else:
qanylim = 0
else:
# Fill in local variables with dummy values
qulim = numpy.zeros(nfree)
ulim = x * 0.
qllim = qulim
llim = x * 0.
qanylim = 0
n = len(x)
# Check input parameters for errors
if (n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) \
or (maxiter < 0) or (factor <= 0):
self.errmsg = 'ERROR: input keywords are inconsistent'
return
if rescale != 0:
self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
if len(diag) < n:
return
if numpy.any(diag <= 0):
return
self.errmsg = ''
[self.status, fvec] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
return
# If the returned fvec has more than four bytes per element, assume that we have
# double precision
# It is important that the machar is determined by the precision of
# the returned value, not by the precision of the input array
if numpy.array([fvec]).dtype.itemsize>4:
self.machar = machar(double=1)
self.blas_enorm = mpfit.blas_enorm64
else:
self.machar = machar(double=0)
self.blas_enorm = mpfit.blas_enorm32
machep = self.machar.machep
m = len(fvec)
if m < n:
self.errmsg = 'ERROR: number of parameters must not exceed data'
return
self.dof = m-nfree
self.fnorm = self.enorm(fvec)
# Initialize Levenberg-Marquardt parameter and iteration counter
par = 0.
self.niter = 1
qtf = x * 0.
self.status = 0
# Beginning of the outer loop
while(1):
# If requested, call fcn to enable printing of iterates
self.params[ifree] = x
if self.qanytied:
self.params = self.tie(self.params, ptied)
if (nprint > 0) and (iterfunct is not None):
if ((self.niter-1) % nprint) == 0:
mperr = 0
xnew0 = self.params.copy()
dof = numpy.max([len(fvec) - len(x), 0])
status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
functkw=functkw, parinfo=parinfo, quiet=quiet,
dof=dof, **iterkw)
if status is not None:
self.status = status
# Check for user termination
if self.status < 0:
self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
return
# If parameters were changed (grrr..) then re-tie
if numpy.max(numpy.abs(xnew0-self.params)) > 0:
if self.qanytied:
self.params = self.tie(self.params, ptied)
x = self.params[ifree]
# Calculate the jacobian matrix
self.status = 2
catch_msg = 'calling MPFIT_FDJAC2'
fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
epsfcn=epsfcn,
autoderivative=autoderivative, dstep=dstep,
functkw=functkw, ifree=ifree, xall=self.params)
if fjac is None:
self.errmsg = 'WARNING: premature termination by FDJAC2'
return
# Determine if any of the parameters are pegged at the limits
if qanylim:
catch_msg = 'zeroing derivatives of pegged parameters'
whlpeg = (numpy.nonzero(qllim & (x == llim)))[0]
nlpeg = len(whlpeg)
whupeg = (numpy.nonzero(qulim & (x == ulim)))[0]
nupeg = len(whupeg)
# See if any "pegged" values should keep their derivatives
if nlpeg > 0:
# Total derivative of sum wrt lower pegged parameters
for i in range(nlpeg):
sum0 = sum(fvec * fjac[:,whlpeg[i]])
if sum0 > 0:
fjac[:,whlpeg[i]] = 0
if nupeg > 0:
# Total derivative of sum wrt upper pegged parameters
for i in range(nupeg):
sum0 = sum(fvec * fjac[:,whupeg[i]])
if sum0 < 0:
fjac[:,whupeg[i]] = 0
# Compute the QR factorization of the jacobian
[fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
# On the first iteration if "diag" is unspecified, scale
# according to the norms of the columns of the initial jacobian
catch_msg = 'rescaling diagonal elements'
if self.niter == 1:
if (rescale==0) or (len(diag) < n):
diag = wa2.copy()
diag[diag == 0] = 1.
# On the first iteration, calculate the norm of the scaled x
# and initialize the step bound delta
wa3 = diag * x
xnorm = self.enorm(wa3)
delta = factor*xnorm
if delta == 0.:
delta = factor
# Form (q transpose)*fvec and store the first n components in qtf
catch_msg = 'forming (q transpose)*fvec'
wa4 = fvec.copy()
for j in range(n):
lj = ipvt[j]
temp3 = fjac[j,lj]
if temp3 != 0:
fj = fjac[j:,lj]
wj = wa4[j:]
# *** optimization wa4(j:*)
wa4[j:] = wj - fj * sum(fj*wj) / temp3
fjac[j,lj] = wa1[j]
qtf[j] = wa4[j]
# From this point on, only the square matrix, consisting of the
# triangle of R, is needed.
fjac = fjac[0:n, 0:n]
fjac.shape = [n, n]
temp = fjac.copy()
for i in range(n):
temp[:,i] = fjac[:, ipvt[i]]
fjac = temp.copy()
# Check for overflow. This should be a cheap test here since FJAC
# has been reduced to a (small) square matrix, and the test is
# O(N^2).
#wh = where(finite(fjac) EQ 0, ct)
#if ct GT 0 then goto, FAIL_OVERFLOW
# Compute the norm of the scaled gradient
catch_msg = 'computing the scaled gradient'
gnorm = 0.
if self.fnorm != 0:
for j in range(n):
l = ipvt[j]
if wa2[l] != 0:
sum0 = old_div(sum(fjac[0:j+1,j]*qtf[0:j+1]),self.fnorm)
gnorm = numpy.max([gnorm,numpy.abs(old_div(sum0,wa2[l]))])
# Test for convergence of the gradient norm
if gnorm <= gtol:
self.status = 4
break
if maxiter == 0:
self.status = 5
break
# Rescale if necessary
if rescale == 0:
diag = numpy.choose(diag>wa2, (wa2, diag))
# Beginning of the inner loop
while(1):
# Determine the levenberg-marquardt parameter
catch_msg = 'calculating LM parameter (MPFIT_)'
[fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf, delta, wa1, wa2, par=par)
# Store the direction p and x+p. Calculate the norm of p
wa1 = -wa1
if (qanylim == 0) and (qminmax == 0):
# No parameter limits, so just move to new position WA2
alpha = 1.
wa2 = x + wa1
else:
# Respect the limits. If a step were to go out of bounds, then
# we should take a step in the same direction but shorter distance.
# The step should take us right to the limit in that case.
alpha = 1.
if qanylim:
# Do not allow any steps out of bounds
catch_msg = 'checking for a step out of bounds'
if nlpeg > 0:
wa1[whlpeg] = numpy.clip( wa1[whlpeg], 0., numpy.max(wa1))
if nupeg > 0:
wa1[whupeg] = numpy.clip(wa1[whupeg], numpy.min(wa1), 0.)
dwa1 = numpy.abs(wa1) > machep
whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim)))[0]
if len(whl) > 0:
t = (old_div((llim[whl] - x[whl]),
wa1[whl]))
alpha = numpy.min([alpha, numpy.min(t)])
whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim)))[0]
if len(whu) > 0:
t = (old_div((ulim[whu] - x[whu]),
wa1[whu]))
alpha = numpy.min([alpha, numpy.min(t)])
# Obey any max step values.
if qminmax:
nwa1 = wa1 * alpha
whmax = (numpy.nonzero((qmax != 0.) & (maxstep > 0)))[0]
if len(whmax) > 0:
mrat = numpy.max(old_div(numpy.abs(nwa1[whmax]),
numpy.abs(maxstep[ifree[whmax]])))
if mrat > 1:
alpha = old_div(alpha, mrat)
# Scale the resulting vector
wa1 = wa1 * alpha
wa2 = x + wa1
# Adjust the final output values. If the step put us exactly
# on a boundary, make sure it is exact.
sgnu = (ulim >= 0) * 2. - 1.
sgnl = (llim >= 0) * 2. - 1.
# Handles case of
# ... nonzero *LIM ... ...zero * LIM
ulim1 = ulim * (1 - sgnu * machep) - (ulim == 0) * machep
llim1 = llim * (1 + sgnl * machep) + (llim == 0) * machep
wh = (numpy.nonzero((qulim!=0) & (wa2 >= ulim1)))[0]
if len(wh) > 0:
wa2[wh] = ulim[wh]
wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim1)))[0]
if len(wh) > 0:
wa2[wh] = llim[wh]
# endelse
wa3 = diag * wa1
pnorm = self.enorm(wa3)
# On the first iteration, adjust the initial step bound
if self.niter == 1:
delta = numpy.min([delta,pnorm])
self.params[ifree] = wa2
# Evaluate the function at x+p and calculate its norm
mperr = 0
catch_msg = 'calling '+str(fcn)
[self.status, wa4] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'WARNING: premature termination by "'+fcn+'"'
return
fnorm1 = self.enorm(wa4)
# Compute the scaled actual reduction
catch_msg = 'computing convergence criteria'
actred = -1.
if (0.1 * fnorm1) < self.fnorm:
actred = - (old_div(fnorm1,self.fnorm))**2 + 1.
# Compute the scaled predicted reduction and the scaled directional
# derivative
for j in range(n):
wa3[j] = 0
wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]
# Remember, alpha is the fraction of the full LM step actually
# taken
temp1 = old_div(self.enorm(alpha*wa3),self.fnorm)
temp2 = old_div((numpy.sqrt(alpha*par)*pnorm),self.fnorm)
prered = temp1*temp1 + old_div((temp2*temp2),0.5)
dirder = -(temp1*temp1 + temp2*temp2)
# Compute the ratio of the actual to the predicted reduction.
ratio = 0.
if prered != 0:
ratio = old_div(actred,prered)
# Update the step bound
if ratio <= 0.25:
if actred >= 0:
temp = .5
else:
temp = .5*dirder/(dirder + .5*actred)
if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1):
temp = 0.1
delta = temp*numpy.min([delta,old_div(pnorm,0.1)])
par = old_div(par,temp)
else:
if (par == 0) or (ratio >= 0.75):
delta = old_div(pnorm,.5)
par = .5*par
# Test for successful iteration
if ratio >= 0.0001:
# Successful iteration. Update x, fvec, and their norms
x = wa2
wa2 = diag * x
fvec = wa4
xnorm = self.enorm(wa2)
self.fnorm = fnorm1
self.niter = self.niter + 1
# Tests for convergence
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1):
self.status = 1
if delta <= xtol*xnorm:
self.status = 2
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1) and (self.status == 2):
self.status = 3
if self.status != 0:
break
# Tests for termination and stringent tolerances
if self.niter >= maxiter:
self.status = 5
if (numpy.abs(actred) <= machep) and (prered <= machep) \
and (0.5*ratio <= 1):
self.status = 6
if delta <= machep*xnorm:
self.status = 7
if gnorm <= machep:
self.status = 8
if self.status != 0:
break
# End of inner loop. Repeat if iteration unsuccessful
if ratio >= 0.0001:
break
# Check for over/underflow
if ~numpy.all(numpy.isfinite(wa1) & numpy.isfinite(wa2) & \
numpy.isfinite(x)) or ~numpy.isfinite(ratio):
self.errmsg = ('ERROR: parameter or function value(s) have become '
'infinite; check model function for over- and underflow')
self.status = -16
break
#wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
#if ct GT 0 OR finite(ratio) EQ 0 then begin
if self.status != 0:
break
# End of outer loop.
catch_msg = 'in the termination phase'
# Termination, either normal or user imposed.
if len(self.params) == 0:
return
if nfree == 0:
self.params = xall.copy()
else:
self.params[ifree] = x
if (nprint > 0) and (self.status > 0):
catch_msg = 'calling ' + str(fcn)
[status, fvec] = self.call(fcn, self.params, functkw)
catch_msg = 'in the termination phase'
self.fnorm = self.enorm(fvec)
if (self.fnorm is not None) and (fnorm1 is not None):
self.fnorm = numpy.max([self.fnorm, fnorm1])
self.fnorm = self.fnorm**2.
self.covar = None
self.perror = None
# (very carefully) set the covariance matrix COVAR
if (self.status > 0) and (nocovar==0) and (n is not None) \
and (fjac is not None) and (ipvt is not None):
sz = fjac.shape
if (n > 0) and (sz[0] >= n) and (sz[1] >= n) \
and (len(ipvt) >= n):
catch_msg = 'computing the covariance matrix'
cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])
cv.shape = [n, n]
nn = len(xall)
# Fill in actual covariance matrix, accounting for fixed
# parameters.
self.covar = numpy.zeros([nn, nn], dtype=float)
for i in range(n):
self.covar[ifree,ifree[i]] = cv[:,i]
# Compute errors in parameters
catch_msg = 'computing parameter errors'
self.perror = numpy.zeros(nn, dtype=float)
d = numpy.diagonal(self.covar).copy()
wh = (numpy.nonzero(d >= 0))[0]
if len(wh) > 0:
self.perror[wh] = numpy.sqrt(d[wh])
return
def __str__(self):
return {'params': self.params,
'niter': self.niter,
'covar': self.covar,
'perror': self.perror,
'status': self.status,
'debug': self.debug,
'errmsg': self.errmsg,
'nfev': self.nfev,
'damp': self.damp
#,'machar':self.machar
}.__str__()
# Default procedure to be called every iteration. It simply prints
# the parameter values.
def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
quiet=0, iterstop=None, parinfo=None,
format=None, pformat='%.10g', dof=1):
if self.debug:
print('Entering defiter...')
if quiet:
return
if fnorm is None:
[status, fvec] = self.call(fcn, x, functkw)
fnorm = self.enorm(fvec)**2
# Determine which parameters to print
nprint = len(x)
print("Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof))
for i in range(nprint):
if (parinfo is not None) and ('parname' in parinfo[i]):
p = ' ' + parinfo[i]['parname'] + ' = '
else:
p = ' P' + str(i) + ' = '
if (parinfo is not None) and ('mpprint' in parinfo[i]):
iprint = parinfo[i]['mpprint']
else:
iprint = 1
if iprint:
print(p + (pformat % x[i]) + ' ')
return 0
# DO_ITERSTOP:
# if keyword_set(iterstop) then begin
# k = get_kbrd(0)
# if k EQ string(byte(7)) then begin
# message, 'WARNING: minimization not complete', /info
# print, 'Do you want to terminate this procedure? (y/n)', $
# format='(A,$)'
# k = ''
# read, k
# if strupcase(strmid(k,0,1)) EQ 'Y' then begin
# message, 'WARNING: Procedure is terminating.', /info
# mperr = -1
# endif
# endif
# endif
# Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
def parinfo(self, parinfo=None, key='a', default=None, n=0):
if self.debug:
print('Entering parinfo...')
if (n == 0) and (parinfo is not None):
n = len(parinfo)
if n == 0:
values = default
return values
values = []
for i in range(n):
if (parinfo is not None) and (key in parinfo[i]):
values.append(parinfo[i][key])
else:
values.append(default)
# Convert to numeric arrays if possible
test = default
if type(default) == list:
test=default[0]
if isinstance(test, int):
values = numpy.asarray(values, int)
elif isinstance(test, float):
values = numpy.asarray(values, float)
return values
# Call user function or procedure, with _EXTRA or not, with
# derivatives or not.
def call(self, fcn, x, functkw, fjac=None):
if self.debug:
print('Entering call...')
if self.qanytied:
x = self.tie(x, self.ptied)
self.nfev = self.nfev + 1
if fjac is None:
[status, f] = fcn(x, fjac=fjac, **functkw)
if self.damp > 0:
# Apply the damping if requested. This replaces the residuals
# with their hyperbolic tangent. Thus residuals larger than
# DAMP are essentially clipped.
f = numpy.tanh(old_div(f,self.damp))
return [status, f]
else:
return fcn(x, fjac=fjac, **functkw)
def enorm(self, vec):
ans = self.blas_enorm(vec)
return ans
def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,
epsfcn=None, autoderivative=1,
functkw=None, xall=None, ifree=None, dstep=None):
if self.debug:
print('Entering fdjac2...')
machep = self.machar.machep
if epsfcn is None:
epsfcn = machep
if xall is None:
xall = x
if ifree is None:
ifree = numpy.arange(len(xall))
if step is None:
step = x * 0.
nall = len(xall)
eps = numpy.sqrt(numpy.max([epsfcn, machep]))
m = len(fvec)
n = len(x)
# Compute analytical derivative if requested
if autoderivative == 0:
mperr = 0
fjac = numpy.zeros(nall, dtype=float)
fjac[ifree] = 1.0 # Specify which parameters need derivatives
[status, fp] = self.call(fcn, xall, functkw, fjac=fjac)
if len(fjac) != m*nall:
print('ERROR: Derivative matrix was not computed properly.')
return None
# This definition is consistent with CURVEFIT
# Sign error found (thanks Jesus Fernandez <[email protected]>)
fjac.shape = [m,nall]
fjac = -fjac
# Select only the free parameters
if len(ifree) < nall:
fjac = fjac[:,ifree]
fjac.shape = [m, n]
return fjac
fjac = numpy.zeros([m, n], dtype=float)
h = eps * numpy.abs(x)
# if STEP is given, use that
# STEP includes the fixed parameters
if step is not None:
stepi = step[ifree]
wh = (numpy.nonzero(stepi > 0))[0]
if len(wh) > 0:
h[wh] = stepi[wh]
# if relative step is given, use that
# DSTEP includes the fixed parameters
if len(dstep) > 0:
dstepi = dstep[ifree]
wh = (numpy.nonzero(dstepi > 0))[0]
if len(wh) > 0:
h[wh] = numpy.abs(dstepi[wh]*x[wh])
# In case any of the step values are zero
h[h == 0] = eps
# Reverse the sign of the step if we are up against the parameter
# limit, or if the user requested it.
# DSIDE includes the fixed parameters (ULIMITED/ULIMIT have only
# varying ones)
mask = dside[ifree] == -1
if len(ulimited) > 0 and len(ulimit) > 0:
mask = (mask | ((ulimited!=0) & (x > ulimit-h)))
wh = (numpy.nonzero(mask))[0]
if len(wh) > 0:
h[wh] = - h[wh]
# Loop through parameters, computing the derivative for each
for j in range(n):
xp = xall.copy()
xp[ifree[j]] = xp[ifree[j]] + h[j]
[status, fp] = self.call(fcn, xp, functkw)
if status < 0:
return None
if numpy.abs(dside[ifree[j]]) <= 1:
# COMPUTE THE ONE-SIDED DERIVATIVE
# Note optimization fjac(0:*,j)
fjac[0:,j] = old_div((fp-fvec),h[j])
else:
# COMPUTE THE TWO-SIDED DERIVATIVE
xp[ifree[j]] = xall[ifree[j]] - h[j]
mperr = 0
[status, fm] = self.call(fcn, xp, functkw)
if status < 0:
return None
# Note optimization fjac(0:*,j)
fjac[0:,j] = old_div((fp-fm),(2*h[j]))
return fjac
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
# t
# i - (1/u(k))*u*u
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
#
# PIVOTING / PERMUTING:
#
# Upon return, A(*,*) is in standard parameter order, A(*,IPVT) is in
# permuted order.
#
# RDIAG is in permuted order.
# ACNORM is in standard parameter order.
#
#
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident # identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 # extract reflector
# hh = hh # (ident - 2*(v # v)/total(v * v)) # generate matrix
# endfor
#
# Test the result:
# IDL> print, hh # transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
def qrfac(self, a, pivot=0):
if self.debug: print('Entering qrfac...')
machep = self.machar.machep
sz = a.shape
m = sz[0]
n = sz[1]
# Compute the initial column norms and initialize arrays
acnorm = numpy.zeros(n, dtype=float)
for j in range(n):
acnorm[j] = self.enorm(a[:,j])
rdiag = acnorm.copy()
wa = rdiag.copy()
ipvt = numpy.arange(n)
# Reduce a to r with householder transformations
minmn = numpy.min([m,n])
for j in range(minmn):
if pivot != 0:
# Bring the column of largest norm into the pivot position
rmax = numpy.max(rdiag[j:])
kmax = (numpy.nonzero(rdiag[j:] == rmax))[0]
ct = len(kmax)
kmax = kmax + j
if ct > 0:
kmax = kmax[0]
# Exchange rows via the pivot only. Avoid actually exchanging
# the rows, in case there is lots of memory transfer. The
# exchange occurs later, within the body of MPFIT, after the
# extraneous columns of the matrix have been shed.
if kmax != j:
temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp
rdiag[kmax] = rdiag[j]
wa[kmax] = wa[j]
# Compute the householder transformation to reduce the jth
# column of A to a multiple of the jth unit vector
lj = ipvt[j]
ajj = a[j:,lj]
ajnorm = self.enorm(ajj)
if ajnorm == 0:
break
if a[j,lj] < 0:
ajnorm = -ajnorm
ajj = old_div(ajj, ajnorm)
ajj[0] = ajj[0] + 1
# *** Note optimization a(j:*,j)
a[j:,lj] = ajj
# Apply the transformation to the remaining columns
# and update the norms
# NOTE to SELF: tried to optimize this by removing the loop,
# but it actually got slower. Reverted to "for" loop to keep
# it simple.
if j+1 < n:
for k in range(j+1, n):
lk = ipvt[k]
ajk = a[j:,lk]
# *** Note optimization a(j:*,lk)
# (corrected 20 Jul 2000)
if a[j,lj] != 0:
a[j:,lk] = ajk - ajj * sum(ajk*ajj)/a[j,lj]
if (pivot != 0) and (rdiag[k] != 0):
temp = old_div(a[j,lk],rdiag[k])
rdiag[k] = rdiag[k] * numpy.sqrt(numpy.max([(1.-temp**2), 0.]))
temp = old_div(rdiag[k],wa[k])
if (0.05*temp*temp) <= machep:
rdiag[k] = self.enorm(a[j+1:,lk])
wa[k] = rdiag[k]
rdiag[j] = -ajnorm
return [a, ipvt, rdiag, acnorm]
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
# t t
# r*z = q *b , p *d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + d*d)*p = s *s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
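# A hedged numpy sketch of the problem qrsolv addresses (orientation
# only; qrsolv works from the QR factors r, ipvt and (q transpose)*b
# rather than from the matrix a directly):
#
#   aug = numpy.vstack([a, numpy.diag(d)])
#   rhs = numpy.concatenate([b, numpy.zeros(len(d))])
#   x = numpy.linalg.lstsq(aug, rhs, rcond=None)[0]   # a*x = b, d*x = 0, least squares
#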
def qrsolv(self, r, ipvt, diag, qtb, sdiag):
if self.debug:
print('Entering qrsolv...')
sz = r.shape
m = sz[0]
n = sz[1]
# copy r and (q transpose)*b to preserve input and initialize s.
# in particular, save the diagonal elements of r in x.
for j in range(n):
r[j:n,j] = r[j,j:n]
x = numpy.diagonal(r).copy()
wa = qtb.copy()
# Eliminate the diagonal matrix d using a givens rotation
for j in range(n):
l = ipvt[j]
if diag[l] == 0:
break
sdiag[j:] = 0
sdiag[j] = diag[l]
# The transformations to eliminate the row of d modify only a
# single element of (q transpose)*b beyond the first n, which
# is initially zero.
qtbpj = 0.
for k in range(j,n):
if sdiag[k] == 0:
break
if numpy.abs(r[k,k]) < numpy.abs(sdiag[k]):
cotan = old_div(r[k,k],sdiag[k])
sine = old_div(0.5,numpy.sqrt(.25 + .25*cotan*cotan))
cosine = sine*cotan
else:
tang = old_div(sdiag[k],r[k,k])
cosine = old_div(0.5,numpy.sqrt(.25 + .25*tang*tang))
sine = cosine*tang
# Compute the modified diagonal element of r and the
# modified element of ((q transpose)*b,0).
r[k,k] = cosine*r[k,k] + sine*sdiag[k]
temp = cosine*wa[k] + sine*qtbpj
qtbpj = -sine*wa[k] + cosine*qtbpj
wa[k] = temp
# Accumulate the transformation in the row of s
if n > k+1:
temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
r[k+1:n,k] = temp
sdiag[j] = r[j,j]
r[j,j] = x[j]
# Solve the triangular system for z. If the system is singular
# then obtain a least squares solution
nsing = n
wh = (numpy.nonzero(sdiag == 0))[0]
if len(wh) > 0:
nsing = wh[0]
wa[nsing:] = 0
if nsing >= 1:
wa[nsing-1] = old_div(wa[nsing-1],sdiag[nsing-1]) # Degenerate case
# *** Reverse loop ***
for j in range(nsing-2,-1,-1):
sum0 = sum(r[j+1:nsing,j]*wa[j+1:nsing])
wa[j] = old_div((wa[j]-sum0),sdiag[j])
# Permute the components of z back to components of x
x[ipvt] = wa
return (r, x, sdiag)
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + par*d*d)*p = s *s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
if self.debug:
print('Entering lmpar...')
dwarf = self.machar.minnum
machep = self.machar.machep
sz = r.shape
m = sz[0]
n = sz[1]
# Compute and store in x the gauss-newton direction. If the
# jacobian is rank-deficient, obtain a least-squares solution
nsing = n
wa1 = qtb.copy()
rthresh = numpy.max(numpy.abs(numpy.diagonal(r).copy())) * machep
wh = (numpy.nonzero(numpy.abs(numpy.diagonal(r).copy()) < rthresh))[0]
if len(wh) > 0:
nsing = wh[0]
wa1[wh[0]:] = 0
if nsing >= 1:
# *** Reverse loop ***
for j in range(nsing-1,-1,-1):
wa1[j] = old_div(wa1[j],r[j,j])
if j-1 >= 0:
wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]
# Note: ipvt here is a permutation array
x[ipvt] = wa1
# Initialize the iteration counter. Evaluate the function at the
# origin, and test for acceptance of the gauss-newton direction
iter = 0
wa2 = diag * x
dxnorm = self.enorm(wa2)
fp = dxnorm - delta
if fp <= 0.1*delta:
return [r, 0., x, sdiag]
# If the jacobian is not rank deficient, the newton step provides a
# lower bound, parl, for the zero of the function. Otherwise set
# this bound to zero.
parl = 0.
if nsing >= n:
wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
wa1[0] = old_div(wa1[0], r[0,0]) # Degenerate case
for j in range(1,n): # Note "1" here, not zero
sum0 = sum(r[0:j,j]*wa1[0:j])
wa1[j] = old_div((wa1[j] - sum0),r[j,j])
temp = self.enorm(wa1)
parl = old_div((old_div((old_div(fp,delta)),temp)),temp)
# Calculate an upper bound, paru, for the zero of the function
for j in range(n):
sum0 = sum(r[0:j+1,j]*qtb[0:j+1])
wa1[j] = old_div(sum0,diag[ipvt[j]])
gnorm = self.enorm(wa1)
paru = old_div(gnorm,delta)
if paru == 0:
paru = old_div(dwarf,numpy.min([delta,0.1]))
# If the input par lies outside of the interval (parl,paru), set
# par to the closer endpoint
par = numpy.max([par,parl])
par = numpy.min([par,paru])
if par == 0:
par = old_div(gnorm,dxnorm)
        # Beginning of an iteration
while(1):
iter = iter + 1
# Evaluate the function at the current value of par
if par == 0:
par = numpy.max([dwarf, paru*0.001])
temp = numpy.sqrt(par)
wa1 = temp * diag
[r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
wa2 = diag*x
dxnorm = self.enorm(wa2)
temp = fp
fp = dxnorm - delta
if (numpy.abs(fp) <= 0.1*delta) or \
((parl == 0) and (fp <= temp) and (temp < 0)) or \
(iter == 10):
break;
# Compute the newton correction
wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
for j in range(n-1):
wa1[j] = old_div(wa1[j],sdiag[j])
wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
wa1[n-1] = old_div(wa1[n-1],sdiag[n-1]) # Degenerate case
temp = self.enorm(wa1)
parc = old_div((old_div((old_div(fp,delta)),temp)),temp)
# Depending on the sign of the function, update parl or paru
if fp > 0:
parl = numpy.max([parl,par])
if fp < 0:
paru = numpy.min([paru,par])
# Compute an improved estimate for par
par = numpy.max([parl, par+parc])
# End of an iteration
# Termination
return [r, par, x, sdiag]
# Procedure to tie one parameter to another.
def tie(self, p, ptied=None):
if self.debug:
print('Entering tie...')
if ptied is None:
return
for i in range(len(ptied)):
if ptied[i] == '':
continue
cmd = 'p[' + str(i) + '] = ' + ptied[i]
exec(cmd)
return p
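    # Illustrative example (not part of the original routine): with
    #   ptied = ['', 'p[0] * 2', '']
    # a call to tie(p, ptied) leaves p[0] and p[2] untouched and execs
    # "p[1] = p[0] * 2", so the second parameter is tied to twice the first.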
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
def calc_covar(self, rr, ipvt=None, tol=1.e-14):
if self.debug:
print('Entering calc_covar...')
if numpy.array(rr).ndim != 2:
print('ERROR: r must be a two-dimensional matrix')
return -1
s = rr.shape
n = s[0]
if s[0] != s[1]:
print('ERROR: r must be a square matrix')
return -1
if ipvt is None:
ipvt = numpy.arange(n)
r = rr.copy()
r.shape = [n,n]
        # Form the inverse of r in the full upper triangle of r
l = -1
tolr = tol * numpy.abs(r[0,0])
for k in range(n):
if numpy.abs(r[k,k]) <= tolr:
break
r[k,k] = old_div(1.,r[k,k])
for j in range(k):
temp = r[k,k] * r[j,k]
r[j,k] = 0.
r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]
l = k
# Form the full upper triangle of the inverse of (r transpose)*r
# in the full upper triangle of r
if l >= 0:
for k in range(l+1):
for j in range(k):
temp = r[j,k]
r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]
temp = r[k,k]
r[0:k+1,k] = temp * r[0:k+1,k]
        # Form the full lower triangle of the covariance matrix
        # in the strict lower triangle of r and in wa
wa = numpy.repeat([r[0,0]], n)
for j in range(n):
jj = ipvt[j]
sing = j > l
for i in range(j+1):
if sing:
r[i,j] = 0.
ii = ipvt[i]
if ii > jj:
r[ii,jj] = r[i,j]
if ii < jj:
r[jj,ii] = r[i,j]
wa[jj] = r[j,j]
# Symmetrize the covariance matrix in r
for j in range(n):
r[0:j+1,j] = r[j,0:j+1]
r[j,j] = wa[j]
return r
class machar(object):
def __init__(self, double=1):
if double == 0:
info = numpy.finfo(numpy.float32)
else:
info = numpy.finfo(numpy.float64)
self.machep = info.eps
self.maxnum = info.max
self.minnum = info.tiny
self.maxlog = numpy.log(self.maxnum)
self.minlog = numpy.log(self.minnum)
self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10
self.rgiant = numpy.sqrt(self.maxnum) * 0.1
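# Illustrative note (not part of the original class): with the default double=1,
# machar().machep equals numpy.finfo(numpy.float64).eps (~2.22e-16) and
# machar().minnum equals numpy.finfo(numpy.float64).tiny (~2.23e-308); rdwarf and
# rgiant bracket the range over which the Euclidean norm (enorm) can be computed
# without destructive underflow or overflow.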
|
PaulKuinREPO_NAMEuvotpyPATH_START.@uvotpy_extracted@uvotpy-master@[email protected]@.PATH_END.py
|
{
"filename": "abscal_inspect_2458043.ipynb",
"repo_name": "HERA-Team/H1C_IDR3_Notebooks",
"repo_path": "H1C_IDR3_Notebooks-main/abscal_inspect/abscal_inspect_2458043.ipynb",
"type": "Jupyter Notebook"
}
|
# Stage 2 Absolute Calibration Nightly Notebook
**Josh Dillon**, Last Revised 9/23/20
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from hera_cal import io, redcal, apply_cal, abscal, utils
from hera_cal.smooth_cal import build_time_blacklist
from hera_qm.metrics_io import load_metric_file
import pyuvdata
import glob
import os
from copy import deepcopy
import inspect
import h5py
import matplotlib.cm as cm
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
```python
# If you want to run this notebook locally, copy the output of the next cell into the first few lines of this cell.
# JD = '2459122'
# data_path = '/lustre/aoc/projects/hera/H4C/2459122'
# lst_blacklist_string = '0-1.3 2.5-4.3 5.0-5.7 6.5-9.1 10.6-11.5 11.9-14.3 16.3-1.3'
# abscal_model_glob = '/lustre/aoc/projects/hera/zmartino/hera_calib_model/H3C/abscal_files_unique_baselines/zen.2458894.?????.uvh5'
# os.environ["JULIANDATE"] = JD
# os.environ["DATA_PATH"] = data_path
# os.environ["LST_BLACKLIST_STRING"] = lst_blacklist_string
# os.environ["ABSCAL_MODEL_GLOB"] = abscal_model_glob
```
```python
# Use environment variables to figure out path to data
JD = os.environ['JULIANDATE']
data_path = os.environ['DATA_PATH']
lst_blacklist_string = os.environ['LST_BLACKLIST_STRING']
abscal_model_glob = os.environ['ABSCAL_MODEL_GLOB']
print(f'JD = "{JD}"')
print(f'data_path = "{data_path}"')
print(f'lst_blacklist_string = "{lst_blacklist_string}"')
print(f'abscal_model_glob = "{abscal_model_glob}"')
```
JD = "2458043"
data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458043"
lst_blacklist_string = ""
abscal_model_glob = "/lustre/aoc/projects/hera/H1C_IDR3/abscal_model/zen.245804*.HH.uvRXLS.uvh5"
```python
print('Looking for data in', data_path, 'on JD', JD)
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.sum.uvh5')))
if len(data_list) == 0:
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.uvh5')))
print('...found {} data files.'.format(len(data_list)))
abscal_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.*.abs.calfits')))
print('...found {} abscal files.'.format(len(abscal_list)))
omnical_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.*.sum.omni.calfits')))
print('...found {} omnical files.'.format(len(omnical_list)))
```
Looking for data in /lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458043 on JD 2458043
...found 73 data files.
...found 73 abscal files.
...found 73 omnical files.
# Load And Inspect a Single File
```python
# get all JDs and LSTs
_, _, file_lst_arrays, file_time_arrays = io.get_file_times(data_list)
# parse lst_blacklist_string
lst_blacklists = []
if len(lst_blacklist_string) > 0:
lst_blacklists = [tuple([float(arg) for arg in arg_pair.split('-', maxsplit=1)])
for arg_pair in lst_blacklist_string.split(' ')]
# get times that are blacklisted and reshape them like file_time_arrays
time_blacklisted_flat = build_time_blacklist(np.hstack(file_time_arrays), lst_blacklists=lst_blacklists)
time_blacklisted = [fta.astype(bool) for fta in file_time_arrays]
n = 0
for i in range(len(file_time_arrays)):
time_blacklisted[i] = np.zeros_like(time_blacklisted[i], dtype=bool)
for j in range(len(file_time_arrays[i])):
time_blacklisted[i][j] = time_blacklisted_flat[n]
n += 1
# pick the central time from among the not-LST blacklisted files, if possible
good_indices = [i for i, tb in enumerate(time_blacklisted) if not np.any(tb)]
if len(good_indices) > 0:
file_index = good_indices[len(good_indices)//2]
else:
file_index = len(data_list)//2
file_JD = '.'.join([s for s in data_list[file_index].split('.') if s.isdigit()])
```
/lustre/aoc/projects/hera/heramgr/anaconda2/envs/h1c_idr3/lib/python3.7/site-packages/numpy/core/_asarray.py:83: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
return array(a, dtype, copy=False, order=order)
```python
# Load abscal gains and determine ex_ants
hc = io.HERACal(abscal_list[file_index])
gains, gain_flags, _, _ = hc.read()
ex_ants = [ant for ant in gain_flags if np.all(gain_flags[ant])]
# Get min_bl_cut, we only want to compare baselines actually used in absolute calibration
try:
min_bl_cut = float(hc.history.replace('\n','').split('--min_bl_cut')[-1].split('--')[0].strip())
except:
print('Could not find min_bl_cut, setting to 1 m.')
min_bl_cut = 1.0
# Load the most common redundant baseline longer than min_bl_cut
hd = io.HERAData(data_list[file_index])
bls_to_plot = []
for pol in ['ee', 'nn']:
reds = redcal.get_reds(hd.antpos, pols=[pol])
# reds = redcal.filter_reds(reds, ex_ants=ex_ants)
reds = sorted(reds, key=len, reverse=True)
bl_lens = np.array([np.linalg.norm(hd.antpos[red[0][1]] - hd.antpos[red[0][0]]) for red in reds])
try:
bl_group_to_plot = (np.array(reds)[bl_lens >= min_bl_cut])[0]
except:
bl_group_to_plot = reds[0]
bls_to_plot.extend(bl_group_to_plot)
# reds = sorted(reds, key=len, reverse=True)
data, flags, nsamples = hd.read(bls=bls_to_plot)
apply_cal.calibrate_in_place(data, gains, data_flags=flags, cal_flags=gain_flags)
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
plt.figure(figsize=(8,8))
plt.scatter(np.array(list(hd.antpos.values()))[:,0],
np.array(list(hd.antpos.values()))[:,1], c='w', s=0)
for ant,pos in hd.antpos.items():
bad = ant in [ant[0] for ant in ex_ants]
plt.gca().add_artist(plt.Circle(tuple(pos[0:2]), radius=7,
fill=(~bad), color=['grey','r'][bad]))
plt.text(pos[0],pos[1],str(ant), va='center', ha='center', color='w')
plt.xlabel("Antenna East-West Position (meters)")
plt.ylabel("Antenna North-South Position (meters)")
plt.title('Antenna Positions on {} (Red = Flagged)'.format(file_JD));
plt.axis('equal')
plt.tight_layout()
plt.show()
```

### Figure 1: Array and Flagged Antennas
#### OBSERVER CHECKLIST:
* Check that the array configuration looks reasonable.
* Check that all antennas expected to be flagged are actually flagged, but also that not everything is getting flagged.
```python
# Check whether the model is redundant by looking at the history
model_is_redundant = ('--model_is_redundant' in "".join(hc.history.split()))
# Find files that overlap with this file
abscal_matched_files = list(abscal.match_times(data_list[file_index],
sorted(glob.glob(abscal_model_glob)),
filetype='uvh5', atol=1e-5))
hdm = io.HERAData(abscal_matched_files)
# Get model baselines to load
model_bls = hdm.bls
model_antpos = hdm.antpos
if isinstance(model_bls, dict):
model_bls = list(model_bls.values())[0]
model_antpos = {ant: pos for antpos in hdm.antpos.values() for ant, pos in antpos.items()}
_, model_bl_to_load, data_to_model_bl_map = abscal.match_baselines(bls_to_plot, model_bls,
hd.antpos, model_antpos=model_antpos,
model_is_redundant=model_is_redundant)
model, model_flags, _ = hdm.read(bls=model_bl_to_load)
# Rephase model at index of best match to mean LST in the data
model_index = np.argmin(np.abs(model.lsts - np.mean(data.lsts)))
model_blvecs = {bl: model.antpos[bl[0]] - model.antpos[bl[1]] for bl in model.keys()}
utils.lst_rephase(model, model_blvecs, model.freqs, np.mean(data.lsts) - model.lsts[model_index],
lat=hdm.telescope_location_lat_lon_alt_degrees[0], inplace=True)
if not model_is_redundant:
model, _, _ = utils.red_average(model, flags=model_flags)
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
for pol in ['ee', 'nn']:
for func, plot, ylabel in zip([np.abs, np.angle], [plt.semilogy, plt.plot], ['Amplitude (Jy)', 'Phase (Radians)']):
plt.figure(figsize=(16,4))
for bl in [k for k in bls_to_plot if k[2] == pol]:
ant0, ant1 = utils.split_bl(bl)
blvec = hd.antpos[ant0[0]] - hd.antpos[ant1[0]]
if (ant0 not in ex_ants) and (ant1 not in ex_ants):
to_plot = deepcopy(data[bl])
to_plot[flags[bl]] = np.nan + 1.0j * np.nan
to_plot = np.nanmedian(np.real(to_plot), axis=0) + 1.0j * np.nanmedian(np.imag(to_plot), axis=0)
plot(hd.freqs/1e6, func(to_plot))
for bl in [k for k in model if k[2] == pol]:
plot(hd.freqs/1e6, func(model[bl][model_index]), 'k-', label='Abscal Model')
plt.xlabel('Frequency (MHz)')
plt.ylabel(ylabel)
plt.legend(loc='lower right')
plt.title('{}-Polarized, {:f} m East, {:f} m North Visibility on {}'.format(pol, blvec[0], blvec[1], file_JD))
```




### Figure 2: Example redundant baseline group, absolute calibrated, compared to the Abscal Model
#### OBSERVER CHECKLIST:
* Check that the data all look pretty redundant (one way to quantify this is sketched below).
* Check that the model isn't wildly out of line with the data.
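A minimal sketch (not executed in this notebook) of one way to quantify how redundant the plotted group looks: the median fractional scatter of the calibrated visibilities about their group mean. It reuses the `data`, `flags`, and `bls_to_plot` variables defined above; the helper name `fractional_scatter` is hypothetical.
```python
import numpy as np

def fractional_scatter(data, flags, bls, pol='ee'):
    """Median |V_bl - <V>| / |<V>| over the unflagged pixels of one redundant group."""
    group = [np.where(flags[bl], np.nan, data[bl]) for bl in bls if bl[2] == pol]
    mean_vis = np.nanmean(group, axis=0)
    return np.nanmedian([np.abs(v - mean_vis) / np.abs(mean_vis) for v in group])

# e.g. fractional_scatter(data, flags, bls_to_plot, pol='ee'); small values would
# indicate the calibrated group is close to redundant.
```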
# Load a whole day
```python
# Load chisq and flagging info from abscal gains
ant_flags_dict = {}
chisq_ee_dict = {}
chisq_nn_dict = {}
cspa_med_dict = {}
ants = set([])
for cal in abscal_list:
hc = io.HERACal(cal)
_, flags, cspa, chisq = hc.read()
ants |= set(flags.keys())
ant_flags_dict[cal] = {ant: np.all(flags[ant]) for ant in flags}
chisq_ee_dict[cal] = chisq['Jee']
chisq_nn_dict[cal] = chisq['Jnn']
cspa_med_dict[cal] = {ant: np.nanmedian(cspa[ant], axis=1) for ant in cspa}
all_flagged_dict = {ant: np.all([af[ant] for af in ant_flags_dict.values()]) for ant in ants}
cspa = {ant: np.hstack([np.squeeze(cspa_med_dict[cal][ant]) / \
~ant_flags_dict[cal][ant] for cal in abscal_list]) for ant in ants}
ee_chisq = np.vstack(np.array(list(chisq_ee_dict.values())))
nn_chisq = np.vstack(np.array(list(chisq_nn_dict.values())))
```
invalid value encountered in true_divide
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
# save middle-numbered ants with a minimal number of flags
ants_to_save = {}
for pol in ['Jee', 'Jnn']:
min_flags = np.min([np.sum(~np.isfinite(cspa[ant]))
for ant in cspa if ant[1] == pol])
ant_candidates = sorted([ant for ant in cspa if ant[1] == pol and
np.sum(~np.isfinite(cspa[ant])) == min_flags])
Nac = len(ant_candidates)
ants_to_save[pol] = ant_candidates[(Nac // 2 - 1):(Nac // 2 + 1)]
# Reload abscal gains
times_dict = {}
gain_dict = {}
flag_dict = {}
for cal in abscal_list:
hc = io.HERACal(cal)
gains, flags, _, _ = hc.read()
times_dict[cal] = hc.times
gain_dict[cal] = {ant: gains[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
flag_dict[cal] = {ant: flags[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
times = np.hstack(list(times_dict.values()))
lsts = 12 / np.pi * pyuvdata.utils.get_lst_for_time(times, *hd.telescope_location_lat_lon_alt_degrees)
gains = {ant: np.vstack([gain_dict[cal][ant] for cal in gain_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
flags = {ant: np.vstack([flag_dict[cal][ant] for cal in flag_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
flag_mask = np.all([f for f in flags.values()], axis=0)
```
# Inspect a whole day
```python
# for overplotting blacklisted LSTs
my_cmap = cm.binary
my_cmap.set_under('k', alpha=0)
blacklist = np.ones_like(ee_chisq) * np.hstack(time_blacklisted)[:, np.newaxis]
```
You are modifying the state of a globally registered colormap. In future versions, you will not be able to modify a registered colormap in-place. To remove this warning, you can make a copy of the colormap first. cmap = copy.copy(mpl.cm.get_cmap("binary"))
```python
# Grid and plot overall chi^2 for each polarization
ee_chisq = np.vstack(np.array(list(chisq_ee_dict.values())))
nn_chisq = np.vstack(np.array(list(chisq_nn_dict.values())))
fig, axes = plt.subplots(1, 2, figsize=(20,12))
for ax, cs, t in zip(axes, [ee_chisq, nn_chisq], ['ee-polarized', 'nn-polarized']):
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(cs / ~flag_mask, aspect='auto', vmin=0, cmap='inferno', vmax=10, interpolation='nearest', extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title('Overall Abscal $\chi^2$ / $N_{bls}$: ' + t)
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, label='$\chi^2$ / $N_{bls}$ (unitless)')
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
invalid value encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 3: Overall Abscal $\chi^2 / N_{bls}$
This computes the difference between the calibrated data and the abscal model, normalized by the thermal noise. Grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing.
#### OBSERVER CHECKLIST:
* Look for regions of high $\chi^2$ that are not blacklisted.
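The cell below was not part of the original run; it is a minimal sketch (with hypothetical inputs `cal_data`, `model_vis`, and `noise_var`, all dicts of (Ntimes, Nfreqs) arrays keyed by baseline) of the quantity plotted above: the squared difference between calibrated data and the abscal model, normalized by the noise variance and averaged over baselines.
```python
import numpy as np

def chisq_per_nbls(cal_data, model_vis, noise_var):
    """|data - model|^2 / noise variance, averaged over baselines -> (Ntimes, Nfreqs)."""
    resid = [np.abs(cal_data[bl] - model_vis[bl])**2 / noise_var[bl]
             for bl in cal_data]
    return np.mean(resid, axis=0)
```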
```python
# Pick vmax to not saturate 90% of the data
vmax = np.max([np.percentile(np.abs(gains[ants_to_save[pol][1]][~flag_mask]), 90) for pol in ['Jee', 'Jnn']])
# Plot abscal gain amplitude waterfalls for a single antenna
fig, axes = plt.subplots(3, 2, figsize=(16,16), gridspec_kw={'height_ratios': [1, .25, .25]})
for ax, pol in zip(axes[0], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
gains_here = deepcopy(gains[ant])
gains_here[flags[ant]] = np.nan
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(np.abs(gains_here), aspect='auto', cmap='inferno',
interpolation='nearest', vmin=0, vmax=vmax, extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title(f'Abscal Gain Amplitude of Antenna {ant[0]}: {pol[1:]}-polarized' )
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, orientation='horizontal', pad=.07)
# Now plot median gain spectra and time series
for ax, pol in zip(axes[1], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
gains_here = deepcopy(gains[ant])
gains_here[flags[ant]] = np.nan
if not np.all(np.hstack(time_blacklisted)):
ax.plot(hd.freqs / 1e6, np.nanmedian(np.abs(gains_here[~np.hstack(time_blacklisted), :]), axis=0))
ax.set_ylim([0, vmax])
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('|g| (unitless)')
ax.set_title(f'Median Non-Blacklisted Abscal Gain Amplitude Spectrum of Antenna {ant[0]}: {pol[1:]}-polarized')
# Now plot median gain time series
for ax, pol in zip(axes[2], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
gains_here = deepcopy(gains[ant])
gains_here[flags[ant]] = np.nan
if not np.all(np.hstack(time_blacklisted)):
ax.plot(lsts[~np.hstack(time_blacklisted)],
np.nanmedian(np.abs(gains_here[~np.hstack(time_blacklisted), :]), axis=1),
'b.', label='Not Blacklisted LSTs')
if np.any(np.hstack(time_blacklisted)):
ax.plot(lsts[np.hstack(time_blacklisted)],
np.nanmedian(np.abs(gains_here[np.hstack(time_blacklisted), :]), axis=1),
'r.', label='Blacklisted LSTs')
ax.set_ylim([0, vmax])
ax.set_xlabel('LST (hours)')
ax.set_ylabel('|g| (unitless)')
ax.set_title(f'Median Abscal Gain Amplitude Time-Series of Antenna {ant[0]}: {pol[1:]}-polarized')
ax.legend()
plt.tight_layout()
```
FixedFormatter should only be used together with FixedLocator
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered

### Figure 4: Example Abscal Gain Amplitudes
Abscal gain amplitudes for an example antenna. In the waterfall, grayed-out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing. We also plot the median non-blacklisted amplitude as a function of frequency (middle row) and the median amplitude as a function of time (bottom row).
#### OBSERVER CHECKLIST:
* Look to see that non-blacklisted times are relatively stable in amplitude
* Check to see if the bandpass looks reasonable
```python
# Plot abscal gain phase waterfalls for a single antenna/refant
fig, axes = plt.subplots(3, 2, figsize=(16,16), gridspec_kw={'height_ratios': [1, .25, .25]})
for ax, pol in zip(axes[0], ['Jee', 'Jnn']):
ant0, ant1 = ants_to_save[pol]
gains_ratio_here = gains[ant0] / gains[ant1]
gains_ratio_here[flags[ant0] | flags[ant1]] = np.nan
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(np.angle(gains_ratio_here), aspect='auto', cmap='inferno',
interpolation='nearest', vmin=-np.pi, vmax=np.pi, extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title(f'Abscal Gain Phase of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized' )
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, orientation='horizontal', pad=.07)
# Now plot median gain spectra and time series
for ax, pol in zip(axes[1], ['Jee', 'Jnn']):
ant0, ant1 = ants_to_save[pol]
gains_ratio_here = gains[ant0] / gains[ant1]
gains_ratio_here[flags[ant0] | flags[ant1]] = np.nan
if not np.all(np.hstack(time_blacklisted)):
re_med = np.nanmedian(gains_ratio_here[~np.hstack(time_blacklisted), :].real, axis=0)
im_med = np.nanmedian(gains_ratio_here[~np.hstack(time_blacklisted), :].imag, axis=0)
ax.plot(hd.freqs / 1e6, np.angle(re_med + 1.0j * im_med))
ax.set_ylim([-np.pi, np.pi])
ax.set_xlabel('Frequency (MHz)')
    ax.set_ylabel(f'Phase of g$_{{{ant0[0]}}}$ / g$_{{{ant1[0]}}}$')
ax.set_title(f'Median Non-Blacklisted Abscal Gain Phase Spectrum of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
# Now plot a single gain angle time series
for ax, pol in zip(axes[2], ['Jee', 'Jnn']):
ant0, ant1 = ants_to_save[pol]
gains_ratio_here = gains[ant0] / gains[ant1]
gains_ratio_here[flags[ant0] | flags[ant1]] = np.nan
# pick channel with minimum phase variance in the middle 100 channels
possible_chans = np.arange(len(hd.freqs))[len(hd.freqs)//2 - 50:len(hd.freqs)//2 + 50]
best_chan = np.argmin(np.var(np.angle(gains_ratio_here), axis=0)[len(hd.freqs)//2 - 50:len(hd.freqs)//2 + 50])
chan = possible_chans[best_chan]
if not np.all(np.hstack(time_blacklisted)):
ax.plot(lsts[~np.hstack(time_blacklisted)],
np.angle(gains_ratio_here[~np.hstack(time_blacklisted), chan]),
'b.', label='Not Blacklisted LSTs')
if np.any(np.hstack(time_blacklisted)):
ax.plot(lsts[np.hstack(time_blacklisted)],
np.angle(gains_ratio_here[np.hstack(time_blacklisted), chan]),
'r.', label='Blacklisted LSTs')
ax.set_ylim([-np.pi, np.pi])
ax.set_xlabel('LST (hours)')
    ax.set_ylabel(f'Phase of g$_{{{ant0[0]}}}$ / g$_{{{ant1[0]}}}$')
ax.set_title(f'Abscal Gain Phase of Ant {ant0[0]} / Ant {ant1[0]} at Channel {chan}: {pol[1:]}-polarized')
ax.legend()
plt.tight_layout()
```
FixedFormatter should only be used together with FixedLocator
All-NaN slice encountered
All-NaN slice encountered

### Figure 5: Example Abscal Gain Phases
Relative gain phases of two example antennas. In the waterfall, grayed-out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing. We also plot the median non-blacklisted phase as a function of frequency (middle row) and, in the bottom row, the phase at the single channel, chosen from the 100 channels nearest the band center, with the smallest phase variance.
#### OBSERVER CHECKLIST:
* Look for regions of "hashy" phase structure that are not blacklisted or attributable to RFI.
# Metadata
```python
print(redcal.version.history_string())
```
------------
This file was produced by the function <module>() in <ipython-input-1-c6de44361328> using:
git_branch: HEAD
git_description: v3.0-731-g859d8598
git_hash: 859d85989feef96a2c58afdf75e34edaad50c272
git_origin: [email protected]:HERA-Team/hera_cal.git
version: 3.0
------------
|
HERA-TeamREPO_NAMEH1C_IDR3_NotebooksPATH_START.@H1C_IDR3_Notebooks-main@abscal_inspect@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "nanograv/PINT",
"repo_path": "PINT_extracted/PINT-master/src/pint/templates/__init__.py",
"type": "Python"
}
|
nanogravREPO_NAMEPINTPATH_START.@PINT_extracted@PINT-master@src@pint@templates@[email protected]_END.py
|
|
{
"filename": "conf.py",
"repo_name": "apertif/apercal",
"repo_path": "apercal_extracted/apercal-master/docs/source/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'Apercal'
copyright = '2019, Apercal Team'
author = 'Apercal Team'
# The short X.Y version
version = '2.5'
# The full version, including alpha/beta/rc tags
release = '2.5.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
# 'sphinx.ext.intersphinx',
# 'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
# 'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["../cwb", "../ipython-notebooks", "../test"]
autodoc_mock_imports = ["libs", "drivecasa",
"pandas", "casacore", "tables", "aipy", "pymp", "irods", "getdata_alta", "backports"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
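#
# As an illustration only (not enabled for this project), the builtin themes
# derived from "basic" accept options along these lines:
#
# html_theme_options = {
#     'nosidebar': False,
#     'sidebarwidth': 230,
# }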
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Apercaldoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Apercal.tex', 'Apercal Documentation',
'Apercal Team', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'apercal', 'Apercal Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Apercal', 'Apercal Documentation',
author, 'Apercal', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
apertifREPO_NAMEapercalPATH_START.@apercal_extracted@apercal-master@docs@[email protected]@.PATH_END.py
|
{
"filename": "test_funcs.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/tests/test_funcs.py",
"type": "Python"
}
|
import os
from nose.tools import assert_raises
from numpy.testing import assert_equal
from yt.funcs import (
just_one,
levenshtein_distance,
simple_download_file,
validate_axis,
validate_center,
)
from yt.testing import fake_amr_ds
from yt.units import YTArray, YTQuantity
def test_validate_axis():
validate_axis(None, 0)
validate_axis(None, "X")
ds = fake_amr_ds(geometry="cylindrical")
ds.slice("Theta", 0.25)
with assert_raises(TypeError) as ex:
# default geometry is cartesian
ds = fake_amr_ds()
ds.slice("r", 0.25)
desired = "Expected axis to be any of [0, 1, 2, 'x', 'y', 'z', 'X', 'Y', 'Z'], received 'r'"
actual = str(ex.exception)
assert actual == desired
def test_validate_center():
validate_center("max")
validate_center("MIN_")
with assert_raises(TypeError) as ex:
validate_center("avg")
desired = (
"Expected 'center' to be in ['c', 'center', 'm', 'max', 'min'] "
"or the prefix to be 'max_'/'min_', received 'avg'."
)
assert_equal(str(ex.exception), desired)
validate_center(YTQuantity(0.25, "cm"))
validate_center([0.25, 0.25, 0.25])
class CustomCenter:
def __init__(self, center):
self.center = center
with assert_raises(TypeError) as ex:
validate_center(CustomCenter(10))
desired = (
"Expected 'center' to be a numeric object of type "
"list/tuple/np.ndarray/YTArray/YTQuantity, received "
"'yt.tests.test_funcs.test_validate_center.<locals>."
"CustomCenter'."
)
assert_equal(str(ex.exception)[:50], desired[:50])
def test_just_one():
# Check that behaviour of this function is consistent before and after refactor
# PR 2893
for unit in ["mm", "cm", "km", "pc", "g", "kg", "M_sun"]:
obj = YTArray([0.0, 1.0], unit)
expected = YTQuantity(obj.flat[0], obj.units, registry=obj.units.registry)
jo = just_one(obj)
assert jo == expected
def test_levenshtein():
assert_equal(levenshtein_distance("abcdef", "abcdef"), 0)
# Deletions / additions
assert_equal(levenshtein_distance("abcdef", "abcde"), 1)
assert_equal(levenshtein_distance("abcdef", "abcd"), 2)
assert_equal(levenshtein_distance("abcdef", "abc"), 3)
assert_equal(levenshtein_distance("abcdf", "abcdef"), 1)
assert_equal(levenshtein_distance("cdef", "abcdef"), 2)
assert_equal(levenshtein_distance("bde", "abcdef"), 3)
# Substitutions
assert_equal(levenshtein_distance("abcd", "abc_"), 1)
assert_equal(levenshtein_distance("abcd", "ab__"), 2)
assert_equal(levenshtein_distance("abcd", "a___"), 3)
assert_equal(levenshtein_distance("abcd", "____"), 4)
# Deletion + Substitutions
assert_equal(levenshtein_distance("abcd", "abc_z"), 2)
assert_equal(levenshtein_distance("abcd", "ab__zz"), 4)
assert_equal(levenshtein_distance("abcd", "a___zzz"), 6)
assert_equal(levenshtein_distance("abcd", "____zzzz"), 8)
# Max distance
assert_equal(levenshtein_distance("abcd", "", max_dist=0), 1)
assert_equal(levenshtein_distance("abcd", "", max_dist=3), 4)
assert_equal(levenshtein_distance("abcd", "", max_dist=10), 4)
assert_equal(levenshtein_distance("abcd", "", max_dist=1), 2)
assert_equal(levenshtein_distance("abcd", "a", max_dist=2), 3)
assert_equal(levenshtein_distance("abcd", "ad", max_dist=2), 2)
assert_equal(levenshtein_distance("abcd", "abd", max_dist=2), 1)
assert_equal(levenshtein_distance("abcd", "abcd", max_dist=2), 0)
def test_simple_download_file():
fn = simple_download_file("http://yt-project.org", "simple-download-file")
try:
assert fn == "simple-download-file"
assert os.path.exists("simple-download-file")
finally:
# Clean up after ourselves.
try:
os.unlink("simple-download-file")
except FileNotFoundError:
pass
with assert_raises(RuntimeError) as ex:
simple_download_file("http://yt-project.org/404", "simple-download-file")
desired = "Attempt to download file from http://yt-project.org/404 failed with error 404: Not Found."
actual = str(ex.exception)
assert actual == desired
assert not os.path.exists("simple-download-file")
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@tests@[email protected]_END.py
|
{
"filename": "_opacity.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattermapbox/selected/marker/_opacity.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="opacity",
parent_name="scattermapbox.selected.marker",
**kwargs
):
super(OpacityValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattermapbox@selected@marker@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "kammerje/spaceKLIP",
"repo_path": "spaceKLIP_extracted/spaceKLIP-main/spaceKLIP/resources/PCEs/MIRI/__init__.py",
"type": "Python"
}
|
kammerjeREPO_NAMEspaceKLIPPATH_START.@spaceKLIP_extracted@spaceKLIP-main@spaceKLIP@resources@PCEs@MIRI@[email protected]_END.py
|
|
{
"filename": "_y.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/slider/_y.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="y", parent_name="layout.slider", **kwargs):
super(YValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
max=kwargs.pop("max", 3),
min=kwargs.pop("min", -2),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@slider@[email protected]_END.py
|
{
"filename": "test_sort_index.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/frame/methods/test_sort_index.py",
"type": "Python"
}
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index._is_lexsorted()
assert not df.index.is_monotonic_increasing
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_monotonic_increasing
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_monotonic_increasing
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
with tm.assert_produces_warning(None):
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic_increasing
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.default_rng(2).standard_normal((8, 2)),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position="last")
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position="first")
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_index_nan(self):
# GH#3917
# Test DataFrame with nan label
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last")
expected = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position="first")
expected = DataFrame(
{"A": [4, 1, 2, np.nan, 1, 6, 8], "B": [5, 9, np.nan, 5, 2, 5, 4]},
index=[np.nan, 1, 2, 3, 4, 5, 6],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=False)
expected = DataFrame(
{"A": [8, 6, 1, np.nan, 2, 1, 4], "B": [4, 5, 2, 5, np.nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind="quicksort", ascending=False, na_position="first"
)
expected = DataFrame(
{"A": [4, 8, 6, 1, np.nan, 2, 1], "B": [5, 4, 5, 2, 5, np.nan, 9]},
index=[np.nan, 6, 5, 4, 3, 2, 1],
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_index_multi_index(self):
# GH#25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
)
result = df.set_index(list("abc")).sort_index(level=list("ba"))
expected = DataFrame(
{"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
)
expected = expected.set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(
np.random.default_rng(2).standard_normal((4, 4)),
index=[1, 2, 3, 4],
columns=["A", "B", "C", "D"],
)
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_values = unordered["A"]
df = unordered.copy()
return_value = df.sort_index(inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
# GH 44153 related
# Used to be a_id != id(df["A"]), but flaky in the CI
assert a_values is not df["A"]
df = unordered.copy()
return_value = df.sort_index(ascending=False, inplace=True)
assert return_value is None
expected = frame[::-1]
tm.assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ["D", "B", "C", "A"]]
df = unordered.copy()
return_value = df.sort_index(axis=1, inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
df = unordered.copy()
return_value = df.sort_index(axis=1, ascending=False, inplace=True)
assert return_value is None
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.default_rng(2).permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame(
{"A": A, "B": B, "C": np.random.default_rng(2).standard_normal(100)}
)
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
# test with multiindex, too
idf = df.set_index(["A", "B"])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
tm.assert_frame_equal(result, expected)
# also, Series!
result = idf["C"].sort_index(ascending=[1, 0])
tm.assert_series_equal(result, expected["C"])
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(level="A", sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["A", "B"], sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
# Error thrown by sort_index when
# first index is sorted last (GH#26053)
result = df.sort_index(level=["C", "B", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["B", "C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
def test_sort_index_categorical_index(self):
df = DataFrame(
{
"A": np.arange(6, dtype="int64"),
"B": Series(list("aabbca")).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_frame_equal(result, expected)
def test_sort_index(self):
# GH#13496
frame = DataFrame(
np.arange(16).reshape(4, 4),
index=[1, 2, 3, 4],
columns=["A", "B", "C", "D"],
)
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
tm.assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
tm.assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
tm.assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", ["A", 0]) # GH#21052
def test_sort_index_multiindex(self, level):
# GH#13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples(
[[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list("ABC")
)
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 2], [2, 1, 3]], names=list("ABC")
)
expected = DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi)
result = df.sort_index(level=level)
tm.assert_frame_equal(result, expected)
# sort_remaining=False
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 3], [2, 1, 2]], names=list("ABC")
)
expected = DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi)
result = df.sort_index(level=level, sort_remaining=False)
tm.assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.default_rng(2).standard_normal(100))
x1 = Series(np.sign(np.random.default_rng(2).standard_normal(100)))
x2 = pd.cut(
Series(np.random.default_rng(2).standard_normal(100)),
bins=[-3, -0.5, 0, 0.5, 3],
)
model = pd.concat([y, x1, x2], axis=1, keys=["Y", "X1", "X2"])
result = model.groupby(["X1", "X2"], observed=True).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], closed="right"
)
result = result.columns.levels[1].categories
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, False, [5, 3, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, False, [2, 3, 5]),
],
)
def test_sort_index_ignore_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
# GH 30114
original_index = [2, 5, 3]
df = DataFrame(original_dict, index=original_index)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=original_index))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_respect_ignore_index(self, inplace, ignore_index):
# GH 43591
df = DataFrame({"a": [1, 2, 3]}, index=RangeIndex(4, -1, -2))
result = df.sort_index(
ascending=False, ignore_index=ignore_index, inplace=inplace
)
if inplace:
result = df
if ignore_index:
expected = DataFrame({"a": [1, 2, 3]})
else:
expected = DataFrame({"a": [1, 2, 3]}, index=RangeIndex(4, -1, -2))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
False,
MultiIndex.from_tuples([(2, 1), (3, 4)], names=list("AB")),
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
False,
MultiIndex.from_tuples([(3, 4), (2, 1)], names=list("AB")),
),
],
)
def test_sort_index_ignore_index_multi_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
# GH 30114, this is to test ignore_index on MultiIndex of index
mi = MultiIndex.from_tuples([(2, 1), (3, 4)], names=list("AB"))
df = DataFrame(original_dict, index=mi)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=mi))
def test_sort_index_categorical_multiindex(self):
# GH#15058
df = DataFrame(
{
"a": range(6),
"l1": pd.Categorical(
["a", "a", "b", "b", "c", "c"],
categories=["c", "a", "b"],
ordered=True,
),
"l2": [0, 1, 0, 1, 0, 1],
}
)
result = df.set_index(["l1", "l2"]).sort_index()
expected = DataFrame(
[4, 5, 0, 1, 2, 3],
columns=["a"],
index=MultiIndex(
levels=[
CategoricalIndex(
["c", "a", "b"],
categories=["c", "a", "b"],
ordered=True,
name="l1",
dtype="category",
),
[0, 1],
],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=["l1", "l2"],
),
)
tm.assert_frame_equal(result, expected)
def test_sort_index_and_reconstruction(self):
# GH#15622
# lexsortedness should be identical
# across MultiIndex construction methods
df = DataFrame([[1, 1], [2, 2]], index=list("ab"))
expected = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_tuples(
[(0.5, "a"), (0.5, "b"), (0.8, "a"), (0.8, "b")]
),
)
assert expected.index._is_lexsorted()
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_product([[0.5, 0.8], list("ab")]),
)
result = result.sort_index()
assert result.index.is_monotonic_increasing
tm.assert_frame_equal(result, expected)
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex(
levels=[[0.5, 0.8], ["a", "b"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = result.sort_index()
assert result.index._is_lexsorted()
tm.assert_frame_equal(result, expected)
concatted = pd.concat([df, df], keys=[0.8, 0.5])
result = concatted.sort_index()
assert result.index.is_monotonic_increasing
tm.assert_frame_equal(result, expected)
# GH#14015
df = DataFrame(
[[1, 2], [6, 7]],
columns=MultiIndex.from_tuples(
[(0, "20160811 12:00:00"), (0, "20160809 12:00:00")],
names=["l1", "Date"],
),
)
df.columns = df.columns.set_levels(
pd.to_datetime(df.columns.levels[1]), level=1
)
assert not df.columns.is_monotonic_increasing
result = df.sort_index(axis=1)
assert result.columns.is_monotonic_increasing
result = df.sort_index(axis=1, level=1)
assert result.columns.is_monotonic_increasing
# TODO: better name, de-duplicate with test_sort_index_level above
def test_sort_index_level2(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = frame["A"].sort_index(level=0)
# preserve names
assert a_sorted.index.names == frame.index.names
# inplace
rs = frame.copy()
return_value = rs.sort_index(level=0, inplace=True)
assert return_value is None
tm.assert_frame_equal(rs, frame.sort_index(level=0))
def test_sort_index_level_large_cardinality(self):
# GH#2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(
np.random.default_rng(2).standard_normal(4000).astype("int64"), index=index
)
# it works!
result = df.sort_index(level=0)
assert result.index._lexsort_depth == 3
# GH#2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(
np.random.default_rng(2).standard_normal(4000).astype("int32"), index=index
)
# it works!
result = df.sort_index(level=0)
assert (result.dtypes.values == df.dtypes.values).all()
assert result.index._lexsort_depth == 3
def test_sort_index_level_by_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
frame.index.names = ["first", "second"]
result = frame.sort_index(level="second")
expected = frame.sort_index(level=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level_mixed(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
sorted_before = frame.sort_index(level=1)
df = frame.copy()
df["foo"] = "bar"
sorted_after = df.sort_index(level=1)
tm.assert_frame_equal(sorted_before, sorted_after.drop(["foo"], axis=1))
dft = frame.T
sorted_before = dft.sort_index(level=1, axis=1)
dft["foo", "three"] = "bar"
sorted_after = dft.sort_index(level=1, axis=1)
tm.assert_frame_equal(
sorted_before.drop([("foo", "three")], axis=1),
sorted_after.drop([("foo", "three")], axis=1),
)
def test_sort_index_preserve_levels(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.sort_index()
assert result.index.names == frame.index.names
@pytest.mark.parametrize(
"gen,extra",
[
([1.0, 3.0, 2.0, 5.0], 4.0),
([1, 3, 2, 5], 4),
(
[
Timestamp("20130101"),
Timestamp("20130103"),
Timestamp("20130102"),
Timestamp("20130105"),
],
Timestamp("20130104"),
),
(["1one", "3one", "2one", "5one"], "4one"),
],
)
def test_sort_index_multilevel_repr_8017(self, gen, extra):
data = np.random.default_rng(2).standard_normal((3, 4))
columns = MultiIndex.from_tuples([("red", i) for i in gen])
df = DataFrame(data, index=list("def"), columns=columns)
df2 = pd.concat(
[
df,
DataFrame(
"world",
index=list("def"),
columns=MultiIndex.from_tuples([("red", extra)]),
),
],
axis=1,
)
# check that the repr is good
# make sure that we have a correct sparsified repr
# e.g. only 1 header of 'red'
assert str(df2).splitlines()[0].split() == ["red"]
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:, [0, 2, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:, [0, 2, 1, 4, 3]]
tm.assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[("red", extra)] = "world"
result = result.sort_index(axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"categories",
[
pytest.param(["a", "b", "c"], id="str"),
pytest.param(
[pd.Interval(0, 1), pd.Interval(1, 2), pd.Interval(2, 3)],
id="pd.Interval",
),
],
)
def test_sort_index_with_categories(self, categories):
# GH#23452
df = DataFrame(
{"foo": range(len(categories))},
index=CategoricalIndex(
data=categories, categories=categories, ordered=True
),
)
df.index = df.index.reorder_categories(df.index.categories[::-1])
result = df.sort_index()
expected = DataFrame(
{"foo": reversed(range(len(categories)))},
index=CategoricalIndex(
data=categories[::-1], categories=categories[::-1], ordered=True
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"ascending",
[
None,
[True, None],
[False, "True"],
],
)
def test_sort_index_ascending_bad_value_raises(self, ascending):
# GH 39434
df = DataFrame(np.arange(64))
length = len(df.index)
df.index = [(i - length / 2) % length for i in range(length)]
match = 'For argument "ascending" expected type bool'
with pytest.raises(ValueError, match=match):
df.sort_index(axis=0, ascending=ascending, na_position="first")
@pytest.mark.parametrize(
"ascending",
[(True, False), [True, False]],
)
def test_sort_index_ascending_tuple(self, ascending):
df = DataFrame(
{
"legs": [4, 2, 4, 2, 2],
},
index=MultiIndex.from_tuples(
[
("mammal", "dog"),
("bird", "duck"),
("mammal", "horse"),
("bird", "penguin"),
("mammal", "kangaroo"),
],
names=["class", "animal"],
),
)
# parameter `ascending` is a tuple
result = df.sort_index(level=(0, 1), ascending=ascending)
expected = DataFrame(
{
"legs": [2, 2, 2, 4, 4],
},
index=MultiIndex.from_tuples(
[
("bird", "penguin"),
("bird", "duck"),
("mammal", "kangaroo"),
("mammal", "horse"),
("mammal", "dog"),
],
names=["class", "animal"],
),
)
tm.assert_frame_equal(result, expected)
class TestDataFrameSortIndexKey:
def test_sort_multi_index_key(self):
# GH 25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
).set_index(list("abc"))
result = df.sort_index(level=list("ac"), key=lambda x: x)
expected = DataFrame(
{"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
).set_index(list("abc"))
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=list("ac"), key=lambda x: -x)
expected = DataFrame(
{"a": [3, 2, 1], "b": [0, 0, 0], "c": [0, 2, 1], "d": list("acb")}
).set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_sort_index_key(self): # issue 27237
df = DataFrame(np.arange(6, dtype="int64"), index=list("aaBBca"))
result = df.sort_index()
expected = df.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(key=lambda x: x.str.lower())
expected = df.iloc[[0, 1, 5, 2, 3, 4]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(key=lambda x: x.str.lower(), ascending=False)
expected = df.iloc[[4, 2, 3, 0, 1, 5]]
tm.assert_frame_equal(result, expected)
def test_sort_index_key_int(self):
df = DataFrame(np.arange(6, dtype="int64"), index=np.arange(6, dtype="int64"))
result = df.sort_index()
tm.assert_frame_equal(result, df)
result = df.sort_index(key=lambda x: -x)
expected = df.sort_index(ascending=False)
tm.assert_frame_equal(result, expected)
result = df.sort_index(key=lambda x: 2 * x)
tm.assert_frame_equal(result, df)
def test_sort_multi_index_key_str(self):
# GH 25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": ["B", "a", "C"], "b": [0, 1, 0], "c": list("abc"), "d": [0, 1, 2]}
).set_index(list("abc"))
result = df.sort_index(level="a", key=lambda x: x.str.lower())
expected = DataFrame(
{"a": ["a", "B", "C"], "b": [1, 0, 0], "c": list("bac"), "d": [1, 0, 2]}
).set_index(list("abc"))
tm.assert_frame_equal(result, expected)
result = df.sort_index(
level=list("abc"), # can refer to names
key=lambda x: x.str.lower() if x.name in ["a", "c"] else -x,
)
expected = DataFrame(
{"a": ["a", "B", "C"], "b": [1, 0, 0], "c": list("bac"), "d": [1, 0, 2]}
).set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_changes_length_raises(self):
df = DataFrame({"A": [1, 2, 3]})
with pytest.raises(ValueError, match="change the shape"):
df.sort_index(key=lambda x: x[:1])
def test_sort_index_multiindex_sparse_column(self):
# GH 29735, testing that sort_index on a multiindexed frame with sparse
# columns fills with 0.
expected = DataFrame(
{
i: pd.array([0.0, 0.0, 0.0, 0.0], dtype=pd.SparseDtype("float64", 0.0))
for i in range(4)
},
index=MultiIndex.from_product([[1, 2], [1, 2]]),
)
result = expected.sort_index(level=0)
tm.assert_frame_equal(result, expected)
def test_sort_index_na_position(self):
# GH#51612
df = DataFrame([1, 2], index=MultiIndex.from_tuples([(1, 1), (1, pd.NA)]))
expected = df.copy()
result = df.sort_index(level=[0, 1], na_position="last")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_index_multiindex_sort_remaining(self, ascending):
# GH #24247
df = DataFrame(
{"A": [1, 2, 3, 4, 5], "B": [10, 20, 30, 40, 50]},
index=MultiIndex.from_tuples(
[("a", "x"), ("a", "y"), ("b", "x"), ("b", "y"), ("c", "x")]
),
)
result = df.sort_index(level=1, sort_remaining=False, ascending=ascending)
if ascending:
expected = DataFrame(
{"A": [1, 3, 5, 2, 4], "B": [10, 30, 50, 20, 40]},
index=MultiIndex.from_tuples(
[("a", "x"), ("b", "x"), ("c", "x"), ("a", "y"), ("b", "y")]
),
)
else:
expected = DataFrame(
{"A": [2, 4, 1, 3, 5], "B": [20, 40, 10, 30, 50]},
index=MultiIndex.from_tuples(
[("a", "y"), ("b", "y"), ("a", "x"), ("b", "x"), ("c", "x")]
),
)
tm.assert_frame_equal(result, expected)
def test_sort_index_with_sliced_multiindex():
# GH 55379
mi = MultiIndex.from_tuples(
[
("a", "10"),
("a", "18"),
("a", "25"),
("b", "16"),
("b", "26"),
("a", "45"),
("b", "28"),
("a", "5"),
("a", "50"),
("a", "51"),
("b", "4"),
],
names=["group", "str"],
)
df = DataFrame({"x": range(len(mi))}, index=mi)
result = df.iloc[0:6].sort_index()
expected = DataFrame(
{"x": [0, 1, 2, 5, 3, 4]},
index=MultiIndex.from_tuples(
[
("a", "10"),
("a", "18"),
("a", "25"),
("a", "45"),
("b", "16"),
("b", "26"),
],
names=["group", "str"],
),
)
tm.assert_frame_equal(result, expected)
def test_axis_columns_ignore_index():
# GH 56478
df = DataFrame([[1, 2]], columns=["d", "c"])
result = df.sort_index(axis="columns", ignore_index=True)
expected = DataFrame([[2, 1]])
tm.assert_frame_equal(result, expected)
def test_axis_columns_ignore_index_ascending_false():
# GH 57293
df = DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
result = df.sort_index(axis="columns", ignore_index=True, ascending=False)
expected = df.copy()
expected.columns = RangeIndex(2)
tm.assert_frame_equal(result, expected)
def test_sort_index_stable_sort():
# GH 57151
df = DataFrame(
data=[
(Timestamp("2024-01-30 13:00:00"), 13.0),
(Timestamp("2024-01-30 13:00:00"), 13.1),
(Timestamp("2024-01-30 12:00:00"), 12.0),
(Timestamp("2024-01-30 12:00:00"), 12.1),
],
columns=["dt", "value"],
).set_index(["dt"])
result = df.sort_index(level="dt", kind="stable")
expected = DataFrame(
data=[
(Timestamp("2024-01-30 12:00:00"), 12.0),
(Timestamp("2024-01-30 12:00:00"), 12.1),
(Timestamp("2024-01-30 13:00:00"), 13.0),
(Timestamp("2024-01-30 13:00:00"), 13.1),
],
columns=["dt", "value"],
).set_index(["dt"])
tm.assert_frame_equal(result, expected)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@frame@methods@[email protected]_END.py
|
{
"filename": "README.md",
"repo_name": "microsoft/vscode",
"repo_path": "vscode_extracted/vscode-main/extensions/simple-browser/README.md",
"type": "Markdown"
}
|
# Simple Browser
**Notice:** This extension is bundled with Visual Studio Code. It can be disabled but not uninstalled.
Provides a very basic browser preview using an iframe embedded in a [webview](). This extension is primarily meant to be used by other extensions for showing simple web content.
|
microsoftREPO_NAMEvscodePATH_START.@vscode_extracted@vscode-main@extensions@[email protected]@.PATH_END.py
|
{
"filename": "derivative_util.py",
"repo_name": "lenstronomy/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/Util/derivative_util.py",
"type": "Python"
}
|
"""Routines to compute derivatives of spherical functions."""
import numpy as np
from lenstronomy.Util.package_util import exporter
export, __all__ = exporter()
@export
def d_r_dx(x, y):
"""Derivative of r with respect to x :param x:
:param y:
:return:
"""
return x / np.sqrt(x**2 + y**2)
@export
def d_r_dy(x, y):
"""Differential dr/dy.
:param x:
:param y:
:return:
"""
return y / np.sqrt(x**2 + y**2)
@export
def d_r_dxx(x, y):
"""Second derivative dr/dxdx :param x:
:param y:
:return:
"""
return y**2 / (x**2 + y**2) ** (3.0 / 2)
@export
def d_r_dyy(x, y):
"""Second derivative dr/dxdx :param x:
:param y:
:return:
"""
return x**2 / (x**2 + y**2) ** (3.0 / 2)
@export
def d_r_dxy(x, y):
"""Second derivative dr/dxdx :param x:
:param y:
:return:
"""
return -x * y / (x**2 + y**2) ** (3 / 2.0)
@export
def d_phi_dx(x, y):
"""Angular derivative in respect to x when phi = arctan2(y, x)
:param x:
:param y:
:return:
"""
return -y / (x**2 + y**2)
@export
def d_phi_dy(x, y):
"""Angular derivative in respect to y when phi = arctan2(y, x)
:param x:
:param y:
:return:
"""
return x / (x**2 + y**2)
@export
def d_phi_dxx(x, y):
"""Second derivative of the orientation angle.
:param x:
:param y:
:return:
"""
return 2 * x * y / (x**2 + y**2) ** 2
@export
def d_phi_dyy(x, y):
"""Second derivative of the orientation angle in dydy.
:param x:
:param y:
:return:
"""
return -2 * x * y / (x**2 + y**2) ** 2
@export
def d_phi_dxy(x, y):
"""Second derivative of the orientation angle in dxdy.
:param x:
:param y:
:return:
"""
return (-(x**2) + y**2) / (x**2 + y**2) ** 2
@export
def d_x_diffr_dx(x, y):
"""Derivative of d(x/r)/dx equivalent to second order derivatives dr_dxx.
:param x:
:param y:
:return:
"""
return y**2 / (x**2 + y**2) ** (3 / 2.0)
@export
def d_y_diffr_dy(x, y):
"""Derivative of d(y/r)/dy equivalent to second order derivatives dr_dyy.
:param x:
:param y:
:return:
"""
return x**2 / (x**2 + y**2) ** (3 / 2.0)
@export
def d_y_diffr_dx(x, y):
"""Derivative of d(y/r)/dx equivalent to second order derivatives dr_dxy.
:param x:
:param y:
:return:
"""
return -x * y / (x**2 + y**2) ** (3 / 2.0)
@export
def d_x_diffr_dy(x, y):
"""Derivative of d(x/r)/dy equivalent to second order derivatives dr_dyx.
:param x:
:param y:
:return:
"""
return -x * y / (x**2 + y**2) ** (3 / 2.0)
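# Editor's sketch (not part of the original lenstronomy module): a quick
# finite-difference sanity check of the analytic first derivatives above. The
# evaluation point and step size are arbitrary illustrative choices.
def _finite_difference_check(x=1.3, y=-0.7, eps=1e-6):
    """Compare d_r_dx and d_phi_dx against central differences of r and phi."""
    def r(x_, y_):
        return np.sqrt(x_**2 + y_**2)
    def phi(x_, y_):
        return np.arctan2(y_, x_)
    dr_num = (r(x + eps, y) - r(x - eps, y)) / (2 * eps)
    dphi_num = (phi(x + eps, y) - phi(x - eps, y)) / (2 * eps)
    # d_r_dx = x / r and d_phi_dx = -y / r**2 should match the numerical values
    assert np.isclose(d_r_dx(x, y), dr_num)
    assert np.isclose(d_phi_dx(x, y), dphi_num)
    return dr_num, dphi_num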
|
lenstronomyREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@Util@[email protected]_END.py
|
{
"filename": "inter_rater.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/stats/inter_rater.py",
"type": "Python"
}
|
"""Inter Rater Agreement
contains
--------
fleiss_kappa
cohens_kappa
aggregate_raters:
helper function to get data into fleiss_kappa format
to_table:
helper function to create contingency table, can be used for cohens_kappa
Created on Thu Dec 06 22:57:56 2012
Author: Josef Perktold
License: BSD-3
References
----------
Wikipedia: kappa's initially based on these two pages
https://en.wikipedia.org/wiki/Fleiss%27_kappa
https://en.wikipedia.org/wiki/Cohen's_kappa
SAS-Manual : formulas for cohens_kappa, especially variances
see also R package irr
TODO
----
standard errors and hypothesis tests for fleiss_kappa
other statistics and tests,
in R package irr, SAS has more
inconsistent internal naming, changed variable names as I added more
functionality
convenience functions to create required data format from raw data
DONE
"""
import numpy as np
from scipy import stats #get rid of this? need only norm.sf
class ResultsBunch(dict):
template = '%r'
def __init__(self, **kwds):
dict.__init__(self, kwds)
self.__dict__ = self
self._initialize()
def _initialize(self):
pass
def __str__(self):
return self.template % self
def _int_ifclose(x, dec=1, width=4):
'''helper function for creating result string for int or float
only dec=1 and width=4 is implemented
Parameters
----------
x : int or float
value to format
dec : 1
number of decimals to print if x is not an integer
width : 4
width of string
Returns
-------
xint : int or float
x is converted to int if it is within 1e-14 of an integer
x_string : str
x formatted as string, either '%4d' or '%4.1f'
'''
xint = int(round(x))
if np.max(np.abs(xint - x)) < 1e-14:
return xint, '%4d' % xint
else:
return x, '%4.1f' % x
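# Editor's sketch (not part of the original module): _int_ifclose in action.
# 95.0 is within 1e-14 of an integer and gets the '%4d' format; 97.5 keeps one
# decimal and gets '%4.1f'.
def _example_int_ifclose():
    assert _int_ifclose(95.0) == (95, '  95')
    assert _int_ifclose(97.5) == (97.5, '97.5')
    return _int_ifclose(95.0), _int_ifclose(97.5)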
def aggregate_raters(data, n_cat=None):
'''convert raw data with shape (subject, rater) to (subject, cat_counts)
brings data into correct format for fleiss_kappa
bincount will raise exception if data cannot be converted to integer.
Parameters
----------
data : array_like, 2-Dim
data containing category assignment with subjects in rows and raters
in columns.
n_cat : None or int
If None, then the data is converted to integer categories,
0,1,2,...,n_cat-1. Because of the relabeling only category levels
with non-zero counts are included.
If this is an integer, then the category levels in the data are already
assumed to be in integers, 0,1,2,...,n_cat-1. In this case, the
returned array may contain columns with zero count, if no subject
has been categorized with this level.
Returns
-------
arr : nd_array, (n_rows, n_cat)
Contains counts of raters that assigned a category level to individuals.
Subjects are in rows, category levels in columns.
categories : nd_array, (n_category_levels,)
Contains the category levels.
'''
data = np.asarray(data)
n_rows = data.shape[0]
if n_cat is None:
#I could add int conversion (reverse_index) to np.unique
cat_uni, cat_int = np.unique(data.ravel(), return_inverse=True)
n_cat = len(cat_uni)
data_ = cat_int.reshape(data.shape)
else:
cat_uni = np.arange(n_cat) #for return only, assumed cat levels
data_ = data
tt = np.zeros((n_rows, n_cat), int)
for idx, row in enumerate(data_):
ro = np.bincount(row)
tt[idx, :len(ro)] = ro
return tt, cat_uni
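# Editor's sketch (not part of the original module): a minimal worked example of
# aggregate_raters. The ratings below (3 subjects, 3 raters, labels 'a'/'b'/'c')
# are made up purely for illustration.
def _example_aggregate_raters():
    raw = np.array([['a', 'a', 'b'],
                    ['b', 'b', 'b'],
                    ['a', 'c', 'c']])
    counts, cats = aggregate_raters(raw)
    # counts -> [[2, 1, 0], [0, 3, 0], [1, 0, 2]] with subjects in rows and the
    # observed categories cats -> ['a', 'b', 'c'] in columns
    return counts, cats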
def to_table(data, bins=None):
'''convert raw data with shape (subject, rater) to (rater1, rater2)
brings data into correct format for cohens_kappa
Parameters
----------
data : array_like, 2-Dim
data containing category assignment with subjects in rows and raters
in columns.
bins : None, int or tuple of array_like
If None, then the data is converted to integer categories,
0,1,2,...,n_cat-1. Because of the relabeling only category levels
with non-zero counts are included.
If this is an integer, then the category levels in the data are already
assumed to be in integers, 0,1,2,...,n_cat-1. In this case, the
returned array may contain columns with zero count, if no subject
has been categorized with this level.
If bins are a tuple of two array_like, then the bins are directly used
by ``numpy.histogramdd``. This is useful if we want to merge categories.
Returns
-------
arr : nd_array, (n_cat, n_cat)
Contingency table that contains counts of category level with rater1
in rows and rater2 in columns.
Notes
-----
no NaN handling, delete rows with missing values
This works also for more than two raters. In that case the dimension of
the resulting contingency table is the same as the number of raters
instead of 2-dimensional.
'''
data = np.asarray(data)
n_rows, n_cols = data.shape
if bins is None:
#I could add int conversion (reverse_index) to np.unique
cat_uni, cat_int = np.unique(data.ravel(), return_inverse=True)
n_cat = len(cat_uni)
data_ = cat_int.reshape(data.shape)
bins_ = np.arange(n_cat+1) - 0.5
#alternative implementation with double loop
#tt = np.asarray([[(x == [i,j]).all(1).sum() for j in cat_uni]
# for i in cat_uni] )
#other alternative: unique rows and bincount
elif np.isscalar(bins):
bins_ = np.arange(bins+1) - 0.5
data_ = data
else:
bins_ = bins
data_ = data
tt = np.histogramdd(data_, (bins_,)*n_cols)
return tt[0], bins_
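# Editor's sketch (not part of the original module): to_table for two raters and
# three category levels; the assignments are illustrative only.
def _example_to_table():
    raw = np.array([[0, 0], [1, 0], [1, 1], [2, 2], [0, 2]])
    table, bins_ = to_table(raw)
    # table is the 3x3 contingency table of (rater1, rater2) pairs:
    # [[1., 0., 1.], [1., 1., 0.], [0., 0., 1.]], bins_ -> [-0.5, 0.5, 1.5, 2.5]
    return table, bins_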
def fleiss_kappa(table, method='fleiss'):
"""Fleiss' and Randolph's kappa multi-rater agreement measure
Parameters
----------
table : array_like, 2-D
assumes subjects in rows, and categories in columns. Convert raw data
into this format by using
:func:`statsmodels.stats.inter_rater.aggregate_raters`
method : str
Method 'fleiss' returns Fleiss' kappa which uses the sample margin
to define the chance outcome.
Method 'randolph' or 'uniform' (only first 4 letters are needed)
returns Randolph's (2005) multirater kappa which assumes a uniform
distribution of the categories to define the chance outcome.
Returns
-------
kappa : float
Fleiss's or Randolph's kappa statistic for inter rater agreement
Notes
-----
no variance or hypothesis tests yet
Interrater agreement measures like Fleiss's kappa measure agreement relative
to chance agreement. Different authors have proposed ways of defining
these chance agreements. Fleiss' is based on the marginal sample distribution
of categories, while Randolph uses a uniform distribution of categories as
benchmark. Warrens (2010) showed that Randolph's kappa is always larger or
equal to Fleiss' kappa. Under some commonly observed condition, Fleiss' and
Randolph's kappa provide lower and upper bounds for two similar kappa_like
measures by Light (1971) and Hubert (1977).
References
----------
Wikipedia https://en.wikipedia.org/wiki/Fleiss%27_kappa
Fleiss, Joseph L. 1971. "Measuring Nominal Scale Agreement among Many
Raters." Psychological Bulletin 76 (5): 378-82.
https://doi.org/10.1037/h0031619.
Randolph, Justus J. 2005 "Free-Marginal Multirater Kappa (multirater
K [free]): An Alternative to Fleiss' Fixed-Marginal Multirater Kappa."
Presented at the Joensuu Learning and Instruction Symposium, vol. 2005
https://eric.ed.gov/?id=ED490661
Warrens, Matthijs J. 2010. "Inequalities between Multi-Rater Kappas."
Advances in Data Analysis and Classification 4 (4): 271-86.
https://doi.org/10.1007/s11634-010-0073-4.
"""
table = 1.0 * np.asarray(table) #avoid integer division
n_sub, n_cat = table.shape
n_total = table.sum()
n_rater = table.sum(1)
n_rat = n_rater.max()
#assume fully rated (every subject has n_rat ratings)
assert n_total == n_sub * n_rat
#marginal frequency of categories
p_cat = table.sum(0) / n_total
table2 = table * table
p_rat = (table2.sum(1) - n_rat) / (n_rat * (n_rat - 1.))
p_mean = p_rat.mean()
if method == 'fleiss':
p_mean_exp = (p_cat*p_cat).sum()
elif method.startswith('rand') or method.startswith('unif'):
p_mean_exp = 1 / n_cat
kappa = (p_mean - p_mean_exp) / (1- p_mean_exp)
return kappa
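# Editor's sketch (not part of the original module): with perfect agreement the
# chance correction cancels and both variants return 1. Made-up table: 3
# subjects, 3 categories, each subject rated identically by all 5 raters.
def _example_fleiss_kappa():
    table = np.array([[5, 0, 0],
                      [0, 5, 0],
                      [0, 0, 5]])
    assert np.isclose(fleiss_kappa(table, method='fleiss'), 1.0)
    assert np.isclose(fleiss_kappa(table, method='randolph'), 1.0)
    return fleiss_kappa(table)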
def cohens_kappa(table, weights=None, return_results=True, wt=None):
'''Compute Cohen's kappa with variance and equal-zero test
Parameters
----------
table : array_like, 2-Dim
square array with results of two raters, one rater in rows, second
rater in columns
weights : array_like
The interpretation of weights depends on the wt argument.
If both are None, then the simple kappa is computed.
see wt for the case when wt is not None
If weights is two dimensional, then it is directly used as a weight
matrix. For computing the variance of kappa, the maximum of the
weights is assumed to be smaller or equal to one.
TODO: fix conflicting definitions in the 2-Dim case for
wt : {None, str}
If wt and weights are None, then the simple kappa is computed.
If wt is given, but weights is None, then the weights are set to
be [0, 1, 2, ..., k].
If weights is a one-dimensional array, then it is used to construct
the weight matrix given the following options.
wt in ['linear', 'ca' or None] : use linear weights, Cicchetti-Allison
actual weights are linear in the score "weights" difference
wt in ['quadratic', 'fc'] : use quadratic weights, Fleiss-Cohen
actual weights are squared in the score "weights" difference
wt = 'toeplitz' : weight matrix is constructed as a toeplitz matrix
from the one dimensional weights.
return_results : bool
If True (default), then an instance of KappaResults is returned.
If False, then only kappa is computed and returned.
Returns
-------
results or kappa
If return_results is True (default), then a results instance with all
statistics is returned
If return_results is False, then only kappa is calculated and returned.
Notes
-----
There are two conflicting definitions of the weight matrix, Wikipedia
versus SAS manual. However, the computation are invariant to rescaling
of the weights matrix, so there is no difference in the results.
Weights for 'linear' and 'quadratic' are interpreted as scores for the
categories, the weights in the computation are based on the pairwise
difference between the scores.
Weights for 'toeplitz' are interpreted as a weighted distance. The distance
only depends on how many levels apart two entries in the table are but
not on the levels themselves.
example:
weights = '0, 1, 2, 3' and wt is either linear or toeplitz means that the
weighting only depends on the simple distance of levels.
weights = '0, 0, 1, 1' and wt = 'linear' means that the first two levels
are zero distance apart and the same for the last two levels. This is
the same as forming two aggregated levels by merging the first two and
the last two levels, respectively.
weights = [0, 1, 2, 3] and wt = 'quadratic' is the same as squaring these
weights and using wt = 'toeplitz'.
References
----------
Wikipedia
SAS Manual
'''
table = np.asarray(table, float) #avoid integer division
agree = np.diag(table).sum()
nobs = table.sum()
probs = table / nobs
freqs = probs #TODO: rename to use freqs instead of probs for observed
probs_diag = np.diag(probs)
freq_row = table.sum(1) / nobs
freq_col = table.sum(0) / nobs
prob_exp = freq_col * freq_row[:, None]
assert np.allclose(prob_exp.sum(), 1)
#print prob_exp.sum()
agree_exp = np.diag(prob_exp).sum() #need for kappa_max
if weights is None and wt is None:
kind = 'Simple'
kappa = (agree / nobs - agree_exp) / (1 - agree_exp)
if return_results:
#variance
term_a = probs_diag * (1 - (freq_row + freq_col) * (1 - kappa))**2
term_a = term_a.sum()
term_b = probs * (freq_col[:, None] + freq_row)**2
d_idx = np.arange(table.shape[0])
term_b[d_idx, d_idx] = 0 #set diagonal to zero
term_b = (1 - kappa)**2 * term_b.sum()
term_c = (kappa - agree_exp * (1-kappa))**2
var_kappa = (term_a + term_b - term_c) / (1 - agree_exp)**2 / nobs
#term_c = freq_col * freq_row[:, None] * (freq_col + freq_row[:,None])
term_c = freq_col * freq_row * (freq_col + freq_row)
var_kappa0 = (agree_exp + agree_exp**2 - term_c.sum())
var_kappa0 /= (1 - agree_exp)**2 * nobs
else:
if weights is None:
weights = np.arange(table.shape[0])
#weights follows the Wikipedia definition, not the SAS, which is 1 -
kind = 'Weighted'
weights = np.asarray(weights, float)
if weights.ndim == 1:
if wt in ['ca', 'linear', None]:
weights = np.abs(weights[:, None] - weights) / \
(weights[-1] - weights[0])
elif wt in ['fc', 'quadratic']:
weights = (weights[:, None] - weights)**2 / \
(weights[-1] - weights[0])**2
elif wt == 'toeplitz':
#assume toeplitz structure
from scipy.linalg import toeplitz
#weights = toeplitz(np.arange(table.shape[0]))
weights = toeplitz(weights)
else:
raise ValueError('wt option is not known')
else:
rows, cols = table.shape
if (table.shape != weights.shape):
raise ValueError('weights are not square')
#this is formula from Wikipedia
kappa = 1 - (weights * table).sum() / nobs / (weights * prob_exp).sum()
#TODO: add var_kappa for weighted version
if return_results:
var_kappa = np.nan
var_kappa0 = np.nan
#switch to SAS manual weights, problem if user specifies weights
#w is negative in some examples,
#but weights is scale invariant in examples and rough check of source
w = 1. - weights
w_row = (freq_col * w).sum(1)
w_col = (freq_row[:, None] * w).sum(0)
agree_wexp = (w * freq_col * freq_row[:, None]).sum()
term_a = freqs * (w - (w_col + w_row[:, None]) * (1 - kappa))**2
fac = 1. / ((1 - agree_wexp)**2 * nobs)
var_kappa = term_a.sum() - (kappa - agree_wexp * (1 - kappa))**2
var_kappa *= fac
freqse = freq_col * freq_row[:, None]
var_kappa0 = (freqse * (w - (w_col + w_row[:, None]))**2).sum()
var_kappa0 -= agree_wexp**2
var_kappa0 *= fac
kappa_max = (np.minimum(freq_row, freq_col).sum() - agree_exp) / \
(1 - agree_exp)
if return_results:
res = KappaResults( kind=kind,
kappa=kappa,
kappa_max=kappa_max,
weights=weights,
var_kappa=var_kappa,
var_kappa0=var_kappa0)
return res
else:
return kappa
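# Editor's sketch (not part of the original module): simple (unweighted) kappa
# on a made-up symmetric 2x2 table; observed agreement 20/30 against chance
# agreement 1/2 gives kappa = (2/3 - 1/2) / (1 - 1/2) = 1/3.
def _example_cohens_kappa():
    table = np.array([[10, 5],
                      [5, 10]])
    kappa = cohens_kappa(table, return_results=False)
    assert np.isclose(kappa, 1.0 / 3.0)
    return kappa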
_kappa_template = '''\
%(kind)s Kappa Coefficient
--------------------------------
Kappa %(kappa)6.4f
ASE %(std_kappa)6.4f
%(alpha_ci)s%% Lower Conf Limit %(kappa_low)6.4f
%(alpha_ci)s%% Upper Conf Limit %(kappa_upp)6.4f
Test of H0: %(kind)s Kappa = 0
ASE under H0 %(std_kappa0)6.4f
Z %(z_value)6.4f
One-sided Pr > Z %(pvalue_one_sided)6.4f
Two-sided Pr > |Z| %(pvalue_two_sided)6.4f
'''
'''
Weighted Kappa Coefficient
--------------------------------
Weighted Kappa 0.4701
ASE 0.1457
95% Lower Conf Limit 0.1845
95% Upper Conf Limit 0.7558
Test of H0: Weighted Kappa = 0
ASE under H0 0.1426
Z 3.2971
One-sided Pr > Z 0.0005
Two-sided Pr > |Z| 0.0010
'''
class KappaResults(ResultsBunch):
'''Results for Cohen's kappa
Attributes
----------
kappa : cohen's kappa
var_kappa : variance of kappa
std_kappa : standard deviation of kappa
alpha : one-sided probability for confidence interval
kappa_low : lower (1-alpha) confidence limit
kappa_upp : upper (1-alpha) confidence limit
var_kappa0 : variance of kappa under H0: kappa=0
std_kappa0 : standard deviation of kappa under H0: kappa=0
z_value : test statistic for H0: kappa=0, is standard normal distributed
pvalue_one_sided : one sided p-value for H0: kappa=0 and H1: kappa>0
pvalue_two_sided : two sided p-value for H0: kappa=0 and H1: kappa!=0
distribution_kappa : asymptotic normal distribution of kappa
distribution_zero_null : asymptotic normal distribution of kappa under
H0: kappa=0
The confidence interval for kappa and the statistics for the test of
H0: kappa=0 are based on the asymptotic normal distribution of kappa.
'''
template = _kappa_template
def _initialize(self):
if 'alpha' not in self:
self['alpha'] = 0.025
self['alpha_ci'] = _int_ifclose(100 - 0.025 * 200)[1]
self['std_kappa'] = np.sqrt(self['var_kappa'])
self['std_kappa0'] = np.sqrt(self['var_kappa0'])
self['z_value'] = self['kappa'] / self['std_kappa0']
self['pvalue_one_sided'] = stats.norm.sf(self['z_value'])
self['pvalue_two_sided'] = stats.norm.sf(np.abs(self['z_value'])) * 2
delta = stats.norm.isf(self['alpha']) * self['std_kappa']
self['kappa_low'] = self['kappa'] - delta
self['kappa_upp'] = self['kappa'] + delta
self['distribution_kappa'] = stats.norm(loc=self['kappa'],
scale=self['std_kappa'])
self['distribution_zero_null'] = stats.norm(loc=0,
scale=self['std_kappa0'])
def __str__(self):
return self.template % self
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@stats@[email protected]_END.py
|
{
"filename": "test_INS.py",
"repo_name": "mwilensky768/SSINS",
"repo_path": "SSINS_extracted/SSINS-master/SSINS/tests/test_INS.py",
"type": "Python"
}
|
from SSINS import INS, SS
from SSINS.data import DATA_PATH
import numpy as np
import os
import pytest
from pyuvdata import UVData, UVFlag
from datetime import datetime
@pytest.fixture
def mix_obs():
return "1061312640_mix"
@pytest.fixture
def mix_file(mix_obs):
return os.path.join(DATA_PATH, f"{mix_obs}.uvfits")
@pytest.fixture
def cross_obs(mix_obs):
return f"{mix_obs}_cross_SSINS_data"
@pytest.fixture
def cross_testfile(cross_obs):
return os.path.join(DATA_PATH, f"{cross_obs}.h5")
@pytest.fixture
def tv_obs():
return '1061313128_99bl_1pol_half_time'
@pytest.fixture
def tv_testfile(tv_obs):
return os.path.join(DATA_PATH, f'{tv_obs}.uvfits')
@pytest.fixture
def tv_ins_testfile(tv_obs):
return os.path.join(DATA_PATH, f"{tv_obs}_SSINS.h5")
@pytest.mark.filterwarnings("ignore:Reordering", "ignore:invalid value",
"ignore:SS.read")
def test_init(tv_testfile):
ss = SS()
ss.read(tv_testfile, flag_choice='original', diff=True)
# Needs to be in time order for averaging comparison to work
ss.reorder_blts(order='time')
ins = INS(ss)
# Mock the averaging method
new_shape = [ss.Ntimes, ss.Nbls, ss.Nfreqs, ss.Npols]
test_dat = np.mean(np.abs(ss.data_array).reshape(new_shape), axis=1)
# Mock the weights array
test_weights = np.sum(np.logical_not(ss.data_array.mask).reshape(new_shape), axis=1)
# Check that the data array averaged correctly
# Weights are floating-point, which introduces itty bitty errors compared to the masked average.
assert np.all(np.isclose(test_dat, ins.metric_array, rtol=1e-6, atol=1e-7)), "Averaging did not work as intended."
# Check that the weights summed correctly
assert np.all(test_weights == ins.weights_array), "Weights did not sum properly"
def test_no_diff_start(tv_testfile):
# Don't diff - will fail to mask data array
ss = SS()
with pytest.warns(UserWarning, match="flag_choice will be ignored"):
ss.read(tv_testfile, flag_choice='original', diff=False)
with pytest.warns(UserWarning, match="diff on read defaults to False"):
ss.read(tv_testfile, flag_choice='original', diff=False)
ins = INS(ss)
assert ss.flag_choice is None
@pytest.mark.filterwarnings("ignore:Reordering", "ignore:SS.read")
def test_mean_subtract(tv_testfile):
ss = SS()
ss.read(tv_testfile, diff=True)
ins = INS(ss, order=0)
old_dat = np.copy(ins.metric_ms)
# Mask the first five frequencies at the first time
ins.metric_array[0, :5] = np.ma.masked
# Calculate the new mean-subtracted spectrum only over the first few masked frequencies
ins.metric_ms[:, :5] = ins.mean_subtract(freq_slice=slice(0, 5))
# See if a new mean was calculated over the first five frequencies
assert not np.all(old_dat[1:, :5] == ins.metric_ms[1:, :5]), "All elements of the ms array are still equal"
@pytest.mark.filterwarnings("ignore:Reordering", "ignore:SS.read")
def test_polyfit(tv_testfile):
ss = SS()
ss.read(tv_testfile, diff=True)
ins = INS(ss, order=1)
# Mock some data for which the polyfit is exact
x = np.arange(1, ins.Ntimes + 1)
for ind in range(ins.Nfreqs):
ins.metric_array[:, ind, 0] = 3 * x + 5
ins.metric_array.mask = np.zeros(ins.metric_array.shape, dtype=bool)
ins.weights_array = np.ones(ins.metric_array.shape)
ins.weights_square_array = np.copy(ins.weights_array)
ins.metric_ms, coeffs = ins.mean_subtract(return_coeffs=True)
test_coeffs = np.zeros((ins.order + 1, ) + ins.metric_ms.shape[1:])
test_coeffs[0, :] = 3
test_coeffs[1, :] = 5
assert np.all(np.allclose(ins.metric_ms, np.zeros(ins.metric_ms.shape))), "The polyfit was not exact"
assert np.all(np.allclose(coeffs, test_coeffs)), "The polyfit got the wrong coefficients"
ins.metric_array[:] = np.ma.masked
ins.metric_ms = ins.mean_subtract()
assert np.all(ins.metric_ms.mask), "The metric_ms array was not all masked"
@pytest.mark.filterwarnings("ignore:Reordering", "ignore:SS.read")
def test_mask_to_flags(tmp_path, tv_obs, tv_testfile):
prefix = os.path.join(tmp_path, f'{tv_obs}_test')
flags_outfile = f'{prefix}_SSINS_flags.h5'
ss = SS()
ss.read(tv_testfile, diff=True)
uvd = UVData()
uvd.read(tv_testfile, use_future_array_shapes=True)
uvf = UVFlag(uvd, mode='flag', waterfall=True, use_future_array_shapes=True)
# start with some flags so that we can test the intended OR operation
uvf.flag_array[6, :] = True
ins = INS(ss)
# Check error handling
with pytest.raises(ValueError):
bad_uvf = UVFlag(uvd, mode='metric', waterfall=True, use_future_array_shapes=True)
err_uvf = ins.flag_uvf(uvf=bad_uvf)
with pytest.raises(ValueError):
bad_uvf = UVFlag(uvd, mode='flag', waterfall=False, use_future_array_shapes=True)
err_uvf = ins.flag_uvf(uvf=bad_uvf)
with pytest.raises(ValueError):
bad_uvf = UVFlag(uvd, mode='flag', waterfall=True, use_future_array_shapes=True)
# Pretend the data is off by 1 day
bad_uvf.time_array += 1
err_uvf = ins.flag_uvf(uvf=bad_uvf)
# Pretend we flagged the INS object
freq_inds_1 = np.arange(0, len(ins.freq_array), 2)
freq_inds_2 = np.arange(1, len(ins.freq_array), 2)
ins.metric_array[1, freq_inds_1] = np.ma.masked
ins.metric_array[3, freq_inds_1] = np.ma.masked
ins.metric_array[7, freq_inds_2] = np.ma.masked
ins.metric_array[-2, freq_inds_2] = np.ma.masked
# Make a NEW uvflag object
new_uvf = ins.flag_uvf(uvf=uvf, inplace=False)
# Construct the expected flags by hand
test_flags = np.zeros_like(new_uvf.flag_array)
test_flags[1:5, freq_inds_1] = True
test_flags[6, :] = True
test_flags[7, freq_inds_2] = True
test_flags[8, freq_inds_2] = True
test_flags[-3:-1, freq_inds_2] = True
# Check that new flags are correct
assert np.all(new_uvf.flag_array == test_flags), "Test flags were not equal to calculated flags."
# Check that the input uvf was not edited in place
assert new_uvf != uvf, "The UVflag object was edited inplace and should not have been."
# Edit the uvf inplace
inplace_uvf = ins.flag_uvf(uvf=uvf, inplace=True)
# Check that new flags are correct
assert np.all(inplace_uvf.flag_array == test_flags), "Test flags were not equal to calculated flags."
# Check that the input uvf was not edited in place
assert inplace_uvf == uvf, "The UVflag object was not edited inplace and should have been."
# Test write/read
ins.write(prefix, output_type='flags', uvf=uvf)
read_uvf = UVFlag(flags_outfile, mode='flag', waterfall=True, use_future_array_shapes=True)
# Check equality
assert read_uvf == uvf, "UVFlag object differs after read"
@pytest.mark.filterwarnings("ignore:Reordering", "ignore:SS.read", "ignore:invalid value")
def test_write(tmp_path, tv_obs, tv_testfile):
prefix = os.path.join(tmp_path, f'{tv_obs}_test')
data_outfile = f'{prefix}_SSINS_data.h5'
z_score_outfile = f'{prefix}_SSINS_z_score.h5'
mask_outfile = f'{prefix}_SSINS_mask.h5'
match_outfile = f'{prefix}_SSINS_match_events.yml'
sep_data_outfile = f'{prefix}.SSINS.data.h5'
ss = SS()
ss.read(tv_testfile, flag_choice='original', diff=True)
ins = INS(ss)
# Mock some events
ins.match_events.append((slice(0, 1), slice(1, 3), 'shape', 5))
ins.match_events.append((slice(1, 2), slice(1, 3), 'shape', 5))
ins.metric_array[:2, 1:3] = np.ma.masked
ins.metric_ms = ins.mean_subtract()
ins.write(prefix, output_type='data', clobber=True)
ins.write(prefix, output_type='z_score', clobber=True)
ins.write(prefix, output_type='mask', clobber=True)
ins.write(prefix, output_type='match_events', clobber=True)
ins.write(prefix, output_type='data', sep='.', clobber=True)
with pytest.raises(ValueError):
ins.write(prefix, output_type='bad_label')
with pytest.raises(ValueError):
ins.write(prefix, output_type='flags')
new_ins = INS(data_outfile, mask_file=mask_outfile, match_events_file=match_outfile)
assert np.all(ins.metric_array == new_ins.metric_array), "Elements of the metric array were not equal"
assert np.all(ins.weights_array == new_ins.weights_array), "Elements of the weights array were not equal"
assert np.all(ins.metric_array.mask == new_ins.metric_array.mask), "Elements of the mask were not equal"
assert np.all(ins.metric_ms == new_ins.metric_ms), "Elements of the metric_ms were not equal"
assert np.all(ins.match_events == new_ins.match_events), "Elements of the match_events were not equal"
assert os.path.exists(sep_data_outfile), "sep_data_outfile was not written"
assert os.path.exists(z_score_outfile)
def test_write_mwaf(tmp_path, tv_obs, tv_ins_testfile):
from astropy.io import fits
prefix = os.path.join(tmp_path, f'{tv_obs}_SSINS_test')
ins = INS(tv_ins_testfile)
mwaf_files = [os.path.join(DATA_PATH, '1061313128_12.mwaf')]
bad_mwaf_files = [os.path.join(DATA_PATH, 'bad_file_path')]
metafits_file = os.path.join(DATA_PATH, '1061313128.metafits')
# Compatible shape with mwaf file
ins.metric_array = np.ma.ones([55, 384, 1])
ins.metric_array[50, 16 * 12: int(16 * (12 + 0.5))] = np.ma.masked
# metadata from the input file
NCHANS = 32
Nbls = 8256
NSCANS = 224
# hard code the answer
new_flags = np.zeros((NSCANS * Nbls, NCHANS), dtype=bool)
new_flags[Nbls * 200:Nbls * 208, :16] = 1
# Test some defensive errors
with pytest.raises(IOError):
ins.write(prefix, output_type='mwaf', mwaf_files=bad_mwaf_files,
metafits_file=metafits_file)
with pytest.raises(ValueError):
ins.write(prefix, output_type='mwaf', mwaf_files=mwaf_files,
mwaf_method='bad_method', metafits_file=metafits_file)
with pytest.raises(ValueError):
ins.write(prefix, output_type='mwaf', mwaf_files=None,
metafits_file=metafits_file)
with pytest.raises(ValueError):
ins.write(prefix, output_type='mwaf', mwaf_files=mwaf_files)
ins.write(f'{prefix}_add', output_type='mwaf', mwaf_files=mwaf_files,
metafits_file=metafits_file)
ins.write(f'{prefix}_replace', output_type='mwaf', mwaf_files=mwaf_files,
mwaf_method='replace', metafits_file=metafits_file)
with fits.open(mwaf_files[0]) as old_mwaf_hdu:
with fits.open(f'{prefix}_add_12.mwaf') as add_mwaf_hdu:
assert np.all(add_mwaf_hdu[1].data['FLAGS'] == old_mwaf_hdu[1].data['FLAGS'] + new_flags)
with fits.open(f'{prefix}_replace_12.mwaf') as replace_mwaf_hdu:
assert np.all(replace_mwaf_hdu[1].data['FLAGS'] == new_flags)
def test_select(tv_ins_testfile):
ins = INS(tv_ins_testfile)
ins.metric_array.mask[7, :12] = True
new_ins = ins.select(times=ins.time_array[3:-3], freq_chans=np.arange(24),
inplace=False)
Ntimes = len(ins.time_array)
ins.select(times=ins.time_array[3:-3], freq_chans=np.arange(24))
assert ins.metric_array.shape[0] == Ntimes - 6
assert ins.metric_array.shape[1] == 24
for param in ins._data_params:
assert getattr(ins, param).shape == ins.metric_array.shape
# Check that the mask is propagated
assert np.all(ins.metric_array.mask[4, :12])
assert np.count_nonzero(ins.metric_array.mask) == 12
# Check that new_ins is a copy of ins
assert new_ins == ins
def test_data_params(tv_ins_testfile):
ins = INS(tv_ins_testfile)
test_params = ['metric_array', 'weights_array', 'weights_square_array',
'metric_ms', 'sig_array']
assert ins._data_params == test_params
def test_spectrum_type_file_init(cross_testfile, tv_ins_testfile):
auto_obs = "1061312640_mix_auto_SSINS_data"
auto_testfile = os.path.join(DATA_PATH, f"{auto_obs}.h5")
ins = INS(tv_ins_testfile)
assert ins.spectrum_type == "cross"
with pytest.raises(ValueError, match="Requested spectrum type disagrees with saved spectrum. "):
ins = INS(auto_testfile, spectrum_type="cross")
with pytest.raises(ValueError, match="Requested spectrum type disagrees with saved spectrum. "):
ins = INS(cross_testfile, spectrum_type="auto")
del ins
ins = INS(cross_testfile) # I think this line just gets coverage?
del ins
ins = INS(auto_testfile, spectrum_type="auto")
def test_old_file():
old_ins_file = os.path.join(DATA_PATH, "1061313128_99bl_1pol_half_time_old_SSINS.h5")
try:
# this works with pyuvdata>=3.0
with pytest.raises(
ValueError, match="Required UVParameter _Nants has not been set."
):
ins = INS(old_ins_file)
except AssertionError:
# this works with pyuvdata<3.0
with pytest.raises(
ValueError, match="Required UVParameter _antenna_names has not been set."
):
ins = INS(old_ins_file)
with pytest.raises(ValueError,
match="spectrum_type is set to auto, but file input is a cross spectrum from an old file."):
ins = INS(old_ins_file, telescope_name="mwa", spectrum_type="auto")
# Just check that it reads
ins = INS(old_ins_file, telescope_name="mwa")
@pytest.mark.filterwarnings("ignore:Reordering", "ignore:SS.read")
def test_spectrum_type_bl_init(tv_testfile):
ss = SS()
ss.read(tv_testfile, diff=True)
ins = INS(ss)
assert "Initialized spectrum_type:cross from visibility data." in ins.history
with pytest.raises(ValueError, match="Requested spectrum type is 'auto', but no autos exist."):
ins = INS(ss, spectrum_type="auto")
def test_spectrum_type_bad_input(tv_ins_testfile):
with pytest.raises(ValueError, match="Requested spectrum_type is invalid."):
ins = INS(tv_ins_testfile, spectrum_type="foo")
@pytest.mark.filterwarnings("ignore:Reordering", "ignore:SS.read")
def test_no_cross_auto_spectrum():
obs = "1061312640_autos"
testfile = os.path.join(DATA_PATH, f'{obs}.uvfits')
ss = SS()
ss.read(testfile, diff=True)
with pytest.raises(ValueError, match="Requested spectrum type is 'cross', but no cross"):
ins = INS(ss)
@pytest.mark.filterwarnings("ignore:Reordering", "ignore:SS.read")
def test_mix_spectrum(mix_file):
ss = SS()
ss.read(mix_file, diff=True)
with pytest.warns(UserWarning, match="Requested spectrum type is 'cross'. Removing autos before averaging."):
ins = INS(ss)
with pytest.warns(UserWarning, match="Requested spectrum type is 'auto'. Removing"):
ins = INS(ss, spectrum_type="auto")
# Hack polarization array to check error
ss.polarization_array[0] = 1
with pytest.raises(ValueError, match="SS input has pseudo-Stokes data. SSINS does not"):
ins = INS(ss, spectrum_type="auto")
@pytest.mark.filterwarnings("ignore:Reordering", "ignore:SS.read", "ignore:invalid value")
def test_use_integration_weights(tv_testfile):
ss = SS()
ss.read(tv_testfile, flag_choice='original', diff=True)
ins = INS(ss, use_integration_weights=True)
# These will not be equal if weights are not binary to begin with
# The accuracy of return_weights_square is already checked in pyuvdata
assert not np.all(ins.weights_array == ins.weights_square_array)
def test_add(tv_ins_testfile):
truth_ins = INS(tv_ins_testfile)
first_ins = INS(tv_ins_testfile)
first_ins.select(freq_chans=np.arange(192))
second_ins = INS(tv_ins_testfile)
second_ins.select(freq_chans=np.arange(192, 384))
combo_ins = first_ins.__add__(second_ins, axis='frequency')
first_ins.__add__(second_ins, axis='frequency', inplace=True)
# Check consistency
assert np.all(combo_ins.metric_array.data == first_ins.metric_array.data)
assert np.all(combo_ins.metric_array.mask == first_ins.metric_array.mask)
assert np.all(combo_ins.metric_array.data == truth_ins.metric_array.data)
assert np.all(combo_ins.metric_array.mask == truth_ins.metric_array.mask)
def test_read_from_instance(cross_testfile):
ins = INS(cross_testfile)
with pytest.raises(NotImplementedError, match="SSINS does not currently support "):
ins.read(cross_testfile)
def test_set_weights_square_array(cross_testfile):
ins = INS(cross_testfile)
copy_ins = ins.copy()
ins.weights_square_array = None
ins.set_ins_data_params()
assert ins.weights_square_array is not None
assert np.array_equal(ins.metric_ms, copy_ins.metric_ms) # check that the goods are intact
|
mwilensky768REPO_NAMESSINSPATH_START.@SSINS_extracted@SSINS-master@SSINS@tests@[email protected]_END.py
|
{
"filename": "fibonacci.py",
"repo_name": "carronj/lenspyx",
"repo_path": "lenspyx_extracted/lenspyx-master/lenspyx/tests/fibonacci.py",
"type": "Python"
}
|
import numpy as np
from lenspyx.utils_hp import synalm
from lenspyx.utils import timer
from lenspyx.tests.helper import cls_unl
from ducc0.sht.experimental import synthesis_general
def syn_fibo(N:int, lmax:int, nthreads=4):
"""Number of points is P = 2N + 1"""
npix = 2 * N + 1
Psi = (1 + np.sqrt(5.))/2
tim = timer('fibo', False)
i = np.arange(-N, N+1, dtype=int)
loc = np.empty((npix, 2), dtype=float)
loc[:, 0] = np.arcsin(i / (N + 0.5)) + 0.5 * np.pi
loc[:, 1] = ((2 * np.pi / Psi) * i)%(2 * np.pi)
del i
tim.add('%.5f Mpix'%(npix/1e6))
alm = np.atleast_2d(synalm(cls_unl['tt'][:lmax + 1], lmax, lmax))
tim.add('synalm lmax %s'%lmax)
m = synthesis_general(alm=alm, spin=0, lmax=lmax, mmax=lmax, loc=loc, nthreads=nthreads)
tim.add('spin 0 synthesis_general')
print(tim)
return m, alm
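# Editor's sketch (not part of the original file): given the conventions above,
# the defining property of this lattice is that z = cos(colatitude) is uniformly
# spaced, since cos(arcsin(t) + pi/2) = -t, i.e. z = -i / (N + 0.5). N below is
# an illustrative choice.
def check_fibo_z_spacing(N=50):
    i = np.arange(-N, N + 1, dtype=int)
    theta = np.arcsin(i / (N + 0.5)) + 0.5 * np.pi
    z = np.cos(theta)
    assert np.allclose(np.diff(z), -1.0 / (N + 0.5))
    return z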
if __name__ == '__main__':
m = syn_fibo(10, 10)
|
carronjREPO_NAMElenspyxPATH_START.@lenspyx_extracted@lenspyx-master@lenspyx@[email protected]@.PATH_END.py
|
{
"filename": "test_atomic_remote.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/atomic/tests/test_atomic_remote.py",
"type": "Python"
}
|
import numpy as np
import pytest
from bs4 import BeautifulSoup
from astropy import units as u
from astropy.table import Table
from ...atomic import AtomicLineList
pytestmark = pytest.mark.remote_data
def test_default_form_values():
default_response = AtomicLineList._request(
method="GET", url=AtomicLineList.FORM_URL,
data={}, timeout=AtomicLineList.TIMEOUT)
bs = BeautifulSoup(default_response.text, 'html5lib')
form = bs.find('form')
default_form_values = AtomicLineList._get_default_form_values(form)
assert default_form_values == {
'air': u'Vacuum',
'auto': u'Suppress',
'ener': u'cm^-1',
'form': [u'spec', u'type', u'term', u'angm', u'ener'],
'hydr': u'Suppress',
'jval': u'usej',
'mode': u'Plain',
'type': u'All',
'wave': u'Angstrom'}
def test_query_with_default_params():
table = AtomicLineList.query_object(cache=False)
assert isinstance(table, Table)
assert len(table) == 500
assert str(table[:5]) == '''
LAMBDA VAC ANG SPECTRUM TT CONFIGURATION TERM J J A_ki LEVEL ENERGY CM 1
-------------- -------- --- ------------- ---- ----- -------- ------------------
1.010799 Zn XXX E1 1*-10* 1-10 1/2-* 1.02E+11 0.00 - 98933890.00
1.013182 Zn XXX E1 1*-9* 1-9 1/2-* 1.74E+11 0.00 - 98701900.00
1.016534 Zn XXX E1 1*-8* 1-8 1/2-* 3.14E+11 0.00 - 98377600.00
1.02146 Zn XXX E1 1*-7* 1-7 1/2-* 6.13E+11 0.00 - 97904300.00
1.02916 Zn XXX E1 1*-6* 1-6 1/2-* 1.33E+12 0.00 - 97174700.00'''.strip()
def test_query_with_wavelength_params():
result = AtomicLineList.query_object(
wavelength_range=(15 * u.nm, 200 * u.Angstrom),
wavelength_type='Air',
wavelength_accuracy=20,
element_spectrum='C II-IV',
cache=False)
assert isinstance(result, Table)
assert result.colnames == ['LAMBDA VAC ANG', 'SPECTRUM', 'TT',
'CONFIGURATION', 'TERM', 'J J', 'A_ki',
'LEVEL ENERGY CM 1']
assert np.all(result['LAMBDA VAC ANG']
== np.array([196.8874, 197.7992, 199.0122]))
assert np.all(result['SPECTRUM'] == np.array(['C IV', 'C IV', 'C IV']))
assert np.all(result['TT'] == np.array(['E1', 'E1', 'E1']))
assert np.all(result['TERM'] == np.array(['2S-2Po', '2S-2Po', '2S-2Po']))
assert np.all(result['J J'] == np.array(['1/2-*', '1/2-*', '1/2-*']))
assert np.all(result['LEVEL ENERGY CM 1']
== np.array(['0.00 - 507904.40', '0.00 - 505563.30',
'0.00 - 502481.80']))
def test_empty_result_set():
result = AtomicLineList.query_object(wavelength_accuracy=0, cache=False)
assert isinstance(result, Table)
assert not result
assert len(result) == 0
def test_lower_upper_ranges():
result = AtomicLineList.query_object(
lower_level_energy_range=u.Quantity((600 * u.cm**(-1), 1000 * u.cm**(-1))),
upper_level_energy_range=u.Quantity((15000 * u.cm**(-1), 100000 * u.cm**(-1))),
element_spectrum='Ne III', cache=False)
assert isinstance(result, Table)
assert np.all(result['LAMBDA VAC ANG']
== np.array([1814.73, 3968.91, 4013.14]))
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@atomic@tests@[email protected]_END.py
|
{
"filename": "test_hawc.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/gammapy/catalog/tests/test_hawc.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.utils.data import get_pkg_data_filename
from gammapy.catalog import SourceCatalog2HWC
from gammapy.catalog.hawc import SourceCatalog3HWC
from gammapy.modeling.models import (
DiskSpatialModel,
PointSpatialModel,
PowerLawSpectralModel,
)
from gammapy.utils.gauss import Gauss2DPDF
from gammapy.utils.testing import requires_data
@pytest.fixture(scope="session")
def cat():
return SourceCatalog2HWC()
@requires_data()
class TestSourceCatalog2HWC:
@staticmethod
def test_source_table(cat):
assert cat.tag == "2hwc"
assert len(cat.table) == 40
@staticmethod
def test_positions(cat):
assert len(cat.positions) == 40
@staticmethod
def test_to_models(cat):
models = cat.to_models(which="point")
assert len(models) == 40
@requires_data()
class TestSourceCatalogObject2HWC:
@staticmethod
def test_data(cat):
assert cat[0].data["source_name"] == "2HWC J0534+220"
assert cat[0].n_models == 1
assert cat[1].data["source_name"] == "2HWC J0631+169"
assert cat[1].n_models == 2
@staticmethod
def test_str(cat):
expected = open(get_pkg_data_filename("data/2hwc_j0534+220.txt")).read()
assert str(cat[0]) == expected
expected = open(get_pkg_data_filename("data/2hwc_j0631+169.txt")).read()
assert str(cat[1]) == expected
@staticmethod
def test_position(cat):
position = cat[0].position
assert_allclose(position.ra.deg, 83.628, atol=1e-3)
assert_allclose(position.dec.deg, 22.024, atol=1e-3)
@staticmethod
def test_sky_model(cat):
model = cat[1].sky_model("extended")
assert model.name == "2HWC J0631+169"
assert isinstance(model.spectral_model, PowerLawSpectralModel)
assert isinstance(model.spatial_model, DiskSpatialModel)
with pytest.raises(ValueError):
cat[0].sky_model("extended")
@staticmethod
def test_spectral_model(cat):
m = cat[0].spectral_model()
dnde, dnde_err = m.evaluate_error(1 * u.TeV)
assert dnde.unit == "cm-2 s-1 TeV-1"
assert_allclose(dnde.value, 2.802365e-11, rtol=1e-3)
assert_allclose(dnde_err.value, 6.537506e-13, rtol=1e-3)
@staticmethod
def test_spatial_model(cat):
m = cat[1].spatial_model()
assert isinstance(m, PointSpatialModel)
assert m.lon_0.unit == "deg"
assert_allclose(m.lon_0.value, 195.614, atol=1e-2)
assert_allclose(m.lon_0.error, 0.114, atol=1e-2)
assert m.lat_0.unit == "deg"
assert_allclose(m.lat_0.value, 3.507, atol=1e-2)
assert m.frame == "galactic"
m = cat[1].spatial_model("extended")
assert isinstance(m, DiskSpatialModel)
assert m.lon_0.unit == "deg"
assert_allclose(m.lon_0.value, 195.614, atol=1e-10)
assert m.lat_0.unit == "deg"
assert_allclose(m.lat_0.value, 3.507, atol=1e-10)
assert m.frame == "galactic"
assert m.r_0.unit == "deg"
assert_allclose(m.r_0.value, 2.0, atol=1e-3)
model = cat["2HWC J0534+220"].spatial_model()
pos_err = model.position_error
scale_r95 = Gauss2DPDF().containment_radius(0.95)
assert_allclose(pos_err.height.value, 2 * 0.057 * scale_r95, rtol=1e-4)
assert_allclose(pos_err.width.value, 2 * 0.057 * scale_r95, rtol=1e-4)
assert_allclose(model.position.l.value, pos_err.center.l.value)
assert_allclose(model.position.b.value, pos_err.center.b.value)
@pytest.fixture(scope="session")
def ca_3hwc():
return SourceCatalog3HWC()
@requires_data()
class TestSourceCatalog3HWC:
@staticmethod
def test_source_table(ca_3hwc):
assert ca_3hwc.tag == "3hwc"
assert len(ca_3hwc.table) == 65
@staticmethod
def test_positions(ca_3hwc):
assert len(ca_3hwc.positions) == 65
@staticmethod
def test_to_models(ca_3hwc):
models = ca_3hwc.to_models()
assert len(models) == 65
@requires_data()
class TestSourceCatalogObject3HWC:
@staticmethod
def test_data(ca_3hwc):
assert ca_3hwc[0].data["source_name"] == "3HWC J0534+220"
assert ca_3hwc[0].n_models == 1
ca_3hwc[0].info()
assert ca_3hwc[1].data["source_name"] == "3HWC J0540+228"
assert ca_3hwc[1].n_models == 1
@staticmethod
def test_sky_model(ca_3hwc):
model = ca_3hwc[4].sky_model()
assert model.name == "3HWC J0621+382"
assert isinstance(model.spectral_model, PowerLawSpectralModel)
assert isinstance(model.spatial_model, DiskSpatialModel)
@staticmethod
def test_spectral_model(ca_3hwc):
m = ca_3hwc[1].spectral_model()
assert_allclose(m.amplitude.value, 4.7558e-15, atol=1e-3)
assert_allclose(m.amplitude.error, 7.411645e-16, atol=1e-3)
assert_allclose(m.index.value, 2.8396, atol=1e-3)
assert_allclose(m.index.error, 0.1425, atol=1e-3)
m = ca_3hwc[0].spectral_model()
dnde, dnde_err = m.evaluate_error(7 * u.TeV)
assert dnde.unit == "cm-2 s-1 TeV-1"
assert_allclose(dnde.value, 2.34e-13, rtol=1e-2)
assert_allclose(dnde_err.value, 1.4e-15, rtol=1e-1)
@staticmethod
def test_spatial_model(ca_3hwc):
m = ca_3hwc[1].spatial_model()
assert isinstance(m, PointSpatialModel)
assert m.lon_0.unit == "deg"
assert_allclose(m.lon_0.value, 184.583, atol=1e-2)
assert_allclose(m.lon_0.error, 0.112, atol=1e-2)
assert m.lat_0.unit == "deg"
assert_allclose(m.lat_0.value, -4.129, atol=1e-2)
assert m.frame == "galactic"
m = ca_3hwc["3HWC J0621+382"].spatial_model()
assert isinstance(m, DiskSpatialModel)
assert m.lon_0.unit == "deg"
assert_allclose(m.lon_0.value, 175.444254, atol=1e-10)
assert m.lat_0.unit == "deg"
assert_allclose(m.lat_0.value, 10.966142, atol=1e-10)
assert m.frame == "galactic"
assert m.r_0.unit == "deg"
assert_allclose(m.r_0.value, 0.5, atol=1e-3)
model = ca_3hwc["3HWC J0621+382"].spatial_model()
pos_err = model.position_error
scale_r95 = Gauss2DPDF().containment_radius(0.95)
assert_allclose(pos_err.height.value, 2 * 0.3005 * scale_r95, rtol=1e-3)
assert_allclose(pos_err.width.value, 2 * 0.3005 * scale_r95, rtol=1e-3)
assert_allclose(model.position.l.value, pos_err.center.l.value)
assert_allclose(model.position.b.value, pos_err.center.b.value)
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@gammapy@catalog@tests@[email protected]_END.py
|
{
"filename": "PN-MOmega.ipynb",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/NRPyPN/PN-MOmega.ipynb",
"type": "Jupyter Notebook"
}
|
# $M\Omega$, the orbital angular velocity, up to and including 3.5 PN order
## Author: Zach Etienne
## This notebook uses NRPy to construct the orbital angular velocity up to and including 3.5 post-Newtonian order.
**Notebook Status:** <font color='green'><b> Validated </b></font>
**Validation Notes:** All expressions in this notebook were transcribed twice by hand on separate occasions, and expressions were corrected as needed to ensure consistency with published work. Published work was cross-validated and typo(s) in published work were corrected. In addition, all expressions in this notebook were validated against those in the Mathematica notebook used by [Ramos-Buades, Husa, and Pratten (2018)](https://arxiv.org/abs/1810.00036) (thanks to Toni Ramos-Buades for sharing this!) Finally, this tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). **Additional validation tests may have been performed, but are as yet, undocumented.**
### This notebook exists as the following Python module:
1. [PN_MOmega.py](../../edit/NRPyPN/PN_MOmega.py)
### This notebook & corresponding Python module depend on the following NRPy+/NRPyPN Python modules:
1. [indexedexp.py](../../edit/indexedexp.py): [**documentation+tutorial**](../Tutorial-Indexed_Expressions.ipynb)
1. [NRPyPN_shortcuts.py](../../edit/NRPyPN/NRPyPN_shortcuts.py): [**documentation**](NRPyPN_shortcuts.ipynb)
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
1. Part 1: [$M\Omega$](#momega), up to and including 3.5PN order, as derived in [Ramos-Buades, Husa, and Pratten (2018)](https://arxiv.org/abs/1810.00036)
1. Part 2: [Validation against second transcription and corresponding Python module](#code_validation)
1. Part 3: [Validation against trusted numerical values](#code_validationv2) (i.e., in Table V of [Ramos-Buades, Husa, and Pratten (2018)](https://arxiv.org/abs/1810.00036))
1. Part 4: [LaTeX PDF output](#latex_pdf_output): $\LaTeX$ PDF Output
<a id='momega'></a>
# Part 1: $M\Omega$, up to and including 3.5PN order, as derived in [Ramos-Buades, Husa, and Pratten (2018)](https://arxiv.org/abs/1810.00036) \[Back to [top](#toc)\]
$$\label{momega}$$
As described in the [nonspinning Hamiltonian notebook](PN-Hamiltonian-Nonspinning.ipynb), the basic physical system assumes two point particles of mass $m_1$ and $m_2$ with corresponding momentum vectors $\mathbf{P}_1$ and $\mathbf{P}_2$, and displacement vectors $\mathbf{X}_1$ and $\mathbf{X}_2$ with respect to the center of mass. Here we also consider the spin vectors of each point mass $\mathbf{S}_1$ and $\mathbf{S}_2$, respectively.
To reduce the possibility of copying error, the equation for $M\Omega$ is taken directly from the arXiv LaTeX source code of Eq A1 in [Ramos-Buades, Husa, and Pratten (2018)](https://arxiv.org/abs/1810.00036), and only mildly formatted to (1) improve presentation in Jupyter notebooks, (2) to ensure some degree of consistency in notation across different terms in other NRPyPN notebooks, and (3) to correct any errors. In particular, the boxed negative sign at 2.5PN order ($a_5$ below) was missing in the original equation. We will later show that this negative sign is necessary for consistency with the expression up to 3PN order in [Healy, Lousto, Nakano, and Zlochower (2017)](https://arxiv.org/abs/1702.00872):
$$
M\Omega = \frac{1}{r^{3/2}}\left(1 + \sum_{k=2}^7 \frac{a_k}{r^{k/2}}\right),
$$
where all terms in boxes should be replaced by 1:
\begin{align}
a_2 &= -\left[ \frac{ \left(3 q^2+5 q+3\right)}{2 (q+1)^2}\right] \\
a_3 &= -\frac{(3 q+4) \chi _{1z}}{4 (q+1)^2 }- \frac{q (4 q+3) \chi _{2z}}{4 (q+1)^2 } \\
a_4 &= -\frac{3 q^2 \chi _{2x}^2}{2 (q+1)^2}+ \frac{3 q^2 \chi _{2y}^2}{4 (q+1)^2 }
+\frac{3 q^2 \chi _{2z}^2}{4 (q+1)^2 }+\frac{24 q^4+103 q^3+164 q^2+103 q+24}{16 (q+1)^4 } \\
&\quad -\frac{3 \chi _{1x}^2}{2 (q+1)^2 }-\frac{3 q \chi _{1x}\chi _{2x}}{(q+1)^2 }+\frac{3 \chi _{1y}^2}{4 (q+1)^2 }+\frac{3 q \chi _{1y} \chi _{2y}}{2 (q+1)^2 }+\frac{3 \chi _{1z}^2}{4 (q+1)^2 }+\frac{3 q \chi _{1z} \chi _{2z}}{2 (q+1)^2} \\
a_5 &= \frac{3 \left(13 q^3+34 q^2+30 q+16\right) \chi _{1z}}{16 (q+1)^4}+ \frac{3 q \left(16 q^3+30 q^2+34 q+13\right) \chi _{2z}}{16 (q+1)^4 }\\
a_6 &= \frac{\left(155 q^2+180 q+76\right) \chi _{1x}^2}{16 (q+1)^4 \boxed{r^3}}+\frac{q \left(120 q^2+187 q+120\right) \chi _{1x} \chi _{2x}}{8 (q+1)^4 \boxed{r^3}}-\frac{\left(55 q^2+85 q+43\right) \chi _{1y}^2}{8 (q+1)^4 \boxed{r^3}} \\
& -\frac{q \left(54 q^2+95 q+54\right) \chi _{1y} \chi _{2y}}{4 (q+1)^4 \boxed{r^3}}-\frac{q \left(96 q^2+127 q+96\right) \chi _{1z} \chi _{2z}}{16 (q+1)^4 \boxed{r^3}}+\frac{q^2 \left(76 q^2+180 q+155\right) \chi _{2x}^2}{16 (q+1)^4 \boxed{r^3}} \\
& -\frac{q^2 \left(43 q^2+85 q+55\right) \chi _{2y}^2}{8 (q+1)^4 \boxed{r^3}}-\frac{q^2 (2 q+5) (14 q+27) \chi _{2z}^2}{32 (q+1)^4 \boxed{r^3}} -\frac{(5 q+2) (27 q+14) \chi _{1z}^2}{32 (q+1)^4 \boxed{r^3}} \\
& +\frac{501 \pi ^2 q (q+1)^4-4 \left(120 q^6+2744 q^5+10049 q^4+14820 q^3+10049 q^2+2744 q+120\right)}{384 (q+1)^6 \boxed{r^3}} \\
a_7 &= \frac{3 (4 q+1) q^3 \chi _{2 x}^2 \chi _{2 z}}{2 (q+1)^4}-\frac{3 (4 q+1) q^3 \chi _{2 y}^2 \chi _{2 z}}{8 (q+1)^4}-\frac{3 (4 q+1) q^3 \chi _{2 z}^3}{8 (q+1)^4}+\chi _{1x} \left(\frac{9 (2 q+1) q^2 \chi _{2 x} \chi _{2 z}}{4 (q+1)^4}+\frac{9 (q+2) q \chi _{2 x} \chi _{\boxed{?}z}}{4 (q+1)^4}\right) \\
& +\chi _{1y} \left(\frac{9 q^2 \chi _{2 y} \chi _{1z}}{4 (q+1)^4}+\frac{9 q^2 \chi _{2 y} \chi _{2 z}}{4 (q+1)^4}\right) \\
& +\chi _{1z} \left(\frac{9 q^2 (2 q+3) \chi _{2 x}^2}{4 (q+1)^4}-\frac{9 q^2 (q+2) \chi _{2 y}^2}{4 (q+1)^4}-\frac{9 q^2 \chi _{2 z}^2}{4 (q+1)^3}-\frac{135 q^5+385 q^4+363 q^3+377 q^2+387 q+168}{32 (q+1)^6}\right) \\
& -\frac{\left(168 q^5+387 q^4+377 q^3+363 q^2+385 q+135\right) q \chi _{2 z}}{32 (q+1)^6}+\chi _{1x}^2 \left(\frac{3 (q+4) \chi _{1z}}{2 (q+1)^4}+\frac{9 q (3 q+2) \chi _{2 z}}{4 (q+1)^4}\right)\\
&+\chi _{1y}^2 \left(-\frac{3 (q+4) \chi _{1z}}{8 (q+1)^4}-\frac{9 q (2 q+1) \chi _{2 z}}{4 (q+1)^4}\right)-\frac{9 q \chi _{1z}^2 \chi _{2 z}}{4 (q+1)^3}-\frac{3 (q+4) \chi _{1z}^3}{8 (q+1)^4},
\end{align}
Let's divide and conquer, tackling the coefficients one at a time:
\begin{align}
a_2 &= -\left[ \frac{ \left(3 q^2+5 q+3\right)}{2 (q+1)^2}\right] \\
a_3 &= -\frac{(3 q+4) \chi _{1z}}{4 (q+1)^2 }- \frac{q (4 q+3) \chi _{2z}}{4 (q+1)^2 } \\
a_4 &= -\frac{3 q^2 \chi _{2x}^2}{2 (q+1)^2}+ \frac{3 q^2 \chi _{2y}^2}{4 (q+1)^2 }
+\frac{3 q^2 \chi _{2z}^2}{4 (q+1)^2 }+\frac{24 q^4+103 q^3+164 q^2+103 q+24}{16 (q+1)^4 } \\
&\quad -\frac{3 \chi _{1x}^2}{2 (q+1)^2 }-\frac{3 q \chi _{1x}\chi _{2x}}{(q+1)^2 }+\frac{3 \chi _{1y}^2}{4 (q+1)^2 }+\frac{3 q \chi _{1y} \chi _{2y}}{2 (q+1)^2 }+\frac{3 \chi _{1z}^2}{4 (q+1)^2 }+\frac{3 q \chi _{1z} \chi _{2z}}{2 (q+1)^2}
\end{align}
```python
# Step 0: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import indexedexpNRPyPN as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
from NRPyPN_shortcuts import div # NRPyPN: shortcuts for e.g., vector operations
# Step 1: Construct terms a_2, a_3, and a_4, from
# Eq A2 of Ramos-Buades, Husa, and Pratten (2018)
# https://arxiv.org/abs/1810.00036
# These terms have been independently validated
# against the same terms in Eq 6 of
# Healy, Lousto, Nakano, and Zlochower (2017)
# https://arxiv.org/abs/1702.00872
def MOmega__a_2_thru_a_4(m1,m2, chi1x,chi1y,chi1z, chi2x,chi2y,chi2z):
q = m2/m1 # It is assumed that q >= 1, so m2 >= m1.
global a_2,a_3,a_4
a_2 = -((3*q**2+5*q+3)/(2*(q+1)**2))
a_3 = (-(3*q+4)*chi1z/(4*(q+1)**2) - q*(4*q+3)*chi2z/(4*(q+1)**2))
a_4 = (-3*q**2*chi2x**2/(2*(q+1)**2)
+3*q**2*chi2y**2/(4*(q+1)**2)
+3*q**2*chi2z**2/(4*(q+1)**2)
+(+24*q**4 + 103*q**3 + 164*q**2 + 103*q + 24)/(16*(q+1)**4)
-3*chi1x**2/(2*(q+1)**2)
-3*q*chi1x*chi2x/(q+1)**2
+3*chi1y**2/(4*(q+1)**2)
+3*q*chi1y*chi2y/(2*(q+1)**2)
+3*chi1z**2/(4*(q+1)**2)
+3*q*chi1z*chi2z/(2*(q+1)**2))
```
```python
# Second version, for validation purposes only.
def MOmega__a_2_thru_a_4v2(m1,m2, chi1x,chi1y,chi1z, chi2x,chi2y,chi2z):
q = m2/m1 # It is assumed that q >= 1, so m2 >= m1.
global a_2v2,a_3v2,a_4v2
a_2v2 = - (3*q**2+5*q+3)/(2*(q+1)**2)
a_3v2 = +(-(3*q+4)*chi1z/(4*(q+1)**2)
-q*(4*q+3)*chi2z/(4*(q+1)**2))
a_4v2 = +(-(3*q**2*chi2x**2)/(2*(q+1)**2) + 3*q**2*chi2y**2/(4*(q+1)**2) + 3*q**2*chi2z**2/(4*(q+1)**2)
+(24*q**4+103*q**3+164*q**2+103*q+24)/(16*(q+1)**4) - 3*chi1x**2/(2*(q+1)**2)
-3*q*chi1x*chi2x/(q+1)**2 + 3*chi1y**2/(4*(q+1)**2) + 3*q*chi1y*chi2y/(2*(q+1)**2)
+3*chi1z**2/(4*(q+1)**2) + 3*q*chi1z*chi2z/(2*(q+1)**2))
```
```python
# Third version, directly from Toni Ramos-Buades' Mathematica notebook (thanks Toni!)
def MOmega__a_2_thru_a_4v3(m1,m2, chi1x,chi1y,chi1z, chi2x,chi2y,chi2z):
q = m2/m1 # It is assumed that q >= 1, so m2 >= m1.
global a_2v3,a_3v3,a_4v3
a_2v3 = ( -(3 + 5*q + 3*q**2)/(2*(1 + q)**2) )
a_3v3 = ( (-4*chi1z - 3*chi1z*q - 3*chi2z*q - 4*chi2z*q**2)/(4*(1 + q)**2) )
a_4v3 = ( (-3*chi1x**2)/(2*(1 + q)**2) + (3*chi1y**2)/(4*(1 + q)**2) + (3*chi1z**2)/(4*(1 + q)**2) - (3*chi1x*chi2x*q)/(1 + q)**2 + (3*chi1y*chi2y*q)/(2*(1 + q)**2) + (3*chi1z*chi2z*q)/(2*(1 + q)**2) -
(3*chi2x**2*q**2)/(2*(1 + q)**2) + (3*chi2y**2*q**2)/(4*(1 + q)**2) + (3*chi2z**2*q**2)/(4*(1 + q)**2) + (24 + 103*q + 164*q**2 + 103*q**3 + 24*q**4)/(16*(1 + q)**4) )
```
Next, $a_5$ and $a_6$:
\begin{align}
a_5 &= \frac{3 \left(13 q^3+34 q^2+30 q+16\right) \chi _{1z}}{16 (q+1)^4}+ \frac{3 q \left(16 q^3+30 q^2+34 q+13\right) \chi _{2z}}{16 (q+1)^4 }\\
a_6 &= \frac{\left(155 q^2+180 q+76\right) \chi _{1x}^2}{16 (q+1)^4 \boxed{r^3}}+\frac{q \left(120 q^2+187 q+120\right) \chi _{1x} \chi _{2x}}{8 (q+1)^4 \boxed{r^3}}-\frac{\left(55 q^2+85 q+43\right) \chi _{1y}^2}{8 (q+1)^4 \boxed{r^3}} \\
& -\frac{q \left(54 q^2+95 q+54\right) \chi _{1y} \chi _{2y}}{4 (q+1)^4 \boxed{r^3}}-\frac{q \left(96 q^2+127 q+96\right) \chi _{1z} \chi _{2z}}{16 (q+1)^4 \boxed{r^3}}+\frac{q^2 \left(76 q^2+180 q+155\right) \chi _{2x}^2}{16 (q+1)^4 \boxed{r^3}} \\
& -\frac{q^2 \left(43 q^2+85 q+55\right) \chi _{2y}^2}{8 (q+1)^4 \boxed{r^3}}-\frac{q^2 (2 q+5) (14 q+27) \chi _{2z}^2}{32 (q+1)^4 \boxed{r^3}} -\frac{(5 q+2) (27 q+14) \chi _{1z}^2}{32 (q+1)^4 \boxed{r^3}} \\
& +\frac{501 \pi ^2 q (q+1)^4-4 \left(120 q^6+2744 q^5+10049 q^4+14820 q^3+10049 q^2+2744 q+120\right)}{384 (q+1)^6 \boxed{r^3}} \\
\end{align}
```python
# Construct terms a_5 and a_6, from
# Eq A1 of Ramos-Buades, Husa, and Pratten (2018)
# https://arxiv.org/abs/1810.00036
# These terms have been independently validated
# against the same terms in Eq 6 of
# Healy, Lousto, Nakano, and Zlochower (2017)
# https://arxiv.org/abs/1702.00872
def MOmega__a_5_thru_a_6(m1,m2, chi1x,chi1y,chi1z, chi2x,chi2y,chi2z):
q = m2/m1 # It is assumed that q >= 1, so m2 >= m1.
global a_5,a_6
a_5 = (+3* (13*q**3 + 34*q**2 + 30*q + 16)*chi1z/(16*(q+1)**4)
+3*q*(16*q**3 + 30*q**2 + 34*q + 13)*chi2z/(16*(q+1)**4))
a_6 = (+(+155*q**2 + 180*q + 76)*chi1x**2/(16*(q+1)**4)
+q*(+120*q**2 + 187*q + 120)*chi1x*chi2x/(8*(q+1)**4)
-(+55*q**2 + 85*q + 43)*chi1y**2/(8*(q+1)**4)
-q*(+54*q**2 + 95*q + 54)*chi1y*chi2y/( 4*(q+1)**4)
-q*(+96*q**2 +127*q + 96)*chi1z*chi2z/(16*(q+1)**4)
+q**2*(+76*q**2 + 180*q + 155)*chi2x**2/(16*(q+1)**4)
-q**2*(+43*q**2 + 85*q + 55)*chi2y**2/( 8*(q+1)**4)
-q**2*(+2*q+5)*(+14*q+27)*chi2z**2/(32*(q+1)**4)
- (+5*q+2)*(+27*q+14)*chi1z**2/(32*(q+1)**4)
+(+501*sp.pi**2*q*(q+1)**4
-4*(120*q**6 + 2744*q**5 + 10049*q**4 + 14820*q**3 + 10049*q**2 + 2744*q + 120))/(384*(q+1)**6))
```
```python
# Second version, for validation purposes only.
def MOmega__a_5_thru_a_6v2(m1,m2, chi1x,chi1y,chi1z, chi2x,chi2y,chi2z):
q = m2/m1 # It is assumed that q >= 1, so m2 >= m1.
pi = sp.pi
global a_5v2,a_6v2
a_5v2 = +(+3* (13*q**3+34*q**2+30*q+16)*chi1z/(16*(q+1)**4)
+3*q*(16*q**3+30*q**2+34*q+13)*chi2z/(16*(q+1)**4))
a_6v2 =+(+(155*q**2+180*q+76)*chi1x**2 /(16*(q+1)**4) + q*(120*q**2+187*q+120)*chi1x*chi2x/(8*(q+1)**4)
-( 55*q**2+ 85*q+43)*chi1y**2 /( 8*(q+1)**4) - q*( 54*q**2+ 95*q+ 54)*chi1y*chi2y/(4*(q+1)**4)
-q *(96*q**2+127*q+ 96)*chi1z*chi2z/(16*(q+1)**4)
+q**2*(76*q**2+180*q+155)*chi2x**2 /(16*(q+1)**4)
-q**2*(43*q**2+ 85*q+ 55)*chi2y**2 /( 8*(q+1)**4)
-q**2*(2*q+5)*(14*q+27) *chi2z**2 /(32*(q+1)**4)
- (5*q+2)*(27*q+14) *chi1z**2 /(32*(q+1)**4)
+(501*sp.pi**2*q*(q+1)**4 - 4*(120*q**6+2744*q**5+10049*q**4+14820*q**3+10049*q**2+2744*q+120))
/(384*(q+1)**6))
```
```python
# Third version, directly from Toni Ramos-Buades' Mathematica notebook (thanks Toni!)
def MOmega__a_5_thru_a_6v3(m1,m2, chi1x,chi1y,chi1z, chi2x,chi2y,chi2z):
q = m2/m1 # It is assumed that q >= 1, so m2 >= m1.
Pi = sp.pi
global a_5v3,a_6v3
a_5v3 = ( (3*(16*chi1z + 30*chi1z*q + 13*chi2z*q + 34*chi1z*q**2 + 34*chi2z*q**2 + 13*chi1z*q**3 + 30*chi2z*q**3 + 16*chi2z*q**4))/(16*(1 + q)**4) )
a_6v3 = ( (167*Pi**2*q)/(128.*(1 + q)**2) - (chi2z**2*q**2*(135 + 124*q + 28*q**2))/(32.*(1 + q)**4) - (chi2y**2*q**2*(55 + 85*q + 43*q**2))/(8.*(1 + q)**4) - (chi1y*chi2y*q*(54 + 95*q + 54*q**2))/(4.*(1 + q)**4) -
(chi1y**2*(43 + 85*q + 55*q**2))/(8.*(1 + q)**4) + (chi2x**2*q**2*(155 + 180*q + 76*q**2))/(16.*(1 + q)**4) - (chi1z*chi2z*q*(96 + 127*q + 96*q**2))/(16.*(1 + q)**4) +
(chi1x*chi2x*q*(120 + 187*q + 120*q**2))/(8.*(1 + q)**4) - (chi1z**2*(28 + 124*q + 135*q**2))/(32.*(1 + q)**4) + (chi1x**2*(76 + 180*q + 155*q**2))/(16.*(1 + q)**4) -
(120 + 2744*q + 10049*q**2 + 14820*q**3 + 10049*q**4 + 2744*q**5 + 120*q**6)/(96.*(1 + q)**6) )
```
Finally, $a_7$:
\begin{align}
a_7 &= \frac{3 (4 q+1) q^3 \chi _{2 x}^2 \chi _{2 z}}{2 (q+1)^4}-\frac{3 (4 q+1) q^3 \chi _{2 y}^2 \chi _{2 z}}{8 (q+1)^4}-\frac{3 (4 q+1) q^3 \chi _{2 z}^3}{8 (q+1)^4}+\chi _{1x} \left(\frac{9 (2 q+1) q^2 \chi _{2 x} \chi _{2 z}}{4 (q+1)^4}+\frac{9 (q+2) q \chi _{2 x} \chi _{\boxed{?}z}}{4 (q+1)^4}\right) \\
& +\chi _{1y} \left(\frac{9 q^2 \chi _{2 y} \chi _{1z}}{4 (q+1)^4}+\frac{9 q^2 \chi _{2 y} \chi _{2 z}}{4 (q+1)^4}\right) \\
& +\chi _{1z} \left(\frac{9 q^2 (2 q+3) \chi _{2 x}^2}{4 (q+1)^4}-\frac{9 q^2 (q+2) \chi _{2 y}^2}{4 (q+1)^4}-\frac{9 q^2 \chi _{2 z}^2}{4 (q+1)^3}-\frac{135 q^5+385 q^4+363 q^3+377 q^2+387 q+168}{32 (q+1)^6}\right) \\
& -\frac{\left(168 q^5+387 q^4+377 q^3+363 q^2+385 q+135\right) q \chi _{2 z}}{32 (q+1)^6}+\chi _{1x}^2 \left(\frac{3 (q+4) \chi _{1z}}{2 (q+1)^4}+\frac{9 q (3 q+2) \chi _{2 z}}{4 (q+1)^4}\right)\\
&+\chi _{1y}^2 \left(-\frac{3 (q+4) \chi _{1z}}{8 (q+1)^4}-\frac{9 q (2 q+1) \chi _{2 z}}{4 (q+1)^4}\right)-\frac{9 q \chi _{1z}^2 \chi _{2 z}}{4 (q+1)^3}-\frac{3 (q+4) \chi _{1z}^3}{8 (q+1)^4}
\end{align}
```python
# Construct term a_7, from Eq A1 of
# Ramos-Buades, Husa, and Pratten (2018)
# https://arxiv.org/abs/1810.00036
def MOmega__a_7(m1,m2, chi1x,chi1y,chi1z, chi2x,chi2y,chi2z):
q = m2/m1 # It is assumed that q >= 1, so m2 >= m1.
global a_7
a_7 = (+3*(4*q+1)*q**3*chi2x**2*chi2z/(2*(q+1)**4)
-3*(4*q+1)*q**3*chi2y**2*chi2z/(8*(q+1)**4)
-3*(4*q+1)*q**3*chi2z**3 /(8*(q+1)**4)
+chi1x*(+9*(2*q+1)*q**2*chi2x*chi2z/(4*(q+1)**4)
+9*(1*q+2)*q *chi2x*chi1z/(4*(q+1)**4))
+chi1y*(+9*q**2*chi2y*chi1z/(4*(q+1)**4)
+9*q**2*chi2y*chi2z/(4*(q+1)**4))
+chi1z*(+9*q**2*(2*q+3)*chi2x**2/(4*(q+1)**4)
-9*q**2*( q+2)*chi2y**2/(4*(q+1)**4)
-9*q**2 *chi2z**2/(4*(q+1)**3)
-(135*q**5 + 385*q**4 + 363*q**3 + 377*q**2 + 387*q + 168)/(32*(q+1)**6))
-(+168*q**5 + 387*q**4 + 377*q**3 + 363*q**2 + 385*q + 135)*q*chi2z/(32*(q+1)**6)
+chi1x**2*(+3*(q+4)*chi1z/(2*(q+1)**4)
+9*q*(3*q+2)*chi2z/(4*(q+1)**4))
+chi1y**2*(-3*(q+4)*chi1z/(8*(q+1)**4)
-9*q*(2*q+1)*chi2z/(4*(q+1)**4))
-9*q*chi1z**2*chi2z/(4*(q+1)**3)
-3*(q+4)*chi1z**3/(8*(q+1)**4))
```
```python
# Second version, for validation purposes only.
def MOmega__a_7v2(m1,m2, chi1x,chi1y,chi1z, chi2x,chi2y,chi2z):
q = m2/m1 # It is assumed that q >= 1, so m2 >= m1.
global a_7v2
a_7v2 = +(+(3*(4*q+1)*q**3*chi2x**2*chi2z)/(2*(q+1)**4)
-(3*(4*q+1)*q**3*chi2y**2*chi2z)/(8*(q+1)**4)
-(3*(4*q+1)*q**3*chi2z**3) /(8*(q+1)**4)
+chi1x*(+(9*(2*q+1)*q**2*chi2x*chi2z)/(4*(q+1)**4)
+(9*(1*q+2)*q *chi2x*chi1z)/(4*(q+1)**4))
+chi1y*(+(9*q**2*chi2y*chi1z)/(4*(q+1)**4)
+(9*q**2*chi2y*chi2z)/(4*(q+1)**4))
+chi1z*(+(9*q**2*(2*q+3)*chi2x**2)/(4*(q+1)**4)
-(9*q**2*(1*q+2)*chi2y**2)/(4*(q+1)**4)
-(9*q**2 *chi2z**2)/(4*(q+1)**3)
-(135*q**5+385*q**4+363*q**3+377*q**2+387*q+168)/(32*(q+1)**6))
-(168*q**5+387*q**4+377*q**3+363*q**2+385*q+135)*q*chi2z/(32*(q+1)**6)
+chi1x**2*(+3*(q+4)*chi1z/(2*(q+1)**4) + 9*q*(3*q+2)*chi2z/(4*(q+1)**4))
+chi1y**2*(-3*(q+4)*chi1z/(8*(q+1)**4) - 9*q*(2*q+1)*chi2z/(4*(q+1)**4))
-9*q*chi1z**2*chi2z/(4*(q+1)**3) - 3*(q+4)*chi1z**3/(8*(q+1)**4))
```
```python
# Third version, directly from Toni Ramos-Buades' Mathematica notebook (thanks Toni!)
def MOmega__a_7v3(m1,m2, chi1x,chi1y,chi1z, chi2x,chi2y,chi2z):
q = m2/m1 # It is assumed that q >= 1, so m2 >= m1.
global a_7v3
a_7v3 = ( (-3*(4 + q)*chi1z**3)/(8*(1 + q)**4) - (q*(135 + 385*q + 363*q**2 + 377*q**3 + 387*q**4 + 168*q**5)*chi2z)/(32*(1 + q)**6) - (9*q*chi1z**2*chi2z)/(4*(1 + q)**3) +
(3*q**3*(1 + 4*q)*chi2x**2*chi2z)/(2*(1 + q)**4) - (3*q**3*(1 + 4*q)*chi2y**2*chi2z)/(8*(1 + q)**4) - (3*q**3*(1 + 4*q)*chi2z**3)/(8*(1 + q)**4) +
chi1y**2*((-3*(4 + q)*chi1z)/(8*(1 + q)**4) - (9*q*(1 + 2*q)*chi2z)/(4*(1 + q)**4)) + chi1x**2*((3*(4 + q)*chi1z)/(2*(1 + q)**4) + (9*q*(2 + 3*q)*chi2z)/(4*(1 + q)**4)) +
chi1x*((9*q*(2 + q)*chi1z*chi2x)/(4*(1 + q)**4) + (9*q**2*(1 + 2*q)*chi2x*chi2z)/(4*(1 + q)**4)) + chi1y*((9*q**2*chi1z*chi2y)/(4*(1 + q)**4) + (9*q**2*chi2y*chi2z)/(4*(1 + q)**4)) +
chi1z*(-(168 + 387*q + 377*q**2 + 363*q**3 + 385*q**4 + 135*q**5)/(32*(1 + q)**6) + (9*q**2*(3 + 2*q)*chi2x**2)/(4*(1 + q)**4) - (9*q**2*(2 + q)*chi2y**2)/(4*(1 + q)**4) - (9*q**2*chi2z**2)/(4*(1 + q)**3)) )
```
Putting it all together, recall that
$$
M\Omega = \frac{1}{r^{3/2}}\left(1 + \sum_{k=2}^7 \frac{a_k}{r^{k/2}}\right),
$$
where $k/2$ is the post-Newtonian order.
```python
# Finally, sum the expressions for a_k to construct MOmega as prescribed:
# MOmega = 1/r^(3/2) * (1 + \sum_{k=2}^7 (a_k/r^{k/2}))
def f_MOmega(m1,m2, chi1U,chi2U, r):
a = ixp.zerorank1(DIM=10)
MOmega__a_2_thru_a_4(m1,m2, chi1U[0],chi1U[1],chi1U[2], chi2U[0],chi2U[1],chi2U[2])
a[2] = a_2
a[3] = a_3
a[4] = a_4
MOmega__a_5_thru_a_6(m1,m2, chi1U[0],chi1U[1],chi1U[2], chi2U[0],chi2U[1],chi2U[2])
a[5] = a_5
a[6] = a_6
MOmega__a_7( m1,m2, chi1U[0],chi1U[1],chi1U[2], chi2U[0],chi2U[1],chi2U[2])
a[7] = a_7
global MOmega
MOmega = 1 # Term prior to the sum in parentheses
for k in range(8):
MOmega += a[k]/r**div(k,2)
MOmega *= 1/r**div(3,2)
```
```python
# Second version, for validation purposes only.
def f_MOmegav2(m1,m2, chi1U,chi2U, r):
a = ixp.zerorank1(DIM=10)
MOmega__a_2_thru_a_4v2(m1,m2, chi1U[0],chi1U[1],chi1U[2], chi2U[0],chi2U[1],chi2U[2])
a[2] = a_2v2
a[3] = a_3v2
a[4] = a_4v2
MOmega__a_5_thru_a_6v2(m1,m2, chi1U[0],chi1U[1],chi1U[2], chi2U[0],chi2U[1],chi2U[2])
a[5] = a_5v2
a[6] = a_6v2
MOmega__a_7v2( m1,m2, chi1U[0],chi1U[1],chi1U[2], chi2U[0],chi2U[1],chi2U[2])
a[7] = a_7v2
global MOmegav2
MOmegav2 = 1 # Term prior to the sum in parentheses
for k in range(8):
MOmegav2 += a[k]/r**div(k,2)
MOmegav2 *= 1/r**div(3,2)
```
<a id='code_validation'></a>
# Part 2: Validation against second transcription and corresponding Python module \[Back to [top](#toc)\]
$$\label{code_validation}$$
As a code validation check, we verify agreement between
* the SymPy expressions transcribed from the cited published work on two separate occasions, and
* the SymPy expressions generated in this notebook, and the corresponding Python module.
```python
from NRPyPN_shortcuts import m1,m2, chi1U,chi2U, q # NRPyPN: Import needed input variables
import sys # Standard Python module; needed for sys.exit() in error() below
def error(varname):
    print("ERROR: When comparing Python module & notebook, "+varname+" was found not to match.")
    sys.exit(1)
MOmega__a_2_thru_a_4v2(m1,m2, chi1U[0],chi1U[1],chi1U[2], chi2U[0],chi2U[1],chi2U[2])
MOmega__a_2_thru_a_4v3(m1,m2, chi1U[0],chi1U[1],chi1U[2], chi2U[0],chi2U[1],chi2U[2])
if sp.simplify(a_2v2 - a_2v3) != 0: error("a_2v2")
if sp.simplify(a_3v2 - a_3v3) != 0: error("a_3v2")
if sp.simplify(a_4v2 - a_4v3) != 0: error("a_4v2")
MOmega__a_5_thru_a_6v2(m1,m2, chi1U[0],chi1U[1],chi1U[2], chi2U[0],chi2U[1],chi2U[2])
MOmega__a_5_thru_a_6v3(m1,m2, chi1U[0],chi1U[1],chi1U[2], chi2U[0],chi2U[1],chi2U[2])
if sp.simplify(a_5v2 - a_5v3) != 0: error("a_5v2")
if sp.simplify(a_6v2 - a_6v3) != 0: error("a_6v2")
MOmega__a_7v2(m1,m2, chi1U[0],chi1U[1],chi1U[2], chi2U[0],chi2U[1],chi2U[2])
MOmega__a_7v3(m1,m2, chi1U[0],chi1U[1],chi1U[2], chi2U[0],chi2U[1],chi2U[2])
if sp.simplify(a_7v2 - a_7v3) != 0: error("a_7v2")
f_MOmega(m1,m2, chi1U,chi2U, q)
# Validation against second transcription of the expressions:
f_MOmegav2(m1,m2, chi1U,chi2U, q)
if sp.simplify(MOmega - MOmegav2) != 0: error("MOmegav2")
# Validation against corresponding Python module:
import PN_MOmega as MOm
MOm.f_MOmega(m1,m2, chi1U,chi2U, q)
if sp.simplify(MOmega - MOm.MOmega) != 0: error("MOm.MOmega")
print("ALL TESTS PASS")
```
ALL TESTS PASS
<a id='latex_pdf_output'></a>
# Part 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[PN-MOmega.pdf](PN-MOmega.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```python
import os,sys # Standard Python modules for multiplatform OS-level functions
import cmdline_helperNRPyPN as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("PN-MOmega")
```
Created PN-MOmega.tex, and compiled LaTeX file to PDF file PN-MOmega.pdf
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@[email protected]@.PATH_END.py
|
{
"filename": "get_flux_mass_conv.py",
"repo_name": "sbetti22/ImSeg",
"repo_path": "ImSeg_extracted/ImSeg-master/get_flux_mass_conv.py",
"type": "Python"
}
|
## written by Sarah Betti 2019
## updated 5 November 2019
import numpy as np
from astropy import units as u
from astropy import constants as const
def get_flux_mass_conv(pcdist,T, wavelength=1100):
'''
PURPOSE
--------
Return the factor that converts a total flux in Jy into a mass in solar masses.
Parameters
--------
pcdist: float in pc
distance to object in pc
T: float in Kelvin
temperature of object
wavelength: float in microns
wavelength at which to find flux. Can only be [350, 450, 850, 1100] microns
default is 1100 microns (1.1 mm)
Returns
--------
m_XXum: float
conversion factor that converts the total flux in Jy to a mass in solar masses at XX microns
Raises
-------
ValueError if wavelength is not 350, 450, 850, or 1100
Examples
--------
m_350um = get_flux_mass_conv(800, 18, 350)
Comments
--------
written in IDL by Rob Gutermuth
converted to python by Sarah Betti 2019
'''
h = const.h.cgs.value # g cm^2 / s OR erg s
k = const.k_B.cgs.value # erg / K
c = const.c.cgs.value # cm /s
d = pcdist *3.0857e18 # cm
if wavelength == 350.:
### Ossenkopf & Henning 1994 #5 opacities; "OH5"
kappa_350um = 0.101 # cm^2 / g (includes gas-to-dust)
### Dunham et al. 2009 in prep.
snu_350um = 151.2*1.0*10**-23 # erg/cm^2/s/Hz
snu_350um_alt1 = 81.3*1.0*10**-23 # erg/cm^2/s/Hz
snu_350um_alt2 = 34.6*1.0*10**-23 # erg/cm^2/s/Hz
nu_350um = (2.99792458e14)/350. # Hz
planck_350um = (2.*h*(nu_350um**3.)/(c**2.))/((np.exp((h*nu_350um)/(k*T)))-1.)
m_350um = (((d**2.)*snu_350um)/(planck_350um*kappa_350um))/1.99e33
m_350um_alt1 = (((d**2.)*snu_350um_alt1)/(planck_350um*kappa_350um))/1.99e33
m_350um_alt2 = (((d**2.)*snu_350um_alt2)/(planck_350um*kappa_350um))/1.99e33
return m_350um
elif wavelength == 450.:
### Ossenkopf & Henning 1994 #5 opacities; "OH5"
kappa_450um = 0.0674 # cm^2 / g (includes gas-to-dust)
snu_450um = 1.0*10**-23 # erg/cm^2/s/Hz
nu_450um = (2.99792458e14)/450. # Hz
planck_450um = (2.*h*(nu_450um**3.)/(c**2.))/((np.exp((h*nu_450um)/(k*T)))-1.)
m_450um = (((d**2.)*snu_450um)/(planck_450um*kappa_450um))/1.99e33
return m_450um
elif wavelength == 850.:
### Ossenkopf & Henning 1994 #5 opacities; "OH5"
kappa_850um = 0.0114 # cm^2 / g (includes gas-to-dust) (OH*4*, 845um)
snu_850um = 1.0*10**-23 # erg/cm^2/s/Hz
nu_850um = (2.99792458e14)/850. # Hz
planck_850um = (2.*h*(nu_850um**3.)/(c**2.))/((np.exp((h*nu_850um)/(k*T)))-1.)
m_850um = (((d**2.)*snu_850um)/(planck_850um*kappa_850um))/1.99e33
return m_850um
elif wavelength == 1100.:
### Ossenkopf & Henning 1994 #5 opacities; "OH5"
#kappa_1100um = 0.0114 # cm^2 / g (includes gas-to-dust) (OH*4*, 845um)
kappa_1100um = 0.0121 # cm^2 / g (includes gas-to-dust) (close enough?)
#kappa_1100um = 0.0069 # cm^2 / g (includes gas-to-dust) (OH*4*, 1.1mm)
snu_1100um = 1.0*10**-23 # erg/cm^2/s/Hz
nu_1100um = (2.99792458e14)/1100.0 # Hz
planck_1100um = (2.*h*(nu_1100um**3.)/(c**2.))/((np.exp((h*nu_1100um)/(k*T)))-1.)
m_1100um = (((d**2.)*snu_1100um)/(planck_1100um*kappa_1100um))/1.99e33
return m_1100um
else:
raise ValueError('wavelength does not exist. Use 350, 450, 850, or 1100 microns instead.')
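# --- Minimal usage sketch (illustrative addition, not part of the original module) ---
# The factor returned above is the mass, in solar masses, implied by a total flux of
# 1 Jy at the requested wavelength, so multiplying it by a measured total flux in Jy
# gives a mass estimate. The distance (300 pc), temperature (20 K), and flux (0.5 Jy)
# used below are hypothetical example values.
if __name__ == '__main__':
    conv = get_flux_mass_conv(300., 20., wavelength=1100)  # Msun per Jy at 1.1 mm
    total_flux_Jy = 0.5                                     # hypothetical measured flux
    print('Mass estimate: %.2f Msun' % (total_flux_Jy * conv))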
|
sbetti22REPO_NAMEImSegPATH_START.@ImSeg_extracted@ImSeg-master@[email protected]_END.py
|
{
"filename": "run_node.py",
"repo_name": "simonsobs/nextline-rdb",
"repo_path": "nextline-rdb_extracted/nextline-rdb-main/src/nextline_rdb/schema/nodes/run_node.py",
"type": "Python"
}
|
import datetime
from typing import TYPE_CHECKING, Annotated, Optional, cast
import strawberry
from sqlalchemy import select
from strawberry.types import Info
from nextline_rdb.db import DB
from nextline_rdb.models import Prompt, Run, Stdout, Trace, TraceCall
from nextline_rdb.pagination import SortField
from ..pagination import Connection, load_connection
if TYPE_CHECKING:
from .prompt_node import PromptNode
from .stdout_node import StdoutNode
from .trace_call_node import TraceCallNode
from .trace_node import TraceNode
async def _resolve_traces(
info: Info,
root: 'RunNode',
before: Optional[str] = None,
after: Optional[str] = None,
first: Optional[int] = None,
last: Optional[int] = None,
) -> Connection['TraceNode']:
from .trace_node import TraceNode
sort = [SortField('trace_no')]
select_model = select(Trace).where(Trace.run_id == root._model.id)
db = cast(DB, info.context['db'])
async with db.session() as session:
return await load_connection(
session,
Trace,
create_node_from_model=TraceNode.from_model,
select_model=select_model,
sort=sort,
before=before,
after=after,
first=first,
last=last,
)
async def _resolve_trace_calls(
info: Info,
root: 'RunNode',
before: Optional[str] = None,
after: Optional[str] = None,
first: Optional[int] = None,
last: Optional[int] = None,
) -> Connection['TraceCallNode']:
from .trace_call_node import TraceCallNode
sort = [SortField('trace_call_no')]
select_model = select(TraceCall).where(TraceCall.run_id == root._model.id)
db = cast(DB, info.context['db'])
async with db.session() as session:
return await load_connection(
session,
TraceCall,
create_node_from_model=TraceCallNode.from_model,
select_model=select_model,
sort=sort,
before=before,
after=after,
first=first,
last=last,
)
async def _resolve_prompts(
info: Info,
root: 'RunNode',
before: Optional[str] = None,
after: Optional[str] = None,
first: Optional[int] = None,
last: Optional[int] = None,
) -> Connection['PromptNode']:
from .prompt_node import PromptNode
sort = [SortField('prompt_no')]
select_model = select(Prompt).where(Prompt.run_id == root._model.id)
db = cast(DB, info.context['db'])
async with db.session() as session:
return await load_connection(
session,
Prompt,
create_node_from_model=PromptNode.from_model,
select_model=select_model,
sort=sort,
before=before,
after=after,
first=first,
last=last,
)
async def _resolve_stdouts(
info: Info,
root: 'RunNode',
before: Optional[str] = None,
after: Optional[str] = None,
first: Optional[int] = None,
last: Optional[int] = None,
) -> Connection['StdoutNode']:
from .stdout_node import StdoutNode
sort = [SortField('written_at')]
select_model = select(Stdout).where(Stdout.run_id == root._model.id)
db = cast(DB, info.context['db'])
async with db.session() as session:
return await load_connection(
session,
Stdout,
create_node_from_model=StdoutNode.from_model,
select_model=select_model,
sort=sort,
before=before,
after=after,
first=first,
last=last,
)
@strawberry.type
class RunNode:
_model: strawberry.Private[Run]
id: int
run_no: int
state: Optional[str]
started_at: Optional[datetime.datetime]
ended_at: Optional[datetime.datetime]
script: Optional[str]
exception: Optional[str]
traces: Connection[Annotated['TraceNode', strawberry.lazy('.trace_node')]] = (
strawberry.field(resolver=_resolve_traces)
)
trace_calls: Connection[
Annotated['TraceCallNode', strawberry.lazy('.trace_call_node')]
] = strawberry.field(resolver=_resolve_trace_calls)
prompts: Connection[Annotated['PromptNode', strawberry.lazy('.prompt_node')]] = (
strawberry.field(resolver=_resolve_prompts)
)
stdouts: Connection[Annotated['StdoutNode', strawberry.lazy('.stdout_node')]] = (
strawberry.field(resolver=_resolve_stdouts)
)
@classmethod
def from_model(cls: type['RunNode'], model: Run) -> 'RunNode':
script = model.script.script if model.script else None
return cls(
_model=model,
id=model.id,
run_no=model.run_no,
state=model.state,
started_at=model.started_at,
ended_at=model.ended_at,
script=script,
exception=model.exception,
)
|
simonsobsREPO_NAMEnextline-rdbPATH_START.@nextline-rdb_extracted@nextline-rdb-main@src@nextline_rdb@schema@nodes@[email protected]_END.py
|
{
"filename": "parameter.py",
"repo_name": "lmfit/lmfit-py",
"repo_path": "lmfit-py_extracted/lmfit-py-master/lmfit/parameter.py",
"type": "Python"
}
|
"""Parameter class."""
from copy import deepcopy
import json
from asteval import Interpreter, get_ast_names, valid_symbol_name
from numpy import arcsin, array, cos, inf, isclose, sin, sqrt
from scipy.linalg import LinAlgError
import scipy.special
from uncertainties import correlated_values, ufloat
from uncertainties import wrap as uwrap
from .jsonutils import decode4js, encode4js
from .lineshapes import tiny
from .printfuncs import params_html_table
SCIPY_FUNCTIONS = {'gamfcn': scipy.special.gamma,
'loggammafcn': scipy.special.loggamma,
'betalnfnc': scipy.special.betaln}
for fnc_name in ('erf', 'erfc', 'wofz'):
SCIPY_FUNCTIONS[fnc_name] = getattr(scipy.special, fnc_name)
def check_ast_errors(expr_eval):
"""Check for errors derived from asteval."""
if len(expr_eval.error) > 0:
expr_eval.raise_exception(None)
class Writer:
"""Replace 'stdout' and 'stderr' for asteval."""
def __init__(self, **kws):
self.messages = []
for k, v in kws.items():
setattr(self, k, v)
def write(self, msg):
"""Internal writer."""
o = msg.strip()
if len(o) > 0:
self.messages.append(msg)
def asteval_with_uncertainties(*vals, obj=None, pars=None, names=None, **kwargs):
"""Calculate object value, given values for variables.
This is used by the uncertainties package to calculate the
uncertainty in an object even with a complicated expression.
"""
_asteval = getattr(pars, '_asteval', None)
if obj is None or pars is None or names is None or _asteval is None:
return 0
for val, name in zip(vals, names):
_asteval.symtable[name] = val
# re-evaluate constrained parameters to propagate uncertainties
[p._getval() for p in pars.values()]
return _asteval.eval(obj._expr_ast)
class Parameters(dict):
"""A dictionary of Parameter objects.
It should contain all Parameter objects that are required to specify
a fit model. All minimization and Model fitting routines in lmfit will
use exactly one Parameters object, typically given as the first
argument to the objective function.
All keys of a Parameters() instance must be strings and valid Python
symbol names, so that the name must match ``[a-z_][a-z0-9_]*`` and
cannot be a Python reserved word.
All values of a Parameters() instance must be Parameter objects.
A Parameters() instance includes an `asteval` Interpreter used for
evaluation of constrained Parameters.
Parameters() support copying and pickling, and have methods to convert
to and from serializations using json strings.
"""
def __init__(self, usersyms=None):
"""
Arguments
---------
usersyms : dict, optional
Dictionary of symbols to add to the
:class:`asteval.Interpreter` (default is None).
"""
super().__init__(self)
self._ast_msgs = Writer()
self._asteval = Interpreter(writer=self._ast_msgs,
err_writer=self._ast_msgs)
_syms = {}
_syms.update(SCIPY_FUNCTIONS)
if usersyms is not None:
_syms.update(usersyms)
for key, val in _syms.items():
self._asteval.symtable[key] = val
def _writer(self, msg):
self._asteval_msgs.append(msg)
def copy(self):
"""Parameters.copy() should always be a deepcopy."""
return self.__deepcopy__(None)
def update(self, other):
"""Update values and symbols with another Parameters object."""
if not isinstance(other, Parameters):
raise ValueError(f"'{other}' is not a Parameters object")
self.add_many(*other.values())
for sym in other._asteval.user_defined_symbols():
self._asteval.symtable[sym] = other._asteval.symtable[sym]
return self
def __copy__(self):
"""Parameters.copy() should always be a deepcopy."""
return self.__deepcopy__(None)
def __deepcopy__(self, memo):
"""Implementation of Parameters.deepcopy().
The method needs to make sure that `asteval` is available and that
all individual Parameter objects are copied.
"""
_pars = self.__class__()
# find the symbols that were added by users, not during construction
unique_symbols = {}
for key in self._asteval.user_defined_symbols():
try:
val = deepcopy(self._asteval.symtable[key])
unique_symbols[key] = val
except (TypeError, ValueError):
unique_symbols[key] = self._asteval.symtable[key]
_pars._asteval.symtable.update(unique_symbols)
# we're just about to add a lot of Parameter objects to the newly created Parameters instance
parameter_list = []
for key, par in self.items():
if isinstance(par, Parameter):
param = Parameter(name=par.name,
value=par.value,
min=par.min,
max=par.max)
param.vary = par.vary
param.brute_step = par.brute_step
param.stderr = par.stderr
param.correl = deepcopy(par.correl)
param.init_value = par.init_value
param.expr = par.expr
param.user_data = deepcopy(par.user_data)
parameter_list.append(param)
_pars.add_many(*parameter_list)
return _pars
def __setitem__(self, key, par):
"""Set items of Parameters object."""
if key not in self and not valid_symbol_name(key):
raise KeyError(f"'{key}' is not a valid Parameters name")
if par is not None and not isinstance(par, Parameter):
raise ValueError(f"'{par}' is not a Parameter")
dict.__setitem__(self, key, par)
par.name = key
par._expr_eval = self._asteval
self._asteval.symtable[key] = par.value
def __add__(self, other):
"""Add Parameters objects."""
if not isinstance(other, Parameters):
raise ValueError(f"'{other}' is not a Parameters object")
out = deepcopy(self)
out.add_many(*other.values())
for sym in other._asteval.user_defined_symbols():
if sym not in out._asteval.symtable:
out._asteval.symtable[sym] = other._asteval.symtable[sym]
return out
def __iadd__(self, other):
"""Add/assign Parameters objects."""
self.update(other)
return self
def __array__(self):
"""Convert Parameters to array."""
return array([float(k) for k in self.values()])
def __reduce__(self):
"""Reduce Parameters instance such that it can be pickled."""
# make a list of all the parameters
params = [self[k] for k in self]
# find the symbols from _asteval.symtable, that need to be remembered.
unique_symbols = {key: deepcopy(self._asteval.symtable[key])
for key in self._asteval.user_defined_symbols()}
return self.__class__, (), {'unique_symbols': unique_symbols,
'params': params}
def __setstate__(self, state):
"""Unpickle a Parameters instance.
Parameters
----------
state : dict
state['unique_symbols'] is a dictionary containing symbols
that need to be injected into `_asteval.symtable`.
state['params'] is a list of Parameter instances to be added.
"""
# first update the Interpreter symbol table. This needs to be done
# first because Parameter's early in the list may depend on later
# Parameter's. This leads to problems because add_many eventually leads
# to a Parameter value being retrieved with _getval, which, if the
# dependent value hasn't already been added to the symtable, leads to
# an Error. Another way of doing this would be to remove all the expr
# from the Parameter instances before they get added, then to restore
# them.
symtab = self._asteval.symtable
for key, val in state['unique_symbols'].items():
if key not in symtab:
symtab[key] = val
# then add all the parameters
self.add_many(*state['params'])
def __repr__(self):
"""__repr__ from OrderedDict."""
if not self:
return f'{self.__class__.__name__}()'
return f'{self.__class__.__name__}({list(self.items())!r})'
def eval(self, expr):
"""Evaluate a statement using the `asteval` Interpreter.
Parameters
----------
expr : str
An expression containing parameter names and other symbols
recognizable by the `asteval` Interpreter.
Returns
-------
float
The result of evaluating the expression.
"""
return self._asteval.eval(expr)
def update_constraints(self):
"""Update all constrained parameters.
This method ensures that dependencies are evaluated as needed.
"""
requires_update = {name for name, par in self.items() if par._expr is
not None}
updated_tracker = set(requires_update)
def _update_param(name):
"""Update a parameter value, including setting bounds.
For a constrained parameter (one with an `expr` defined), this
first updates (recursively) all parameters on which the
parameter depends (using the 'deps' field).
"""
par = self.__getitem__(name)
if par._expr_eval is None:
par._expr_eval = self._asteval
for dep in par._expr_deps:
if dep in updated_tracker:
_update_param(dep)
self._asteval.symtable[name] = par.value
updated_tracker.discard(name)
for name in requires_update:
_update_param(name)
def pretty_repr(self, oneline=False):
"""Return a pretty representation of a Parameters class.
Parameters
----------
oneline : bool, optional
If True prints a one-line parameters representation (default
is False).
Returns
-------
s: str
Parameters representation.
"""
if oneline:
return self.__repr__()
s = "Parameters({\n"
for key in self.keys():
s += f" '{key}': {self[key]}, \n"
s += " })\n"
return s
def pretty_print(self, oneline=False, colwidth=8, precision=4, fmt='g',
columns=['value', 'min', 'max', 'stderr', 'vary', 'expr',
'brute_step']):
"""Pretty-print of parameters data.
Parameters
----------
oneline : bool, optional
If True prints a one-line parameters representation [False]
colwidth : int, optional
Column width for all columns specified in `columns` [8]
precision : int, optional
Number of digits to be printed after floating point [4]
fmt : {'g', 'e', 'f'}, optional
Single-character numeric formatter. Valid values are: `'g'`
floating point and exponential (default), `'e'` exponential,
or `'f'` floating point.
columns : :obj:`list` of :obj:`str`, optional
List of :class:`Parameter` attribute names to print (default
is to show all attributes).
"""
if oneline:
print(self.pretty_repr(oneline=oneline))
return
name_len = max(len(s) for s in self)
allcols = ['name'] + columns
title = '{:{name_len}} ' + len(columns) * ' {:>{n}}'
print(title.format(*allcols, name_len=name_len, n=colwidth).title())
numstyle = '{%s:>{n}.{p}{f}}' # format for numeric columns
otherstyles = dict(name='{name:<{name_len}} ', stderr='{stderr!s:>{n}}',
vary='{vary!s:>{n}}', expr='{expr!s:>{n}}',
brute_step='{brute_step!s:>{n}}')
line = ' '.join(otherstyles.get(k, numstyle % k) for k in allcols)
for name, values in sorted(self.items()):
pvalues = {k: getattr(values, k) for k in columns}
pvalues['name'] = name
# stderr is a special case: it is either numeric or None (i.e. str)
if 'stderr' in columns and pvalues['stderr'] is not None:
pvalues['stderr'] = (numstyle % '').format(
pvalues['stderr'], n=colwidth, p=precision, f=fmt)
elif 'brute_step' in columns and pvalues['brute_step'] is not None:
pvalues['brute_step'] = (numstyle % '').format(
pvalues['brute_step'], n=colwidth, p=precision, f=fmt)
print(line.format(name_len=name_len, n=colwidth, p=precision,
f=fmt, **pvalues))
def _repr_html_(self):
"""Return a HTML representation of parameters data."""
return params_html_table(self)
def set(self, **kws):
"""Set Parameter values and other attributes.
Parameters
----------
**kws : optional
Parameter names and initial values or dictionaries of
values and attributes.
Returns
-------
None
Notes
-----
1. keyword arguments will be used to create parameter names.
2. values can either be numbers (floats or integers) to set the
parameter value, or can be dictionaries with any of the following
keywords: ``value``, ``vary``, ``min``, ``max``, ``expr``,
``brute_step``, or ``is_init_value`` to set those parameter attributes.
3. for each parameter, ``is_init_value`` controls whether to set
``init_value`` when setting ``value``, and defaults to True.
Examples
--------
>>> params = Parameters()
>>> params.add('xvar', value=0.50, min=0, max=1)
>>> params.add('yvar', expr='1.0 - xvar')
>>> params.set(xvar=0.80, zvar={'value':3, 'min':0})
"""
for name, val in kws.items():
if name not in self:
self.__setitem__(name, Parameter(value=-inf, name=name,
vary=True, min=-inf, max=inf,
expr=None, brute_step=None))
par = self.__getitem__(name)
if isinstance(val, (float, int)):
val = {'value': val}
if 'is_init_value' not in val:
val['is_init_value'] = True
par.set(**val)
def add(self, name, value=None, vary=True, min=-inf, max=inf, expr=None,
brute_step=None):
"""Add a Parameter.
Parameters
----------
name : str or Parameter
If ``name`` refers to a Parameter object it will be added directly
to the Parameters instance, otherwise a new Parameter object with name
``string`` is created before adding it. In both cases, ``name`` must
match ``[a-z_][a-z0-9_]*`` and cannot be a Python reserved word.
value : float, optional
Numerical Parameter value, typically the *initial value*.
vary : bool, optional
Whether the Parameter is varied during a fit (default is True).
min : float, optional
Lower bound for value (default is ``-numpy.inf``, no lower
bound).
max : float, optional
Upper bound for value (default is ``numpy.inf``, no upper
bound).
expr : str, optional
Mathematical expression used to constrain the value during the
fit (default is None).
brute_step : float, optional
Step size for grid points in the `brute` method (default is
None).
Examples
--------
>>> params = Parameters()
>>> params.add('xvar', value=0.50, min=0, max=1)
>>> params.add('yvar', expr='1.0 - xvar')
which is equivalent to:
>>> params = Parameters()
>>> params['xvar'] = Parameter(name='xvar', value=0.50, min=0, max=1)
>>> params['yvar'] = Parameter(name='yvar', expr='1.0 - xvar')
"""
if isinstance(name, Parameter):
self.__setitem__(name.name, name)
else:
self.__setitem__(name, Parameter(value=value, name=name, vary=vary,
min=min, max=max, expr=expr,
brute_step=brute_step))
if len(self._asteval.error) > 0:
err = self._asteval.error[0]
raise err.exc(err.msg)
def add_many(self, *parlist):
"""Add many parameters, using a sequence of tuples.
Parameters
----------
*parlist : :obj:`sequence` of :obj:`tuple` or Parameter
A sequence of tuples, or a sequence of `Parameter` instances.
If it is a sequence of tuples, then each tuple must contain at
least a `name`. The order in each tuple must be
``(name, value, vary, min, max, expr, brute_step)``.
Examples
--------
>>> params = Parameters()
# add with tuples: (NAME VALUE VARY MIN MAX EXPR BRUTE_STEP)
>>> params.add_many(('amp', 10, True, None, None, None, None),
... ('cen', 4, True, 0.0, None, None, None),
... ('wid', 1, False, None, None, None, None),
... ('frac', 0.5))
# add a sequence of Parameters
>>> f = Parameter('par_f', 100)
>>> g = Parameter('par_g', 2.)
>>> params.add_many(f, g)
"""
__params = []
for par in parlist:
if not isinstance(par, Parameter):
par = Parameter(*par)
__params.append(par)
par._delay_asteval = True
self.__setitem__(par.name, par)
for para in __params:
para._delay_asteval = False
def valuesdict(self):
"""Return an ordered dictionary of parameter values.
Returns
-------
dict
A dictionary of :attr:`name`::attr:`value` pairs for each
Parameter.
"""
return {p.name: p.value for p in self.values()}
def create_uvars(self, covar=None):
"""Return a dict of uncertainties ufloats from the current Parameter
values and stderr, and an optionally-supplied covariance matrix.
Uncertainties in Parameters with constraint expressions will be
calculated, propagating uncertainties (and including correlations).
Parameters
----------
covar : optional
Nvar x Nvar covariance matrix from fit
Returns
-------
dict with keys of Parameter names and values of uncertainties.ufloats.
Notes
-----
1. if covar is provided, it must correspond to the existing *variable*
Parameters. If covar is given, the returned uncertainties ufloats
will take the correlations into account when combining values.
2. See the uncertainties package documentation
(https://pythonhosted.org/uncertainties) for more details.
"""
uvars = {}
has_expr = False
vnames, vbest, vindex = [], [], -1
savevals = self.valuesdict()
for par in self.values():
has_expr = has_expr or par.expr is not None
if par.vary:
vindex += 1
vnames.append(par.name)
vbest.append(par.value)
if getattr(par, 'stderr', None) is None and covar is not None:
par.stderr = sqrt(covar[vindex, vindex])
stderr = getattr(par, 'stderr', 0.0)
if stderr is None:
stderr = 0.0
uvars[par.name] = ufloat(par.value, stderr)
corr_uvars = None
if covar is not None:
try:
corr_uvars = correlated_values(vbest, covar)
for name, cuv in zip(vnames, corr_uvars):
uvars[name] = cuv
except (LinAlgError, ValueError):
pass
if has_expr and corr_uvars is not None:
# for uncertainties on constrained parameters, use the calculated
# correlated values, evaluate the uncertainties on the constrained
# parameters and reset the Parameters to best-fit value
wrap_ueval = uwrap(asteval_with_uncertainties)
for par in self.values():
if getattr(par, '_expr_ast', None) is not None:
try:
uval = wrap_ueval(*corr_uvars, obj=par,
pars=self, names=vnames)
par.stderr = uval.std_dev
uvars[par.name] = uval
except Exception:
par.stderr = 0
# restore all param values to saved best values
for parname, param in self.items():
param.value = savevals[parname]
return uvars
def dumps(self, **kws):
"""Represent Parameters as a JSON string.
Parameters
----------
**kws : optional
Keyword arguments that are passed to `json.dumps`.
Returns
-------
str
JSON string representation of Parameters.
See Also
--------
dump, loads, load, json.dumps
"""
params = [p.__getstate__() for p in self.values()]
unique_symbols = {key: encode4js(deepcopy(self._asteval.symtable[key]))
for key in self._asteval.user_defined_symbols()}
return json.dumps({'unique_symbols': unique_symbols,
'params': params}, **kws)
def loads(self, s, **kws):
"""Load Parameters from a JSON string.
Parameters
----------
**kws : optional
Keyword arguments that are passed to `json.loads`.
Returns
-------
Parameters
Updated Parameters from the JSON string.
Notes
-----
Current Parameters will be cleared before loading the data from
the JSON string.
See Also
--------
dump, dumps, load, json.loads
"""
self.clear()
tmp = json.loads(s, **kws)
unique_symbols = {key: decode4js(tmp['unique_symbols'][key]) for key
in tmp['unique_symbols']}
state = {'unique_symbols': unique_symbols, 'params': []}
for parstate in tmp['params']:
_par = Parameter(name='')
_par.__setstate__(parstate)
state['params'].append(_par)
self.__setstate__(state)
return self
def dump(self, fp, **kws):
"""Write JSON representation of Parameters to a file-like object.
Parameters
----------
fp : file-like object
An open and `.write()`-supporting file-like object.
**kws : optional
Keyword arguments that are passed to `dumps`.
Returns
-------
int
Return value from `fp.write()`: the number of characters
written.
See Also
--------
dumps, load, json.dump
"""
return fp.write(self.dumps(**kws))
def load(self, fp, **kws):
"""Load JSON representation of Parameters from a file-like object.
Parameters
----------
fp : file-like object
An open and `.read()`-supporting file-like object.
**kws : optional
Keyword arguments that are passed to `loads`.
Returns
-------
Parameters
Updated Parameters loaded from `fp`.
See Also
--------
dump, loads, json.load
"""
return self.loads(fp.read(), **kws)
class Parameter:
"""A Parameter is an object that can be varied in a fit.
It is a central component of lmfit, and all minimization and modeling
methods use Parameter objects.
A Parameter has a `name` attribute, and a scalar floating point
`value`. It also has a `vary` attribute that describes whether the
value should be varied during the minimization. Finite bounds can be
placed on the Parameter's value by setting its `min` and/or `max`
attributes. A Parameter can also have its value determined by a
mathematical expression of other Parameter values held in the `expr`
attribute. Additional attributes include `brute_step` used as the step
size in a brute-force minimization, and `user_data` reserved
exclusively for user's need.
After a minimization, a Parameter may also gain other attributes,
including `stderr` holding the estimated standard error in the
Parameter's value, and `correl`, a dictionary of correlation values
with other Parameters used in the minimization.
"""
def __init__(self, name, value=None, vary=True, min=-inf, max=inf,
expr=None, brute_step=None, user_data=None):
"""
Parameters
----------
name : str
Name of the Parameter.
value : float, optional
Numerical Parameter value.
vary : bool, optional
Whether the Parameter is varied during a fit (default is True).
min : float, optional
Lower bound for value (default is ``-numpy.inf``, no lower
bound).
max : float, optional
Upper bound for value (default is ``numpy.inf``, no upper
bound).
expr : str, optional
Mathematical expression used to constrain the value during the
fit (default is None).
brute_step : float, optional
Step size for grid points in the `brute` method (default is
None).
user_data : optional
User-definable extra attribute used for a Parameter (default
is None).
Attributes
----------
stderr : float
The estimated standard error for the best-fit value.
correl : dict
A dictionary of the correlation with the other fitted
Parameters of the form::
{'decay': 0.404, 'phase': -0.020, 'frequency': 0.102}
"""
self.name = name
self.user_data = user_data
self.init_value = value
self.min = min
self.max = max
self.brute_step = brute_step
self._vary = vary
self._expr = expr
self._expr_ast = None
self._expr_eval = None
self._expr_deps = []
self._delay_asteval = False
self.stderr = None
self.correl = None
self.from_internal = lambda val: val
self._val = value
self._init_bounds()
def set(self, value=None, vary=None, min=None, max=None, expr=None,
brute_step=None, is_init_value=True):
"""Set or update Parameter attributes.
Parameters
----------
value : float, optional
Numerical Parameter value.
vary : bool, optional
Whether the Parameter is varied during a fit.
min : float, optional
Lower bound for value. To remove a lower bound you must use
``-numpy.inf``.
max : float, optional
Upper bound for value. To remove an upper bound you must use
``numpy.inf``.
expr : str, optional
Mathematical expression used to constrain the value during the
fit. To remove a constraint you must supply an empty string.
brute_step : float, optional
Step size for grid points in the `brute` method. To remove the
step size you must use ``0``.
is_init_value: bool, optional
Whether to set value as `init_value`, when setting value.
Notes
-----
Each argument to `set()` has a default value of None, which will
leave the current value for the attribute unchanged. Thus, to lift
a lower or upper bound, passing in None will not work. Instead,
you must set these to ``-numpy.inf`` or ``numpy.inf``, as with::
par.set(min=None) # leaves lower bound unchanged
par.set(min=-numpy.inf) # removes lower bound
Similarly, to clear an expression, pass a blank string, (not
None!) as with::
par.set(expr=None) # leaves expression unchanged
par.set(expr='') # removes expression
Explicitly setting a value or setting ``vary=True`` will also
clear the expression.
Finally, to clear the brute_step size, pass ``0``, not None::
par.set(brute_step=None) # leaves brute_step unchanged
par.set(brute_step=0) # removes brute_step
"""
if vary is not None:
self._vary = vary
if vary:
self.__set_expression('')
if min is not None:
self.min = min
if max is not None:
self.max = max
# need to set this after min and max, so that it will use new
# bounds in the setter for value
if value is not None:
is_init_value = is_init_value or self.value in (None, -inf, inf)
self.value = value
if is_init_value:
self.init_value = value
self.__set_expression("")
if expr is not None:
self.__set_expression(expr)
if brute_step is not None:
if brute_step == 0.0:
self.brute_step = None
else:
self.brute_step = brute_step
def _init_bounds(self):
"""Make sure initial bounds are self-consistent."""
# _val is None means - infinity.
if self.max is None:
self.max = inf
if self.min is None:
self.min = -inf
if self._val is None:
self._val = -inf
if self.min > self.max:
self.min, self.max = self.max, self.min
if isclose(self.min, self.max, atol=1e-13, rtol=1e-13):
raise ValueError(f"Parameter '{self.name}' has min == max")
if self._val > self.max:
self._val = self.max
if self._val < self.min:
self._val = self.min
self.setup_bounds()
def __getstate__(self):
"""Get state for pickle."""
return (self.name, self.value, self._vary, self.expr, self.min,
self.max, self.brute_step, self.stderr, self.correl,
self.init_value, self.user_data)
def __setstate__(self, state):
"""Set state for pickle."""
(self.name, _value, self._vary, self.expr, self.min, self.max,
self.brute_step, self.stderr, self.correl, self.init_value,
self.user_data) = state
self._expr_ast = None
self._expr_eval = None
self._expr_deps = []
self._delay_asteval = False
self._val = _value
self._init_bounds()
self.value = _value
def __repr__(self):
"""Return printable representation of a Parameter object."""
s = []
sval = f"value={repr(self._getval())}"
if not self._vary and self._expr is None:
sval += " (fixed)"
elif self.stderr is not None:
sval += f" +/- {self.stderr:.3g}"
s.append(sval)
s.append(f"bounds=[{repr(self.min)}:{repr(self.max)}]")
if self._expr is not None:
s.append(f"expr='{self.expr}'")
if self.brute_step is not None:
s.append(f"brute_step={self.brute_step}")
return f"<Parameter '{self.name}', {', '.join(s)}>"
def setup_bounds(self):
"""Set up Minuit-style internal/external parameter transformation
of min/max bounds.
As a side-effect, this also defines the self.from_internal method
used to re-calculate self.value from the internal value, applying
the inverse Minuit-style transformation. This method should be
called prior to passing a Parameter to the user-defined objective
function.
This code borrows heavily from JJ Helmus' leastsqbound.py
Returns
-------
_val : float
The internal value for parameter from `self.value` (which holds
the external, user-expected value). This internal value should
actually be used in a fit.
"""
if self.min is None:
self.min = -inf
if self.max is None:
self.max = inf
if self.min == -inf and self.max == inf:
self.from_internal = lambda val: val
_val = self._val
elif self.max == inf:
self.from_internal = lambda val: self.min - 1.0 + sqrt(val*val + 1)
_val = sqrt((self._val - self.min + 1.0)**2 - 1)
elif self.min == -inf:
self.from_internal = lambda val: self.max + 1 - sqrt(val*val + 1)
_val = sqrt((self.max - self._val + 1.0)**2 - 1)
else:
self.from_internal = lambda val: self.min + (sin(val) + 1) * \
(self.max - self.min) / 2.0
_val = arcsin(2*(self._val - self.min)/(self.max - self.min) - 1)
if abs(_val) < tiny:
_val = 0.0
return _val
def scale_gradient(self, val):
"""Return scaling factor for gradient.
Parameters
----------
val : float
Numerical Parameter value.
Returns
-------
float
Scaling factor for the gradient, according to the Minuit-style
transformation.
"""
if self.min == -inf and self.max == inf:
return 1.0
if self.max == inf:
return val / sqrt(val*val + 1)
if self.min == -inf:
return -val / sqrt(val*val + 1)
return cos(val) * (self.max - self.min) / 2.0
def _getval(self):
"""Get value, with bounds applied."""
# Note assignment to self._val has been changed to self.value
# The self.value property setter makes sure that the
# _expr_eval.symtable is kept up-to-date.
# If you just assign to self._val then _expr_eval.symtable[self.name]
# becomes stale if parameter.expr is not None.
if self._expr is not None:
if self._expr_ast is None:
self.__set_expression(self._expr)
if self._expr_eval is not None and not self._delay_asteval:
self.value = self._expr_eval(self._expr_ast)
check_ast_errors(self._expr_eval)
return self._val
@property
def value(self):
"""Return the numerical Parameter value, with bounds applied."""
return self._getval()
@value.setter
def value(self, val):
"""Set the numerical Parameter value."""
self._val = val
if self._val is not None:
if self._val > self.max:
self._val = self.max
elif self._val < self.min:
self._val = self.min
if not hasattr(self, '_expr_eval'):
self._expr_eval = None
if self._expr_eval is not None:
self._expr_eval.symtable[self.name] = self._val
@property
def vary(self):
"""Return whether the parameter is variable"""
return self._vary
@vary.setter
def vary(self, val):
"""Set whether a parameter is varied"""
self._vary = val
if val:
self.__set_expression('')
@property
def expr(self):
"""Return the mathematical expression used to constrain the value in fit."""
return self._expr
@expr.setter
def expr(self, val):
"""Set the mathematical expression used to constrain the value in fit.
To remove a constraint you must supply an empty string.
"""
self.__set_expression(val)
def __set_expression(self, val):
if val == '':
val = None
self._expr = val
if val is not None:
self._vary = False
if not hasattr(self, '_expr_eval'):
self._expr_eval = None
if val is None:
self._expr_ast = None
if val is not None and self._expr_eval is not None:
self._expr_eval.error = []
self._expr_eval.error_msg = None
self._expr_ast = self._expr_eval.parse(val)
check_ast_errors(self._expr_eval)
self._expr_deps = get_ast_names(self._expr_ast)
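# Note (added comment): assigning an expression turns off `vary` and parses the
# expression once into an AST with the asteval interpreter; `_getval` above
# re-evaluates that AST whenever the value is requested, and `_expr_deps`
# records the parameter names the expression depends on.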
def __array__(self):
"""array"""
return array(float(self._getval()))
def __str__(self):
"""string"""
return self.__repr__()
def __abs__(self):
"""abs"""
return abs(self._getval())
def __neg__(self):
"""neg"""
return -self._getval()
def __pos__(self):
"""positive"""
return +self._getval()
def __bool__(self):
"""bool"""
return self._getval() != 0
def __int__(self):
"""int"""
return int(self._getval())
def __float__(self):
"""float"""
return float(self._getval())
def __trunc__(self):
"""trunc"""
return self._getval().__trunc__()
def __add__(self, other):
"""+"""
return self._getval() + other
def __sub__(self, other):
"""-"""
return self._getval() - other
def __truediv__(self, other):
"""/"""
return self._getval() / other
def __floordiv__(self, other):
"""//"""
return self._getval() // other
def __divmod__(self, other):
"""divmod"""
return divmod(self._getval(), other)
def __mod__(self, other):
"""%"""
return self._getval() % other
def __mul__(self, other):
"""*"""
return self._getval() * other
def __pow__(self, other):
"""**"""
return self._getval() ** other
def __gt__(self, other):
""">"""
return self._getval() > other
def __ge__(self, other):
""">="""
return self._getval() >= other
def __le__(self, other):
"""<="""
return self._getval() <= other
def __lt__(self, other):
"""<"""
return self._getval() < other
def __eq__(self, other):
"""=="""
return self._getval() == other
def __ne__(self, other):
"""!="""
return self._getval() != other
def __radd__(self, other):
"""+ (right)"""
return other + self._getval()
def __rtruediv__(self, other):
"""/ (right)"""
return other / self._getval()
def __rdivmod__(self, other):
"""divmod (right)"""
return divmod(other, self._getval())
def __rfloordiv__(self, other):
"""// (right)"""
return other // self._getval()
def __rmod__(self, other):
"""% (right)"""
return other % self._getval()
def __rmul__(self, other):
"""* (right)"""
return other * self._getval()
def __rpow__(self, other):
"""** (right)"""
return other ** self._getval()
def __rsub__(self, other):
"""- (right)"""
return other - self._getval()
def create_params(**kws):
"""Create lmfit.Parameters instance and set initial values and attributes.
Parameters
----------
**kws
keyword arguments are parameter names; values are numbers or dictionaries
of Parameter values and attributes.
Returns
-------
Parameters instance
Notes
-----
1. keyword arguments will be used to create parameter names.
2. values can either be numbers (floats or integers) to set the parameter
value, or can be dictionaries with any of the following keywords:
``value``, ``vary``, ``min``, ``max``, ``expr``, ``brute_step``, or
``is_init_value`` to set those parameter attributes.
3. for each parameter, ``is_init_value`` controls whether to set
``init_value`` when setting ``value``, and defaults to True.
Examples
--------
>>> params = create_params(amplitude=2, center=200,
sigma={'value': 3, 'min':0},
fwhm={'expr': '2.0*sigma'})
"""
params = Parameters()
params.set(**kws)
return params
|
lmfitREPO_NAMElmfit-pyPATH_START.@lmfit-py_extracted@lmfit-py-master@[email protected]@.PATH_END.py
|
{
"filename": "likelihoods.py",
"repo_name": "stevengiacalone/triceratops",
"repo_path": "triceratops_extracted/triceratops-master/triceratops/likelihoods.py",
"type": "Python"
}
|
import numpy as np
from astropy import constants
from pytransit import QuadraticModel
Msun = constants.M_sun.cgs.value
Rsun = constants.R_sun.cgs.value
Rearth = constants.R_earth.cgs.value
G = constants.G.cgs.value
au = constants.au.cgs.value
pi = np.pi
tm = QuadraticModel(interpolate=False)
tm_sec = QuadraticModel(interpolate=False)
def simulate_TP_transit(time: np.ndarray, R_p: float, P_orb: float,
inc: float, a: float, R_s: float, u1: float,
u2: float, ecc: float, argp: float,
companion_fluxratio: float = 0.0,
companion_is_host: bool = False,
exptime: float = 0.00139,
nsamples: int = 20):
"""
Simulates a transiting planet light curve using PyTransit.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
R_p (float): Planet radius [Earth radii].
P_orb (float): Orbital period [days].
inc (float): Orbital inclination [degrees].
a (float): Semimajor axis [cm].
R_s (float): Star radius [Solar radii].
u1 (float): 1st coefficient in quadratic limb darkening law.
u2 (float): 2nd coefficient in quadratic limb darkening law.
ecc (float): Orbital eccentricity.
argp (float): Argument of periastron [degrees].
companion_fluxratio (float): Proportion of flux provided by
the unresolved companion.
companion_is_host (bool): True if the transit is around the
unresolved companion and False if
it is not.
exptime (float): Exposure time of observations [days].
nsamples (int): Sampling rate for supersampling.
Returns:
flux (numpy array): Normalized flux at each of the given times.
"""
F_target = 1
F_comp = companion_fluxratio/(1-companion_fluxratio)
# step 1: simulate light curve assuming only the host star exists
tm.set_data(time, exptimes=exptime, nsamples=nsamples)
flux = tm.evaluate_ps(
k=R_p*Rearth/(R_s*Rsun),
ldc=[float(u1), float(u2)],
t0=0.0,
p=P_orb,
a=a/(R_s*Rsun),
i=inc*(pi/180.),
e=ecc,
w=(90-argp)*(pi/180.)
)
# step 2: adjust the light curve to account for flux dilution
# from non-host star
if companion_is_host:
F_dilute = F_target / F_comp
flux = (flux + F_dilute)/(1 + F_dilute)
else:
F_dilute = F_comp / F_target
flux = (flux + F_dilute)/(1 + F_dilute)
return flux
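# Illustrative dilution arithmetic (added note): if the unresolved companion
# contributes half of the total flux (companion_fluxratio = 0.5) and the planet
# orbits the target (companion_is_host = False), then F_comp = 1, F_dilute = 1,
# and a raw transit depth of 1% (flux = 0.99) becomes (0.99 + 1)/(1 + 1) = 0.995,
# i.e. the observed depth is halved by the extra light.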
def simulate_EB_transit(time: np.ndarray, R_EB: float,
EB_fluxratio: float, P_orb: float, inc: float,
a: float, R_s: float, u1: float, u2: float,
ecc: float, argp: float,
companion_fluxratio: float = 0.0,
companion_is_host: bool = False,
exptime: float = 0.00139,
nsamples: int = 20):
"""
Simulates an eclipsing binary light curve using PyTransit.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
R_EB (float): EB radius [Solar radii].
EB_fluxratio (float): F_EB / (F_EB + F_target).
P_orb (float): Orbital period [days].
inc (float): Orbital inclination [degrees].
a (float): Semimajor axis [cm].
R_s (float): Star radius [Solar radii].
u1 (float): 1st coefficient in quadratic limb darkening law.
u2 (float): 2nd coefficient in quadratic limb darkening law.
ecc (float): Orbital eccentricity.
argp (float): Argument of periastron [degrees].
companion_fluxratio (float): F_comp / (F_comp + F_target).
companion_is_host (bool): True if the transit is around the
unresolved companion and False if it
is not.
exptime (float): Exposure time of observations [days].
nsamples (int): Sampling rate for supersampling.
Returns:
flux (numpy array): Normalized flux at each of the given times.
secdepth (float): Depth of the secondary eclipse after dilution.
"""
F_target = 1
F_comp = companion_fluxratio/(1 - companion_fluxratio)
F_EB = EB_fluxratio/(1 - EB_fluxratio)
# step 1: simulate light curve assuming only the host star exists
# calculate primary eclipse
tm.set_data(time, exptimes=exptime, nsamples=nsamples)
k = R_EB/R_s
if abs(k - 1.0) < 1e-6:
k *= 0.999
flux = tm.evaluate_ps(
k=k,
ldc=[float(u1), float(u2)],
t0=0.0,
p=P_orb,
a=a/(R_s*Rsun),
i=inc*(pi/180.),
e=ecc,
w=(90-argp)*(pi/180.)
)
# calculate secondary eclipse depth
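# (added note) The roles of the two bodies are effectively swapped here: the
# radius ratio is inverted (k -> 1/k) and the argument of periastron is shifted
# by 180 degrees, so tm_sec models the occultation of the EB half an orbit
# later; only the minimum of that curve is kept as the secondary eclipse depth.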
tm_sec.set_data(np.linspace(-0.05,0.05,25))
sec_flux = tm_sec.evaluate_ps(
k=1/k,
ldc=[float(u1), float(u2)],
t0=0.0, p=P_orb,
a=a/(R_s*Rsun),
i=inc*(pi/180.),
e=ecc,
w=(90-argp+180)*(pi/180.)
)
sec_flux = np.min(sec_flux)
# step 2: adjust the light curve to account for flux dilution
# from EB and non-host star
if companion_is_host:
flux = (flux + F_EB/F_comp)/(1 + F_EB/F_comp)
sec_flux = (sec_flux + F_comp/F_EB)/(1 + F_comp/F_EB)
F_dilute = F_target/(F_comp + F_EB)
flux = (flux + F_dilute)/(1 + F_dilute)
secdepth = 1 - (sec_flux + F_dilute)/(1 + F_dilute)
else:
flux = (flux + F_EB/F_target)/(1 + F_EB/F_target)
sec_flux = (sec_flux + F_target/F_EB)/(1 + F_target/F_EB)
F_dilute = F_comp/(F_target + F_EB)
flux = (flux + F_dilute)/(1 + F_dilute)
secdepth = 1 - (sec_flux + F_dilute)/(1 + F_dilute)
return flux, secdepth
def lnL_TP(time: np.ndarray, flux: np.ndarray, sigma: float, R_p: float,
P_orb: float, inc: float, a: float, R_s: float,
u1: float, u2: float, ecc: float, argp: float,
companion_fluxratio: float = 0.0,
companion_is_host: bool = False,
exptime: float = 0.00139,
nsamples: int = 20):
"""
Calculates the log likelihood of a transiting planet scenario by
comparing a simulated light curve and the TESS light curve.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
R_p (float): Planet radius [Earth radii].
P_orb (float): Orbital period [days].
inc (float): Orbital inclination [degrees].
a (float): Semimajor axis [cm].
R_s (float): Star radius [Solar radii].
u1 (float): 1st coefficient in quadratic limb darkening law.
u2 (float): 2nd coefficient in quadratic limb darkening law.
ecc (float): Orbital eccentricity.
argp (float): Argument of periastron [degrees].
companion_fluxratio (float): Proportion of flux provided by
the unresolved companion.
companion_is_host (bool): True if the transit is around the
unresolved companion and False if
it is not.
exptime (float): Exposure time of observations [days].
nsamples (int): Sampling rate for supersampling.
Returns:
Log likelihood (float).
"""
model = simulate_TP_transit(
time, R_p, P_orb, inc, a, R_s, u1, u2,
ecc, argp,
companion_fluxratio, companion_is_host,
exptime, nsamples
)
return 0.5*(np.sum((flux-model)**2 / sigma**2))
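# Note (added comment): despite the name, the value returned above is
# 0.5 * chi-squared, i.e. the negative log-likelihood up to an additive
# constant for Gaussian uncertainties, so smaller values correspond to more
# probable models. The same convention is used by the other lnL_* functions
# in this module.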
def lnL_EB(time: np.ndarray, flux: np.ndarray, sigma: float,
R_EB: float, EB_fluxratio: float, P_orb: float, inc: float,
a: float, R_s: float, u1: float, u2: float,
ecc: float, argp: float,
companion_fluxratio: float = 0.0,
companion_is_host: bool = False,
exptime: float = 0.00139,
nsamples: int = 20):
"""
Calculates the log likelihood of an eclipsing binary scenario with
q < 0.95 by comparing a simulated light curve and the
TESS light curve.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
R_EB (float): EB radius [Solar radii].
EB_fluxratio (float): F_EB / (F_EB + F_target).
P_orb (float): Orbital period [days].
inc (float): Orbital inclination [degrees].
a (float): Semimajor axis [cm].
R_s (float): Star radius [Solar radii].
u1 (float): 1st coefficient in quadratic limb darkening law.
u2 (float): 2nd coefficient in quadratic limb darkening law.
ecc (float): Orbital eccentricity.
argp (float): Argument of periastron [degrees].
companion_fluxratio (float): Proportion of flux provided by
the unresolved companion.
companion_is_host (bool): True if the transit is around the
unresolved companion and False if
it is not.
exptime (float): Exposure time of observations [days].
nsamples (int): Sampling rate for supersampling.
Returns:
Log likelihood (float).
"""
model, secdepth = simulate_EB_transit(
time, R_EB, EB_fluxratio, P_orb, inc, a, R_s, u1, u2,
ecc, argp,
companion_fluxratio, companion_is_host,
exptime, nsamples
)
if secdepth < 1.5*sigma:
return 0.5*(np.sum((flux-model)**2 / sigma**2))
else:
return np.inf
def lnL_EB_twin(time: np.ndarray, flux: np.ndarray, sigma: float,
R_EB: float, EB_fluxratio: float, P_orb: float,
inc: float, a: float, R_s: float, u1: float, u2: float,
ecc:float, argp: float,
companion_fluxratio: float = 0.0,
companion_is_host: bool = False,
exptime: float = 0.00139,
nsamples: int = 20):
"""
Calculates the log likelihood of an eclipsing binary scenario with
q >= 0.95 and twice the orbital period (2 x P_orb) by comparing a
simulated light curve and the TESS light curve.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
R_EB (float): EB radius [Solar radii].
EB_fluxratio (float): F_EB / (F_EB + F_target).
P_orb (float): Orbital period [days].
inc (float): Orbital inclination [degrees].
a (float): Semimajor axis [cm].
R_s (float): Star radius [Solar radii].
u1 (float): 1st coefficient in quadratic limb darkening law.
u2 (float): 2nd coefficient in quadratic limb darkening law.
ecc (float): Orbital eccentricity.
argp (float): Argument of periastron [degrees].
companion_fluxratio (float): Proportion of flux provided by
the unresolved companion.
companion_is_host (bool): True if the transit is around the
unresolved companion and False
if it is not.
exptime (float): Exposure time of observations [days].
nsamples (int): Sampling rate for supersampling.
Returns:
Log likelihood (float).
"""
model, secdepth = simulate_EB_transit(
time, R_EB, EB_fluxratio, P_orb, inc, a, R_s, u1, u2,
ecc, argp,
companion_fluxratio, companion_is_host,
exptime, nsamples
)
return 0.5*(np.sum((flux-model)**2 / sigma**2))
def simulate_TP_transit_p(time: np.ndarray, R_p: np.ndarray,
P_orb: float, inc: np.ndarray,
a: np.ndarray, R_s: np.ndarray,
u1: np.ndarray, u2: np.ndarray,
ecc: np.ndarray, argp: np.ndarray,
companion_fluxratio: np.ndarray,
companion_is_host: bool = False,
exptime: float = 0.00139,
nsamples: int = 20):
"""
Simulates a transiting planet light curve using PyTransit.
Calculates light curves in parallel.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
R_p (numpy array): Planet radius [Earth radii].
P_orb (float): Orbital period [days].
inc (numpy array): Orbital inclination [degrees].
a (numpy array): Semimajor axis [cm].
R_s (numpy array): Star radius [Solar radii].
u1 (numpy array): 1st coefficient in quadratic limb darkening law.
u2 (numpy array): 2nd coefficient in quadratic limb darkening law.
ecc (numpy array): Orbital eccentricity.
argp (numpy array): Argument of periastron [degrees].
companion_fluxratio (numpy array): Proportion of flux provided by
the unresolved companion.
companion_is_host (bool): True if the transit is around the
unresolved companion and False if
it is not.
exptime (float): Exposure time of observations [days].
nsamples (int): Sampling rate for supersampling.
Returns:
flux (numpy array): Flux for all simulated light curves.
"""
F_target = 1
F_comp = companion_fluxratio/(1-companion_fluxratio)
F_comp = F_comp.reshape(F_comp.shape[0], 1)
# step 1: simulate light curve assuming only the host star exists
k = R_p*Rearth/(R_s*Rsun)
t0 = np.full_like(k, 0.)
P_orb = np.full_like(k, P_orb)
a = a/(R_s*Rsun)
inc *= (pi/180.)
w = (90-argp)*(pi/180.)
pvp = np.array([k, t0, P_orb, a, inc, ecc, w]).T
ldc = np.array([u1, u2]).T
tm.set_data(time, exptimes=exptime, nsamples=nsamples)
flux = tm.evaluate_pv(pvp=pvp, ldc=ldc)
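# (added note) evaluate_pv evaluates the whole population of parameter vectors
# in one vectorized call: each row of pvp is one set of
# (k, t0, P, a/R_s, i, e, w), and the result has one model light curve per row.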
# step 2: adjust the light curve to account for flux dilution
# from non-host star
if companion_is_host:
F_dilute = F_target / F_comp
flux = (flux + F_dilute)/(1 + F_dilute)
else:
F_dilute = F_comp / F_target
flux = (flux + F_dilute)/(1 + F_dilute)
return flux
def simulate_EB_transit_p(time: np.ndarray, R_EB: np.ndarray,
EB_fluxratio: np.ndarray,
P_orb: float, inc: np.ndarray,
a: np.ndarray, R_s: np.ndarray,
u1: np.ndarray, u2: np.ndarray,
ecc: np.ndarray, argp: np.ndarray,
companion_fluxratio: np.ndarray,
companion_is_host: bool = False,
exptime: float = 0.00139,
nsamples: int = 20):
"""
Simulates an eclipsing binary light curve using PyTransit.
Calculates light curves in parallel.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
R_EB (numpy array): EB radius [Solar radii].
EB_fluxratio (numpy array): F_EB / (F_EB + F_target).
P_orb (float): Orbital period [days].
inc (numpy array): Orbital inclination [degrees].
a (numpy array): Semimajor axis [cm].
R_s (numpy array): Star radius [Solar radii].
u1 (numpy array): 1st coefficient in quadratic limb darkening law.
u2 (numpy array): 2nd coefficient in quadratic limb darkening law.
ecc (numpy array): Orbital eccentricity.
argp (numpy array): Argument of periastron [degrees].
companion_fluxratio (numpy array): F_comp / (F_comp + F_target).
companion_is_host (bool): True if the transit is around the
unresolved companion and False if it
is not.
exptime (float): Exposure time of observations [days].
nsamples (int): Sampling rate for supersampling.
Returns:
flux (numpy array): Flux for all simulated light curves.
sec_depth (numpy array): Max secondary depth for all simulated
light curves.
"""
F_target = 1
F_comp = companion_fluxratio/(1 - companion_fluxratio)
F_comp = F_comp.reshape(F_comp.shape[0], 1)
F_EB = EB_fluxratio/(1 - EB_fluxratio)
F_EB = F_EB.reshape(F_EB.shape[0], 1)
# step 1: simulate light curve assuming only the host star exists
# calculate primary eclipse
k = R_EB/R_s
k[np.abs(k - 1.0) < 1e-6] *= 0.999  # avoid a radius ratio of exactly 1, as in the scalar version above
t0 = np.full_like(k, 0.)
P_orb = np.full_like(k, P_orb)
a = a/(R_s*Rsun)
inc *= (pi/180.)
w = (90-argp)*(pi/180.)
pvp = np.array([k, t0, P_orb, a, inc, ecc, w]).T
ldc = np.array([u1, u2]).T
tm.set_data(time, exptimes=exptime, nsamples=nsamples)
flux = tm.evaluate_pv(pvp=pvp, ldc=ldc)
# calculate secondary eclipse depth
k = R_s/R_EB
k[np.abs(k - 1.0) < 1e-6] *= 0.999
w = (90-argp+180)*(pi/180.)
pvp = np.array([k, t0, P_orb, a, inc, ecc, w]).T
tm_sec.set_data(np.linspace(-0.05,0.05,25))
sec_flux = tm_sec.evaluate_pv(pvp=pvp, ldc=ldc)
sec_flux = np.min(sec_flux, axis=1)
sec_flux = sec_flux.reshape(sec_flux.shape[0], 1)
# step 2: adjust the light curve to account for flux dilution
# from EB and non-host star
if companion_is_host:
flux = (flux + F_EB/F_comp)/(1 + F_EB/F_comp)
sec_flux = (sec_flux + F_comp/F_EB)/(1 + F_comp/F_EB)
F_dilute = F_target/(F_comp + F_EB)
flux = (flux + F_dilute)/(1 + F_dilute)
secdepth = 1 - (sec_flux + F_dilute)/(1 + F_dilute)
else:
flux = (flux + F_EB/F_target)/(1 + F_EB/F_target)
sec_flux = (sec_flux + F_target/F_EB)/(1 + F_target/F_EB)
F_dilute = F_comp/(F_target + F_EB)
flux = (flux + F_dilute)/(1 + F_dilute)
secdepth = 1 - (sec_flux + F_dilute)/(1 + F_dilute)
return flux, secdepth
def lnL_TP_p(time: np.ndarray, flux: np.ndarray, sigma: float,
R_p: np.ndarray, P_orb: float, inc: np.ndarray,
a: np.ndarray, R_s: np.ndarray,
u1: np.ndarray, u2: np.ndarray,
ecc: np.ndarray, argp: np.ndarray,
companion_fluxratio: np.ndarray,
companion_is_host: bool = False,
exptime: float = 0.00139,
nsamples: int = 20):
"""
Calculates the log likelihood of a transiting planet scenario by
comparing a simulated light curve and the TESS light curve.
Calculates light curves in parallel.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
R_p (numpy array): Planet radius [Earth radii].
P_orb (float): Orbital period [days].
inc (numpy array): Orbital inclination [degrees].
a (numpy array): Semimajor axis [cm].
R_s (numpy array): Star radius [Solar radii].
u1 (numpy array): 1st coefficient in quadratic limb darkening law.
u2 (numpy array): 2nd coefficient in quadratic limb darkening law.
ecc (numpy array): Orbital eccentricity.
argp (numpy array): Argument of periastron [degrees].
companion_fluxratio (numpy array): Proportion of flux provided by
the unresolved companion.
companion_is_host (bool): True if the transit is around the
unresolved companion and False if
it is not.
exptime (float): Exposure time of observations [days].
nsamples (int): Sampling rate for supersampling.
Returns:
lnL (numpy array): Log likelihood.
"""
model = simulate_TP_transit_p(
time, R_p, P_orb, inc, a, R_s, u1, u2,
ecc, argp,
companion_fluxratio, companion_is_host,
exptime, nsamples
)
lnL = 0.5*(np.sum((flux-model)**2 / sigma**2, axis=1))
return lnL
def lnL_EB_p(time: np.ndarray, flux: np.ndarray, sigma: float,
R_EB: np.ndarray, EB_fluxratio: np.ndarray,
P_orb: float, inc: np.ndarray,
a: np.ndarray, R_s: np.ndarray,
u1: np.ndarray, u2: np.ndarray,
ecc: np.ndarray, argp: np.ndarray,
companion_fluxratio: np.ndarray,
companion_is_host: bool = False,
exptime: float = 0.00139,
nsamples: int = 20):
"""
Calculates the log likelihood of an eclipsing binary scenario with
q < 0.95 by comparing a simulated light curve and the
TESS light curve. Calculates light curves in parallel.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
R_EB (numpy array): EB radius [Solar radii].
EB_fluxratio (numpy array): F_EB / (F_EB + F_target).
P_orb (float): Orbital period [days].
inc (numpy array): Orbital inclination [degrees].
a (numpy array): Semimajor axis [cm].
R_s (numpy array): Star radius [Solar radii].
u1 (numpy array): 1st coefficient in quadratic limb darkening law.
u2 (numpy array): 2nd coefficient in quadratic limb darkening law.
ecc (numpy array): Orbital eccentricity.
argp (numpy array): Argument of periastron [degrees].
companion_fluxratio (numpy array): F_comp / (F_comp + F_target).
companion_is_host (bool): True if the transit is around the
unresolved companion and False if
it is not.
exptime (float): Exposure time of observations [days].
nsamples (int): Sampling rate for supersampling.
Returns:
lnL (numpy array): Log likelihood.
"""
model, secdepth = simulate_EB_transit_p(
time, R_EB, EB_fluxratio, P_orb, inc, a, R_s, u1, u2,
ecc, argp,
companion_fluxratio, companion_is_host,
exptime, nsamples
)
lnL = np.zeros(R_EB.shape[0])
mask = (secdepth < 1.5*sigma)
mask = mask[:,0]
lnL[mask] = 0.5*(np.sum((flux-model[mask])**2 / sigma**2, axis=1))
lnL[~mask] = np.inf
return lnL
def lnL_EB_twin_p(time: np.ndarray, flux: np.ndarray, sigma: float,
R_EB: np.ndarray, EB_fluxratio: np.ndarray,
P_orb: float, inc: np.ndarray,
a: np.ndarray, R_s: np.ndarray,
u1: np.ndarray, u2: np.ndarray,
ecc: np.ndarray, argp: np.ndarray,
companion_fluxratio: np.ndarray,
companion_is_host: bool = False,
exptime: float = 0.00139,
nsamples: int = 20):
"""
Calculates the log likelihood of an eclipsing binary scenario with
q >= 0.95 and twice the orbital period (2 x P_orb) by comparing a
simulated light curve and the TESS light curve. Calculates light
curves in parallel.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
R_EB (numpy array): EB radius [Solar radii].
EB_fluxratio (numpy array): F_EB / (F_EB + F_target).
P_orb (float): Orbital period [days].
inc (numpy array): Orbital inclination [degrees].
a (numpy array): Semimajor axis [cm].
R_s (numpy array): Star radius [Solar radii].
u1 (numpy array): 1st coefficient in quadratic limb darkening law.
u2 (numpy array): 2nd coefficient in quadratic limb darkening law.
ecc (numpy array): Orbital eccentricity.
argp (numpy array): Argument of periastron [degrees].
companion_fluxratio (numpy array): F_comp / (F_comp + F_target).
companion_is_host (bool): True if the transit is around the
unresolved companion and False
if it is not.
exptime (float): Exposure time of observations [days].
nsamples (int): Sampling rate for supersampling.
Returns:
lnL (numpy array): Log likelihood.
"""
model, secdepth = simulate_EB_transit_p(
time, R_EB, EB_fluxratio, P_orb, inc, a, R_s, u1, u2,
ecc, argp,
companion_fluxratio, companion_is_host,
exptime, nsamples
)
lnL = 0.5*(np.sum((flux-model)**2 / sigma**2, axis=1))
return lnL
|
stevengiacaloneREPO_NAMEtriceratopsPATH_START.@triceratops_extracted@triceratops-master@[email protected]@.PATH_END.py
|
{
"filename": "plotmodel.py",
"repo_name": "valboz/RTModel",
"repo_path": "RTModel_extracted/RTModel-main/RTModel/plotmodel/plotmodel.py",
"type": "Python"
}
|
import VBMicrolensing
import math
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
import matplotlib.animation as animation
from matplotlib.patches import Circle
import os
import numpy as np
import shutil
from PIL import Image
from tqdm import tqdm
import sys
import inspect
import glob
class plotmodel:
def __init__(self, eventname,model = '', tmin = '', tmax = '', referencephot = 0, timesteps = 300, \
modelfile = None, parameters = [], line = 0,printpars = True, animate = False,interval = 1000, satellitedir = '.', accuracy = 0.01):
self.satellitedir = satellitedir
self.parameters = parameters
filin=inspect.getfile(VBMicrolensing)
self.filout= os.path.dirname(filin) + '/data/ESPL.tbl'
self.eventname = eventname
self.model = model
self.tmin = tmin
self.tmax = tmax
self.timesteps = timesteps
self.referencephot = referencephot
self.modelfile = modelfile
self.line = line
self.printpars = printpars
self.animate = animate
if(self.model == ''):
if(modelfile == None):
print('Please specify a model or a modelfile')
return
self.model = os.path.basename(self.modelfile)
if(modelfile == None and parameters == []):
print('Please specify model parameters')
return
self.vbm = VBMicrolensing.VBMicrolensing()
self.vbm.Tol = accuracy
self.vbm.SetMethod(VBMicrolensing.VBMicrolensing.Multipoly)
# General information on models
self.modelcodes= ['PS','PX','BS','BO','LS','LX','LO','LK','TS','TX']
self.npars=[4,6,7,10,7,9,12,14,10,12]
self.logposs=[[0,1,3],
[1,3],
[0,1,6],
[2,3,9],
[0,1,4,5],
[0,1,4,5],
[0,1,4,5],
[0,1,4,5],
[0,1,4,5,7,8],
[0,1,4,5,7,8]]
self.parnames = [['u0','tE','t0','rho'],
['u0','tE','t0','rho','piN','piE'],
['tE','FR','u01','u02','t0','t02','rho'],
['u0','t0','tE','rho','xi1','xi2','om','inc','phi','qs'],
['s','q','u0','alpha','rho','tE','t0'],
['s','q','u0','alpha','rho','tE','t0','piN','piE'],
['s','q','u0','alpha','rho','tE','t0','piN','piE','gamma1','gamma2','gammaz'],
['s','q','u0','alpha','rho','tE','t0','piN','piE','gamma1','gamma2','gammaz','sz_s','a_s3d'],
['s','q','u0','alpha','rho','tE','t0','s2','q2','beta'],
['s','q','u0','alpha','rho','tE','t0','s2','q2','beta','piN','piE']]
self.colors = ['blue','red','green','darkorange','magenta','cyan','gray','teal','maroon','gold','lime','darkviolet']
self.sourcecolor = 'pink'
self.causticcolor = (0.5,0.5,0.5)
self.satellitecolors = ['k-','r-','g-','b-','y-']
self.legendlocation = 'best'
self.parstring = ''
if(animate):
self.animateplots(interval = interval)
else:
self.readdata()
self.readparameters()
self.calculate()
self.showall()
# Reading data from LCToFit.txt
def readdata(self):
if(self.eventname== None):
self.lightcurves = []
self.telescopes = []
self.nfil = 0
self.npoints = 0
else:
os.chdir(self.eventname)
with open('LCToFit.txt') as f:
self.npoints=int(f.readline())
ofil=0
data=[[]]
while True:
line=f.readline()
if(line == ''):
break
chunks=line.split(' ')
self.nfil=int(chunks[0])
if(self.nfil != ofil):
data.append([])
ofil=self.nfil
data[self.nfil].append([float(chunks[1]),float(chunks[2]),float(chunks[3]),int(chunks[4])])
self.nfil +=1
self.lightcurves = [ [np.array([dl[0] for dl in d]),np.array([dl[1] for dl in d]),np.array([dl[2] for dl in d]),d[0][3]] for d in data]
with open('FilterToData.txt') as f:
self.telescopes = f.readlines()
for i in range(0,self.nfil):
self.telescopes[i] = self.telescopes[i][0:self.telescopes[i].index('.')]
self.telescopes = [tel.split('_')[0] for tel in self.telescopes]
while(len(self.colors)<self.nfil):
self.colors.extend(self.colors)
# Reading model parameters
def readparameters(self):
self.modnumber = self.modelcodes.index(self.model[0:2])
if(self.eventname != None):
os.chdir(self.eventname)
if(self.parameters == []):
with open(self.modelfile) as f:
lines = f.readlines()
self.maxlines = len(lines)
if(self.line>=self.maxlines):
return False
else:
line = lines[self.line]
chunks = line.split(' ')
values = [float(v) for v in chunks]
self.pars = values[0:self.npars[self.modnumber]]
self.parsprint=self.pars[:]
for i in self.logposs[self.modnumber]:
self.pars[i] = math.log(self.pars[i])
self.blends = np.array([values[self.npars[self.modnumber]+i*2] for i in range(0,self.nfil)])
self.sources = np.array([values[self.npars[self.modnumber]+i*2+1] for i in range(0,self.nfil)])
self.chi2=values[-1]
#Errors
if(not self.animate):
line = lines[self.line +1 ]
chunks = line.split(' ')
values = [float(v) for v in chunks]
self.parerrs = values[0:self.npars[self.modnumber]]
self.blenderrs = np.array([values[self.npars[self.modnumber]+i*2] for i in range(0,self.nfil)])
self.sourceerrs = np.array([values[self.npars[self.modnumber]+i*2+1] for i in range(0,self.nfil)])
return True
else:
self.pars = self.parameters[0:self.npars[self.modnumber]]
self.parsprint=self.pars[:]
for i in self.logposs[self.modnumber]:
self.pars[i] = math.log(self.pars[i])
self.blends = []
self.sources = []
self.chi2 = 0
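# (added note) The loop below fits the source and blend fluxes analytically for
# each dataset: modelling the data as y = source*f + blend with weights 1/err^2,
# the weighted normal equations are solved in closed form (p1 is minus their
# determinant, padded with 1.0e-50 to avoid division by zero), and the resulting
# chi2 is accumulated over all datasets.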
for i in range(0,self.nfil):
lc0 = self.lightcurves[i]
lcarr = np.array(lc0[0:3])
lctran=np.transpose(lcarr)
lc = np.transpose(lctran)
self.t = lc[0]
self.vbm.satellite = lc0[3]
self.lightcurve()
f = np.array(self.results[0])
sumf = (f/(lc[2]*lc[2])).sum()
sumf2 = (f*f/(lc[2]*lc[2])).sum()
sumsigma =(1/(lc[2]*lc[2])).sum()
sumy = (lc[1]/(lc[2]*lc[2])).sum()
sumfy = (f*lc[1]/(lc[2]*lc[2])).sum()
sumy2 = (lc[1]*lc[1]/(lc[2]*lc[2])).sum()
p1 = sumf*sumf - sumf2*sumsigma + 1.0e-50
self.blends.append((sumf*sumfy-sumf2*sumy)/p1)
self.sources.append((sumf*sumy-sumsigma*sumfy)/p1)
self.chi2 += sumy2+(sumfy*sumfy*sumsigma+sumf2*sumy*sumy-2*sumf*sumy*sumfy)/p1
def lightcurve(self):
if(self.modnumber < 4):
self.vbm.LoadESPLTable(self.filout)
if(self.modnumber == 1 or self.modnumber > 4):
self.vbm.SetObjectCoordinates(glob.glob('Data/*.coordinates')[0],self.satellitedir)
self.vbm.parallaxsystem = 1
if(self.modnumber == 0):
self.results = self.vbm.ESPLLightCurve(self.pars,self.t)
elif(self.modnumber == 1):
self.results = self.vbm.ESPLLightCurveParallax(self.pars,self.t)
elif(self.modnumber == 2):
self.results = self.vbm.BinSourceExtLightCurve(self.pars,self.t)
elif(self.modnumber == 3):
self.results = self.vbm.BinSourceSingleLensXallarap(self.pars,self.t)
elif(self.modnumber == 4):
self.results = self.vbm.BinaryLightCurve(self.pars,self.t)
elif(self.modnumber == 5):
self.results = self.vbm.BinaryLightCurveParallax(self.pars,self.t)
elif(self.modnumber == 6):
self.results = self.vbm.BinaryLightCurveOrbital(self.pars,self.t)
elif(self.modnumber == 7):
self.results = self.vbm.BinaryLightCurveKepler(self.pars,self.t)
elif(self.modnumber == 8):
self.results = self.vbm.TripleLightCurve(self.pars,self.t)
elif(self.modnumber == 9):
self.results = self.vbm.TripleLightCurveParallax(self.pars,self.t)
def calculate(self):
# Light curve calculation
t0i = self.parnames[self.modnumber].index('t0')
tEi = self.parnames[self.modnumber].index('tE')
rhoi = self.parnames[self.modnumber].index('rho')
self.rho = self.parsprint[rhoi]
if(self.tmin == ''):
self.tmin = self.pars[t0i]-2*self.parsprint[tEi]
if(self.tmax == ''):
self.tmax = self.pars[t0i]+2*self.parsprint[tEi]
self.lctimes=[]
self.lcmags=[]
self.lcerrs=[]
self.satellites = []
for i in range(0,self.nfil):
lc0 = self.lightcurves[i]
lcarr = np.array(lc0[0:3])
lctran=np.transpose(lcarr)
lcsel = [x for x in lctran if(x[0]<self.tmax and x[0]>self.tmin and (x[1]-self.blends[i])/self.sources[i]*self.sources[self.referencephot] +self.blends[self.referencephot]>0)]
lc = np.transpose(lcsel)
if(len(lc)>0):
self.lctimes.append(lc[0])
self.lcmags.append(np.array([-2.5*math.log10((y-self.blends[i])/self.sources[i]*self.sources[self.referencephot]+self.blends[self.referencephot]) for y in lc[1]]))
self.lcerrs.append(lc[2]/lc[1]*2.5/math.log(10.0))
else:
self.lctimes.append(np.array([]))
self.lcmags.append(np.array([]))
self.lcerrs.append(np.array([]))
self.satellites.append(lc0[3])
self.t0 = np.linspace(self.tmin,self.tmax,self.timesteps)
self.usedsatellites = list(set(self.satellites))
if(self.eventname == None):
self.usedsatellites = [0]
self.referencephot = 0
self.sources = [1]
self.blends = [0]
while(len(self.satellitecolors)<len(self.usedsatellites)):
self.satellitecolors.extend(self.satellitecolors)
self.minmag = 1000
self.maxmag = -1000
self.maxy1 = -1000
self.maxy2 = -1000
self.miny1 = 1000
self.miny2 = 1000
self.magnifications = []
self.trajectories = []
for satellite in self.usedsatellites:
self.t =self.t0[:]
for i in range(self.nfil):
if(self.satellites[i] == satellite):
self.t = np.concatenate((self.t,self.lctimes[i]))
self.t = np.sort(self.t)
self.vbm.satellite = satellite
self.lightcurve()
self.mags = [-2.5*math.log10(self.sources[self.referencephot]*yi+self.blends[self.referencephot]) for yi in self.results[0]]
self.y1 = self.results[1]
self.y2 = self.results[2]
self.magnifications.append([self.t,self.mags])
self.trajectories.append([self.y1,self.y2])
minmag = min(self.mags)
maxmag = max(self.mags)
self.minmag = min(self.minmag, minmag)
self.maxmag = max(self.maxmag, maxmag)
miny = min(self.y1)
maxy = max(self.y1)
self.miny1 = min(self.miny1, miny)
self.maxy1 = max(self.maxy1, maxy)
miny = min(self.y2)
maxy = max(self.y2)
self.miny2 = min(self.miny2, miny)
self.maxy2 = max(self.maxy2, maxy)
margin = (self.maxmag-self.minmag)*0.1
self.minmag -= margin
self.maxmag += margin
self.lcress = []
for i in range(self.nfil):
ress = []
isat = np.where(np.array(self.usedsatellites) == self.satellites[i])[0][0]
for j in range(0,len(self.lctimes[i])):
ip = np.where(self.magnifications[isat][0] == self.lctimes[i][j])[0][0]
ress.append(self.magnifications[isat][1][ip]-self.lcmags[i][j])
self.lcress.append(ress)
# Caustic plot preparation
self.rancau = max([self.maxy1-self.miny1,self.maxy2 - self.miny2])
if(self.modnumber>7):
self.caus = self.vbm.Multicaustics()
elif(self.modnumber>3):
self.caus = self.vbm.Caustics(self.parsprint[0],self.parsprint[1])
else:
self.caus = np.array([[[0,1,0,-1,0],[1,0,-1,0,1]]])*self.rancau*0.001
# tE=parsprint[parnames[modnumber].index('tE')]
def printparameters(self):
self.parstring = 'Parameters\n'
for i in range(self.npars[self.modnumber]):
if((not self.animate) and len(self.parameters)==0):
self.parstring = self.parstring + self.parnames[self.modnumber][i] + ' = ' + self.approx(i) + '\n' #+ str(self.parsprint[i]) + ' +- ' + str(self.parerrs[i]) + '\n'
else:
self.parstring = self.parstring + self.parnames[self.modnumber][i] + ' = ' + str(self.parsprint[i]) + '\n'
self.parstring = self.parstring + '\n'
self.parstring = self.parstring + 'blending = ' + str(np.array(self.blends)/np.array(self.sources)) + '\n'
self.parstring = self.parstring + 'baseline = ' + str(-2.5*np.log10(np.array(self.blends)+np.array(self.sources))) + '\n'
self.parstring = self.parstring + 'chi2 =' + str(self.chi2)
print(self.parstring)
def fexp(self, f):
return int(math.floor(math.log10(abs(f)))) if f != 0 else 0
def approx(self, i):
exerr= self.fexp(self.parerrs[i])-1
return f'{self.parsprint[i]:.{max(0,-exerr+3)}f}' + ' +- ' + f'{self.parerrs[i]:.{max(0,-exerr)}f}'
def axeslightcurve(self,ax):
for i in range(0,self.nfil):
ax.errorbar(self.lctimes[i],self.lcmags[i],yerr=self.lcerrs[i],color=self.colors[i],fmt='.',label=self.telescopes[i])
for i in range(len(self.usedsatellites)):
ax.plot(self.magnifications[i][0],self.magnifications[i][1],self.satellitecolors[i],linewidth=0.5)
ax.set_ylabel('mag')
ax.set_ylim([self.maxmag,self.minmag])
ax.set_xlim([self.tmin,self.tmax])
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
if(self.eventname != None):
ax.legend(loc=self.legendlocation)
def axesresiduals(self,ax):
ax.plot((self.tmin,self.tmax),(0,0),'k-',linewidth=0.5)
for i in range(0,self.nfil):
ax.errorbar(self.lctimes[i],self.lcress[i],yerr=self.lcerrs[i],color=self.colors[i],fmt='.')
ax.set_xlabel('t')
ax.set_ylabel('Res')
ax.set_ylim([-0.1,0.1])
ax.set_xlim([self.tmin,self.tmax])
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
def showlightcurve(self):
plt.figure()
fig, axs =plt.subplots(2,1,figsize=(12,6), gridspec_kw={'height_ratios': [5, 1]},sharex='col')
self.axeslightcurve(axs[0])
self.axesresiduals(axs[1])
plt.subplots_adjust(hspace=0.1)
self.figure = fig
def showicon(self,outputfile):
plt.figure()
fig, ax =plt.subplots(1,1,figsize=(6,6))
for i in range(0,self.nfil):
ax.errorbar(self.lctimes[i],self.lcmags[i],yerr=self.lcerrs[i],color=self.colors[i],fmt='.',label=self.telescopes[i])
for i in range(len(self.usedsatellites)):
ax.plot(self.magnifications[i][0],self.magnifications[i][1],self.satellitecolors[i],linewidth=2.5)
ax.set_ylim([self.maxmag,self.minmag])
ax.set_xlim([self.tmin,self.tmax])
ax.tick_params(left = False, right = False , labelleft = False ,
labelbottom = False, bottom = False)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(6) # change width
plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
self.figure = fig
self.figure.savefig(outputfile)
plt.close()
def showcaustics(self):
plt.figure()
fig, ax =plt.subplots(figsize=[5,5])
self.axescaustics(ax)
self.figure = fig
def axescaustics(self,ax):
for cau in self.caus:
ax.plot(cau[0],cau[1],color = self.causticcolor)
ax.set_xlabel('y1')
ax.set_ylabel('y2')
ax.set_xlim([(self.miny1+self.maxy1-self.rancau)/2,(self.miny1+self.maxy1+self.rancau)/2])
ax.set_ylim([(self.miny2+self.maxy2-self.rancau)/2,(self.miny2+self.maxy2+self.rancau)/2])
for i in range(len(self.usedsatellites)):
ax.plot(self.trajectories[i][0],self.trajectories[i][1],self.satellitecolors[i])
n = len(self.trajectories[i][0])
arrlength = self.rancau*0.01
i0 = n-10
dir = np.array([self.trajectories[i][0][i0+1]-self.trajectories[i][0][i0],self.trajectories[i][1][i0+1]-self.trajectories[i][1][i0]])
dir = dir/(math.sqrt(dir.dot(dir)))
dirort = np.array([-dir[1], dir[0]])
p0 = np.array([self.trajectories[i][0][i0],self.trajectories[i][1][i0]])
p1 = (-dir +dirort)*arrlength + p0
p2 = (-dir -dirort)*arrlength + p0
ax.plot((p0[0],p1[0]),(p0[1],p1[1]),self.satellitecolors[i])
ax.plot((p0[0],p2[0]),(p0[1],p2[1]),self.satellitecolors[i])
i0 = 20
dir = np.array([self.trajectories[i][0][i0+1]-self.trajectories[i][0][i0],self.trajectories[i][1][i0+1]-self.trajectories[i][1][i0]])
dir = dir/(math.sqrt(dir.dot(dir)))
dirort = np.array([-dir[1], dir[0]])
p0 = np.array([self.trajectories[i][0][i0],self.trajectories[i][1][i0]])
p1 = (-dir +dirort)*arrlength + p0
p2 = (-dir -dirort)*arrlength + p0
ax.plot((p0[0],p1[0]),(p0[1],p1[1]),self.satellitecolors[i])
ax.plot((p0[0],p2[0]),(p0[1],p2[1]),self.satellitecolors[i])
i0 = int(n*0.5)
p0 = np.array([self.trajectories[i][0][i0],self.trajectories[i][1][i0]])
ax.add_patch(Circle(p0,self.rho,color = self.sourcecolor))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
def showall(self):
if(self.eventname != None):
fig, axes = plt.subplot_mosaic([['tl','right'],['bl','right']],figsize=(12,5.5), gridspec_kw={'height_ratios': [5, 1]})
self.axesresiduals(axes['bl'])
axes['tl'].xaxis.set_ticklabels([])
fig.suptitle(self.model);
else:
fig, axes = plt.subplot_mosaic([['tl','right']],figsize=(12,5.5))
self.axeslightcurve(axes['tl'])
self.axescaustics(axes['right'])
plt.subplots_adjust(hspace=0.1)
plt.show()
self.figure = fig
if(self.printpars):
self.printparameters()
def update(self, frame):
self.im.set_array(self.images_array[frame])
return self.im,
def animateplots(self, interval = 1000):
if(os.path.exists('tmpsteps')):
shutil.rmtree('tmpsteps')
os.mkdir('tmpsteps')
os.chdir('tmpsteps')
dirocco = os.getcwd()
self.line = 0
self.readdata()
with open(self.modelfile) as f:
lines = f.readlines()
self.maxlines = len(lines)
pbar = tqdm(total = self.maxlines,desc = 'Frames',file=sys.stdout, colour='GREEN', smoothing = 0)
for frame in range(self.maxlines):
self.line = frame
self.readparameters()
self.fig, self.axes = plt.subplot_mosaic([['tl','right'],['bl','right']],figsize=(12,6), gridspec_kw={'height_ratios': [5, 1]})
self.fig.suptitle(self.model + ' - frame: ' + str(self.line));
self.calculate()
self.axeslightcurve(self.axes['tl'])
self.axes['tl'].xaxis.set_ticklabels([])
self.axesresiduals(self.axes['bl'])
self.axescaustics(self.axes['right'])
_ = plt.subplots_adjust(hspace=0.1)
os.chdir(dirocco)
plt.savefig(str(self.line) + '.png', bbox_inches = 'tight', dpi = 300)
plt.close(self.fig)
pbar.update(1)
pbar.close()
self.images_array = []
for i in range(self.maxlines):
image = Image.open(str(i) + '.png')
self.images_array.append(image)
plt.close(self.fig)
self.fig, ax = plt.subplots()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
self.fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
self.im = ax.imshow(self.images_array[0], animated=True)
self.animation_fig = animation.FuncAnimation(self.fig, self.update, frames=len(self.images_array), interval=interval, blit=True,repeat = False)
os.chdir(dirocco)
os.chdir('..')
self.animation_fig.save('ani.gif',dpi = 150)
plt.close(self.fig)
def plotchain(eventname, model, par1, par2):
chains = []
filenames = glob.glob(eventname+ '/PreModels/' + model + '/*step*')
for fil in filenames:
with open(fil) as f:
chainstrings = f.readlines()
chainlist = []
for st in chainstrings:
chunks = st.split(' ')
chainlist.append([float(v) for v in chunks])
chain = np.array(chainlist)
chains.append(chain)
colors = ['blue','red','green','darkorange','magenta','cyan','gray','teal','maroon','gold','lime','darkviolet']
while(len(colors)<len(chains)):
colors.extend(colors)
fig, ax = plt.subplots()
# ax.set_xlabel('s')
# ax.set_ylabel('q')
for i in range(len(filenames)):
chain = chains[i]
x = chain.transpose()[par1]
y = chain.transpose()[par2]
ax.plot(x,y,color = colors[i])
ax.scatter(x[-1], y[-1],s=20,color = colors[i])
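# Illustrative usage (added note; the event path and model code are hypothetical):
# plotchain('/path/to/event001', 'LS0001', 3, 4)
# plots the fit chains stored under PreModels/<model>/ projected onto the
# parameters with indices 3 and 4, one colour per chain file.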
|
valbozREPO_NAMERTModelPATH_START.@RTModel_extracted@RTModel-main@RTModel@[email protected]@.PATH_END.py
|
{
"filename": "test_geodesic.py",
"repo_name": "einsteinpy/einsteinpy",
"repo_path": "einsteinpy_extracted/einsteinpy-main/tests/test_geodesic/test_geodesic.py",
"type": "Python"
}
|
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose
from einsteinpy.geodesic import Geodesic, Nulllike, Timelike
from einsteinpy.geodesic.utils import _kerr
@pytest.fixture()
def dummy_timegeod():
"""
Equatorial Capture
"""
return Timelike(
metric="Kerr",
metric_params=(0.9,),
position=[2.15, np.pi / 2, 0.],
momentum=[0., 0., 1.5],
steps=50,
delta=0.5,
omega=0.01, # Close orbit
return_cartesian=True,
suppress_warnings=True,
)
@pytest.fixture()
def dummy_nullgeod():
"""
Equatorial Geodesic
"""
return Nulllike(
metric="Kerr",
metric_params=(0.5,),
position=[4., np.pi / 2, 0.],
momentum=[0., 0., 2.],
steps=50,
delta=0.5,
return_cartesian=False,
suppress_warnings=True,
)
def test_str_repr(dummy_timegeod):
geod = dummy_timegeod
assert str(geod) == repr(geod)
def test_NotImplementedError():
try:
geod = Nulllike(
metric="Ker",
metric_params=(0.9,),
position=[2.5, np.pi / 2, 0.],
momentum=[0., 0., -8.5],
)
assert False
except NotImplementedError:
assert True
def test_geodesic_attributes1(dummy_timegeod):
geod = dummy_timegeod
traj = geod.trajectory
assert traj
assert isinstance(traj, tuple)
assert isinstance(traj[0], np.ndarray)
def test_geodesic_attributes2(dummy_timegeod):
geod = dummy_timegeod
traj = geod.trajectory
assert isinstance(traj[1], np.ndarray)
assert traj[0].shape[0] == traj[1].shape[0]
assert traj[1].shape[1] == 8
def test_constant_angular_momentum(dummy_nullgeod):
L = dummy_nullgeod.momentum[-1]
assert_allclose(dummy_nullgeod.trajectory[1][:, -1], L, atol=1e-4, rtol=1e-4)
def test_equatorial_geodesic(dummy_nullgeod):
theta = dummy_nullgeod.position[2]
assert_allclose(dummy_nullgeod.trajectory[1][:, 2], theta, atol=1e-6, rtol=1e-6)
def test_constant_rad():
geod = Timelike(
metric="Kerr",
metric_params=(0.99,),
position=[4., np.pi / 3, 0.],
momentum=[0., 0.767851, 2.],
return_cartesian=False,
steps=50,
delta=1.,
)
r = geod.trajectory[1][:, 1]
assert_allclose(r, 4., atol=1e-2, rtol=1e-2)
def test_kerr0_eq_sch():
metric_params = (0.,)
q0 = [4., np.pi / 2, 0.]
p0 = [0., 0., 0.]
k = Timelike(
metric="Kerr",
metric_params=metric_params,
position=q0,
momentum=p0,
steps=50,
delta=0.5,
return_cartesian=True,
suppress_warnings=True,
)
s = Timelike(
metric="Schwarzschild",
metric_params=metric_params,
position=q0,
momentum=p0,
steps=50,
delta=0.5,
return_cartesian=True,
suppress_warnings=True,
)
assert_allclose(k.trajectory[0], s.trajectory[0], atol=1e-6, rtol=1e-6)
assert_allclose(k.trajectory[1], s.trajectory[1], atol=1e-6, rtol=1e-6)
def test_kerr0_eq_kn00():
metric_params = (0.5, 0.)
q0 = [2.5, np.pi / 2, 0.]
p0 = [0., 0., -8.5]
k = Timelike(
metric="Kerr",
metric_params=metric_params,
position=q0,
momentum=p0,
steps=50,
delta=0.5,
return_cartesian=True,
suppress_warnings=True,
)
kn = Timelike(
metric="KerrNewman",
metric_params=metric_params,
position=q0,
momentum=p0,
steps=50,
delta=0.5,
return_cartesian=True,
suppress_warnings=True,
)
assert_allclose(k.trajectory[0], kn.trajectory[0], atol=1e-6, rtol=1e-6)
assert_allclose(k.trajectory[1], kn.trajectory[1], atol=1e-6, rtol=1e-6)
def test_custom_callable_metric():
metric_params = (0.,)
q0 = [4., np.pi / 2, 0.]
p0 = [0., 0., 0.]
try:
c = Timelike(
metric=_kerr,
metric_params=metric_params,
position=q0,
momentum=p0,
steps=50,
delta=0.5,
return_cartesian=True,
suppress_warnings=True,
)
assert c.metric_name == _kerr.__name__
assert c.metric == _kerr
except:
assert False
|
einsteinpyREPO_NAMEeinsteinpyPATH_START.@einsteinpy_extracted@einsteinpy-main@tests@test_geodesic@[email protected]_END.py
|
{
"filename": "infeed_test.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/tests/infeed_test.py",
"type": "Python"
}
|
# Copyright 2019 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from unittest import SkipTest
from absl.testing import absltest
import jax
from jax import lax, numpy as jnp
from jax._src import core
from jax._src import xla_bridge
from jax._src.lib import xla_client
import jax._src.test_util as jtu
import numpy as np
jax.config.parse_flags_with_absl()
class InfeedTest(jtu.JaxTestCase):
def setUp(self):
if xla_bridge.using_pjrt_c_api():
raise SkipTest("infeed not implemented in PJRT C API")
super().setUp()
@jax.numpy_rank_promotion("allow") # Test explicitly exercises implicit rank promotion.
def testInfeed(self):
raise SkipTest("skipping temporarily for stackless")
@jax.jit
def f(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(core.ShapedArray((3, 4), jnp.float32),))
(z,), _ = lax.infeed(
token, shape=(core.ShapedArray((3, 1, 1), jnp.float32),))
return x + y + z
x = np.float32(1.5)
y = np.reshape(np.arange(12, dtype=np.float32), (3, 4)) # self.rng().randn(3, 4).astype(np.float32)
z = self.rng().randn(3, 1, 1).astype(np.float32)
device = jax.local_devices()[0]
device.transfer_to_infeed((y,))
device.transfer_to_infeed((z,))
self.assertAllClose(f(x), x + y + z)
def testInfeedPytree(self):
raise SkipTest("skipping temporarily for stackless")
x = np.float32(1.5)
y = np.reshape(np.arange(12, dtype=np.int16), (3, 4))
to_infeed = dict(a=x, b=y)
to_infeed_shape = dict(a=core.ShapedArray((), dtype=np.float32),
b=core.ShapedArray((3, 4), dtype=np.int16))
@jax.jit
def f(x):
token = lax.create_token(x)
res, token = lax.infeed(token, shape=to_infeed_shape)
return res
device = jax.local_devices()[0]
# We must transfer the flattened data, as a tuple!!!
flat_to_infeed, _ = jax.tree.flatten(to_infeed)
device.transfer_to_infeed(tuple(flat_to_infeed))
self.assertAllClose(f(x), to_infeed)
@jax.numpy_rank_promotion("allow") # Test explicitly exercises implicit rank promotion.
def testInfeedThenOutfeed(self):
@jax.jit
def f(x):
token = lax.create_token(x)
y, token = lax.infeed(
token, shape=core.ShapedArray((3, 4), jnp.float32))
token = lax.outfeed(token, y + np.float32(1))
return x - 1
x = np.float32(7.5)
y = self.rng().randn(3, 4).astype(np.float32)
execution = threading.Thread(target=lambda: f(x))
execution.start()
device = jax.local_devices()[0]
device.transfer_to_infeed((y,))
out, = device.transfer_from_outfeed(
xla_client.shape_from_pyval((y,)).with_major_to_minor_layout_if_absent())
execution.join()
self.assertAllClose(out, y + np.float32(1))
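# (added note) The pattern above is used throughout this file: the jitted
# function is launched on a separate thread because lax.infeed blocks until the
# host pushes data with device.transfer_to_infeed, and the host then reads the
# computation's output back with device.transfer_from_outfeed.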
def testInfeedThenOutfeedInALoop(self):
def doubler(_, token):
y, token = lax.infeed(
token, shape=core.ShapedArray((3, 4), jnp.float32))
return lax.outfeed(token, y * np.float32(2))
@jax.jit
def f(n):
token = lax.create_token(n)
token = lax.fori_loop(0, n, doubler, token)
return n
device = jax.local_devices()[0]
n = 10
execution = threading.Thread(target=lambda: f(n))
execution.start()
for _ in range(n):
x = self.rng().randn(3, 4).astype(np.float32)
device.transfer_to_infeed((x,))
y, = device.transfer_from_outfeed(xla_client.shape_from_pyval((x,))
.with_major_to_minor_layout_if_absent())
self.assertAllClose(y, x * np.float32(2))
execution.join()
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@[email protected]_END.py
|
{
"filename": "_bordercolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/hoverlabel/_bordercolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="bordercolor", parent_name="surface.hoverlabel", **kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@hoverlabel@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/__init__.py",
"type": "Python"
}
|
import sys
if sys.version_info < (3, 7):
from ._area import Area
from ._bar import Bar
from ._barpolar import Barpolar
from ._box import Box
from ._candlestick import Candlestick
from ._carpet import Carpet
from ._choropleth import Choropleth
from ._choroplethmapbox import Choroplethmapbox
from ._cone import Cone
from ._contour import Contour
from ._contourcarpet import Contourcarpet
from ._densitymapbox import Densitymapbox
from ._deprecations import AngularAxis
from ._deprecations import Annotation
from ._deprecations import Annotations
from ._deprecations import ColorBar
from ._deprecations import Contours
from ._deprecations import Data
from ._deprecations import ErrorX
from ._deprecations import ErrorY
from ._deprecations import ErrorZ
from ._deprecations import Font
from ._deprecations import Frames
from ._deprecations import Histogram2dcontour
from ._deprecations import Legend
from ._deprecations import Line
from ._deprecations import Margin
from ._deprecations import Marker
from ._deprecations import RadialAxis
from ._deprecations import Scene
from ._deprecations import Stream
from ._deprecations import Trace
from ._deprecations import XAxis
from ._deprecations import XBins
from ._deprecations import YAxis
from ._deprecations import YBins
from ._deprecations import ZAxis
from ._figure import Figure
from ._frame import Frame
from ._funnel import Funnel
from ._funnelarea import Funnelarea
from ._heatmap import Heatmap
from ._heatmapgl import Heatmapgl
from ._histogram import Histogram
from ._histogram2d import Histogram2d
from ._histogram2dcontour import Histogram2dContour
from ._image import Image
from ._indicator import Indicator
from ._isosurface import Isosurface
from ._layout import Layout
from ._mesh3d import Mesh3d
from ._ohlc import Ohlc
from ._parcats import Parcats
from ._parcoords import Parcoords
from ._pie import Pie
from ._pointcloud import Pointcloud
from ._sankey import Sankey
from ._scatter import Scatter
from ._scatter3d import Scatter3d
from ._scattercarpet import Scattercarpet
from ._scattergeo import Scattergeo
from ._scattergl import Scattergl
from ._scattermapbox import Scattermapbox
from ._scatterpolar import Scatterpolar
from ._scatterpolargl import Scatterpolargl
from ._scatterternary import Scatterternary
from ._splom import Splom
from ._streamtube import Streamtube
from ._sunburst import Sunburst
from ._surface import Surface
from ._table import Table
from ._treemap import Treemap
from ._violin import Violin
from ._volume import Volume
from ._waterfall import Waterfall
from . import area
from . import bar
from . import barpolar
from . import box
from . import candlestick
from . import carpet
from . import choropleth
from . import choroplethmapbox
from . import cone
from . import contour
from . import contourcarpet
from . import densitymapbox
from . import funnel
from . import funnelarea
from . import heatmap
from . import heatmapgl
from . import histogram
from . import histogram2d
from . import histogram2dcontour
from . import image
from . import indicator
from . import isosurface
from . import layout
from . import mesh3d
from . import ohlc
from . import parcats
from . import parcoords
from . import pie
from . import pointcloud
from . import sankey
from . import scatter
from . import scatter3d
from . import scattercarpet
from . import scattergeo
from . import scattergl
from . import scattermapbox
from . import scatterpolar
from . import scatterpolargl
from . import scatterternary
from . import splom
from . import streamtube
from . import sunburst
from . import surface
from . import table
from . import treemap
from . import violin
from . import volume
from . import waterfall
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[
".area",
".bar",
".barpolar",
".box",
".candlestick",
".carpet",
".choropleth",
".choroplethmapbox",
".cone",
".contour",
".contourcarpet",
".densitymapbox",
".funnel",
".funnelarea",
".heatmap",
".heatmapgl",
".histogram",
".histogram2d",
".histogram2dcontour",
".image",
".indicator",
".isosurface",
".layout",
".mesh3d",
".ohlc",
".parcats",
".parcoords",
".pie",
".pointcloud",
".sankey",
".scatter",
".scatter3d",
".scattercarpet",
".scattergeo",
".scattergl",
".scattermapbox",
".scatterpolar",
".scatterpolargl",
".scatterternary",
".splom",
".streamtube",
".sunburst",
".surface",
".table",
".treemap",
".violin",
".volume",
".waterfall",
],
[
"._area.Area",
"._bar.Bar",
"._barpolar.Barpolar",
"._box.Box",
"._candlestick.Candlestick",
"._carpet.Carpet",
"._choropleth.Choropleth",
"._choroplethmapbox.Choroplethmapbox",
"._cone.Cone",
"._contour.Contour",
"._contourcarpet.Contourcarpet",
"._densitymapbox.Densitymapbox",
"._deprecations.AngularAxis",
"._deprecations.Annotation",
"._deprecations.Annotations",
"._deprecations.ColorBar",
"._deprecations.Contours",
"._deprecations.Data",
"._deprecations.ErrorX",
"._deprecations.ErrorY",
"._deprecations.ErrorZ",
"._deprecations.Font",
"._deprecations.Frames",
"._deprecations.Histogram2dcontour",
"._deprecations.Legend",
"._deprecations.Line",
"._deprecations.Margin",
"._deprecations.Marker",
"._deprecations.RadialAxis",
"._deprecations.Scene",
"._deprecations.Stream",
"._deprecations.Trace",
"._deprecations.XAxis",
"._deprecations.XBins",
"._deprecations.YAxis",
"._deprecations.YBins",
"._deprecations.ZAxis",
"._figure.Figure",
"._frame.Frame",
"._funnel.Funnel",
"._funnelarea.Funnelarea",
"._heatmap.Heatmap",
"._heatmapgl.Heatmapgl",
"._histogram.Histogram",
"._histogram2d.Histogram2d",
"._histogram2dcontour.Histogram2dContour",
"._image.Image",
"._indicator.Indicator",
"._isosurface.Isosurface",
"._layout.Layout",
"._mesh3d.Mesh3d",
"._ohlc.Ohlc",
"._parcats.Parcats",
"._parcoords.Parcoords",
"._pie.Pie",
"._pointcloud.Pointcloud",
"._sankey.Sankey",
"._scatter.Scatter",
"._scatter3d.Scatter3d",
"._scattercarpet.Scattercarpet",
"._scattergeo.Scattergeo",
"._scattergl.Scattergl",
"._scattermapbox.Scattermapbox",
"._scatterpolar.Scatterpolar",
"._scatterpolargl.Scatterpolargl",
"._scatterternary.Scatterternary",
"._splom.Splom",
"._streamtube.Streamtube",
"._sunburst.Sunburst",
"._surface.Surface",
"._table.Table",
"._treemap.Treemap",
"._violin.Violin",
"._volume.Volume",
"._waterfall.Waterfall",
],
)
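# (added note) On Python 3.7+, relative_import installs module-level __getattr__
# and __dir__ hooks (PEP 562) so the trace classes and submodules listed above
# are imported lazily, on first attribute access, rather than at package import
# time.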
if sys.version_info < (3, 7):
try:
import ipywidgets as _ipywidgets
from distutils.version import LooseVersion as _LooseVersion
if _LooseVersion(_ipywidgets.__version__) >= _LooseVersion("7.0.0"):
from ..graph_objs._figurewidget import FigureWidget
else:
raise ImportError()
except Exception:
from ..missing_ipywidgets import FigureWidget
else:
__all__.append("FigureWidget")
orig_getattr = __getattr__
def __getattr__(import_name):
if import_name == "FigureWidget":
try:
import ipywidgets
from distutils.version import LooseVersion
if LooseVersion(ipywidgets.__version__) >= LooseVersion("7.0.0"):
from ..graph_objs._figurewidget import FigureWidget
return FigureWidget
else:
raise ImportError()
except Exception:
from ..missing_ipywidgets import FigureWidget
return FigureWidget
return orig_getattr(import_name)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@[email protected]_END.py
|
{
"filename": "galCat.py",
"repo_name": "CosmoStatGW/DarkSirensStat",
"repo_path": "DarkSirensStat_extracted/DarkSirensStat-master/DarkSirensStat/galCat.py",
"type": "Python"
}
|
#
# Copyright (c) 2021 Andreas Finke <[email protected]>,
# Michele Mancarella <[email protected]>
#
# All rights reserved. Use of this source code is governed by a modified BSD
# license that can be found in the LICENSE file.
####
# This module contains abstract classes to handle a galaxy catalogue
####
import pandas as pd
import healpy as hp
import numpy as np
import matplotlib.pyplot as plt
from abc import ABC, abstractmethod
from copy import deepcopy
from globals import *
from keelin import bounded_keelin_3_discrete_probabilities
class GalCat(ABC):
def __init__(self, foldername, completeness, useDirac, verbose, **kwargs):
self._path = os.path.join(dirName, 'data', foldername)
self._nside = 128
self._useDirac = useDirac
self.data = pd.DataFrame()
self.verbose = verbose
self.load(**kwargs)
self.selectedData = self.data
self._completeness = deepcopy(completeness)
self._completeness.verbose = verbose
self._completeness.compute(self.data, useDirac)
def get_data(self):
return self.selectedData
def select_area(self, pixels, nside):
if self.verbose:
print("Restricting area of the catalogue to %s pixels with nside=%s" %(pixels.shape[0], nside))
pixname = "pix" + str(nside)
if not pixname in self.data:
self.data.loc[:, pixname] = hp.ang2pix(nside, self.data.theta, self.data.phi)
mask = self.data.isin({pixname: pixels}).any(1)
self.selectedData = self.data[mask]
if self.verbose:
print('%s galaxies kept' %self.selectedData.shape[0])
def set_z_range_for_selection(self, zMin, zMax):
if self.verbose:
print("Setting z range of the catalogue between %s, %s" %(np.round(zMin,3), np.round(zMax,3)))
self.selectedData = self.selectedData[(self.selectedData.z >= zMin) & (self.selectedData.z < zMax)]
if self.verbose:
print('%s galaxies kept' %self.selectedData.shape[0])
def count_selection(self):
return self.selectedData.shape[0]
@abstractmethod
def load(self):
pass
def completeness(self, theta, phi, z, oneZPerAngle=False):
return self._completeness.get(theta, phi, z, oneZPerAngle) + 1e-9
def group_correction(self, df, df_groups, which_z='z_cosmo'):
'''
Corrects the cosmological redshift in the heliocentric frame
for the peculiar velocities of galaxies
belonging to the group catalogue of arXiv:1705.08068, table 2.
To be applied BEFORE changing to the CMB frame.
Inputs: df - dataframe to correct
df_groups - dataframe of group velocities
which_z - name of column to correct
Output : df, but with the column given by which_z corrected for peculiar velocities
in the relevant cases and a new column named which_z+'_or'
with the original redshift
'''
#print('Correcting %s for group velocities...' %which_z)
# df_groups.loc[:, 'isInCat'] = df_groups['PGC'].isin(df['PGC'])
# print(df_groups)
# df_groups.set_index(keys=['PGC1'], drop=False, inplace=True)
# groups = df_groups.groupby(level=0)
# #groups = df_groups.groupby('PGC1')
#
# isModified = np.zeros(len(df), dtype=int)
#
# for grname, gr in groups:
#
# if gr.isInCat.any():
# print('y')
# galnames = gr['PGC']
# mask = df['PGC'].isin(galnames)
#
# z_group = gr.HRV.mean()/clight
#
# df.loc[mask, which_z] = z_group
# isModified = isModified | mask.to_numpy().astype(int)
#
# df.loc[:, 'group_correction'] = isModified
df.loc[:, which_z+'_or'] = df[which_z].values
zs = df.loc[df['PGC'].isin(df_groups['PGC'])][['PGC', which_z]]
z_corr_arr = []
#z_group_arr = []
for PGC in zs.PGC.values:
#z_or=zs.loc[zs['PGC']==PGC][which_z_correct].values[0]
PGC1=df_groups[df_groups['PGC']==PGC]['PGC1'].values[0]
#print(PGC1)
z_group = df_groups[df_groups['PGC1']== PGC1].HRV.mean()/clight
z_corr_arr.append(z_group)
z_corr_arr=np.array(z_corr_arr)
df.loc[df['PGC'].isin(df_groups['PGC']), which_z] = z_corr_arr
correction_flag_array = np.where(df[which_z+'_or'] != df[which_z], 1, 0)
df.loc[:, 'group_correction'] = correction_flag_array
def CMB_correction(self, df, which_z='z_cosmo'):
'''
Gives cosmological redshift in CMB frame starting from heliocentric
Inputs: df - dataframe to correct
which_z - name of column to correct
Output : df, with a new column given by
which_z +'_CMB'
'''
#print('Correcting %s for CMB reference frame...' %which_z)
v_gal = clight*df[which_z].values
phi_CMB, dec_CMB = gal_to_eq(np.radians(l_CMB), np.radians(b_CMB))
theta_CMB = 0.5 * np.pi - dec_CMB
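# delV: line-of-sight component of the observer's velocity with respect to the CMB (v_CMB),
# obtained from the angular separation between the galaxy direction (theta, phi) and the
# CMB dipole apex via the spherical law of cosines.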
delV = v_CMB*(np.sin(df.theta)*np.sin(theta_CMB)*np.cos(df.phi-phi_CMB) +np.cos(df.theta)*np.cos(theta_CMB))
v_corr = v_gal+delV # at first order in v/c ...
z_corr = v_corr/clight
df.loc[:,which_z+'_CMB'] = z_corr
def include_vol_prior(self, df):
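'''
Applies a comoving-volume prior proportional to d_C(z)^2 / H(z) (evaluated
below on zGrid with a fiducial flat LambdaCDM cosmology) to each galaxy's
bounded-Keelin redshift pdf, processing the catalogue in batches.
Galaxies whose corrected percentiles are no longer monotonic are removed.
'''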
batchSize = 10000
nBatches = max(int(len(df)/batchSize), 1)
if self.verbose:
print("Computing galaxy posteriors...")
from keelin import convolve_bounded_keelin_3
from astropy.cosmology import FlatLambdaCDM
fiducialcosmo = FlatLambdaCDM(H0=70.0, Om0=0.3)
zGrid = np.linspace(0, 1.4*np.max(df.z_upperbound), 500)
jac = fiducialcosmo.comoving_distance(zGrid).value**2 / fiducialcosmo.H(zGrid).value
from scipy import interpolate
func = interpolate.interp1d(zGrid, jac, kind='cubic')
def convolve_batch(df, func, batchId, nBatches):
N = len(df)
# actual batch size, different from batchSize only due to integer rounding
n = int(N/nBatches)
start = n*batchId
stop = n*(batchId+1)
if batchId == nBatches-1:
stop = N
batch = df.iloc[start:stop]
if self.verbose:
if batchId % 100 == 0:
print("Batch " + str(batchId) + " of " + str(nBatches) )
ll = batch.z_lowerbound.to_numpy()
l = batch.z_lower.to_numpy()
m = batch.z.to_numpy()
u = batch.z_upper.to_numpy()
uu = batch.z_upperbound.to_numpy()
return convolve_bounded_keelin_3(func, 0.16, l, m, u, ll, uu, N=1000)
res = np.vstack(parmap(lambda b: convolve_batch(df, func, b, nBatches), range(nBatches)))
mask = (res[:,0] >= res[:,1]) | (res[:,1] >= res[:,2]) | (res[:,2] >= res[:,3]) | (res[:,3] >= res[:,4]) | (res[:,0] < 0)
if self.verbose:
print('Removing ' + str( np.sum(mask) ) + ' galaxies with unfeasible redshift pdf after r-squared prior correction.' )
df.z_lowerbound = res[:, 0]
df.z_lower = res[:, 1]
df.z = res[:, 2]
df.z_upper = res[:, 3]
df.z_upperbound = res[:, 4]
df = df[~mask]
return df
def gal_to_eq(l, b):
'''
input: galactic coordinates (l, b) in radians
returns equatorial coordinates (RA, dec) in radians
https://en.wikipedia.org/wiki/Celestial_coordinate_system#Equatorial_↔_galactic
'''
l_NCP = np.radians(122.93192)
del_NGP = np.radians(27.128336)
alpha_NGP = np.radians(192.859508)
RA = np.arctan((np.cos(b)*np.sin(l_NCP-l))/(np.cos(del_NGP)*np.sin(b)-np.sin(del_NGP)*np.cos(b)*np.cos(l_NCP-l)))+alpha_NGP
dec = np.arcsin(np.sin(del_NGP)*np.sin(b)+np.cos(del_NGP)*np.cos(b)*np.cos(l_NCP-l))
return RA, dec
class GalCompleted(object):
def __init__(self, completionType = 'mix', **kwargs):
self._galcats = []
self._catweights = []
self._additive = False
self._multiplicative = False
if completionType == 'add':
self._additive = True
elif completionType == 'mult':
self._multiplicative = True
def add_cat(self, cat, weight = 1):
self._galcats.append(cat)
self._catweights.append(weight)
def total_completeness(self, theta, phi, z, oneZPerAngle=False):
# sums completenesses of all catalogs, taking into account the additional
# catalog weights
res = 0
for c, w in zip(self._galcats, self._catweights):
res += w*c.completeness(theta, phi, z, oneZPerAngle)
return res
#return sum(list(map(lambda c: c.completeness, self._galcats)))
def select_area(self, pixels, nside):
for c in self._galcats:
c.select_area(pixels, nside)
def set_z_range_for_selection(self, zMin, zMax):
for c in self._galcats:
c.set_z_range_for_selection(zMin, zMax)
def count_selection(self):
return [c.count_selection() for c in self._galcats]
def get_inhom_contained(self, zGrid, nside):
''' return pixels : array N_galaxies
weights: array N_galaxies x len(zGrid)
'''
allpixels = []
allweights = []
# iterate through catalogs and add results to lists
catweightTotal = 0
for c, w in zip(self._galcats, self._catweights):
# shorthand
d = c.get_data()
pixname = "pix" + str(nside)
# compute this only once
if not pixname in c.get_data():
d.loc[:, pixname] = hp.ang2pix(nside, d.theta.to_numpy(), d.phi.to_numpy())
# pixels are already known
allpixels.append(d[pixname].to_numpy())
# keelin weights. N has to be tuned for speed vs quality
# for each gal, on zGrid
weights = bounded_keelin_3_discrete_probabilities(zGrid, 0.16, d.z_lower, d.z, d.z_upper, d.z_lowerbound, d.z_upperbound, N=40, P=0.99999)
if weights.ndim == 1:
weights = weights[np.newaxis, :]
weights *= d.w[:, np.newaxis]
if self._additive and (len(self._galcats) == 1): # no need to evaluate completeness...
catweightTotal = w
else:
# completeness eval for each gal, on grid - same shape as weights
completeness = c.completeness(d.theta.to_numpy(), d.phi.to_numpy(), zGrid)
# multiplicative completion
weights /= completeness
# downweighted for low completeness
weights *= self.confidence(completeness)
# catalog weighting based also on completeness
catweight = w*np.mean(completeness)
weights *= catweight
catweightTotal += catweight
# normalize in case different goals are used for different catalogs, to make them comparable
weights /= c._completeness._comovingDensityGoal
allweights.append(weights)
allweights = np.vstack(allweights)
allweights /= catweightTotal
#return np.squeeze(np.vstack(allpixels)), np.vstack(allweights)
return np.hstack(allpixels), allweights
def get_inhom(self, nside):
'''
returns pixels, redshifts and weights of all galaxies (redshift medians) in the selection, ignoring the galaxies' redshift error pdfs
returns:
pixels : array nGal
redshifts: array nGal
weights: array nGal
'''
allpixels = []
allredshifts = []
allweights = []
catweightTotal = 0
for c, w in zip(self._galcats, self._catweights):
# shorthand
d = c.get_data()
pixname = "pix" + str(nside)
# compute this only once
if not pixname in c.get_data():
d.loc[:, pixname] = hp.ang2pix(nside, d.theta.to_numpy(), d.phi.to_numpy())
allpixels.append(d[pixname].to_numpy())
weights = d.w.to_numpy().copy()
redshifts = d.z.to_numpy()
allredshifts.append(redshifts)
if self._additive and (len(self._galcats) == 1): # no need to evaluate completeness...
catweightTotal = w
else:
# completeness eval for each gal
completeness = c.completeness(d.theta.to_numpy(), d.phi.to_numpy(), redshifts, oneZPerAngle = True)
# multiplicative completion
weights /= completeness
# downweighted for low completeness
weights *= self.confidence(completeness)
# catalog weighting based also on completeness
catweight = w*np.mean(completeness)
weights *= catweight
catweightTotal += catweight
# normalize in case different goals are used for different catalogs, to make them comparable
weights /= c._completeness._comovingDensityGoal
allweights.append(weights)
allweights = np.hstack(allweights)
allweights /= catweightTotal
return np.hstack(allpixels), np.hstack(allredshifts), allweights
def eval_inhom(self, Omega, z):
'''
For the future if we had posterior samples
'''
pass
def eval_hom(self, theta, phi, z, MC=True):
'''
Homogeneous completion part. Second term in 2.59
'''
if MC:
assert(len(theta) == len(z))
ret = np.zeros(len(theta))
catweightTotal = 0
for c, w in zip(self._galcats, self._catweights):
# completeness eval for each point
completeness = c.completeness(theta, phi, z, oneZPerAngle = True)
# how much of the homogeneous component to add - note that in the additive-completion case, confidence() returns its argument, so the prefactor correctly becomes 1 - completeness
retcat = (1-self.confidence(completeness))
# catalog weighting based also on completeness
catweight = w*np.mean(completeness)
retcat *= catweight
catweightTotal += catweight
ret += retcat
# for catalog averaging (3)
ret /= catweightTotal
return ret
def confidence(self, compl):
if self._multiplicative:
return 1
elif self._additive:
return compl
else: #interpolation between multiplicative and additive
confpower = 0.05
complb = np.clip(compl, a_min=2e-3, a_max=1)
return np.exp(confpower*(1-1/complb))
|
CosmoStatGWREPO_NAMEDarkSirensStatPATH_START.@DarkSirensStat_extracted@DarkSirensStat-master@[email protected]@.PATH_END.py
|
{
"filename": "colored.py",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/colored.py",
"type": "Python"
}
|
import sys
def not_colored(a,_):
return repr(a)
colors = {
"red":"\033[31m",
"green":"\033[32m",
"yellow":"\033[33m",
"blue":"\033[34m",
"magenta":"\033[35m",
"cyan":"\033[36m",
}
reset = "\033[0m"
def colored(arg,c):
assert isinstance(c, str)
assert c in colors
s = str(arg)
return colors[c] + s + reset
if hasattr(sys.stdout,"isatty"):
is_tty = sys.stdout.isatty()
else:
is_tty = False
is_jupyter = type(sys.stdout).__name__ == 'OutStream' and type(sys.stdout).__module__ == 'ipykernel.iostream'
if (not is_tty) and (not is_jupyter):
colored = not_colored
if __name__ == "__main__":
if installed:
print(colored("Colored was installed", "green"))
else:
print("Colored was NOT installed")
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@[email protected]@.PATH_END.py
|
{
"filename": "Example 2 - Loading observational data.ipynb",
"repo_name": "ACCarnall/bagpipes",
"repo_path": "bagpipes_extracted/bagpipes-master/examples/Example 2 - Loading observational data.ipynb",
"type": "Jupyter Notebook"
}
|
# Loading observational data into Bagpipes
This example will show you how to input observational data. For further information see the [loading observational data](http://bagpipes.readthedocs.io/en/latest/loading_galaxies.html) page of the documentation.
## The load_data function
In order to load up data you need to write a function which accepts an ID (as a string) and returns your data in the format Bagpipes expects.
For photometry, this format is an n×2 array containing fluxes and flux errors in microjanskys. For spectroscopy, it is an n×3 array containing wavelengths in angstroms, fluxes, and flux errors in erg/s/cm^2/A.
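As a purely illustrative sketch (the function name `load_dummy` and the flux values are made up, not part of any real catalogue), a photometry-only load function just has to return an array of that shape:
```python
import numpy as np

def load_dummy(ID):
    """ Illustrative only: return mock photometry for any ID. """
    # Three bands: fluxes and flux errors, both in microjanskys.
    fluxes = np.array([1.2, 3.4, 5.6])
    fluxerrs = np.array([0.1, 0.3, 0.5])
    return np.c_[fluxes, fluxerrs]  # shape (n, 2)
```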
Below is an example load data function for loading photometry from the [Guo et al. (2013) catalogue](https://archive.stsci.edu/prepds/candels). This catalogue is not included in the repository.
```python
import numpy as np
import bagpipes as pipes
from astropy.io import fits
def load_goodss(ID):
""" Load UltraVISTA photometry from catalogue. """
# load up the relevant columns from the catalogue.
cat = np.loadtxt("hlsp_candels_hst_wfc3_goodss-tot-multiband_f160w_v1-1photom_cat.txt",
usecols=(10, 13, 16, 19, 25, 28, 31, 34, 37, 40, 43, 46, 49, 52, 55,
11, 14, 17, 20, 26, 29, 32, 35, 38, 41, 44, 47, 50, 53, 56))
# Find the correct row for the object we want.
row = int(ID) - 1
# Extract the object we want from the catalogue.
fluxes = cat[row, :15]
fluxerrs = cat[row, 15:]
# Turn these into a 2D array.
photometry = np.c_[fluxes, fluxerrs]
# blow up the errors associated with any missing fluxes.
for i in range(len(photometry)):
if (photometry[i, 0] == 0.) or (photometry[i, 1] <= 0):
photometry[i,:] = [0., 9.9*10**99.]
# Enforce a maximum SNR of 20, or 10 in the IRAC channels.
for i in range(len(photometry)):
if i < 10:
max_snr = 20.
else:
max_snr = 10.
if photometry[i, 0]/photometry[i, 1] > max_snr:
photometry[i, 1] = photometry[i, 0]/max_snr
return photometry
```
Let's pick object 17433 as a test case to check this works.
```python
print(load_goodss("17433"))
```
[[6.987040e-02 7.049980e-03]
[1.923840e-01 4.059220e-02]
[8.308090e-01 4.154045e-02]
[2.950840e+00 1.475420e-01]
[7.377370e+00 3.688685e-01]
[9.502210e+00 4.751105e-01]
[0.000000e+00 9.900000e+99]
[1.516060e+01 7.580300e-01]
[2.142950e+01 1.071475e+00]
[3.460530e+01 1.730265e+00]
[3.460900e+01 3.460900e+00]
[5.131500e+01 5.131500e+00]
[3.950320e+01 3.950320e+00]
[2.687740e+01 2.687740e+00]
[1.799710e+01 1.799710e+00]]
All that's needed now is to pass the ID and load_goodss function to the bagpipes galaxy class, along with the filt_list from Example 1.
As we're not inputting any spectroscopic data we'll also need to set the keyword argument spectrum_exists to False:
```python
goodss_filt_list = np.loadtxt("filters/goodss_filt_list.txt", dtype="str")
galaxy = pipes.galaxy("17433", load_goodss, spectrum_exists=False, filt_list=goodss_filt_list)
fig = galaxy.plot()
```

## Adding spectroscopic data
The data load function can return photometry, spectroscopy or both. Let's try an example with spectroscopy. Fortuitously, the object above has also been observed by the VANDELS survey. The fits file in this directory comes from DR2. We can load it with the following function:
```python
def bin(spectrum, binn):
""" Bins up two or three column spectral data by a specified factor. """
binn = int(binn)
nbins = len(spectrum) // binn
binspec = np.zeros((nbins, spectrum.shape[1]))
for i in range(binspec.shape[0]):
spec_slice = spectrum[i*binn:(i+1)*binn, :]
binspec[i, 0] = np.mean(spec_slice[:, 0])
binspec[i, 1] = np.mean(spec_slice[:, 1])
if spectrum.shape[1] == 3:
binspec[i,2] = (1./float(binn)
*np.sqrt(np.sum(spec_slice[:, 2]**2)))
return binspec
def load_vandels_spec(ID):
""" Loads VANDELS spectroscopic data from file. """
hdulist = fits.open("VANDELS_CDFS_" + ID + ".fits")
spectrum = np.c_[hdulist[1].data["WAVE"][0],
hdulist[1].data["FLUX"][0],
hdulist[1].data["ERR"][0]]
mask = (spectrum[:,0] < 9250.) & (spectrum[:,0] > 5250.)
return bin(spectrum[mask], 2)
```
This time there's no need to pass the filt_list argument, and it's the photometry_exists keyword argument we need to set to False:
```python
galaxy = pipes.galaxy("017433", load_vandels_spec, photometry_exists=False)
fig = galaxy.plot()
```

## Loading photometry and spectroscopy
Now let's put it all together. We can define a function which calls both load_vandels_spec and load_goodss and returns the spectrum and photometry. When we're loading both kinds of data, bagpipes expects the spectrum first and then the photometry, like so:
```python
def load_both(ID):
spectrum = load_vandels_spec(ID)
phot = load_goodss(ID)
return spectrum, phot
galaxy = pipes.galaxy("017433", load_both, filt_list=goodss_filt_list)
fig = galaxy.plot()
```

That's about all there is to know. Photometric points that fall within the wavelength range of the spectrum are plotted on the upper panel, so you can see how well the calibration of the spectrum and photometry agree.
```python
```
|
ACCarnallREPO_NAMEbagpipesPATH_START.@bagpipes_extracted@bagpipes-master@examples@Example 2 - Loading observational [email protected]_END.py
|
{
"filename": "README.md",
"repo_name": "hopehhchen/Droplets",
"repo_path": "Droplets_extracted/Droplets-master/README.md",
"type": "Markdown"
}
|
# Droplets
Repo for an upcoming paper analyzing gravitationally unbound, coherent structures with significant velocity gradients in nearby star forming molecular clouds. The repo is prepared to work with [binder](http://mybinder.org) and will be shared alongside the paper on Authorea. By working between *Github* and other data/code/article sharing services, we hope to present what a journal article could look like in an era of open data and reproducible/reusable codes.
Please contact Hope Chen at [email protected] if you have any questions. Any suggestions are also welcome via [the issue tracker of this repo](https://github.com/hopehhchen/Droplets/issues).
### Collaboration
The project is led by [Hope Chen](https://github.com/hopehhchen) at the Harvard-Smithsonian Center for Astrophysics, in collaboration with [Jaime Pineda](https://github.com/jpinedaf), Alyssa Goodman, and Andreas Burkert.
## Abstract
Using data from the [*GBT Ammonia Survey* (GAS) Data Release 1](https://dataverse.harvard.edu/dataverse/GAS_Project) ([Friesen and Pineda et al., 2017](https://ui.adsabs.harvard.edu/#abs/2017ApJ...843...63F/abstract)), we look into the physical properties of structures that are coherent (previous examples in [Goodman et al., 1998](https://ui.adsabs.harvard.edu/#abs/1998ApJ...504..223G/abstract) and in [Pineda et al., 2010](https://ui.adsabs.harvard.edu/#abs/2010ApJ...712L.116P/abstract)) and show significant velocity gradients (previous examples in [Goodman et al., 1993](https://ui.adsabs.harvard.edu/#abs/1993ApJ...406..528G/abstract)). With a much improved physical resolution of ~4000 AU (compared to ~0.1 pc in the 1990s) at the distance of Ophiuchus and Taurus, one goal of the analysis is to provide updated numbers for properties of (potentially) rotational motions within 0.1 pc, which have been essential in simulations and analytic models alike in the past two decades. In our first analysis, these cores seem to be gravitationally unbound and are termed "droplets." In the paper, we hope to provide a sensible guess as to what role these structures could play in the process of star formation.
## Data from GAS DR1 and Herschel
The main dataset used in the analyses is from the GAS DR1 ([Friesen and Pineda et al., 2017](https://ui.adsabs.harvard.edu/#abs/2017ApJ...843...63F/abstract)) and is hosted on [Dataverse](https://dataverse.harvard.edu/dataverse/GAS_Project). The *Herschel* column density maps are derived by Ayushi Singh and Peter Martin at University of Toronto, using archival data from the *Herschel Space Observatory*. The data in this repo are copies of the data hosted on Dataverse, without any modification.
Due to the *Github* policy, data files larger than 100 MB are not hosted here. These include the raw data cubes and the position-position-velocity cubes based on results of Gaussian line fitting to the ammonia hyperfine lines. These large files are not needed to run codes in this repo. Please look in the GBT DR1 dataverse for the files.
## Status of the Project and the Github Repo
A manuscript is being prepared on Authorea. Meanwhile, results of the analyses are actively shared in this repo, together with the code used for these analyses. Currently, the code is *not* yet stable enough to be applied to other datasets. Please approach with caution.
### A Note on the Idea of Shared Data and Codes
It has become fashionable to share data and code in the fast-developing field of data science within the natural sciences. However, the lack of clear guidelines for data sharing has often made it difficult to reproduce (and, importantly, test) the results published using the original version of the shared data, and to reuse the shared data in other analyses.
We find an example of a complete set of guidelines presented by [Ellis & Leek](https://doi.org/10.7287/peerj.preprints.3139v2) very useful. [Ellis & Leek](https://doi.org/10.7287/peerj.preprints.3139v2) list the following as "the information you should pass to a statistician":
1. The raw data
2. A tidy data set
3. A code book describing each variable and its values in the tidy data set
4. An explicit and exact recipe you used to go from 1 -> 2, 3. (*sic*)
In this project, we try to follow these guidelines, by: 1) pointing to the raw datasets in the original surveys, 2) storing the tidied datasets in this github repo, and 3) providing the code book and the recipe in the format of ipython notebooks. We hope that, by doing so, this project could be an example of data sharing for future scientific projects.
|
hopehhchenREPO_NAMEDropletsPATH_START.@Droplets_extracted@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "spacetelescope/astronify",
"repo_path": "astronify_extracted/astronify-main/astronify/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import * # noqa
# ----------------------------------------------------------------------------
from . import series # noqa
from . import simulator # noqa
from . import utils # noqa
__all__ = ['series', 'simulator', 'utils'] # noqa
|
spacetelescopeREPO_NAMEastronifyPATH_START.@astronify_extracted@astronify-main@astronify@[email protected]_END.py
|
{
"filename": "ssl_support.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/setuptools/ssl_support.py",
"type": "Python"
}
|
import os
import socket
import atexit
import re
import functools
from setuptools.extern.six.moves import urllib, http_client, map, filter
from pkg_resources import ResolutionError, ExtractionError
try:
import ssl
except ImportError:
ssl = None
__all__ = [
'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
'opener_for'
]
cert_paths = """
/etc/pki/tls/certs/ca-bundle.crt
/etc/ssl/certs/ca-certificates.crt
/usr/share/ssl/certs/ca-bundle.crt
/usr/local/share/certs/ca-root.crt
/etc/ssl/cert.pem
/System/Library/OpenSSL/certs/cert.pem
/usr/local/share/certs/ca-root-nss.crt
/etc/ssl/ca-bundle.pem
""".strip().split()
try:
HTTPSHandler = urllib.request.HTTPSHandler
HTTPSConnection = http_client.HTTPSConnection
except AttributeError:
HTTPSHandler = HTTPSConnection = object
is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
try:
from ssl import CertificateError, match_hostname
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
except ImportError:
CertificateError = None
match_hostname = None
if not CertificateError:
class CertificateError(ValueError):
pass
if not match_hostname:
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
class VerifyingHTTPSHandler(HTTPSHandler):
"""Simple verifying handler: no auth, subclasses, timeouts, etc."""
def __init__(self, ca_bundle):
self.ca_bundle = ca_bundle
HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(
lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
)
class VerifyingHTTPSConn(HTTPSConnection):
"""Simple verifying connection: no auth, subclasses, timeouts, etc."""
def __init__(self, host, ca_bundle, **kw):
HTTPSConnection.__init__(self, host, **kw)
self.ca_bundle = ca_bundle
def connect(self):
sock = socket.create_connection(
(self.host, self.port), getattr(self, 'source_address', None)
)
# Handle the socket if a (proxy) tunnel is present
if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
# http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
# change self.host to mean the proxy server host when tunneling is
# being used. Adapt, since we are interested in the destination
# host for the match_hostname() comparison.
actual_host = self._tunnel_host
else:
actual_host = self.host
if hasattr(ssl, 'create_default_context'):
ctx = ssl.create_default_context(cafile=self.ca_bundle)
self.sock = ctx.wrap_socket(sock, server_hostname=actual_host)
else:
# This is for python < 2.7.9 and < 3.4?
self.sock = ssl.wrap_socket(
sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
)
try:
match_hostname(self.sock.getpeercert(), actual_host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
def opener_for(ca_bundle=None):
"""Get a urlopen() replacement that uses ca_bundle for verification"""
return urllib.request.build_opener(
VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
).open
# from jaraco.functools
def once(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not hasattr(func, 'always_returns'):
func.always_returns = func(*args, **kwargs)
return func.always_returns
return wrapper
@once
def get_win_certfile():
try:
import wincertstore
except ImportError:
return None
class CertFile(wincertstore.CertFile):
def __init__(self):
super(CertFile, self).__init__()
atexit.register(self.close)
def close(self):
try:
super(CertFile, self).close()
except OSError:
pass
_wincerts = CertFile()
_wincerts.addstore('CA')
_wincerts.addstore('ROOT')
return _wincerts.name
def find_ca_bundle():
"""Return an existing CA bundle path, or None"""
extant_cert_paths = filter(os.path.isfile, cert_paths)
return (
get_win_certfile()
or next(extant_cert_paths, None)
or _certifi_where()
)
def _certifi_where():
try:
return __import__('certifi').where()
except (ImportError, ResolutionError, ExtractionError):
pass
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@[email protected]@site-packages@setuptools@[email protected]_END.py
|
{
"filename": "_hovertemplate.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2d/_hovertemplate.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="hovertemplate", parent_name="histogram2d", **kwargs
):
super(HovertemplateValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2d@[email protected]_END.py
|
{
"filename": "_stream.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/_stream.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="stream", parent_name="histogram", **kwargs):
super(StreamValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Stream"),
data_docs=kwargs.pop(
"data_docs",
"""
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
""",
),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@histogram@[email protected]_END.py
|
{
"filename": "correlation.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/graphics/correlation.py",
"type": "Python"
}
|
'''correlation plots
Author: Josef Perktold
License: BSD-3
example for usage with different options in
statsmodels/sandbox/examples/thirdparty/ex_ratereturn.py
'''
import numpy as np
from . import utils
def plot_corr(dcorr, xnames=None, ynames=None, title=None, normcolor=False,
ax=None, cmap='RdYlBu_r'):
"""Plot correlation of many variables in a tight color grid.
Parameters
----------
dcorr : ndarray
Correlation matrix, square 2-D array.
xnames : list[str], optional
Labels for the horizontal axis. If not given (None), then the
matplotlib defaults (integers) are used. If it is an empty list, [],
then no ticks and labels are added.
ynames : list[str], optional
Labels for the vertical axis. Works the same way as `xnames`.
If not given, the same names as for `xnames` are re-used.
title : str, optional
The figure title. If None, the default ('Correlation Matrix') is used.
If ``title=''``, then no title is added.
normcolor : bool or tuple of scalars, optional
If False (default), then the color coding range corresponds to the
range of `dcorr`. If True, then the color range is normalized to
(-1, 1). If this is a tuple of two numbers, then they define the range
for the color bar.
ax : AxesSubplot, optional
If `ax` is None, then a figure is created. If an axis instance is
given, then only the main plot but not the colorbar is created.
cmap : str or Matplotlib Colormap instance, optional
The colormap for the plot. Can be any valid Matplotlib Colormap
instance or name.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import statsmodels.api as sm
>>> import statsmodels.graphics.api as smg
>>> hie_data = sm.datasets.randhie.load_pandas()
>>> corr_matrix = np.corrcoef(hie_data.data.T)
>>> smg.plot_corr(corr_matrix, xnames=hie_data.names)
>>> plt.show()
.. plot:: plots/graphics_correlation_plot_corr.py
"""
if ax is None:
create_colorbar = True
else:
create_colorbar = False
fig, ax = utils.create_mpl_ax(ax)
nvars = dcorr.shape[0]
if ynames is None:
ynames = xnames
if title is None:
title = 'Correlation Matrix'
if isinstance(normcolor, tuple):
vmin, vmax = normcolor
elif normcolor:
vmin, vmax = -1.0, 1.0
else:
vmin, vmax = None, None
axim = ax.imshow(dcorr, cmap=cmap, interpolation='nearest',
extent=(0,nvars,0,nvars), vmin=vmin, vmax=vmax)
# create list of label positions
labelPos = np.arange(0, nvars) + 0.5
if isinstance(ynames, list) and len(ynames) == 0:
ax.set_yticks([])
elif ynames is not None:
ax.set_yticks(labelPos)
ax.set_yticks(labelPos[:-1]+0.5, minor=True)
ax.set_yticklabels(ynames[::-1], fontsize='small',
horizontalalignment='right')
if isinstance(xnames, list) and len(xnames) == 0:
ax.set_xticks([])
elif xnames is not None:
ax.set_xticks(labelPos)
ax.set_xticks(labelPos[:-1]+0.5, minor=True)
ax.set_xticklabels(xnames, fontsize='small', rotation=45,
horizontalalignment='right')
if not title == '':
ax.set_title(title)
if create_colorbar:
fig.colorbar(axim, use_gridspec=True)
fig.tight_layout()
ax.tick_params(which='minor', length=0)
ax.tick_params(direction='out', top=False, right=False)
try:
ax.grid(True, which='minor', linestyle='-', color='w', lw=1)
except AttributeError:
# Seems to fail for axes created with AxesGrid. MPL bug?
pass
return fig
def plot_corr_grid(dcorrs, titles=None, ncols=None, normcolor=False, xnames=None,
ynames=None, fig=None, cmap='RdYlBu_r'):
"""
Create a grid of correlation plots.
The individual correlation plots are assumed to all have the same
variables, axis labels can be specified only once.
Parameters
----------
dcorrs : list or iterable of ndarrays
List of correlation matrices.
titles : list[str], optional
List of titles for the subplots. By default no titles are shown.
ncols : int, optional
Number of columns in the subplot grid. If not given, the number of
columns is determined automatically.
normcolor : bool or tuple, optional
If False (default), then the color coding range corresponds to the
range of `dcorr`. If True, then the color range is normalized to
(-1, 1). If this is a tuple of two numbers, then they define the range
for the color bar.
xnames : list[str], optional
Labels for the horizontal axis. If not given (None), then the
matplotlib defaults (integers) are used. If it is an empty list, [],
then no ticks and labels are added.
ynames : list[str], optional
Labels for the vertical axis. Works the same way as `xnames`.
If not given, the same names as for `xnames` are re-used.
fig : Figure, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
cmap : str or Matplotlib Colormap instance, optional
The colormap for the plot. Can be any valid Matplotlib Colormap
instance or name.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import statsmodels.api as sm
In this example we just reuse the same correlation matrix several times.
Of course in reality one would show a different correlation (measuring
another type of correlation, for example Pearson (linear) and Spearman or
Kendall (nonlinear) correlations) for the same variables.
>>> hie_data = sm.datasets.randhie.load_pandas()
>>> corr_matrix = np.corrcoef(hie_data.data.T)
>>> sm.graphics.plot_corr_grid([corr_matrix] * 8, xnames=hie_data.names)
>>> plt.show()
.. plot:: plots/graphics_correlation_plot_corr_grid.py
"""
if ynames is None:
ynames = xnames
if not titles:
titles = ['']*len(dcorrs)
n_plots = len(dcorrs)
if ncols is not None:
nrows = int(np.ceil(n_plots / float(ncols)))
else:
# Determine number of rows and columns, square if possible, otherwise
# prefer a wide (more columns) over a high layout.
if n_plots < 4:
nrows, ncols = 1, n_plots
else:
nrows = int(np.sqrt(n_plots))
ncols = int(np.ceil(n_plots / float(nrows)))
# Create a figure with the correct size
aspect = min(ncols / float(nrows), 1.8)
vsize = np.sqrt(nrows) * 5
fig = utils.create_mpl_fig(fig, figsize=(vsize * aspect + 1, vsize))
for i, c in enumerate(dcorrs):
ax = fig.add_subplot(nrows, ncols, i+1)
# Ensure to only plot labels on bottom row and left column
_xnames = xnames if nrows * ncols - (i+1) < ncols else []
_ynames = ynames if (i+1) % ncols == 1 else []
plot_corr(c, xnames=_xnames, ynames=_ynames, title=titles[i],
normcolor=normcolor, ax=ax, cmap=cmap)
# Adjust figure margins and add a colorbar
fig.subplots_adjust(bottom=0.1, left=0.09, right=0.9, top=0.9)
cax = fig.add_axes([0.92, 0.1, 0.025, 0.8])
fig.colorbar(fig.axes[0].images[0], cax=cax)
return fig
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@[email protected]@.PATH_END.py
|
{
"filename": "parameter_server_strategy.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/distribute/parameter_server_strategy.py",
"type": "Python"
}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class implementing a multi-worker parameter server tf.distribute strategy."""
import copy
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import input_util
from tensorflow.python.distribute import mirrored_run
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import ps_values
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import cluster_resolver as cluster_resolver_lib
from tensorflow.python.distribute.cluster_resolver import tfconfig_cluster_resolver
from tensorflow.python.distribute.v1 import input_lib as input_lib_v1
from tensorflow.python.eager import context
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import device_setter
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
_LOCAL_CPU = "/device:CPU:0"
@tf_export(v1=["distribute.experimental.ParameterServerStrategy"]) # pylint: disable=missing-docstring
class ParameterServerStrategyV1(distribute_lib.StrategyV1):
"""An asynchronous multi-worker parameter server tf.distribute strategy.
This strategy requires two roles: workers and parameter servers. Variables and
updates to those variables will be assigned to parameter servers and other
operations are assigned to workers.
When each worker has more than one GPU, operations will be replicated on all
GPUs. Even though operations may be replicated, variables are not and each
worker shares a common view for which parameter server a variable is assigned
to.
By default it uses `TFConfigClusterResolver` to detect configurations for
multi-worker training. This requires a 'TF_CONFIG' environment variable and
the 'TF_CONFIG' must have a cluster spec.
This class assumes each worker is running the same code independently, but
parameter servers are running a standard server. This means that while each
worker will synchronously compute a single gradient update across all GPUs,
updates between workers proceed asynchronously. Operations that occur only on
the first replica (such as incrementing the global step), will occur on the
first replica *of every worker*.
It is expected to call `call_for_each_replica(fn, ...)` for any
operations which potentially can be replicated across replicas (i.e. multiple
GPUs) even if there is only CPU or one GPU. When defining the `fn`, extra
caution needs to be taken:
1) It is generally not recommended to open a device scope under the strategy's
scope. A device scope (i.e. calling `tf.device`) will be merged with or
override the device for operations but will not change the device for
variables.
2) It is also not recommended to open a colocation scope (i.e. calling
`tf.compat.v1.colocate_with`) under the strategy's scope. For colocating
variables, use `strategy.extended.colocate_vars_with` instead. Colocation of
ops will possibly create device assignment conflicts.
Note: This strategy only works with the Estimator API. Pass an instance of
this strategy to the `train_distribute` argument when you create the
`RunConfig`. This instance of `RunConfig` should then be passed to the
`Estimator` instance on which `train_and_evaluate` is called.
For Example:
```
strategy = tf.distribute.experimental.ParameterServerStrategy()
run_config = tf.estimator.RunConfig(
train_distribute=strategy)
estimator = tf.estimator.Estimator(config=run_config)
tf.estimator.train_and_evaluate(estimator,...)
```
"""
def __init__(self, cluster_resolver=None):
"""Initializes this strategy with an optional `cluster_resolver`.
Args:
cluster_resolver: Optional
`tf.distribute.cluster_resolver.ClusterResolver` object. Defaults to a
`tf.distribute.cluster_resolver.TFConfigClusterResolver`.
"""
if cluster_resolver is None:
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
super(ParameterServerStrategyV1, self).__init__(
ParameterServerStrategyExtended(
self, cluster_resolver=cluster_resolver))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"ParameterServerStrategy")
def experimental_distribute_dataset(self, dataset, options=None):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`experimental_distribute_datasets_from_function`."
)
self._raise_pss_error_if_eager()
super(ParameterServerStrategyV1,
self).experimental_distribute_dataset(dataset=dataset,
options=options)
def distribute_datasets_from_function(self, dataset_fn, options=None):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`experimental_distribute_datasets_from_function` "
"of tf.distribute.MirroredStrategy")
self._raise_pss_error_if_eager()
super(ParameterServerStrategyV1, self).distribute_datasets_from_function(
dataset_fn=dataset_fn, options=options)
def run(self, fn, args=(), kwargs=None, options=None):
self._raise_pss_error_if_eager()
super(ParameterServerStrategyV1, self).run(
fn, args=args, kwargs=kwargs, options=options)
def scope(self):
self._raise_pss_error_if_eager()
return super(ParameterServerStrategyV1, self).scope()
def _raise_pss_error_if_eager(self):
if context.executing_eagerly():
raise NotImplementedError(
"`tf.compat.v1.distribute.experimental.ParameterServerStrategy` "
"currently only works with the tf.Estimator API")
# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.
class ParameterServerStrategyExtended(distribute_lib.StrategyExtendedV1):
"""Implementation of ParameterServerStrategy and CentralStorageStrategy."""
def __init__(self,
container_strategy,
cluster_resolver=None,
compute_devices=None,
parameter_device=None):
super(ParameterServerStrategyExtended, self).__init__(container_strategy)
self._initialize_strategy(
cluster_resolver=cluster_resolver,
compute_devices=compute_devices,
parameter_device=parameter_device)
# We typically don't need to do all-reduce in this strategy.
self._cross_device_ops = (
cross_device_ops_lib.ReductionToOneDevice(reduce_to_device=_LOCAL_CPU))
def _initialize_strategy(self,
cluster_resolver=None,
compute_devices=None,
parameter_device=None):
if cluster_resolver and cluster_resolver.cluster_spec():
self._initialize_multi_worker(cluster_resolver)
else:
self._initialize_local(
compute_devices, parameter_device, cluster_resolver=cluster_resolver)
def _initialize_multi_worker(self, cluster_resolver):
"""Initialize devices for multiple workers.
It creates variable devices and compute devices. Variables and operations
will be assigned to them respectively. We have one compute device per
replica. The variable device is a device function or device string. The
default variable device assigns variables to parameter servers in a
round-robin fashion.
Args:
cluster_resolver: a descendant of `ClusterResolver` object.
Raises:
ValueError: if the cluster doesn't have ps jobs.
"""
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(
cluster_resolver, tfconfig_cluster_resolver.TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
# Save the num_gpus_per_worker for configure method.
self._num_gpus_per_worker = num_gpus
cluster_spec = cluster_resolver.cluster_spec()
task_type = cluster_resolver.task_type
task_id = cluster_resolver.task_id
if not task_type or task_id is None:
raise ValueError("When `cluster_spec` is given, you must also specify "
"`task_type` and `task_id`")
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
assert cluster_spec.as_dict()
self._worker_device = "/job:%s/task:%d" % (task_type, task_id)
self._input_host_device = numpy_dataset.SingleDevice(self._worker_device)
# Define compute devices which is a list of device strings and one for each
# replica. When there are GPUs, replicate operations on these GPUs.
# Otherwise, place operations on CPU.
if num_gpus > 0:
compute_devices = tuple(
"%s/device:GPU:%d" % (self._worker_device, i)
for i in range(num_gpus))
else:
compute_devices = (self._worker_device,)
self._compute_devices = [
device_util.canonicalize(d) for d in compute_devices]
# In distributed mode, place variables on ps jobs in a round-robin fashion.
# Note that devices returned from `replica_device_setter` are not
# canonical and therefore we don't canonicalize all variable devices to
# make them consistent.
# TODO(yuefengz): support passing a strategy object to control variable
# assignment.
# TODO(yuefengz): merge the logic of replica_device_setter into this
# class.
num_ps_replicas = len(cluster_spec.as_dict().get("ps", []))
if num_ps_replicas == 0:
raise ValueError("The cluster spec needs to have `ps` jobs.")
self._variable_device = device_setter.replica_device_setter(
ps_tasks=num_ps_replicas,
worker_device=self._worker_device,
merge_devices=True,
cluster=cluster_spec)
# The `_parameter_devices` is needed for the `parameter_devices` property
# and is a list of all variable devices. Here parameter devices are all
# tasks of the "ps" job.
self._parameter_devices = tuple(map("/job:ps/task:{}".format,
range(num_ps_replicas)))
# Add a default device so that ops without specified devices will not end up
# on other workers.
self._default_device = self._worker_device
self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
task_id)
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
logging.info(
"Multi-worker ParameterServerStrategy with "
"cluster_spec = %r, task_type = %r, task_id = %r, "
"num_ps_replicas = %r, is_chief = %r, compute_devices = %r, "
"variable_device = %r", cluster_spec.as_dict(), task_type, task_id,
num_ps_replicas, self._is_chief, self._compute_devices,
self._variable_device)
# TODO(yuefengz): get rid of cluster_resolver argument when contrib's
# version no longer depends on this class.
def _initialize_local(self,
compute_devices,
parameter_device,
cluster_resolver=None):
"""Initialize local devices for training."""
self._worker_device = device_util.canonicalize("/device:CPU:0")
self._input_host_device = numpy_dataset.SingleDevice(self._worker_device)
if compute_devices is None:
if not cluster_resolver:
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
# Save the num_gpus_per_worker for configure method which is used by the
# contrib version.
self._num_gpus_per_worker = num_gpus
compute_devices = device_util.local_devices_from_num_gpus(num_gpus)
compute_devices = [device_util.canonicalize(d) for d in compute_devices]
if parameter_device is None:
# If there is only one GPU, put everything on that GPU. Otherwise, place
# variables on CPU.
if len(compute_devices) == 1:
parameter_device = compute_devices[0]
else:
parameter_device = _LOCAL_CPU
self._variable_device = parameter_device
self._compute_devices = compute_devices
self._parameter_devices = (parameter_device,)
self._is_chief = True
self._cluster_spec = None
self._task_type = None
self._task_id = None
logging.info(
"ParameterServerStrategy (CentralStorageStrategy if you are using a "
"single machine) with compute_devices = %r, variable_device = %r",
compute_devices, self._variable_device)
def _input_workers_with_options(self, options=None):
if not options or options.experimental_fetch_to_device:
return input_lib.InputWorkers(
[(self._worker_device, self._compute_devices)])
else:
return input_lib.InputWorkers(
[(self._worker_device,
(self._worker_device,) * len(self._compute_devices))])
@property
def _input_workers(self):
return self._input_workers_with_options()
def _validate_colocate_with_variable(self, colocate_with_variable):
distribute_utils.validate_colocate(colocate_with_variable, self)
def _experimental_distribute_dataset(self, dataset, options):
return input_util.get_distributed_dataset(
dataset,
self._input_workers_with_options(options),
self._container_strategy(),
num_replicas_in_sync=self._num_replicas_in_sync,
options=options)
def _make_dataset_iterator(self, dataset):
return input_lib_v1.DatasetIterator(
dataset,
self._input_workers,
self._container_strategy(),
num_replicas_in_sync=self._num_replicas_in_sync)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
"""Distributes the dataset to each local GPU."""
if self._cluster_spec:
input_pipeline_id = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
num_input_pipelines = multi_worker_util.worker_count(
self._cluster_spec, self._task_type)
else:
input_pipeline_id = 0
num_input_pipelines = 1
input_context = distribute_lib.InputContext(
num_input_pipelines=num_input_pipelines,
input_pipeline_id=input_pipeline_id,
num_replicas_in_sync=self._num_replicas_in_sync)
return input_lib_v1.InputFunctionIterator(input_fn, self._input_workers,
[input_context],
self._container_strategy())
def _experimental_make_numpy_dataset(self, numpy_input, session):
return numpy_dataset.one_host_numpy_dataset(
numpy_input, self._input_host_device, session)
def _distribute_datasets_from_function(self, dataset_fn, options):
if self._cluster_spec:
input_pipeline_id = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
num_input_pipelines = multi_worker_util.worker_count(
self._cluster_spec, self._task_type)
else:
input_pipeline_id = 0
num_input_pipelines = 1
input_context = distribute_lib.InputContext(
num_input_pipelines=num_input_pipelines,
input_pipeline_id=input_pipeline_id,
num_replicas_in_sync=self._num_replicas_in_sync)
return input_util.get_distributed_datasets_from_function(
dataset_fn,
self._input_workers_with_options(options), [input_context],
self._container_strategy(),
options=options)
def _experimental_distribute_values_from_function(self, value_fn):
per_replica_values = []
for replica_id in range(self._num_replicas_in_sync):
per_replica_values.append(
value_fn(distribute_lib.ValueContext(replica_id,
self._num_replicas_in_sync)))
return distribute_utils.regroup(per_replica_values, always_wrap=True)
def _broadcast_to(self, tensor, destinations):
# This is both a fast path for Python constants, and a way to delay
# converting Python values to a tensor until we know what type it
# should be converted to. Otherwise we have trouble with:
# global_step.assign_add(1)
# since the `1` gets broadcast as an int32 but global_step is int64.
if isinstance(tensor, (float, int)):
return tensor
if not cross_device_ops_lib.check_destinations(destinations):
# TODO(josh11b): Use current logical device instead of 0 here.
destinations = self._compute_devices
return self._cross_device_ops.broadcast(tensor, destinations)
def _allow_variable_partition(self):
return not context.executing_eagerly()
def _create_var_creator(self, next_creator, **kwargs):
if self._num_replicas_in_sync > 1:
aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
if aggregation not in (
vs.VariableAggregation.NONE,
vs.VariableAggregation.SUM,
vs.VariableAggregation.MEAN,
vs.VariableAggregation.ONLY_FIRST_REPLICA
):
raise ValueError("Invalid variable aggregation mode: " + aggregation +
" for variable: " + kwargs["name"])
def var_creator(**kwargs):
"""Create an AggregatingVariable and fix up collections."""
# Record what collections this variable should be added to.
collections = kwargs.pop("collections", None)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
kwargs["collections"] = []
# Create and wrap the variable.
v = next_creator(**kwargs)
wrapped = ps_values.AggregatingVariable(self._container_strategy(), v,
aggregation)
# Add the wrapped variable to the requested collections.
# The handling of eager mode and the global step matches
# ResourceVariable._init_from_args().
if not context.executing_eagerly():
g = ops.get_default_graph()
# If "trainable" is True, next_creator() will add the contained
# variable to the TRAINABLE_VARIABLES collection, so we manually
# remove it and replace with the wrapper. We can't set "trainable"
# to False for next_creator() since that causes functions like
# implicit_gradients to skip those variables.
if kwargs.get("trainable", True):
collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
if v in l:
l.remove(v)
g.add_to_collections(collections, wrapped)
elif ops.GraphKeys.GLOBAL_STEP in collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, wrapped)
return wrapped
return var_creator
else:
return next_creator
# TODO(yuefengz): Not all ops in device_setter.STANDARD_PS_OPS will go through
# this creator, such as "MutableHashTable".
def _create_variable(self, next_creator, **kwargs):
var_creator = self._create_var_creator(next_creator, **kwargs)
if "colocate_with" in kwargs:
colocate_with = kwargs["colocate_with"]
if isinstance(colocate_with, numpy_dataset.SingleDevice):
with ops.device(colocate_with.device):
return var_creator(**kwargs)
with ops.device(None):
with ops.colocate_with(colocate_with):
return var_creator(**kwargs)
with ops.colocate_with(None, ignore_existing=True):
with ops.device(self._variable_device):
return var_creator(**kwargs)
def _call_for_each_replica(self, fn, args, kwargs):
return mirrored_run.call_for_each_replica(self._container_strategy(), fn,
args, kwargs)
def _verify_destinations_not_different_worker(self, destinations):
if not self._cluster_spec:
return
if destinations is None:
return
for d in cross_device_ops_lib.get_devices_from(destinations):
d_spec = tf_device.DeviceSpec.from_string(d)
if d_spec.job == self._task_type and d_spec.task != self._task_id:
raise ValueError(
"Cannot reduce to another worker: %r, current worker is %r" %
(d, self._worker_device))
def _gather_to_implementation(self, value, destinations, axis,
options):
self._verify_destinations_not_different_worker(destinations)
if not isinstance(value, values.DistributedValues):
return value
return self._cross_device_ops._gather( # pylint: disable=protected-access
value,
destinations=destinations,
axis=axis,
options=options)
def _reduce_to(self, reduce_op, value, destinations, options):
self._verify_destinations_not_different_worker(destinations)
if not isinstance(value, values.DistributedValues):
# pylint: disable=protected-access
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, value, destinations, self._num_replicas_in_sync)
return self._cross_device_ops.reduce(
reduce_op, value, destinations=destinations, options=options)
def _batch_reduce_to(self, reduce_op, value_destination_pairs, options):
for _, destinations in value_destination_pairs:
self._verify_destinations_not_different_worker(destinations)
return self._cross_device_ops.batch_reduce(reduce_op,
value_destination_pairs, options)
def _select_single_value(self, structured):
"""Select any single value in `structured`."""
def _select_fn(x): # pylint: disable=g-missing-docstring
if isinstance(x, values.Mirrored) or isinstance(x, values.PerReplica):
return x._primary # pylint: disable=protected-access
else:
return x
return nest.map_structure(_select_fn, structured)
def _update(self, var, fn, args, kwargs, group):
if isinstance(var, ps_values.AggregatingVariable):
var = var.get()
if not resource_variable_ops.is_resource_variable(var):
raise ValueError(
"You can not update `var` %r. It must be a Variable." % var)
with ops.colocate_with(var), distribute_lib.UpdateContext(var.device):
result = fn(var, *self._select_single_value(args),
**self._select_single_value(kwargs))
if group:
return result
else:
return nest.map_structure(self._local_results, result)
# TODO(yuefengz): does it need to call _select_single_value?
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
with ops.device(
colocate_with.device), distribute_lib.UpdateContext(colocate_with):
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._local_results, result)
def value_container(self, val):
if (hasattr(val, "_aggregating_container") and
not isinstance(val, ps_values.AggregatingVariable)):
wrapper = val._aggregating_container() # pylint: disable=protected-access
if wrapper is not None:
return wrapper
return val
def read_var(self, var):
# No need to distinguish between normal variables and replica-local
# variables.
return array_ops.identity(var)
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
"""Configures the strategy class with `cluster_spec`.
The strategy object will be re-initialized if `cluster_spec` is passed to
`configure` but was not passed when instantiating the strategy.
Args:
session_config: Session config object.
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type.
task_id: the current task id.
Raises:
ValueError: if `cluster_spec` is given but `task_type` or `task_id` is
not.
"""
if cluster_spec:
# Use the num_gpus_per_worker recorded in constructor since _configure
# doesn't take num_gpus.
cluster_resolver = cluster_resolver_lib.SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={"GPU": self._num_gpus_per_worker})
self._initialize_multi_worker(cluster_resolver)
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
if not self._cluster_spec:
updated_config.isolate_session_state = True
return updated_config
updated_config.isolate_session_state = False
assert self._task_type
assert self._task_id is not None
# The device filters prevent communication between workers.
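    # For example, a worker with task index 3 would keep only
    # ["/job:worker/task:3", "/job:ps"]: it can reach the parameter servers
    # but not the other workers.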
del updated_config.device_filters[:]
if self._task_type in ["chief", "worker"]:
updated_config.device_filters.extend(
["/job:%s/task:%d" % (self._task_type, self._task_id), "/job:ps"])
elif self._task_type == "evaluator":
updated_config.device_filters.append(
"/job:%s/task:%d" % (self._task_type, self._task_id))
return updated_config
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
return self._cluster_spec is not None
@property
def _num_replicas_in_sync(self):
return len(self._compute_devices)
@property
def worker_devices(self):
return self._compute_devices
@property
def worker_devices_by_replica(self):
return [[d] for d in self._compute_devices]
@property
def parameter_devices(self):
return self._parameter_devices
def non_slot_devices(self, var_list):
return min(var_list, key=lambda x: x.name)
@property
def experimental_between_graph(self):
# TODO(yuefengz): Should this return False in the local case?
return True
@property
def experimental_should_init(self):
return self._is_chief
@property
def should_checkpoint(self):
return self._is_chief
@property
def should_save_summary(self):
return self._is_chief
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
def _get_local_replica_id(self, replica_id_in_sync_group):
return replica_id_in_sync_group
def _get_replica_id_in_sync_group(self, replica_id):
return replica_id
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@distribute@[email protected]_END.py
|
{
"filename": "axes_size.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/mpl_toolkits/axes_grid/axes_size.py",
"type": "Python"
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from mpl_toolkits.axes_grid1.axes_size import *
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@[email protected]@site-packages@mpl_toolkits@axes_grid@[email protected]_END.py
|
{
"filename": "freq_vs_phase.py",
"repo_name": "plazar/TOASTER",
"repo_path": "TOASTER_extracted/TOASTER-master/diagnostics/freq_vs_phase.py",
"type": "Python"
}
|
import tempfile
import os
import shutil
import utils
import errors
import base
class FreqVsPhasePlotDiagnostic(base.PlotDiagnostic):
name = 'Freq vs. Phase'
def _compute(self):
utils.print_info("Creating freq vs. phase plot for %s" % self.fn, 3)
params = utils.prep_file(self.fn)
if not (params['nchan'] > 1):
raise errors.DiagnosticNotApplicable("Archive (%s) only has " \
"a single channel. Freq vs. phase diagnostic " \
"doesn't apply to this data file." % self.fn)
handle, tmpfn = tempfile.mkstemp(suffix=".png")
os.close(handle)
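        # Build the PSRCHIVE 'psrplot' command: '-p freq' selects the
        # frequency-vs-phase plot, '-j DTp' dedisperses, time-scrunches and
        # polarisation-scrunches the archive first, and '-D <file>/PNG'
        # sends the output to the temporary PNG file created above.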
cmd = ["psrplot", "-p", "freq", "-j", "DTp", "-c", \
"above:c=%s" % os.path.split(self.fn)[-1], \
"-D", "%s/PNG" % tmpfn, "%s" % self.fn]
utils.execute(cmd)
tmpdir = os.path.split(tmpfn)[0]
pngfn = os.path.join(tmpdir, self.fn+".freq.png")
shutil.move(tmpfn, pngfn)
return pngfn
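# Expose the diagnostic under the generic module-level name that TOASTER
# presumably uses to discover plot diagnostics.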
Diagnostic = FreqVsPhasePlotDiagnostic
|
plazarREPO_NAMETOASTERPATH_START.@TOASTER_extracted@TOASTER-master@diagnostics@[email protected]_END.py
|
{
"filename": "_side.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contourcarpet/colorbar/title/_side.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="side", parent_name="contourcarpet.colorbar.title", **kwargs
):
super(SideValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["right", "top", "bottom"]),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contourcarpet@colorbar@title@[email protected]_END.py
|
{
"filename": "test_ccddata.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/nddata/tests/test_ccddata.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import textwrap
from contextlib import nullcontext
import numpy as np
import pytest
from astropy import log
from astropy import units as u
from astropy.io import fits
from astropy.nddata import _testing as nd_testing
from astropy.nddata.ccddata import CCDData
from astropy.nddata.nduncertainty import (
InverseVariance,
MissingDataAssociationException,
StdDevUncertainty,
VarianceUncertainty,
)
from astropy.table import Table
from astropy.tests.helper import PYTEST_LT_8_0
from astropy.utils import NumpyRNGContext
from astropy.utils.data import (
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_filenames,
)
from astropy.utils.exceptions import AstropyWarning
from astropy.wcs import WCS, FITSFixedWarning
DEFAULT_DATA_SIZE = 100
with NumpyRNGContext(123):
_random_array = np.random.normal(size=[DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE])
_random_psf = np.random.normal(size=(20, 20))
@pytest.fixture
def home_is_tmpdir(tmp_path, monkeypatch, request):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables will be temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path))
def create_ccd_data():
"""
Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE
with units of ADU.
"""
data = _random_array.copy()
fake_meta = {"my_key": 42, "your_key": "not 42"}
ccd = CCDData(data, unit=u.adu)
ccd.header = fake_meta
return ccd
def test_ccddata_empty():
with pytest.raises(TypeError):
CCDData() # empty initializer should fail
def test_ccddata_must_have_unit():
with pytest.raises(ValueError):
CCDData(np.zeros([2, 2]))
def test_ccddata_unit_cannot_be_set_to_none():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
ccd_data.unit = None
def test_ccddata_meta_header_conflict():
with pytest.raises(ValueError, match=".*can't have both header and meta.*"):
CCDData([1, 2, 3], unit="", meta={1: 1}, header={2: 2})
def test_ccddata_simple():
ccd_data = create_ccd_data()
assert ccd_data.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE)
assert ccd_data.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE
assert ccd_data.dtype == np.dtype(float)
def test_ccddata_init_with_string_electron_unit():
ccd = CCDData(np.zeros([2, 2]), unit="electron")
assert ccd.unit is u.electron
def test_initialize_from_FITS(tmp_path):
ccd_data = create_ccd_data()
hdu = fits.PrimaryHDU(ccd_data)
hdulist = fits.HDUList([hdu])
filename = str(tmp_path / "a_file.fits")
hdulist.writeto(filename)
cd = CCDData.read(filename, unit=u.electron)
assert cd.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE)
assert cd.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE
assert np.issubdtype(cd.data.dtype, np.floating)
for k, v in hdu.header.items():
assert cd.meta[k] == v
def test_initialize_from_fits_with_unit_in_header(tmp_path):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header["bunit"] = u.adu.to_string()
filename = str(tmp_path / "a_file.fits")
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
# An explicit unit in the read overrides any unit in the FITS file
ccd2 = CCDData.read(filename, unit="photon")
assert ccd2.unit is u.photon
def test_initialize_from_fits_with_ADU_in_header(tmp_path):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header["bunit"] = "ADU"
filename = str(tmp_path / "a_file.fits")
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
def test_initialize_from_fits_with_invalid_unit_in_header(tmp_path):
hdu = fits.PrimaryHDU(np.ones((2, 2)))
hdu.header["bunit"] = "definetely-not-a-unit"
filename = str(tmp_path / "a_file.fits")
hdu.writeto(filename)
with pytest.raises(ValueError):
CCDData.read(filename)
def test_initialize_from_fits_with_technically_invalid_but_not_really(tmp_path):
hdu = fits.PrimaryHDU(np.ones((2, 2)))
hdu.header["bunit"] = "ELECTRONS/S"
filename = str(tmp_path / "a_file.fits")
hdu.writeto(filename)
ccd = CCDData.read(filename)
assert ccd.unit == u.electron / u.s
def test_initialize_from_fits_with_data_in_different_extension(tmp_path):
fake_img = np.arange(4).reshape(2, 2)
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(fake_img)
hdus = fits.HDUList([hdu1, hdu2])
filename = str(tmp_path / "a_file.fits")
hdus.writeto(filename)
ccd = CCDData.read(filename, unit="adu")
    # ccd should contain the data from the image extension...did it?
np.testing.assert_array_equal(ccd.data, fake_img)
# check that the header is the combined header
assert hdu2.header + hdu1.header == ccd.header
def test_initialize_from_fits_with_extension(tmp_path):
fake_img1 = np.zeros([2, 2])
fake_img2 = np.arange(4).reshape(2, 2)
hdu0 = fits.PrimaryHDU()
hdu1 = fits.ImageHDU(fake_img1, name="first", ver=1)
hdu2 = fits.ImageHDU(fake_img2, name="second", ver=1)
hdus = fits.HDUList([hdu0, hdu1, hdu2])
filename = str(tmp_path / "a_file.fits")
hdus.writeto(filename)
ccd = CCDData.read(filename, hdu=2, unit="adu")
    # ccd should contain the data from the requested extension...did it?
np.testing.assert_array_equal(ccd.data, fake_img2)
# check hdu string parameter
ccd = CCDData.read(filename, hdu="second", unit="adu")
np.testing.assert_array_equal(ccd.data, fake_img2)
# check hdu tuple parameter
ccd = CCDData.read(filename, hdu=("second", 1), unit="adu")
np.testing.assert_array_equal(ccd.data, fake_img2)
def test_write_unit_to_hdu():
ccd_data = create_ccd_data()
ccd_unit = ccd_data.unit
hdulist = ccd_data.to_hdu()
assert "bunit" in hdulist[0].header
assert hdulist[0].header["bunit"] == ccd_unit.to_string()
def test_initialize_from_FITS_bad_keyword_raises_error(tmp_path):
# There are two fits.open keywords that are not permitted in ccdproc:
# do_not_scale_image_data and scale_back
ccd_data = create_ccd_data()
filename = str(tmp_path / "test.fits")
ccd_data.write(filename)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit, do_not_scale_image_data=True)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit, scale_back=True)
def test_ccddata_writer(tmp_path):
ccd_data = create_ccd_data()
filename = str(tmp_path / "test.fits")
ccd_data.write(filename)
ccd_disk = CCDData.read(filename, unit=ccd_data.unit)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
def test_ccddata_writer_as_imagehdu(tmp_path):
ccd_data = create_ccd_data()
filename = str(tmp_path / "test.fits")
ccd_data.write(filename, as_image_hdu=False)
with fits.open(filename) as hdus:
assert len(hdus) == 1
filename = str(tmp_path / "test2.fits")
ccd_data.write(filename, as_image_hdu=True)
with fits.open(filename) as hdus:
assert len(hdus) == 2
assert isinstance(hdus[1], fits.ImageHDU)
def test_ccddata_meta_is_case_sensitive():
ccd_data = create_ccd_data()
key = "SoMeKEY"
ccd_data.meta[key] = 10
assert key.lower() not in ccd_data.meta
assert key.upper() not in ccd_data.meta
assert key in ccd_data.meta
def test_ccddata_meta_is_not_fits_header():
ccd_data = create_ccd_data()
ccd_data.meta = {"OBSERVER": "Edwin Hubble"}
assert not isinstance(ccd_data.meta, fits.Header)
def test_fromMEF(tmp_path):
ccd_data = create_ccd_data()
hdu = fits.PrimaryHDU(ccd_data)
hdu2 = fits.PrimaryHDU(2 * ccd_data.data)
hdulist = fits.HDUList(hdu)
hdulist.append(hdu2)
filename = str(tmp_path / "a_file.fits")
hdulist.writeto(filename)
    # by default, we are reading from the first extension
cd = CCDData.read(filename, unit=u.electron)
np.testing.assert_array_equal(cd.data, ccd_data.data)
# but reading from the second should work too
cd = CCDData.read(filename, hdu=1, unit=u.electron)
np.testing.assert_array_equal(cd.data, 2 * ccd_data.data)
def test_metafromheader():
hdr = fits.header.Header()
hdr["observer"] = "Edwin Hubble"
hdr["exptime"] = "3600"
d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron)
assert d1.meta["OBSERVER"] == "Edwin Hubble"
assert d1.header["OBSERVER"] == "Edwin Hubble"
def test_metafromdict():
dic = {"OBSERVER": "Edwin Hubble", "EXPTIME": 3600}
d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron)
assert d1.meta["OBSERVER"] == "Edwin Hubble"
def test_header2meta():
hdr = fits.header.Header()
hdr["observer"] = "Edwin Hubble"
hdr["exptime"] = "3600"
d1 = CCDData(np.ones((5, 5)), unit=u.electron)
d1.header = hdr
assert d1.meta["OBSERVER"] == "Edwin Hubble"
assert d1.header["OBSERVER"] == "Edwin Hubble"
def test_metafromstring_fail():
hdr = "this is not a valid header"
with pytest.raises(TypeError):
CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu)
def test_setting_bad_uncertainty_raises_error():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
# Uncertainty is supposed to be an instance of NDUncertainty
ccd_data.uncertainty = 10
def test_setting_uncertainty_with_array():
ccd_data = create_ccd_data()
ccd_data.uncertainty = None
fake_uncertainty = np.sqrt(np.abs(ccd_data.data))
ccd_data.uncertainty = fake_uncertainty.copy()
np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty)
def test_setting_uncertainty_wrong_shape_raises_error():
ccd_data = create_ccd_data()
with pytest.raises(ValueError):
ccd_data.uncertainty = np.zeros([3, 4])
def test_to_hdu():
ccd_data = create_ccd_data()
ccd_data.meta = {"observer": "Edwin Hubble"}
fits_hdulist = ccd_data.to_hdu()
assert isinstance(fits_hdulist, fits.HDUList)
for k, v in ccd_data.meta.items():
assert fits_hdulist[0].header[k] == v
np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data)
def test_to_hdu_as_imagehdu():
ccd_data = create_ccd_data()
fits_hdulist = ccd_data.to_hdu(as_image_hdu=False)
assert isinstance(fits_hdulist[0], fits.PrimaryHDU)
fits_hdulist = ccd_data.to_hdu(as_image_hdu=True)
assert isinstance(fits_hdulist[0], fits.ImageHDU)
def test_copy():
ccd_data = create_ccd_data()
ccd_copy = ccd_data.copy()
np.testing.assert_array_equal(ccd_copy.data, ccd_data.data)
assert ccd_copy.unit == ccd_data.unit
assert ccd_copy.meta == ccd_data.meta
@pytest.mark.parametrize(
"operation,affects_uncertainty",
[
("multiply", True),
("divide", True),
],
)
@pytest.mark.parametrize(
"operand",
[
2.0,
2 * u.dimensionless_unscaled,
2 * u.photon / u.adu,
],
)
@pytest.mark.parametrize("with_uncertainty", [True, False])
def test_mult_div_overload(operand, with_uncertainty, operation, affects_uncertainty):
ccd_data = create_ccd_data()
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = getattr(ccd_data, operation)
np_method = getattr(np, operation)
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert result.uncertainty is None or isinstance(
result.uncertainty, StdDevUncertainty
)
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(
result.uncertainty.array,
np_method(ccd_data.uncertainty.array, op_value),
)
else:
np.testing.assert_array_equal(
result.uncertainty.array, ccd_data.uncertainty.array
)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
# Need the "1 *" below to force arguments to be Quantity to work around
# astropy/astropy#2377
expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit
assert result.unit == expected_unit
else:
assert result.unit == ccd_data.unit
@pytest.mark.parametrize(
"operation,affects_uncertainty",
[
("add", False),
("subtract", False),
],
)
@pytest.mark.parametrize(
"operand,expect_failure",
[
(2.0, u.UnitsError), # fail--units don't match image
(2 * u.dimensionless_unscaled, u.UnitsError), # same
(2 * u.adu, False),
],
)
@pytest.mark.parametrize("with_uncertainty", [True, False])
def test_add_sub_overload(
operand, expect_failure, with_uncertainty, operation, affects_uncertainty
):
ccd_data = create_ccd_data()
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = getattr(ccd_data, operation)
np_method = getattr(np, operation)
if expect_failure:
with pytest.raises(expect_failure):
result = method(operand)
return
else:
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert result.uncertainty is None or isinstance(
result.uncertainty, StdDevUncertainty
)
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(
result.uncertainty.array,
np_method(ccd_data.uncertainty.array, op_value),
)
else:
np.testing.assert_array_equal(
result.uncertainty.array, ccd_data.uncertainty.array
)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
assert result.unit == ccd_data.unit and result.unit == operand.unit
else:
assert result.unit == ccd_data.unit
def test_arithmetic_overload_fails():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
ccd_data.multiply("five")
with pytest.raises(TypeError):
ccd_data.divide("five")
with pytest.raises(TypeError):
ccd_data.add("five")
with pytest.raises(TypeError):
ccd_data.subtract("five")
def test_arithmetic_no_wcs_compare():
ccd = CCDData(np.ones((10, 10)), unit="")
assert ccd.add(ccd, compare_wcs=None).wcs is None
assert ccd.subtract(ccd, compare_wcs=None).wcs is None
assert ccd.multiply(ccd, compare_wcs=None).wcs is None
assert ccd.divide(ccd, compare_wcs=None).wcs is None
def test_arithmetic_with_wcs_compare():
def return_true(_, __):
return True
wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=2)
ccd1 = CCDData(np.ones((10, 10)), unit="", wcs=wcs1)
ccd2 = CCDData(np.ones((10, 10)), unit="", wcs=wcs2)
nd_testing.assert_wcs_seem_equal(ccd1.add(ccd2, compare_wcs=return_true).wcs, wcs1)
nd_testing.assert_wcs_seem_equal(
ccd1.subtract(ccd2, compare_wcs=return_true).wcs, wcs1
)
nd_testing.assert_wcs_seem_equal(
ccd1.multiply(ccd2, compare_wcs=return_true).wcs, wcs1
)
nd_testing.assert_wcs_seem_equal(
ccd1.divide(ccd2, compare_wcs=return_true).wcs, wcs1
)
def test_arithmetic_with_wcs_compare_fail():
def return_false(_, __):
return False
ccd1 = CCDData(np.ones((10, 10)), unit="", wcs=WCS())
ccd2 = CCDData(np.ones((10, 10)), unit="", wcs=WCS())
with pytest.raises(ValueError):
ccd1.add(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.subtract(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.multiply(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.divide(ccd2, compare_wcs=return_false)
def test_arithmetic_overload_ccddata_operand():
ccd_data = create_ccd_data()
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
operand = ccd_data.copy()
result = ccd_data.add(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data, 2 * ccd_data.data)
np.testing.assert_array_almost_equal_nulp(
result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array
)
result = ccd_data.subtract(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data, 0 * ccd_data.data)
np.testing.assert_array_almost_equal_nulp(
result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array
)
result = ccd_data.multiply(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data, ccd_data.data**2)
expected_uncertainty = (
np.sqrt(2) * np.abs(ccd_data.data) * ccd_data.uncertainty.array
)
np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty)
result = ccd_data.divide(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data, np.ones_like(ccd_data.data))
expected_uncertainty = (
np.sqrt(2) / np.abs(ccd_data.data) * ccd_data.uncertainty.array
)
np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty)
def test_arithmetic_overload_differing_units():
a = np.array([1, 2, 3]) * u.m
b = np.array([1, 2, 3]) * u.cm
ccddata = CCDData(a)
# TODO: Could also be parametrized.
res = ccddata.add(b)
np.testing.assert_array_almost_equal(res.data, np.add(a, b).value)
assert res.unit == np.add(a, b).unit
res = ccddata.subtract(b)
np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value)
assert res.unit == np.subtract(a, b).unit
res = ccddata.multiply(b)
np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value)
assert res.unit == np.multiply(a, b).unit
res = ccddata.divide(b)
np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value)
assert res.unit == np.divide(a, b).unit
def test_arithmetic_add_with_array():
ccd = CCDData(np.ones((3, 3)), unit="")
res = ccd.add(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3)
ccd = CCDData(np.ones((3, 3)), unit="adu")
with pytest.raises(ValueError):
ccd.add(np.arange(3))
def test_arithmetic_subtract_with_array():
ccd = CCDData(np.ones((3, 3)), unit="")
res = ccd.subtract(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3)
ccd = CCDData(np.ones((3, 3)), unit="adu")
with pytest.raises(ValueError):
ccd.subtract(np.arange(3))
def test_arithmetic_multiply_with_array():
ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m)
res = ccd.multiply(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3)
assert res.unit == ccd.unit
def test_arithmetic_divide_with_array():
ccd = CCDData(np.ones((3, 3)), unit=u.m)
res = ccd.divide(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3)
assert res.unit == ccd.unit
def test_history_preserved_if_metadata_is_fits_header(tmp_path):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header["history"] = "one"
hdu.header["history"] = "two"
hdu.header["history"] = "three"
assert len(hdu.header["history"]) == 3
tmp_file = str(tmp_path / "temp.fits")
hdu.writeto(tmp_file)
ccd_read = CCDData.read(tmp_file, unit="adu")
assert ccd_read.header["history"] == hdu.header["history"]
def test_infol_logged_if_unit_in_fits_header(tmp_path):
ccd_data = create_ccd_data()
tmpfile = str(tmp_path / "temp.fits")
ccd_data.write(tmpfile)
log.setLevel("INFO")
explicit_unit_name = "photon"
with log.log_to_list() as log_list:
_ = CCDData.read(tmpfile, unit=explicit_unit_name)
assert explicit_unit_name in log_list[0].message
def test_wcs_attribute(tmp_path):
"""
Check that WCS attribute gets added to header, and that if a CCDData
object is created from a FITS file with a header, and the WCS attribute
is modified, then the CCDData object is turned back into an hdu, the
WCS object overwrites the old WCS information in the header.
"""
ccd_data = create_ccd_data()
tmpfile = str(tmp_path / "temp.fits")
# This wcs example is taken from the astropy.wcs docs.
wcs = WCS(naxis=2)
wcs.wcs.crpix = np.array(ccd_data.shape) / 2
wcs.wcs.cdelt = np.array([-0.066667, 0.066667])
wcs.wcs.crval = [0, -90]
wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"]
wcs.wcs.set_pv([(2, 1, 45.0)])
ccd_data.header = ccd_data.to_hdu()[0].header
ccd_data.header.extend(wcs.to_header(), useblanks=False)
ccd_data.write(tmpfile)
# Get the header length after it has been extended by the WCS keywords
original_header_length = len(ccd_data.header)
ccd_new = CCDData.read(tmpfile)
# WCS attribute should be set for ccd_new
assert ccd_new.wcs is not None
# WCS attribute should be equal to wcs above.
assert ccd_new.wcs.wcs == wcs.wcs
# Converting CCDData object with wcs to an hdu shouldn't
# create duplicate wcs-related entries in the header.
ccd_new_hdu = ccd_new.to_hdu()[0]
assert len(ccd_new_hdu.header) == original_header_length
# Making a CCDData with WCS (but not WCS in the header) should lead to
# WCS information in the header when it is converted to an HDU.
ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu")
hdu = ccd_wcs_not_in_header.to_hdu()[0]
wcs_header = wcs.to_header()
for k in wcs_header.keys():
# Skip these keywords if they are in the WCS header because they are
# not WCS-specific.
if k in ["", "COMMENT", "HISTORY"]:
continue
# No keyword from the WCS should be in the header.
assert k not in ccd_wcs_not_in_header.header
# Every keyword in the WCS should be in the header of the HDU
assert hdu.header[k] == wcs_header[k]
    # Now check that if the WCS of a CCDData is modified and the CCDData is
    # then converted to an HDU, the WCS keywords in the header are overwritten
    # with the appropriate keywords from the modified WCS.
#
# ccd_new has a WCS and WCS keywords in the header, so try modifying
# the WCS.
ccd_new.wcs.wcs.cdelt *= 2
ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0]
assert ccd_new_hdu_mod_wcs.header["CDELT1"] == ccd_new.wcs.wcs.cdelt[0]
assert ccd_new_hdu_mod_wcs.header["CDELT2"] == ccd_new.wcs.wcs.cdelt[1]
def test_wcs_keywords_removed_from_header():
"""
Test, for the file included with the nddata tests, that WCS keywords are
properly removed from header.
"""
from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
data_file = get_pkg_data_filename("data/sip-wcs.fits")
ccd = CCDData.read(data_file)
with pytest.warns(
AstropyWarning, match=r"Some non-standard WCS keywords were excluded"
):
wcs_header = ccd.wcs.to_header()
assert not (set(wcs_header) & set(ccd.meta) - keepers)
# Make sure that exceptions are not raised when trying to remove missing
# keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'.
data_file1 = get_pkg_data_filename(
"data/o4sp040b0_raw.fits", package="astropy.io.fits.tests"
)
if PYTEST_LT_8_0:
ctx = nullcontext()
else:
ctx = pytest.warns(FITSFixedWarning, match="'datfix' made the change")
with pytest.warns(FITSFixedWarning, match="'unitfix' made the change"), ctx:
ccd = CCDData.read(data_file1, unit="count")
def test_wcs_SIP_coefficient_keywords_removed():
# If SIP polynomials are present, check that no more polynomial
# coefficients remain in the header. See #8598
# The SIP paper is ambiguous as to whether keywords like
# A_0_0 can appear in the header for a 2nd order or higher
# polynomial. The paper clearly says that the corrections
# are only for quadratic or higher order, so A_0_0 and the like
# should be zero if they are present, but they apparently can be
# there (or at least astrometry.net produces them).
# astropy WCS does not write those coefficients, so they were
# not being removed from the header even though they are WCS-related.
data_file = get_pkg_data_filename("data/sip-wcs.fits")
test_keys = ["A_0_0", "B_0_1"]
# Make sure the keywords added to this file for testing are there
with fits.open(data_file) as hdu:
for key in test_keys:
assert key in hdu[0].header
ccd = CCDData.read(data_file)
# Now the test...the two keywords above should have been removed.
for key in test_keys:
assert key not in ccd.header
@pytest.mark.filterwarnings("ignore")
def test_wcs_keyword_removal_for_wcs_test_files():
"""
Test, for the WCS test files, that keyword removal works as
expected. Those cover a much broader range of WCS types than
test_wcs_keywords_removed_from_header.
Includes regression test for #8597
"""
from astropy.nddata.ccddata import (
_KEEP_THESE_KEYWORDS_IN_HEADER,
_CDs,
_generate_wcs_and_update_header,
_PCs,
)
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
# NOTE: pyinstaller requires relative path here.
wcs_headers = get_pkg_data_filenames("../../wcs/tests/data", pattern="*.hdr")
for hdr in wcs_headers:
# Skip the files that are expected to be bad...
if (
"invalid" in hdr
or "nonstandard" in hdr
or "segfault" in hdr
or "chandra-pixlist-wcs" in hdr
):
continue
header_string = get_pkg_data_contents(hdr)
header = fits.Header.fromstring(header_string)
wcs = WCS(header_string)
header_from_wcs = wcs.to_header(relax=True)
new_header, new_wcs = _generate_wcs_and_update_header(header)
new_wcs_header = new_wcs.to_header(relax=True)
# Make sure all of the WCS-related keywords generated by astropy
# have been removed.
assert not (set(new_header) & set(new_wcs_header) - keepers)
# Check that new_header contains no remaining WCS information.
# Specifically, check that
# 1. The combination of new_header and new_wcs does not contain
# both PCi_j and CDi_j keywords. See #8597.
# Check for 1
final_header = new_header + new_wcs_header
final_header_set = set(final_header)
if _PCs & final_header_set:
assert not (_CDs & final_header_set)
elif _CDs & final_header_set:
assert not (_PCs & final_header_set)
# Check that the new wcs is the same as the old.
for k, v in new_wcs_header.items():
if isinstance(v, str):
assert header_from_wcs[k] == v
else:
np.testing.assert_almost_equal(header_from_wcs[k], v)
def test_read_wcs_not_creatable(tmp_path):
# The following Header can't be converted to a WCS object. See also #6499.
hdr_txt_example_WCS = textwrap.dedent(
"""
SIMPLE = T / Fits standard
BITPIX = 16 / Bits per pixel
NAXIS = 2 / Number of axes
NAXIS1 = 1104 / Axis length
NAXIS2 = 4241 / Axis length
CRVAL1 = 164.98110962 / Physical value of the reference pixel X
CRVAL2 = 44.34089279 / Physical value of the reference pixel Y
CRPIX1 = -34.0 / Reference pixel in X (pixel)
CRPIX2 = 2041.0 / Reference pixel in Y (pixel)
CDELT1 = 0.10380000 / X Scale projected on detector (#/pix)
CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix)
CTYPE1 = 'RA---TAN' / Pixel coordinate system
CTYPE2 = 'WAVELENGTH' / Pixel coordinate system
CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1
CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2
CD1_1 = 0.20760000 / Pixel Coordinate translation matrix
CD1_2 = 0.00000000 / Pixel Coordinate translation matrix
CD2_1 = 0.00000000 / Pixel Coordinate translation matrix
CD2_2 = 0.10380000 / Pixel Coordinate translation matrix
C2YPE1 = 'RA---TAN' / Pixel coordinate system
C2YPE2 = 'DEC--TAN' / Pixel coordinate system
C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1
C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2
RADECSYS= 'FK5 ' / The equatorial coordinate system
"""
)
hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep="\n")
hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)])
filename = str(tmp_path / "a_file.fits")
hdul.writeto(filename)
# The hdr cannot be converted to a WCS object because of an
# InconsistentAxisTypesError but it should still open the file
ccd = CCDData.read(filename, unit="adu")
assert ccd.wcs is None
def test_header():
ccd_data = create_ccd_data()
a = {"Observer": "Hubble"}
ccd = CCDData(ccd_data, header=a)
assert ccd.meta == a
def test_wcs_arithmetic():
ccd_data = create_ccd_data()
wcs = WCS(naxis=2)
ccd_data.wcs = wcs
result = ccd_data.multiply(1.0)
nd_testing.assert_wcs_seem_equal(result.wcs, wcs)
@pytest.mark.parametrize("operation", ["multiply", "divide", "add", "subtract"])
def test_wcs_arithmetic_ccd(operation):
ccd_data = create_ccd_data()
ccd_data2 = ccd_data.copy()
ccd_data.wcs = WCS(naxis=2)
method = getattr(ccd_data, operation)
result = method(ccd_data2)
nd_testing.assert_wcs_seem_equal(result.wcs, ccd_data.wcs)
assert ccd_data2.wcs is None
def test_wcs_sip_handling():
"""
Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive
a roundtrip unchanged.
"""
data_file = get_pkg_data_filename("data/sip-wcs.fits")
def check_wcs_ctypes(header):
expected_wcs_ctypes = {"CTYPE1": "RA---TAN-SIP", "CTYPE2": "DEC--TAN-SIP"}
return [header[k] == v for k, v in expected_wcs_ctypes.items()]
ccd_original = CCDData.read(data_file)
# After initialization the keywords should be in the WCS, not in the
# meta.
with fits.open(data_file) as raw:
good_ctype = check_wcs_ctypes(raw[0].header)
assert all(good_ctype)
ccd_new = ccd_original.to_hdu()
good_ctype = check_wcs_ctypes(ccd_new[0].header)
assert all(good_ctype)
# Try converting to header with wcs_relax=False and
# the header should contain the CTYPE keywords without
# the -SIP
ccd_no_relax = ccd_original.to_hdu(wcs_relax=False)
good_ctype = check_wcs_ctypes(ccd_no_relax[0].header)
assert not any(good_ctype)
assert ccd_no_relax[0].header["CTYPE1"] == "RA---TAN"
assert ccd_no_relax[0].header["CTYPE2"] == "DEC--TAN"
@pytest.mark.parametrize("operation", ["multiply", "divide", "add", "subtract"])
def test_mask_arithmetic_ccd(operation):
ccd_data = create_ccd_data()
ccd_data2 = ccd_data.copy()
ccd_data.mask = ccd_data.data > 0
method = getattr(ccd_data, operation)
result = method(ccd_data2)
np.testing.assert_equal(result.mask, ccd_data.mask)
def test_write_read_multiextensionfits_mask_default(tmp_path):
# Test that if a mask is present the mask is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
filename = str(tmp_path / "a_file.fits")
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
@pytest.mark.parametrize(
"uncertainty_type", [StdDevUncertainty, VarianceUncertainty, InverseVariance]
)
def test_write_read_multiextensionfits_uncertainty_default(tmp_path, uncertainty_type):
    # Test that if an uncertainty is present it is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10)
filename = str(tmp_path / "a_file.fits")
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is not None
assert type(ccd_after.uncertainty) is uncertainty_type
np.testing.assert_array_equal(
ccd_data.uncertainty.array, ccd_after.uncertainty.array
)
@pytest.mark.parametrize(
"uncertainty_type", [StdDevUncertainty, VarianceUncertainty, InverseVariance]
)
def test_write_read_multiextensionfits_uncertainty_different_uncertainty_key(
tmp_path, uncertainty_type
):
    # Test that the key used to record the uncertainty type in the FITS
    # header can be customized on both write and read.
ccd_data = create_ccd_data()
ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10)
filename = str(tmp_path / "a_file.fits")
ccd_data.write(filename, key_uncertainty_type="Blah")
ccd_after = CCDData.read(filename, key_uncertainty_type="Blah")
assert ccd_after.uncertainty is not None
assert type(ccd_after.uncertainty) is uncertainty_type
np.testing.assert_array_equal(
ccd_data.uncertainty.array, ccd_after.uncertainty.array
)
def test_write_read_multiextensionfits_not(tmp_path):
# Test that writing mask and uncertainty can be disabled
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = str(tmp_path / "a_file.fits")
ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
def test_write_read_multiextensionfits_custom_ext_names(tmp_path):
# Test writing mask, uncertainty in another extension than default
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = str(tmp_path / "a_file.fits")
ccd_data.write(filename, hdu_mask="Fun", hdu_uncertainty="NoFun")
# Try reading with defaults extension names
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
# Try reading with custom extension names
ccd_after = CCDData.read(filename, hdu_mask="Fun", hdu_uncertainty="NoFun")
assert ccd_after.uncertainty is not None
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
np.testing.assert_array_equal(
ccd_data.uncertainty.array, ccd_after.uncertainty.array
)
def test_read_old_style_multiextensionfits(tmp_path):
# Regression test for https://github.com/astropy/ccdproc/issues/664
#
# Prior to astropy 3.1 there was no uncertainty type saved
# in the multiextension fits files generated by CCDData
    # because the uncertainty had to be StdDevUncertainty.
#
# Current version should be able to read those in.
#
size = 4
    # The values of the variables below are not important to the test.
data = np.zeros([size, size])
mask = data > 0.9
uncert = np.sqrt(data)
ccd = CCDData(data=data, mask=mask, uncertainty=uncert, unit="adu")
# We'll create the file manually to ensure we have the
# right extension names and no uncertainty type.
hdulist = ccd.to_hdu()
del hdulist[2].header["UTYPE"]
file_name = str(tmp_path / "old_ccddata_mef.fits")
hdulist.writeto(file_name)
ccd = CCDData.read(file_name)
assert isinstance(ccd.uncertainty, StdDevUncertainty)
def test_wcs():
ccd_data = create_ccd_data()
wcs = WCS(naxis=2)
ccd_data.wcs = wcs
assert ccd_data.wcs is wcs
def test_recognized_fits_formats_for_read_write(tmp_path):
# These are the extensions that are supposed to be supported.
ccd_data = create_ccd_data()
supported_extensions = ["fit", "fits", "fts"]
for ext in supported_extensions:
path = str(tmp_path / f"test.{ext}")
ccd_data.write(path)
from_disk = CCDData.read(path)
assert (ccd_data.data == from_disk.data).all()
def test_stddevuncertainty_compat_descriptor_no_parent():
with pytest.raises(MissingDataAssociationException):
StdDevUncertainty(np.ones((10, 10))).parent_nddata
def test_stddevuncertainty_compat_descriptor_no_weakref():
# TODO: Remove this test if astropy 1.0 isn't supported anymore
# This test might create a Memoryleak on purpose, so the last lines after
# the assert are IMPORTANT cleanup.
ccd = CCDData(np.ones((10, 10)), unit="")
uncert = StdDevUncertainty(np.ones((10, 10)))
uncert._parent_nddata = ccd
assert uncert.parent_nddata is ccd
uncert._parent_nddata = None
# https://github.com/astropy/astropy/issues/7595
def test_read_returns_image(tmp_path):
    # Test that CCDData.read returns an image when reading a FITS file
    # containing a table and an image, in that order.
tbl = Table(np.ones(10).reshape(5, 2))
img = np.ones((5, 5))
hdul = fits.HDUList(
hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()), fits.ImageHDU(img)]
)
filename = str(tmp_path / "table_image.fits")
hdul.writeto(filename)
ccd = CCDData.read(filename, unit="adu")
# Expecting to get (5, 5), the size of the image
assert ccd.data.shape == (5, 5)
# https://github.com/astropy/astropy/issues/9664
def test_sliced_ccdata_to_hdu():
wcs = WCS(naxis=2)
wcs.wcs.crpix = 10, 10
ccd = CCDData(np.ones((10, 10)), wcs=wcs, unit="pixel")
trimmed = ccd[2:-2, 2:-2]
hdul = trimmed.to_hdu()
assert isinstance(hdul, fits.HDUList)
assert hdul[0].header["CRPIX1"] == 8
assert hdul[0].header["CRPIX2"] == 8
def test_read_write_tilde_paths(home_is_tmpdir):
# Test for reading and writing to tilde-prefixed paths without errors
ccd_data = create_ccd_data()
filename = os.path.join("~", "test.fits")
ccd_data.write(filename)
ccd_disk = CCDData.read(filename, unit=ccd_data.unit)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
# Ensure the unexpanded path doesn't exist (e.g. no directory whose name is
# a literal ~ was created)
assert not os.path.exists(filename)
def test_ccddata_with_psf():
psf = _random_psf.copy()
ccd = CCDData(_random_array.copy(), unit=u.adu, psf=psf)
assert (ccd.psf == psf).all()
# cannot pass in non-ndarray
with pytest.raises(TypeError, match="The psf must be a numpy array."):
CCDData(_random_array.copy(), unit=u.adu, psf="something")
def test_psf_setter():
psf = _random_psf.copy()
ccd = CCDData(_random_array.copy(), unit=u.adu)
ccd.psf = psf
assert (ccd.psf == psf).all()
# cannot set with non-ndarray
with pytest.raises(TypeError, match="The psf must be a numpy array."):
ccd.psf = 5
def test_write_read_psf(tmp_path):
"""Test that we can round-trip a CCDData with an attached PSF image."""
ccd_data = create_ccd_data()
ccd_data.psf = _random_psf
filename = tmp_path / "test_write_read_psf.fits"
ccd_data.write(filename)
ccd_disk = CCDData.read(filename)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
np.testing.assert_array_equal(ccd_data.psf, ccd_disk.psf)
# Try a different name for the PSF HDU.
filename = tmp_path / "test_write_read_psf_hdu.fits"
ccd_data.write(filename, hdu_psf="PSFOTHER")
# psf will be None if we don't supply the new HDU name to the reader.
ccd_disk = CCDData.read(filename)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
assert ccd_disk.psf is None
# psf will round-trip if we do supply the new HDU name.
ccd_disk = CCDData.read(filename, hdu_psf="PSFOTHER")
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
np.testing.assert_array_equal(ccd_data.psf, ccd_disk.psf)
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@nddata@tests@[email protected]_END.py
|
{
"filename": "radam.py",
"repo_name": "tingyuansen/The_Payne",
"repo_path": "The_Payne_extracted/The_Payne-master/The_Payne/radam.py",
"type": "Python"
}
|
# This code is adapted from:
# Liyuan Liu, Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Jiawei Han. "On the Variance of the Adaptive Learning Rate and Beyond." arXiv preprint arXiv:1908.03265 (2019).
import math
import torch
from torch.optim.optimizer import Optimizer, required
class RAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
else:
p_data_fp32.add_(-step_size, exp_avg)
p.data.copy_(p_data_fp32)
return loss
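# Illustrative usage of the optimizer defined above (added example, not part
# of the original module); assumes an existing torch.nn.Module `model` and a
# loss tensor `loss`:
#     optimizer = RAdam(model.parameters(), lr=1e-3, weight_decay=1e-4)
#     optimizer.zero_grad()
#     loss.backward()
#     optimizer.step()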
class PlainRAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(PlainRAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(PlainRAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
else:
step_size = group['lr'] / (1 - beta1 ** state['step'])
p_data_fp32.add_(-step_size, exp_avg)
p.data.copy_(p_data_fp32)
return loss
class AdamW(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup = 0):
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, warmup = warmup)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
if group['warmup'] > state['step']:
scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']
else:
scheduled_lr = group['lr']
                step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * scheduled_lr, p_data_fp32)
p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
p.data.copy_(p_data_fp32)
return loss
|
tingyuansenREPO_NAMEThe_PaynePATH_START.@The_Payne_extracted@The_Payne-master@[email protected]@.PATH_END.py
|
{
"filename": "make_pollack_10cm.py",
"repo_name": "psheehan/pdspy",
"repo_path": "pdspy_extracted/pdspy-master/pdspy/dust/data/make_pollack_10cm.py",
"type": "Python"
}
|
#!/usr/bin/env python3
from pdspy.dust import *
import numpy
iron = Dust()
iron.set_optical_constants_from_henn("optical_constants/iron.txt")
iron.set_density(7.87)
olivine = Dust()
olivine.set_optical_constants_from_henn("optical_constants/olivine.txt")
olivine.set_density(3.49)
orthopyroxene = Dust()
orthopyroxene.set_optical_constants_from_henn("optical_constants/orthopyroxene.txt")
orthopyroxene.set_density(3.4)
troilite = Dust()
troilite.set_optical_constants_from_henn("optical_constants/troilite.txt")
troilite.set_density(4.83)
organics = Dust()
organics.set_optical_constants_from_henn("optical_constants/organics.txt")
organics.set_density(1.5)
water_ice = Dust()
water_ice.set_optical_constants_from_henn("optical_constants/water_ice.txt")
water_ice.set_density(0.92)
silicates = Dust()
silicates.set_optical_constants_from_draine("optical_constants/astronomical_silicates.txt")
silicates.set_density(3.3)
silicates.calculate_optical_constants_on_wavelength_grid(water_ice.lam)
organics = Dust()
organics.set_optical_constants_from_henn("optical_constants/organics.txt")
organics.set_density(1.5)
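# Convert the adopted mass fractions of the four components into volume
# fractions by dividing by each material's bulk density, normalize them so
# they sum to one, and mix the optical properties with those abundances.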
species = [silicates,troilite,organics,water_ice]
#mass_fraction = numpy.array([6.4e-3,7.68e-4,2.13e-3,1.4e-3])
mass_fraction = numpy.array([3.41e-3,7.68e-4,4.13e-3,5.55e-3])
rho = numpy.array([silicates.rho,troilite.rho,organics.rho,water_ice.rho])
abundances = (mass_fraction/rho)/(mass_fraction/rho).sum()
dust = mix_dust(species, abundances)
amin = 0.005e-4
amax = 1.000e1
pl = 3.5
dust.calculate_size_distribution_opacity(amin, amax, pl, \
coat_volume_fraction=0.0)
dust.write('pollack_10cm.hdf5')
|
psheehanREPO_NAMEpdspyPATH_START.@pdspy_extracted@pdspy-master@pdspy@dust@data@[email protected]_END.py
|
{
"filename": "_showscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattermapbox/marker/_showscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="showscale", parent_name="scattermapbox.marker", **kwargs
):
super(ShowscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattermapbox@marker@[email protected]_END.py
|
{
"filename": "MCeff.ipynb",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/Sandbox/MCeff.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import sys,os
import numpy as np
import matplotlib.pyplot as plt
from pkg_resources import resource_filename
from desiutil.log import get_logger
from desitarget import cuts
#import astropy.io.fits as pyfits
import fitsio
import healpy as hp
```
```python
def mag2flux(mag) :
return 10**(-0.4*(mag-22.5))
```
```python
def flux2mag(flux) :
mag = -2.5*np.log10(flux*(flux>0)+0.001*(flux<=0)) + 22.5
mag[(flux<=0)] = 0.
return mag
```
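A quick sanity check of the two conversions (illustrative values, not from the original run): a 22.5 mag source has a flux of 1 nanomaggie, and the two functions invert each other for positive fluxes.
```python
# illustrative check: mag2flux and flux2mag are inverses for positive fluxes
print(mag2flux(22.5))               # 1.0 (nanomaggies)
print(flux2mag(np.array([1.0])))    # array([22.5])
print(flux2mag(np.array([-1.0])))   # non-positive fluxes are mapped to mag 0
```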
```python
def prof(x,y,bins) :
'''
what AJR calls the '1d plots'; e.g., normalized density versus x
'''
h0,bins=np.histogram(x,bins=bins)
hx,bins=np.histogram(x,bins=bins,weights=x)
hy,bins=np.histogram(x,bins=bins,weights=y)
hy2,bins=np.histogram(x,bins=bins,weights=y**2)
ii=h0>2
mx=hx[ii]/h0[ii]
my=hy[ii]/h0[ii]
var=hy2[ii]/h0[ii]-my**2
ey=np.sqrt(var*(var>0))/np.sqrt(h0[ii])
return mx,my,ey
```
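To illustrate what prof returns (a hypothetical cell with synthetic data): the mean x, the mean y, and the error on the mean y in each populated bin, ready for an errorbar plot.
```python
# hypothetical example with synthetic data
xs = np.random.uniform(0., 1., 10000)
ys = 1. + 0.2*xs + np.random.normal(0., 0.1, 10000)
mx, my, ey = prof(xs, ys, bins=np.linspace(0., 1., 21))
plt.errorbar(mx, my, ey, fmt='o')
plt.xlabel('x')
plt.ylabel('binned mean of y')
plt.show()
```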
```python
def colorplot(ra,dec,z,vmin=None,vmax=None) :
nbins=300
raoffset=80.
rap=(ra+raoffset)%360
h0,xbins,ybins = np.histogram2d(rap,dec,bins=nbins)
hz,xbins,ybins = np.histogram2d(rap,dec,bins=nbins,weights=z*(z>=0))
mz=hz/(h0+(h0==0))*(h0>0)
plt.imshow(mz.T,origin=0,extent=(xbins[0]-raoffset,xbins[-1]-raoffset,ybins[0],ybins[-1]),aspect="auto",vmin=vmin,vmax=vmax)
plt.colorbar()
```
```python
target="ELG"
if target == 'LRG':
colorcuts_function = cuts.isLRG_colors
elif target == 'ELG':
colorcuts_function = cuts.isELG_colors
elif target == 'QSO':
colorcuts_function = cuts.isQSO_colors
else:
colorcuts_function = None
```
```python
#truthf = '/global/cscratch1/sd/raichoor/tmpdir/tmp.dr7.41ra44.-1dec0.clean.deep.fits' #original
truthf = '/global/cscratch1/sd/raichoor/desi_mcsyst/desi_mcsyst_truth.dr7.34ra38.-7dec-3.fits' #new HSC
```
```python
truth = fitsio.read(truthf,1)
print(truth.dtype.names)
```
('brickname', 'objid', 'type', 'ra', 'dec', 'g', 'r', 'z', 'w1', 'w2', 'galdepth_g', 'galdepth_r', 'galdepth_z', 'hsc_object_id', 'hsc_ra', 'hsc_dec', 'hsc_mizuki_photoz_best')
```python
print(len(truth))
gmag = truth["g"]
w = gmag < 24.5
truth = truth[w]
print(len(truth))
gmag = truth["g"]
rmag = truth["r"]
zmag = truth["z"]
```
549181
156861
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:3: RuntimeWarning: invalid value encountered in less
app.launch_new_instance()
```python
gflux = mag2flux(truth["g"])
rflux = mag2flux(truth["r"])
zflux = mag2flux(truth["z"])
w1flux = 2.*zflux#np.zeros(gflux.shape)#just making it bright enough to pass LRG cuts
w2flux = np.zeros(gflux.shape)
```
```python
```
```python
if target == 'LRG':
true_selection = colorcuts_function(gflux=gflux, rflux=rflux, zflux=zflux, w1flux=w1flux, w2flux=w2flux, zfiberflux = zflux,south=True)
if target == 'ELG':
true_selection = colorcuts_function(gflux=gflux, rflux=rflux, zflux=zflux, w1flux=w1flux, w2flux=w2flux,south=True)
star_selection = ((gmag-rmag)>(1.5*(rmag-zmag)+0.0))&((gmag-rmag)<(1.5*(rmag-zmag)+0.4))&((gmag-rmag)>(-1.5*(rmag-zmag)+0.3))&((gmag-rmag)<(-1.5*(rmag-zmag)+2.0))
true_mean=np.mean(true_selection.astype(float))
print(true_mean)
```
0.0519950784452477
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/code/desitarget/master/py/desitarget/cuts.py:299: RuntimeWarning: invalid value encountered in greater
elg &= r - z > 0.3 # blue cut.
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/code/desitarget/master/py/desitarget/cuts.py:300: RuntimeWarning: invalid value encountered in less
elg &= r - z < 1.6 # red cut.
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/code/desitarget/master/py/desitarget/cuts.py:301: RuntimeWarning: invalid value encountered in less
elg &= g - r < -1.2*(r - z) + 1.6 # OII flux cut.
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/code/desitarget/master/py/desitarget/cuts.py:307: RuntimeWarning: invalid value encountered in less
elg &= g - r < 1.15*(r - z) - 0.15
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:5: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:5: RuntimeWarning: invalid value encountered in less
```python
#plt.figure("model")
#plt.subplot(1,2,1)
plt.plot(rmag-zmag,gmag-rmag,".",alpha=0.2,color="gray")
plt.plot((rmag-zmag)[true_selection],(gmag-rmag)[true_selection],".",alpha=1,color="green")
plt.plot((rmag-zmag)[star_selection],(gmag-rmag)[star_selection],".",alpha=1,color="red")
plt.xlabel("rz")
plt.ylabel("gr")
plt.show()
```

```python
#plt.subplot(1,2,2)
print(min(gmag),max(gmag),len(gmag))
plt.plot(gmag,rmag,'k,')
plt.show()
h = np.histogram(gmag,range=(14,25))
print(h)
plt.hist(gmag,range=(14,25))
plt.hist(gmag[true_selection],bins=40,color="green",range=(14,25))
plt.xlabel("gmag")
plt.show()
```
13.805677 24.499989 156861

(array([ 1, 14, 67, 219, 916, 2808, 7524, 18526, 58616,
68169]), array([14. , 15.1, 16.2, 17.3, 18.4, 19.5, 20.6, 21.7, 22.8, 23.9, 25. ],
dtype=float32))

```python
seed = 1
rand = np.random.RandomState(seed)
```
```python
#read in DR8 properties map
pixfn = '/project/projectdirs/desi/target/catalogs/dr8/0.31.1/pixweight/pixweight-dr8-0.31.1.fits'
pix,header=fitsio.read(pixfn,header=True)
HPXNSIDE=header["HPXNSIDE"]
print(pix.dtype.names)
print("number of pixels=",pix.size)
ii=np.where((pix["GALDEPTH_G"]>0)&(pix["GALDEPTH_R"]>0)&(pix["GALDEPTH_Z"]>0)&(pix["FRACAREA"]>0.01))[0]
npix=ii.size
print(npix)
```
('HPXPIXEL', 'FRACAREA', 'STARDENS', 'EBV', 'PSFDEPTH_G', 'PSFDEPTH_R', 'PSFDEPTH_Z', 'GALDEPTH_G', 'GALDEPTH_R', 'GALDEPTH_Z', 'PSFDEPTH_W1', 'PSFDEPTH_W2', 'PSFSIZE_G', 'PSFSIZE_R', 'PSFSIZE_Z', 'ELG', 'LRG', 'QSO', 'BGS_ANY', 'MWS_ANY', 'ALL', 'STD_FAINT', 'STD_BRIGHT', 'LRG_1PASS', 'LRG_2PASS', 'BGS_FAINT', 'BGS_BRIGHT', 'BGS_WISE', 'MWS_BROAD', 'MWS_MAIN_RED', 'MWS_MAIN_BLUE', 'MWS_WD', 'MWS_NEARBY')
number of pixels= 786432
374065
```python
pix = pix[ii]
print(len(pix))
def thphi2radec(theta,phi):
return 180./np.pi*phi,-(180./np.pi*theta-90)
th,phi = hp.pix2ang(HPXNSIDE,pix['HPXPIXEL'])
ra,dec = thphi2radec(th,phi)
```
374065
```python
depth_keyword="PSFDEPTH"
gdepth=-2.5*np.log10(5/np.sqrt(pix[depth_keyword+"_G"]))+22.5
rdepth=-2.5*np.log10(5/np.sqrt(pix[depth_keyword+"_R"]))+22.5
zdepth=-2.5*np.log10(5/np.sqrt(pix[depth_keyword+"_Z"]))+22.5
```
```python
efficiency=np.zeros(npix)
efficiency_of_true_elgs=np.zeros(npix)
efficiency_of_stars=np.zeros(npix)
R_G=3.214 # http://legacysurvey.org/dr8/catalogs/#galactic-extinction-coefficients
R_R=2.165
R_Z=1.211
```
```python
gsigma=1./np.sqrt(pix[depth_keyword+"_G"])
rsigma=1./np.sqrt(pix[depth_keyword+"_R"])
zsigma=1./np.sqrt(pix[depth_keyword+"_Z"])
plt.hist(gsigma,bins=30,range=(0,.15))
plt.xlabel(r'$\sigma$ g flux')
plt.ylabel('# of pixels')
plt.show()
plt.hist(rsigma,bins=30,range=(0,.15))
plt.xlabel(r'$\sigma$ r flux')
plt.ylabel('# of pixels')
plt.show()
plt.hist(zsigma,bins=30,range=(0,.3))
plt.xlabel(r'$\sigma$ z flux')
plt.ylabel('# of pixels')
plt.show()
```



```python
d = np.ones(2)*10
flux2mag(d)
```
array([20., 20.])
```python
def getdper(g,r,z):
wdec = dec > -30
gsl = gsigma[wdec]
rsl = rsigma[wdec]
zsl = zsigma[wdec]
ds = (gsl > g) & (rsl > r) & (zsl > z)
return len(gsl[ds])/len(gsl)
getdper(.1,.1,.1)
```
0.0002617356814763043
```python
# keep same random number
grand = np.random.normal(size=gflux.shape)
rrand = np.random.normal(size=rflux.shape)
zrand = np.random.normal(size=zflux.shape)
```
```python
photz = truth['hsc_mizuki_photoz_best']
```
```python
print(len(gsigma),len(pix))
```
374065 374065
```python
def ELGeffcalc(gsig,rsig,zsig,south=True,snrc=True,zmin=-1,zmax=20):
'''
calculate the ELG efficiency for given g,r,z flux uncertainties and a given region's selection
'''
#seds = {'flat': [1.0, 1.0, 1.0], 'red': [2.5, 1.0, 0.4],'g': [1., 0, 0], 'r': [0, 1.0, 0],'z': [0, 1.0, 0]}
wz = (photz > zmin) & (photz <= zmax)
mgflux = gflux[wz] + grand[wz]*gsig
mrflux = rflux[wz] + rrand[wz]*rsig
mzflux = zflux[wz] + zrand[wz]*zsig
selection_snr = np.zeros_like(mgflux, dtype=bool)
snrg = mgflux/gsig
selection_snr = selection_snr | (snrg > 6.)
snrr = mrflux/rsig
selection_snr = selection_snr | (snrr > 6.)
snrz = mzflux/zsig
selection_snr = selection_snr | (snrz > 6.)
#ivars = [1./gsig**2,1./rsig**2,1./zsig**2]
flatmap = mgflux/(gsig)**2+mrflux/(rsig)**2+mzflux/(zsig)**2
fdiv = 1./(gsig)**2+1./rsig**2+1./(zsig)**2
flatmap /= np.maximum(1.e-16, fdiv)
combined_snr = flatmap * np.sqrt(fdiv) #combined signal to noise matching Dustin's code for a flat SED
#combined_snr = np.sqrt(mgflux**2/gsig**2+mrflux**2/rsig**2+mzflux**2/zsig**2) #old wrong combined signal to noise; tractor ignores anything < 6
selection_snr = selection_snr | (combined_snr > 6)
redmap = mgflux/(gsig)**2/2.5+mrflux/rsig**2+mzflux/(zsig)**2/0.4
sediv = 1./(gsig*2.5)**2+1./rsig**2+1./(zsig*0.4)**2
redmap /= np.maximum(1.e-16, sediv)
combined_snrred = redmap * np.sqrt(sediv) #combined signal to noise; red sed
selection_snr = selection_snr | (combined_snrred>6.)
selection = colorcuts_function(gflux=mgflux, rflux=mrflux, zflux=mzflux, w1flux=w1flux, w2flux=w2flux, south=south)
if snrc:
selection *= selection_snr#( combined_snr > 6 ) * ( mgflux > 4*gsig ) * ( mrflux > 3.5*rsig ) * ( mzflux > 2.*zsig )
#selpb = selection* ( mgflux > 4*gsig ) * ( mrflux > 3.5*rsig ) * ( mzflux > 2.*zsig )
efficiency=np.mean(selection.astype(float))/true_mean
#efficiencyt=np.mean(selpb.astype(float))/true_mean
#efficiency_of_true_elgs=np.mean((true_selection*selection).astype(float))/true_mean
#efficiency_of_stars=np.mean((star_selection*selection).astype(float))/true_mean
return efficiency#,efficiencyt#,efficiency_of_true_elgs,efficiency_of_stars
```
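The flat-SED combined signal-to-noise built above is simply the inverse-variance-weighted flux divided by its uncertainty, i.e. sum(f_b/sigma_b^2)/sqrt(sum(1/sigma_b^2)); a minimal check with made-up single-object numbers:
```python
# minimal algebra check (illustrative numbers, not survey data)
f = np.array([1.0, 1.2, 2.0])    # g, r, z fluxes
s = np.array([0.05, 0.08, 0.2])  # per-band flux uncertainties
fdiv = np.sum(1./s**2)
flatmap = np.sum(f/s**2)/fdiv
print(flatmap*np.sqrt(fdiv), np.sum(f/s**2)/np.sqrt(fdiv))  # identical values
```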
```python
wtgp = 10.**(-0.4*R_G*pix['EBV'])
wtrp = 10.**(-0.4*R_R*pix['EBV'])
wtzp = 10.**(-0.4*R_Z*pix['EBV'])
```
```python
def ELGeffcalcExt(gsig,rsig,zsig,wtg,wtr,wtz,south=True,snrc=True,zmin=-1,zmax=20):
'''
calculate the ELG efficiency for given g,r,z flux uncertainties and a given region's selection
'''
wz = (photz > zmin) & (photz <= zmax)
mgflux = gflux[wz]*wtg + grand[wz]*gsig
mrflux = rflux[wz]*wtr + rrand[wz]*rsig
mzflux = zflux[wz]*wtz + zrand[wz]*zsig
#combined_snr = np.sqrt(mgflux**2/gsig**2+mrflux**2/rsig**2+mzflux**2/zsig**2) #combined signal to noise; tractor ignores anything < 6
#snrcut = ( combined_snr > 6 ) * ( mgflux > 4*gsig ) * ( mrflux > 3.5*rsig ) * ( mzflux > 2.*zsig )
selection = colorcuts_function(gflux=mgflux/wtg, rflux=mrflux/wtr, zflux=mzflux/wtz, w1flux=w1flux, w2flux=w2flux, south=south)
selection_snr = np.zeros_like(mgflux, dtype=bool)
snrg = mgflux/gsig
selection_snr = selection_snr | (snrg > 6.)
snrr = mrflux/rsig
selection_snr = selection_snr | (snrr > 6.)
snrz = mzflux/zsig
selection_snr = selection_snr | (snrz > 6.)
#ivars = [1./gsig**2,1./rsig**2,1./zsig**2]
flatmap = mgflux/(gsig)**2+mrflux/(rsig)**2+mzflux/(zsig)**2
fdiv = 1./(gsig)**2+1./rsig**2+1./(zsig)**2
flatmap /= np.maximum(1.e-16, fdiv)
combined_snr = flatmap * np.sqrt(fdiv) #combined signal to noise matching Dustin's code for a flat SED
#combined_snr = np.sqrt(mgflux**2/gsig**2+mrflux**2/rsig**2+mzflux**2/zsig**2) #old wrong combined signal to noise; tractor ignores anything < 6
selection_snr = selection_snr | (combined_snr > 6)
redmap = mgflux/(gsig)**2/2.5+mrflux/rsig**2+mzflux/(zsig)**2/0.4
sediv = 1./(gsig*2.5)**2+1./rsig**2+1./(zsig*0.4)**2
redmap /= np.maximum(1.e-16, sediv)
combined_snrred = redmap * np.sqrt(sediv) #combined signal to noise; red sed
selection_snr = selection_snr | (combined_snrred>6.)
if snrc:
selection *= selection_snr#snrcut#( combined_snr > 6 ) * ( mgflux > 4*gsig ) * ( mrflux > 3.5*rsig ) * ( mzflux > 2.*zsig )
efficiency=np.mean(selection.astype(float))/true_mean
#efficiency_of_true_elgs=np.mean((true_selection*selection).astype(float))/true_mean
#efficiency_of_stars=np.mean((star_selection*selection).astype(float))/true_mean
return efficiency#,efficiency_of_true_elgs,efficiency_of_stars
```
```python
ELGeffcalc(0.05,0.08,0.2)
```
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:14: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:16: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:23: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:28: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:32: RuntimeWarning: invalid value encountered in greater
(1.092079450711133, 1.0820255026974008)
```python
ebv = 0.1
ELGeffcalcExt(0.02,0.04,0.08,10.**(-0.4*R_G*ebv),10.**(-0.4*R_R*ebv),10.**(-0.4*R_Z*ebv))
```
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:17: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:19: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:26: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:31: RuntimeWarning: invalid value encountered in greater
1.0137322216772928
```python
def ELGsel(gsig,rsig,zsig,south=True,snrc=True):
'''
calculate the ELG selection for given g,r,z flux uncertainties and a given region's selection
'''
mgflux = gflux + grand*gsig
mrflux = rflux + rrand*rsig
mzflux = zflux + zrand*zsig
combined_snr = np.sqrt(mgflux**2/gsig**2+mrflux**2/rsig**2+mzflux**2/zsig**2) #combined signal to noise; tractor ignores anything < 6
selection = colorcuts_function(gflux=mgflux, rflux=mrflux, zflux=mzflux, w1flux=w1flux, w2flux=w2flux, south=south)
if snrc:
selection *= ( combined_snr > 6 ) * ( mgflux > 4*gsig ) * ( mrflux > 3.5*rsig ) * ( mzflux > 2.*zsig )
return selection,mgflux,mrflux,mzflux
```
```python
def ELGzpcalcp(dg,dr,dz,south=True,snrc=True,zmin=-1,zmax=20):
'''
calculate the ELG efficiency when perturbing fluxes by constant amounts of flux given by dg, dr, dz
'''
wz = (photz > zmin) & (photz <= zmax)
mgflux = gflux[wz] + dg
mrflux = rflux[wz] + dr
mzflux = zflux[wz] + dz
#combined_snr = np.sqrt(mgflux**2/gsig**2+mrflux**2/rsig**2+mzflux**2/zsig**2) #combined signal to noise; tractor ignores anything < 6
selection = colorcuts_function(gflux=mgflux, rflux=mrflux, zflux=mzflux, w1flux=w1flux, w2flux=w2flux, south=south)
#if snrc:
# selection *= ( combined_snr > 6 ) * ( mgflux > 4*gsig ) * ( mrflux > 3.5*rsig ) * ( mzflux > 2.*zsig )
efficiency=np.mean(selection.astype(float))/true_mean
#efficiency_of_true_elgs=np.mean((true_selection*selection).astype(float))/true_mean
#efficiency_of_stars=np.mean((star_selection*selection).astype(float))/true_mean
return efficiency#,efficiency_of_true_elgs,efficiency_of_stars
```
```python
def ELGzpcalcm(dg,dr,dz,south=True,snrc=True,zmin=-1,zmax=20):
'''
calculate the ELG efficiency when multiplying by constant amounts of flux given by dg, dr, dz
'''
wz = (photz > zmin) & (photz <= zmax)
mgflux = gflux[wz]*dg
mrflux = rflux[wz]*dr
mzflux = zflux[wz]*dz
#combined_snr = np.sqrt(mgflux**2/gsig**2+mrflux**2/rsig**2+mzflux**2/zsig**2) #combined signal to noise; tractor ignores anything < 6
if target == 'LRG':
selection = colorcuts_function(gflux=mgflux, rflux=mrflux, zflux=mzflux, w1flux=w1flux, w2flux=w2flux, zfiberflux = mzflux, south=south)
if target == 'ELG':
selection = colorcuts_function(gflux=mgflux, rflux=mrflux, zflux=mzflux, w1flux=w1flux, w2flux=w2flux, south=south)
#if snrc:
# selection *= ( combined_snr > 6 ) * ( mgflux > 4*gsig ) * ( mrflux > 3.5*rsig ) * ( mzflux > 2.*zsig )
efficiency=np.mean(selection.astype(float))/true_mean
#efficiency_of_true_elgs=np.mean((true_selection*selection).astype(float))/true_mean
#efficiency_of_stars=np.mean((star_selection*selection).astype(float))/true_mean
return efficiency#,efficiency_of_true_elgs,efficiency_of_stars
```
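Multiplying the input fluxes by a constant d is equivalent to shifting the photometric zero point by -2.5 log10(d) magnitudes, so the later call ELGzpcalcm(.96,1.0,1.) corresponds to sources appearing about 0.044 mag fainter in g:
```python
# zero-point shift corresponding to a multiplicative flux change d
for d in (0.96, 1.0, 1.04):
    print(d, -2.5*np.log10(d))
```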
```python
print(ELGzpcalcp(-0.02,-0.0,-0.0))
print(np.mean(gflux))
```
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-23-c91d7c455862> in <module>
----> 1 print(ELGzpcalcp(-0.02,-0.0,-0.0))
2 print(np.mean(gflux))
NameError: name 'ELGzpcalcp' is not defined
```python
print(ELGzpcalcm(.96,1.0,1.))
#true_mean
```
1.0230279274865262
```python
sel,mg,mr,mz = ELGsel(0.04,0.06,.1)
out = ~true_selection & sel
zout = sel & (photz < 0.8)
print(len(gflux[out]))
print(len(gflux[sel]),len(gflux[true_selection]))
plt.hist(gflux[sel],range=(0,2),bins=100,histtype='step')
plt.hist(gflux[true_selection],range=(0,2),bins=100,histtype='step')
plt.hist(gflux[out],range=(0,2),bins=100,histtype='step')
plt.hist(mg[zout],range=(0,2),bins=100,histtype='step')
plt.xlabel('g flux')
plt.show()
plt.hist(rflux[sel],range=(0,2),bins=100,histtype='step')
plt.hist(rflux[true_selection],range=(0,2),bins=100,histtype='step')
plt.hist(rflux[out],range=(0,2),bins=100,histtype='step')
plt.hist(mr[zout],range=(0,2),bins=100,histtype='step')
plt.xlabel('r flux')
plt.show()
plt.hist(zflux[sel],range=(0,3),bins=100,histtype='step')
plt.hist(zflux[true_selection],range=(0,3),bins=100,histtype='step')
plt.hist(zflux[out],range=(0,3),bins=100,histtype='step')
plt.hist(mz[out],range=(0,3),bins=100,histtype='step')
plt.hist(mz[zout],range=(0,3),bins=100,histtype='step')
plt.xlabel('z flux')
plt.show()
plt.hist(gflux[sel]/rflux[sel],range=(0,1.5),bins=100,histtype='step')
plt.hist(gflux[true_selection]/rflux[true_selection],range=(0,1.5),bins=100,histtype='step')
plt.hist(gflux[out]/rflux[out],range=(0,1.5),bins=100,histtype='step')
plt.hist(mg[zout]/mr[zout],range=(0,1.5),bins=100,histtype='step')
plt.xlabel('g/r')
plt.show()
plt.hist(rflux[sel]/zflux[sel],range=(0,1.5),bins=100,histtype='step')
plt.hist(rflux[true_selection]/zflux[true_selection],range=(0,1.5),bins=100,histtype='step')
plt.hist(rflux[out]/zflux[out],range=(0,1.5),bins=100,histtype='step')
plt.hist(mr[zout]/mz[zout],range=(0,1.5),bins=100,histtype='step')
plt.xlabel('r/z')
plt.show()
plt.hist(photz[sel],range=(0,1.5),bins=100,histtype='step')
plt.hist(photz[true_selection],range=(0,1.5),bins=100,histtype='step')
plt.hist(photz[out],range=(0,1.5),bins=100,histtype='step')
plt.xlabel('photoz')
plt.show()
#plt.hist(rflux[sel],range=(0,2),bins=100,histtype='step')
#plt.hist(rflux[true_selection],range=(0,2),bins=100,histtype='step')
#plt.hist(rflux[out],range=(0,2),bins=100,histtype='step')
#plt.xlabel('r flux')
#plt.show()
```
2304
8571 8156
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:315: RuntimeWarning: invalid value encountered in greater
elg &= r - z > 0.3 # blue cut.
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:316: RuntimeWarning: invalid value encountered in less
elg &= r - z < 1.6 # red cut.
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:317: RuntimeWarning: invalid value encountered in less
elg &= g - r < -1.2*(r - z) + 1.6 # OII flux cut.
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:323: RuntimeWarning: invalid value encountered in less
elg &= g - r < 1.15*(r - z) - 0.15
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:13: RuntimeWarning: invalid value encountered in greater






```python
sel1,mg,mr,mz = ELGsel(0.013,0.02,.04) #deep
print(getdper(0.013,0.02,.04))
out1 = ~true_selection & sel1
sel2,mg,mr,mz = ELGsel(0.023,0.041,.06) #slightly worse than median
print(getdper(0.023,0.041,.06))
out2 = ~true_selection & sel2
sel3,mg,mr,mz = ELGsel(0.04,0.065,.085) #shallow
print(getdper(0.04,0.065,.085))
out3 = ~true_selection & sel3
plt.hist(photz[sel1],range=(0.6,1.6),bins=30,histtype='step',linewidth=3)
plt.hist(photz[sel2],range=(0.6,1.6),bins=30,histtype='step',linewidth=3)
plt.hist(photz[sel3],range=(0.6,1.6),bins=30,histtype='step',linewidth=3)
plt.xlabel('photoz')
plt.ylabel('number of targets passing selection')
plt.legend(['95th percentile depth','50th percentile depth','5th percentile depth'],loc='upper right')
plt.show()
```
0.9465196344904352
0.4985058056425611
0.05134908924611495
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:315: RuntimeWarning: invalid value encountered in greater
elg &= r - z > 0.3 # blue cut.
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:316: RuntimeWarning: invalid value encountered in less
elg &= r - z < 1.6 # red cut.
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:317: RuntimeWarning: invalid value encountered in less
elg &= g - r < -1.2*(r - z) + 1.6 # OII flux cut.
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:323: RuntimeWarning: invalid value encountered in less
elg &= g - r < 1.15*(r - z) - 0.15
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:13: RuntimeWarning: invalid value encountered in greater

```python
da = 0.03,0.055,.075
sel4,mg,mr,mz = ELGsel(da[0],da[1],da[2]) #~25th percentile depth
print(getdper(da[0],da[1],da[2]))
da = 0.018,0.0344,.051
sel5,mg,mr,mz = ELGsel(da[0],da[1],da[2]) #~75th percentile depth
print(getdper(da[0],da[1],da[2]))
```
0.2526267045176729
0.755872514589607
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:315: RuntimeWarning: invalid value encountered in greater
elg &= r - z > 0.3 # blue cut.
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:316: RuntimeWarning: invalid value encountered in less
elg &= r - z < 1.6 # red cut.
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:317: RuntimeWarning: invalid value encountered in less
elg &= g - r < -1.2*(r - z) + 1.6 # OII flux cut.
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:323: RuntimeWarning: invalid value encountered in less
elg &= g - r < 1.15*(r - z) - 0.15
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:13: RuntimeWarning: invalid value encountered in greater
```python
h1 = np.histogram(photz[sel1],range=(0.6,1.6),bins=30)
h2 = np.histogram(photz[sel2],range=(0.6,1.6),bins=30)
h3 = np.histogram(photz[sel3],range=(0.6,1.6),bins=30)
h4 = np.histogram(photz[sel4],range=(0.6,1.6),bins=30)
h5 = np.histogram(photz[sel5],range=(0.6,1.6),bins=30)
xl = np.zeros(len(h1[0]))
for i in range(0,len(xl)):
xl[i] = (h1[1][i]+h1[1][i+1])/2.
plt.plot(xl,h5[0]-h1[0])
plt.plot(xl,h2[0]-h1[0])
plt.plot(xl,h4[0]-h1[0])
plt.plot(xl,h3[0]-h1[0])
plt.plot(xl,np.ones(len(xl)),'k:')
plt.xlabel('photoz')
plt.ylabel(r'$\Delta N$ vs. 95th percentile')
plt.legend(['75th percentile depth','50th percentile depth','25th percentile depth','5th percentile depth'],loc='upper right')
plt.show()
```

```python
print(ELGeffcalc(.05,.1,.1,zmin=0.9,zmax=1.6))
print(ELGeffcalc(.2,.2,.2,snrc=False))
```
2.16374405715
2.19997547818
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:315: RuntimeWarning: invalid value encountered in greater
elg &= r - z > 0.3 # blue cut.
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:316: RuntimeWarning: invalid value encountered in less
elg &= r - z < 1.6 # red cut.
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:317: RuntimeWarning: invalid value encountered in less
elg &= g - r < -1.2*(r - z) + 1.6 # OII flux cut.
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/code/desitarget/master/py/desitarget/cuts.py:323: RuntimeWarning: invalid value encountered in less
elg &= g - r < 1.15*(r - z) - 0.15
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:14: RuntimeWarning: invalid value encountered in greater
```python
efficiency =np.zeros(npix)
#efficiency2 =np.zeros(npix)
#efficiency3 =np.zeros(npix)
for j in range(0,len(pix)):# loop on sky pixels using the function
if j%5000==0 : print("{}/{}, {:3.2f}%".format(j,ii.size,float(j)/ii.size*100.))
gsig = gsigma[j]#*10**(0.4*R_G*pix["EBV"][j])
rsig = rsigma[j]#*10**(0.4*R_R*pix["EBV"][j])
zsig = zsigma[j]#*10**(0.4*R_Z*pix["EBV"][j])
#ebv = pix["EBV"][j]
#do for three redshift ranges and for overall
eff = ELGeffcalcExt(gsig,rsig,zsig,wtgp[j],wtrp[j],wtzp[j],south=True)
#eff = ELGeffcalc(gsig,rsig,zsig)
#efficiency[j]=eff
#eff = ELGeffcalc(gsig,rsig,zsig,south=True,zmin=0.6,zmax=0.8)
#efficiency1[j]=eff
#eff = ELGeffcalc(gsig,rsig,zsig,south=True,zmin=0.8,zmax=1.1)
#efficiency2[j]=eff
#eff = ELGeffcalc(gsig,rsig,zsig,south=True,zmin=1.1,zmax=1.6)
#efficiency3[j]=eff
#efficiency_of_true_elgs[j]=efft
#efficiency_of_stars[j]=effs
```
0/374065, 0.00%
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:14: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:16: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:23: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:28: RuntimeWarning: invalid value encountered in greater
5000/374065, 1.34%
10000/374065, 2.67%
15000/374065, 4.01%
20000/374065, 5.35%
25000/374065, 6.68%
30000/374065, 8.02%
35000/374065, 9.36%
40000/374065, 10.69%
45000/374065, 12.03%
50000/374065, 13.37%
55000/374065, 14.70%
60000/374065, 16.04%
65000/374065, 17.38%
70000/374065, 18.71%
75000/374065, 20.05%
80000/374065, 21.39%
85000/374065, 22.72%
90000/374065, 24.06%
95000/374065, 25.40%
100000/374065, 26.73%
105000/374065, 28.07%
110000/374065, 29.41%
115000/374065, 30.74%
120000/374065, 32.08%
125000/374065, 33.42%
130000/374065, 34.75%
135000/374065, 36.09%
140000/374065, 37.43%
145000/374065, 38.76%
150000/374065, 40.10%
155000/374065, 41.44%
160000/374065, 42.77%
165000/374065, 44.11%
170000/374065, 45.45%
175000/374065, 46.78%
180000/374065, 48.12%
185000/374065, 49.46%
190000/374065, 50.79%
200000/374065, 53.47%
205000/374065, 54.80%
210000/374065, 56.14%
215000/374065, 57.48%
220000/374065, 58.81%
225000/374065, 60.15%
230000/374065, 61.49%
235000/374065, 62.82%
240000/374065, 64.16%
245000/374065, 65.50%
250000/374065, 66.83%
255000/374065, 68.17%
260000/374065, 69.51%
265000/374065, 70.84%
270000/374065, 72.18%
275000/374065, 73.52%
280000/374065, 74.85%
285000/374065, 76.19%
290000/374065, 77.53%
295000/374065, 78.86%
300000/374065, 80.20%
305000/374065, 81.54%
310000/374065, 82.87%
315000/374065, 84.21%
320000/374065, 85.55%
325000/374065, 86.88%
330000/374065, 88.22%
335000/374065, 89.56%
340000/374065, 90.89%
345000/374065, 92.23%
350000/374065, 93.57%
355000/374065, 94.90%
360000/374065, 96.24%
365000/374065, 97.58%
370000/374065, 98.91%
```python
print(i)
#for j in range(0,len(pix)):# loop on sky pixels
# if j%5000==0 : print("{}/{}, {:3.2f}%".format(j,ii.size,float(j)/ii.size*100.))
# gsig = gsigma[j]*10**(0.4*R_G*pix["EBV"][j])
# rsig = rsigma[j]*10**(0.4*R_R*pix["EBV"][j])
# zsig = zsigma[j]*10**(0.4*R_Z*pix["EBV"][j])
# mgflux = gflux + grand*gsig
# mrflux = rflux + rrand*rsig
# mzflux = zflux + zrand*zsig
# combined_snr = np.sqrt(mgflux**2/gsig**2+mrflux**2/rsig**2+mzflux**2/zsig**2)
# selection = colorcuts_function(gflux=mgflux, rflux=mrflux, zflux=mzflux, w1flux=w1flux, w2flux=w2flux, south=True) * ( combined_snr > 6 ) * ( gflux > 4*gsig ) * ( rflux > 3.5*rsig ) * ( zflux > 2.*zsig )
# efficiency[j]=np.mean(selection.astype(float))/true_mean
# efficiency_of_true_elgs[j]=np.mean((true_selection*selection).astype(float))/true_mean
# efficiency_of_stars[j]=np.mean((star_selection*selection).astype(float))/true_mean
```
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-1-3088dfa94700> in <module>
----> 1 print(i)
2 #for j in range(0,len(pix)):# loop on sky pixels
3
4 # if j%5000==0 : print("{}/{}, {:3.2f}%".format(j,ii.size,float(j)/ii.size*100.))
5
NameError: name 'i' is not defined
```python
#print(min(efficiency),max(efficiency),np.mean(efficiency))
```
```python
#plt.hist(efficiency,bins=100)
#plt.show()
```
```python
outf = os.getenv('SCRATCH')+'/ELGMCeffHSCHPsed.fits'
```
```python
import astropy.io.fits as fits
collist = []
collist.append(fits.Column(name='HPXPIXEL',format='K',array=pix['HPXPIXEL']))
collist.append(fits.Column(name='EFF',format='D',array=efficiency))
#collist.append(fits.Column(name='EFF0608',format='D',array=efficiency1))
#collist.append(fits.Column(name='EFF0811',format='D',array=efficiency2))
#collist.append(fits.Column(name='EFF1116',format='D',array=efficiency3))
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(collist))
hdu.writeto(outf,overwrite=True)
```
```python
p = 2.
efficiencyn = (efficiency/np.mean(efficiency))**p
efficiencyn1 = (efficiency1/np.mean(efficiency1))**p
efficiencyn2 = (efficiency2/np.mean(efficiency2))**p
efficiencyn3 = (efficiency3/np.mean(efficiency3))**p
```
```python
effo = fitsio.read(os.getenv('SCRATCH')+'/ELGMCeffHSCHPfixext.fits')
print(effo.dtype.names)
```
('HPXPIXEL', 'EFF')
```python
import healpy
th,phi = healpy.pix2ang(HPXNSIDE,pix['HPXPIXEL'],nest=True)
def thphi2radec(theta,phi):
return 180./np.pi*phi,-(180./np.pi*theta-90)
ra,dec = thphi2radec(th,phi)
```
```python
plt.scatter(ra,dec,c=(efficiency/np.mean(efficiency)),s=.1,vmin=.8,vmax=1.2)
plt.title('including red filter / old')
plt.colorbar()
plt.show()
```

```python
plt.scatter(ra,dec,c=(effo['EFF']/np.mean(effo['EFF']))**2.,s=.1,vmin=.8,vmax=1.2)
plt.colorbar()
plt.show()
```

```python
plt.scatter(ra,dec,c=efficiencyn1,s=.1)
plt.colorbar()
plt.show()
plt.scatter(ra,dec,c=efficiencyn2,s=.1)
plt.colorbar()
plt.show()
plt.scatter(ra,dec,c=efficiencyn3,s=.1)
plt.colorbar()
plt.show()
```




```python
gsigl = gsigma*10**(0.4*R_G*pix["EBV"])
rsigl = rsigma*10**(0.4*R_R*pix["EBV"])
zsigl = zsigma*10**(0.4*R_Z*pix["EBV"])
```
```python
plt.plot(gsigl,efficiencyn1,'k,')
plt.show()
plt.plot(gsigl,efficiencyn2,'k,')
plt.show()
plt.plot(gsigl,efficiencyn3,'k,')
plt.show()
```



```python
old = fitsio.read(os.getenv('SCRATCH')+'/ELGMCeffHSCHP.fits')
```
```python
th,phi = healpy.pix2ang(HPXNSIDE,old['HPXPIXEL'],nest=True)
ra,dec = thphi2radec(th,phi)
plt.scatter(ra,dec,c=old['EFF'],s=.1,vmin=.9,vmax=1.2)
plt.show()
```

```python
plt.scatter(ra,dec,c=efficiency-old['EFF'],s=.1)
plt.colorbar()
plt.show()
```

```python
print(np.mean(efficiency-old['EFF']))
```
-0.00583528889854
```python
plt.hist(efficiency-old['EFF'],bins=100)
plt.show()
```

```python
gsigd = gsigma*10**(0.4*R_G*pix["EBV"])
rsigd = rsigma*10**(0.4*R_R*pix["EBV"])
zsigd = zsigma*10**(0.4*R_Z*pix["EBV"])
```
```python
plt.hist(gsigd,bins=30,range=(0,.15))
plt.show()
```

```python
plt.hist(rsigd,bins=30,range=(0,0.15))
plt.show()
```

```python
plt.hist(zsigd,bins=30,range=(0,0.4))
plt.show()
```

```python
#now check histograms for actual data
elgf = os.getenv('SCRATCH')+'/ELGtargetinfo.fits'
felg = fitsio.read(elgf)
```
```python
gsigmad=1./np.sqrt(felg[depth_keyword+"_G"])
rsigmad=1./np.sqrt(felg[depth_keyword+"_R"])
zsigmad=1./np.sqrt(felg[depth_keyword+"_Z"])
```
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:1: RuntimeWarning: divide by zero encountered in true_divide
if __name__ == '__main__':
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:2: RuntimeWarning: divide by zero encountered in true_divide
from ipykernel import kernelapp as app
/global/common/software/desi/cori/desiconda/20180709-1.2.6-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:3: RuntimeWarning: divide by zero encountered in true_divide
app.launch_new_instance()
```python
gsige = gsigmad*10**(0.4*R_G*felg["EBV"])
rsige = rsigmad*10**(0.4*R_R*felg["EBV"])
zsige = zsigmad*10**(0.4*R_Z*felg["EBV"])
```
```python
plt.hist(gsige,bins=30,range=(0,.15))
plt.show()
```

```python
plt.hist(rsige,bins=30,range=(0,.15))
plt.show()
```

```python
plt.hist(zsige,bins=30,range=(0,.4))
plt.show()
```

```python
50**3.
```
125000.0
```python
30**3.
```
27000.0
```python
fout = os.getenv('SCRATCH')+'/ELGeffredfiltgridsouth.dat'
fo = open(fout,'w')
fo.write('#gsig rsig zsig target_elg_eff \n')
md = 0
xdg = 0.15
xdr = 0.15
xdz = 0.4
ngp = 30 #number of grid points for each band
for i in range(0,ngp):
dg = (xdg-md)/float(ngp)*i+0.5*(xdg-md)/float(ngp)
for j in range(0,ngp):
dr = (xdr-md)/float(ngp)*j+0.5*(xdr-md)/float(ngp)
for k in range(0,ngp):
dz = (xdz-md)/float(ngp)*k+0.5*(xdz-md)/float(ngp)
eff = ELGeffcalc(dg,dr,dz,south=True,snrc=True)
fo.write(str(dg)+' '+str(dr)+' '+str(dz)+' '+str(eff)+'\n')
#if i == 0:
print(i,dg,dr,dz,eff)
fo.close()
```
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:14: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:16: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:19: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:24: RuntimeWarning: invalid value encountered in greater
0 0.0025 0.1475 0.39333333333333337 0.945806768023541
1 0.0075 0.1475 0.39333333333333337 0.9489946051986268
2 0.0125 0.1475 0.39333333333333337 0.9562285434036293
3 0.017499999999999998 0.1475 0.39333333333333337 0.9585581167238844
4 0.0225 0.1475 0.39333333333333337 0.9646885728298186
5 0.0275 0.1475 0.39333333333333337 0.9717999019127024
6 0.0325 0.1475 0.39333333333333337 0.9877390877881315
7 0.037500000000000006 0.1475 0.39333333333333337 1.0036782736635605
8 0.0425 0.1475 0.39333333333333337 1.0208435507601765
9 0.0475 0.1475 0.39333333333333337 1.0369053457577244
10 0.052500000000000005 0.1475 0.39333333333333337 1.056154977930358
11 0.0575 0.1475 0.39333333333333337 1.0814124570868073
12 0.0625 0.1475 0.39333333333333337 1.1069151544874938
13 0.0675 0.1475 0.39333333333333337 1.1327856792545365
14 0.07250000000000001 0.1475 0.39333333333333337 1.1554683668464936
15 0.0775 0.1475 0.39333333333333337 1.1625796959293773
16 0.0825 0.1475 0.39333333333333337 1.158410985777342
17 0.08750000000000001 0.1475 0.39333333333333337 1.1487248651299657
18 0.0925 0.1475 0.39333333333333337 1.129107405590976
19 0.0975 0.1475 0.39333333333333337 1.1082638548307995
20 0.10250000000000001 0.1475 0.39333333333333337 1.0915890142226583
21 0.1075 0.1475 0.39333333333333337 1.0769985286905346
22 0.1125 0.1475 0.39333333333333337 1.0537027954879845
23 0.11750000000000001 0.1475 0.39333333333333337 1.0423001471309465
24 0.1225 0.1475 0.39333333333333337 1.0283227072094163
25 0.1275 0.1475 0.39333333333333337 1.0163070132417853
26 0.1325 0.1475 0.39333333333333337 1.0052721922511034
27 0.1375 0.1475 0.39333333333333337 0.9973025993133889
28 0.14250000000000002 0.1475 0.39333333333333337 0.9948504168710152
29 0.1475 0.1475 0.39333333333333337 0.9932564982834724
```python
fout = os.getenv('SCRATCH')+'/ELGeffrefiltgridnorth.dat'
fo = open(fout,'w')
fo.write('#gsig rsig zsig target_elg_eff \n')
md = 0
xdg = 0.15
xdr = 0.15
xdz = 0.4
ngp = 30 #number of grid points for each band
for i in range(0,ngp):
dg = (xdg-md)/float(ngp)*i+0.5*(xdg-md)/float(ngp)
for j in range(0,ngp):
dr = (xdr-md)/float(ngp)*j+0.5*(xdr-md)/float(ngp)
for k in range(0,ngp):
dz = (xdz-md)/float(ngp)*k+0.5*(xdz-md)/float(ngp)
eff = ELGeffcalc(dg,dr,dz,south=False,snrc=True)
fo.write(str(dg)+' '+str(dr)+' '+str(dz)+' '+str(eff)+'\n')
#if i == 0:
print(i,dg,dr,dz,eff)
fo.close()
```
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:14: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:16: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:19: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/conda/lib/python3.6/site-packages/ipykernel/__main__.py:24: RuntimeWarning: invalid value encountered in greater
/global/common/software/desi/cori/desiconda/20190804-1.3.0-spec/code/desitarget/master/py/desitarget/cuts.py:310: RuntimeWarning: invalid value encountered in less
elg &= g - r < 1.15*(r - z) - 0.35 # remove stars and low-z galaxies.
0 0.0025 0.1475 0.39333333333333337 0.8001471309465424
1 0.0075 0.1475 0.39333333333333337 0.8027219225110349
2 0.0125 0.1475 0.39333333333333337 0.807381069151545
3 0.017499999999999998 0.1475 0.39333333333333337 0.8110593428151055
4 0.0225 0.1475 0.39333333333333337 0.8195193722412948
5 0.0275 0.1475 0.39333333333333337 0.8346002942618931
6 0.0325 0.1475 0.39333333333333337 0.8501716527709662
7 0.037500000000000006 0.1475 0.39333333333333337 0.8648847474252085
8 0.0425 0.1475 0.39333333333333337 0.8846248160863168
9 0.0475 0.1475 0.39333333333333337 0.9052231486022561
10 0.052500000000000005 0.1475 0.39333333333333337 0.9264345267287887
11 0.0575 0.1475 0.39333333333333337 0.949730259931339
12 0.0625 0.1475 0.39333333333333337 0.9798921039725357
13 0.0675 0.1475 0.39333333333333337 0.9938695438940658
14 0.07250000000000001 0.1475 0.39333333333333337 0.9818538499264345
15 0.0775 0.1475 0.39333333333333337 0.9550024521824424
16 0.0825 0.1475 0.39333333333333337 0.9232466895537028
17 0.08750000000000001 0.1475 0.39333333333333337 0.8875674350171653
18 0.0925 0.1475 0.39333333333333337 0.8550760176557137
19 0.0975 0.1475 0.39333333333333337 0.8283472290338402
20 0.10250000000000001 0.1475 0.39333333333333337 0.8083619421284944
21 0.1075 0.1475 0.39333333333333337 0.7904610102991663
22 0.1125 0.1475 0.39333333333333337 0.7712113781265327
23 0.11750000000000001 0.1475 0.39333333333333337 0.757846983815596
24 0.1225 0.1475 0.39333333333333337 0.7436243256498284
25 0.1275 0.1475 0.39333333333333337 0.7320990681706719
26 0.1325 0.1475 0.39333333333333337 0.7253555664541442
27 0.1375 0.1475 0.39333333333333337 0.7213094654242277
28 0.14250000000000002 0.1475 0.39333333333333337 0.7189798921039725
29 0.1475 0.1475 0.39333333333333337 0.7168955370279548
```python
dn = np.loadtxt(fout).transpose()
```
```python
w = d[2] == 0.1
plt.scatter(d[0][w],d[1][w],c=d[3][w])
plt.xlabel('g sigma')
plt.ylabel('r sigma')
plt.colorbar()
plt.show()
```
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-48-a5f952e1aae9> in <module>()
----> 1 w = d[2] == 0.1
2 plt.scatter(d[0][w],d[1][w],c=d[3][w])
3 plt.xlabel('g sigma')
4 plt.ylabel('r sigma')
5 plt.colorbar()
NameError: name 'd' is not defined
```python
w = dn[2] == 0.1
plt.scatter(dn[0][w],dn[1][w],c=dn[3][w])
plt.xlabel('g sigma')
plt.ylabel('r sigma')
plt.colorbar()
plt.show()
```
```python
w = d[0] == 0.0325
plt.scatter(d[1][w],d[2][w],c=d[3][w])
plt.xlabel('r sigma')
plt.ylabel('z sigma')
plt.colorbar()
plt.show()
```
```python
print(np.unique(d[2]))
```
```python
w = d[1] == 0.0325
plt.scatter(d[0][w],d[2][w],c=d[3][w])
plt.xlabel('g sigma')
plt.ylabel('z sigma')
plt.colorbar()
plt.show()
```
```python
```
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@[email protected]@.PATH_END.py
|
{
"filename": "vlite.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/vectorstores/vlite.py",
"type": "Python"
}
|
from __future__ import annotations
# Standard library imports
from typing import Any, Dict, Iterable, List, Optional, Tuple
from uuid import uuid4
# LangChain imports
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.vectorstores import VectorStore
class VLite(VectorStore):
"""VLite is a simple and fast vector database for semantic search."""
def __init__(
self,
embedding_function: Embeddings,
collection: Optional[str] = None,
**kwargs: Any,
):
super().__init__()
self.embedding_function = embedding_function
self.collection = collection or f"vlite_{uuid4().hex}"
# Third-party imports
try:
from vlite import VLite
except ImportError:
raise ImportError(
"Could not import vlite python package. "
"Please install it with `pip install vlite`."
)
self.vlite = VLite(collection=self.collection, **kwargs)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
texts = list(texts)
ids = kwargs.pop("ids", [str(uuid4()) for _ in texts])
embeddings = self.embedding_function.embed_documents(texts)
if not metadatas:
metadatas = [{} for _ in texts]
data_points = [
{"text": text, "metadata": metadata, "id": id, "embedding": embedding}
for text, metadata, id, embedding in zip(texts, metadatas, ids, embeddings)
]
results = self.vlite.add(data_points)
return [result[0] for result in results]
def add_documents(
self,
documents: List[Document],
**kwargs: Any,
) -> List[str]:
"""Add a list of documents to the vectorstore.
Args:
documents: List of documents to add to the vectorstore.
kwargs: vectorstore specific parameters such as "file_path" for processing
directly with vlite.
Returns:
List of ids from adding the documents into the vectorstore.
"""
ids = kwargs.pop("ids", [str(uuid4()) for _ in documents])
texts = []
metadatas = []
for doc, id in zip(documents, ids):
if "file_path" in kwargs:
# Third-party imports
try:
from vlite.utils import process_file
except ImportError:
raise ImportError(
"Could not import vlite python package. "
"Please install it with `pip install vlite`."
)
processed_data = process_file(kwargs["file_path"])
texts.extend(processed_data)
metadatas.extend([doc.metadata] * len(processed_data))
ids.extend([f"{id}_{i}" for i in range(len(processed_data))])
else:
texts.append(doc.page_content)
metadatas.append(doc.metadata)
return self.add_texts(texts, metadatas, ids=ids)
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k=k)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Tuples of (doc, score), where score is the similarity score.
"""
metadata = filter or {}
embedding = self.embedding_function.embed_query(query)
results = self.vlite.retrieve(
text=query,
top_k=k,
metadata=metadata,
return_scores=True,
embedding=embedding,
)
documents_with_scores = [
(Document(page_content=text, metadata=metadata), score)
for text, score, metadata in results
]
return documents_with_scores
def update_document(self, document_id: str, document: Document) -> None:
"""Update an existing document in the vectorstore."""
self.vlite.update(
document_id, text=document.page_content, metadata=document.metadata
)
def get(self, ids: List[str]) -> List[Document]:
"""Get documents by their IDs."""
results = self.vlite.get(ids)
documents = [
Document(page_content=text, metadata=metadata) for text, metadata in results
]
return documents
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete by ids."""
if ids is not None:
self.vlite.delete(ids, **kwargs)
return True
return None
@classmethod
def from_existing_index(
cls,
embedding: Embeddings,
collection: str,
**kwargs: Any,
) -> VLite:
"""Load an existing VLite index.
Args:
embedding: Embedding function
collection: Name of the collection to load.
Returns:
VLite vector store.
"""
vlite = cls(embedding_function=embedding, collection=collection, **kwargs)
return vlite
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection: Optional[str] = None,
**kwargs: Any,
) -> VLite:
"""Construct VLite wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Adds the documents to the vectorstore.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import VLite
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vlite = VLite.from_texts(texts, embeddings)
"""
vlite = cls(embedding_function=embedding, collection=collection, **kwargs)
vlite.add_texts(texts, metadatas, **kwargs)
return vlite
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Embeddings,
collection: Optional[str] = None,
**kwargs: Any,
) -> VLite:
"""Construct VLite wrapper from a list of documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Adds the documents to the vectorstore.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import VLite
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vlite = VLite.from_documents(documents, embeddings)
"""
vlite = cls(embedding_function=embedding, collection=collection, **kwargs)
vlite.add_documents(documents, **kwargs)
return vlite
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@[email protected]@.PATH_END.py
|
{
"filename": "test_integrations.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/tests/_internal/test_integrations.py",
"type": "Python"
}
|
import re
import prefect
from prefect._internal.integrations import KNOWN_EXTRAS_FOR_PACKAGES
def extract_extras_require(setup_py_content):
# Use regular expressions to find the extras_require dictionary
match = re.search(r"extras_require\s*=\s*(\{.*?\})", setup_py_content, re.DOTALL)
if match:
extras_require_str = match.group(1)
# Define the context for eval
client_requires = []
install_requires = []
dev_requires = []
otel_requires = []
markdown_requirements = []
markdown_tests_requires = []
# Evaluate the dictionary string to a Python dictionary
extras_require = eval(
extras_require_str,
{
"client_requires": client_requires,
"install_requires": install_requires,
"dev_requires": dev_requires,
"otel_requires": otel_requires,
"markdown_requirements": markdown_requirements,
"markdown_tests_requires": markdown_tests_requires,
},
)
return extras_require
return {}
def test_known_extras_for_packages():
setup_py_contents = (prefect.__development_base_path__ / "setup.py").read_text()
# Extract the extras_require dictionary
extras_require = extract_extras_require(setup_py_contents)
# Check each entry in known_extras
for package, extra in KNOWN_EXTRAS_FOR_PACKAGES.items():
extra_name = extra.split("[")[1][:-1]
assert (
extra_name in extras_require
), f"Extra '{extra_name}' for package '{package}' not found in setup.py"
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@tests@_internal@[email protected]_END.py
|
{
"filename": "templates.py",
"repo_name": "smsharma/fermi-gce-flows",
"repo_path": "fermi-gce-flows_extracted/fermi-gce-flows-main/utils/templates.py",
"type": "Python"
}
|
import sys
sys.path.append("../")
import numpy as np
import healpy as hp
from utils import create_mask as cm
def mod(dividends, divisor):
""" Return dividends (array) mod divisor (double)
Stolen from Nick's code
"""
output = np.zeros(len(dividends))
for i in range(len(dividends)):
output[i] = dividends[i]
done=False
while (not done):
if output[i] >= divisor:
output[i] -= divisor
elif output[i] < 0.:
output[i] += divisor
else:
done=True
return output
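# note: for a positive divisor, mod() above is equivalent to the vectorized
# np.mod(dividends, divisor), which also wraps every value into [0, divisor)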
def rho_NFW(r, gamma=1., r_s=20.):
""" Generalized NFW profile
"""
return (r / r_s) ** -gamma * (1 + (r / r_s)) ** (-3 + gamma)
def rGC(s_ary, b_ary, l_ary, rsun=8.224):
""" Distance to GC as a function of LOS distance, latitude, longitude
"""
return np.sqrt(s_ary ** 2 - 2. * rsun * np.transpose(np.multiply.outer(s_ary, np.cos(b_ary) * np.cos(l_ary))) + rsun ** 2)
def get_NFW2_template(gamma=1.2, nside=128, exp_map=None, roi_normalize=None):
mask = cm.make_mask_total(nside=nside, band_mask = True, band_mask_range = 0,
mask_ring = True, inner = 0, outer = 40)
mask_restrict = np.where(mask == 0)[0]
# Get lon/lat array
theta_ary, phi_ary = hp.pix2ang(nside, mask_restrict)
b_ary = np.pi / 2. - theta_ary
l_ary = mod(phi_ary + np.pi, 2. * np.pi) - np.pi
s_ary = np.linspace(0, 30, 500)
# LOS integral of density^2
int_rho2_temp = np.trapz(rho_NFW(rGC(s_ary, b_ary, l_ary), gamma=gamma) ** 2, s_ary, axis=1)
int_rho2 = np.zeros(hp.nside2npix(nside))
int_rho2[~mask] = int_rho2_temp
if exp_map is not None:
int_rho2 *= exp_map
if roi_normalize is not None:
int_rho2 /= np.mean(int_rho2[~roi_normalize])
return int_rho2
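# Illustrative (hypothetical) usage:
#   template = get_NFW2_template(gamma=1.2, nside=128)
#   hp.mollview(template, title="NFW^2 line-of-sight integral")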
|
smsharmaREPO_NAMEfermi-gce-flowsPATH_START.@fermi-gce-flows_extracted@fermi-gce-flows-main@[email protected]@.PATH_END.py
|
{
"filename": "_colorsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram2dcontour/hoverlabel/font/_colorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="colorsrc",
parent_name="histogram2dcontour.hoverlabel.font",
**kwargs,
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@histogram2dcontour@hoverlabel@font@[email protected]_END.py
|
{
"filename": "langsmith_dataset.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/chat_loaders/langsmith_dataset.ipynb",
"type": "Jupyter Notebook"
}
|
# LangSmith Chat Datasets
This notebook demonstrates an easy way to load a LangSmith chat dataset and fine-tune a model on that data.
The process is simple and comprises 3 steps.
1. Create the chat dataset.
2. Use the LangSmithDatasetChatLoader to load examples.
3. Fine-tune your model.
Then you can use the fine-tuned model in your LangChain app.
Before diving in, let's install our prerequisites.
## Prerequisites
Ensure you've installed langchain >= 0.0.311 and have configured your environment with your LangSmith API key.
```python
%pip install --upgrade --quiet langchain langchain-openai
```
```python
import os
import uuid
uid = uuid.uuid4().hex[:6]
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "YOUR API KEY"
```
## 1. Select a dataset
This notebook fine-tunes a model directly on a chat dataset rather than on hand-selected runs; you will often curate such datasets from traced runs. You can learn more about LangSmith datasets in the [docs](https://docs.smith.langchain.com/evaluation/concepts#datasets).
For the sake of this tutorial, we will upload an existing dataset here that you can use.
```python
from langsmith.client import Client
client = Client()
```
```python
import requests
url = "https://raw.githubusercontent.com/langchain-ai/langchain/master/docs/docs/integrations/chat_loaders/example_data/langsmith_chat_dataset.json"
response = requests.get(url)
response.raise_for_status()
data = response.json()
```
```python
dataset_name = f"Extraction Fine-tuning Dataset {uid}"
ds = client.create_dataset(dataset_name=dataset_name, data_type="chat")
```
```python
_ = client.create_examples(
inputs=[e["inputs"] for e in data],
outputs=[e["outputs"] for e in data],
dataset_id=ds.id,
)
```
## 2. Prepare Data
Now we can create an instance of LangSmithDatasetChatLoader and load the chat sessions using its lazy_load() method.
```python
from langchain_community.chat_loaders.langsmith import LangSmithDatasetChatLoader
loader = LangSmithDatasetChatLoader(dataset_name=dataset_name)
chat_sessions = loader.lazy_load()
```
#### With the chat sessions loaded, convert them into a format suitable for fine-tuning.
```python
from langchain_community.adapters.openai import convert_messages_for_finetuning
training_data = convert_messages_for_finetuning(chat_sessions)
```
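Each converted example is a list of OpenAI-style message dicts (role/content pairs). An optional, purely illustrative inspection of the first example:
```python
# optional: peek at the converted format (output depends on your dataset)
print(len(training_data))
print(training_data[0][:1])
```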
## 3. Fine-tune the Model
Now, initiate the fine-tuning process using the OpenAI library.
```python
import json
import time
from io import BytesIO
import openai
my_file = BytesIO()
for dialog in training_data:
my_file.write((json.dumps({"messages": dialog}) + "\n").encode("utf-8"))
my_file.seek(0)
training_file = openai.files.create(file=my_file, purpose="fine-tune")
job = openai.fine_tuning.jobs.create(
training_file=training_file.id,
model="gpt-3.5-turbo",
)
# Wait for the fine-tuning to complete (this may take some time)
status = openai.fine_tuning.jobs.retrieve(job.id).status
start_time = time.time()
while status != "succeeded":
print(f"Status=[{status}]... {time.time() - start_time:.2f}s", end="\r", flush=True)
time.sleep(5)
status = openai.fine_tuning.jobs.retrieve(job.id).status
# Now your model is fine-tuned!
```
Status=[running]... 429.55s
## 4. Use in LangChain
After fine-tuning, use the resulting model ID with the ChatOpenAI model class in your LangChain app.
```python
# Get the fine-tuned model ID
job = openai.fine_tuning.jobs.retrieve(job.id)
model_id = job.fine_tuned_model
# Use the fine-tuned model in LangChain
from langchain_openai import ChatOpenAI
model = ChatOpenAI(
model=model_id,
temperature=1,
)
```
```python
model.invoke("There were three ravens sat on a tree.")
```
AIMessage(content='[{"s": "There were three ravens", "object": "tree", "relation": "sat on"}, {"s": "three ravens", "object": "a tree", "relation": "sat on"}]')
Now you have successfully fine-tuned a model using data from LangSmith LLM runs!
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@chat_loaders@[email protected]_END.py
|
{
"filename": "read_radtrans.py",
"repo_name": "tomasstolker/species",
"repo_path": "species_extracted/species-main/species/read/read_radtrans.py",
"type": "Python"
}
|
"""
Module for generating atmospheric model spectra with ``petitRADTRANS``.
Details on the radiative transfer, atmospheric setup, and opacities
can be found in `Mollière et al. (2019) <https://ui.adsabs.harvard.edu
/abs/2019A%26A...627A..67M/abstract>`_.
"""
import warnings
from typing import Dict, List, Optional, Tuple, Union
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import spectres
from matplotlib.ticker import MultipleLocator
from PyAstronomy.pyasl import fastRotBroad
from scipy.interpolate import interp1d
from spectres.spectral_resampling_numba import spectres_numba
from typeguard import typechecked
from species.core import constants
from species.core.box import ModelBox, create_box
from species.phot.syn_phot import SyntheticPhotometry
from species.read.read_filter import ReadFilter
from species.util.convert_util import logg_to_mass
from species.util.dust_util import apply_ism_ext
from species.util.retrieval_util import (
calc_metal_ratio,
calc_spectrum_clear,
calc_spectrum_clouds,
convolve_spectrum,
log_x_cloud_base,
pt_ret_model,
pt_spline_interp,
quench_pressure,
scale_cloud_abund,
)
class ReadRadtrans:
"""
Class for generating a model spectrum with ``petitRADTRANS``.
"""
@typechecked
def __init__(
self,
line_species: Optional[List[str]] = None,
cloud_species: Optional[List[str]] = None,
scattering: bool = False,
wavel_range: Optional[Tuple[float, float]] = None,
filter_name: Optional[str] = None,
pressure_grid: str = "smaller",
res_mode: str = "c-k",
cloud_wavel: Optional[Tuple[float, float]] = None,
max_press: Optional[float] = None,
pt_manual: Optional[np.ndarray] = None,
lbl_opacity_sampling: Optional[Union[int, np.int_]] = None,
) -> None:
"""
Parameters
----------
line_species : list, None
List with the line species. No line species are used if set
to ``None``.
cloud_species : list, None
List with the cloud species. No clouds are used if set to
``None``.
scattering : bool
Include scattering in the radiative transfer.
wavel_range : tuple(float, float), None
Wavelength range (:math:`\\mu`m). The wavelength range is
set to 0.8-10.0 :math:`\\mu`m if set to ``None`` or not
used if ``filter_name`` is not ``None``.
filter_name : str, None
Filter name that is used for the wavelength range. The
``wavel_range`` is used if ``filter_name`` is set to
``None``.
pressure_grid : str
The type of pressure grid that is used for the radiative
transfer. Either 'standard', to use 180 layers both for
the atmospheric structure (e.g. when interpolating the
abundances) and 180 layers with the radiative transfer,
or 'smaller' to use 60 (instead of 180) with the radiative
transfer, or 'clouds' to start with 1440 layers but
resample to ~100 layers (depending on the number of cloud
species) with a refinement around the cloud decks. For
cloudless atmospheres it is recommended to use 'smaller',
which runs faster than 'standard' and provides sufficient
            accuracy. For cloudy atmospheres, one can test with
            'smaller', but it is recommended to use 'clouds' for
            improved accuracy of the fluxes. The 'manual' option
            can be used together with the ``pt_manual`` parameter
            to provide a custom pressure grid.
res_mode : str
Resolution mode ('c-k' or 'lbl'). The low-resolution mode
('c-k') calculates the spectrum with the correlated-k
assumption at :math:`\\lambda/\\Delta \\lambda = 1000`. The
high-resolution mode ('lbl') calculates the spectrum with a
line-by-line treatment at
:math:`\\lambda/\\Delta \\lambda = 10^6`.
cloud_wavel : tuple(float, float), None
Tuple with the wavelength range (:math:`\\mu`m) that is
used for calculating the median optical depth of the
clouds at the gas-only photosphere and then scaling the
cloud optical depth to the value of ``log_tau_cloud``.
The range of ``cloud_wavel`` should be encompassed by
the range of ``wavel_range``. The full wavelength
range (i.e. ``wavel_range``) is used if the argument is
set to ``None``.
        max_press : float, None
Maximum pressure (bar) for the free temperature nodes.
The default value is set to 1000 bar.
pt_manual : np.ndarray, None
A 2D array that contains the P-T profile that is used
when ``pressure_grid="manual"``. The shape of array should
be (n_pressure, 2), with pressure (bar) as first column
and temperature (K) as second column. It is recommended
that the pressures are logarithmically spaced.
lbl_opacity_sampling : int, None
This is the same parameter as in ``petitRADTRANS`` which is
used with ``res_mode='lbl'`` to downsample the line-by-line
opacities by selecting every ``lbl_opacity_sampling``-th
wavelength from the original sampling of
:math:`\\lambda/\\Delta \\lambda = 10^6`. Setting this
parameter will lower the computation time. By setting the
argument to ``None``, the original sampling is used so no
downsampling is applied.
Returns
-------
NoneType
None
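        Examples
        --------
        A minimal usage sketch (not part of the original module; the
        opacity names are assumptions that depend on the locally
        installed petitRADTRANS opacity data):
        >>> read_rad = ReadRadtrans(
        ...     line_species=["H2O_HITEMP", "CO_all_iso_HITEMP", "CH4"],
        ...     scattering=False,
        ...     wavel_range=(1.0, 5.0),
        ...     pressure_grid="smaller",
        ...     res_mode="c-k",
        ... )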
"""
# Set several of the required ReadRadtrans attributes
self.filter_name = filter_name
self.wavel_range = wavel_range
self.scattering = scattering
self.pressure_grid = pressure_grid
self.cloud_wavel = cloud_wavel
self.pt_manual = pt_manual
self.lbl_opacity_sampling = lbl_opacity_sampling
# Set maximum pressure
if max_press is None:
self.max_press = 1e3
else:
self.max_press = max_press
# Set the wavelength range
if self.filter_name is not None:
transmission = ReadFilter(self.filter_name)
self.wavel_range = transmission.wavelength_range()
self.wavel_range = (0.9 * self.wavel_range[0], 1.2 * self.wavel_range[1])
elif self.wavel_range is None:
self.wavel_range = (0.8, 10.0)
# Set the list with line species
if line_species is None:
self.line_species = []
else:
self.line_species = line_species
# Set the list with cloud species and the number of P-T points
if cloud_species is None:
self.cloud_species = []
else:
self.cloud_species = cloud_species
# Set the number of pressures
if self.pressure_grid in ["standard", "smaller"]:
# Initiate 180 pressure layers but use only
# 60 layers during the radiative transfer
# when pressure_grid is set to 'smaller'
n_pressure = 180
elif self.pressure_grid == "clouds":
            # Initiate 1440 pressure layers but use fewer
            # layers (~100) during the radiative transfer
# after running make_half_pressure_better
n_pressure = 1440
        elif self.pressure_grid == "manual":
            # The pressure layers are taken from the pt_manual
            # array so n_pressure is not used in this case
            n_pressure = None
        else:
            raise ValueError(
                f"The argument of pressure_grid "
                f"('{self.pressure_grid}') is "
                f"not recognized. Please use 'standard', "
                f"'smaller', 'clouds', or 'manual'."
            )
        # Create the pressure layers in log space or adopt
        # the pressures from the manual P-T profile
if self.pressure_grid == "manual":
if self.pt_manual is None:
raise UserWarning(
"A 2D array with the P-T profile "
"should be provided as argument "
"of pt_manual when using "
"pressure_grid='manual'."
)
self.pressure = self.pt_manual[:, 0]
else:
self.pressure = np.logspace(-6, np.log10(self.max_press), n_pressure)
# Import petitRADTRANS here because it is slow
print("Importing petitRADTRANS...", end="", flush=True)
from petitRADTRANS.radtrans import Radtrans
print(" [DONE]")
# Create the Radtrans object
self.rt_object = Radtrans(
line_species=self.line_species,
rayleigh_species=["H2", "He"],
cloud_species=self.cloud_species,
continuum_opacities=["H2-H2", "H2-He"],
wlen_bords_micron=self.wavel_range,
mode=res_mode,
test_ck_shuffle_comp=self.scattering,
do_scat_emis=self.scattering,
lbl_opacity_sampling=lbl_opacity_sampling,
)
# Setup the opacity arrays
if self.pressure_grid == "standard":
self.rt_object.setup_opa_structure(self.pressure)
elif self.pressure_grid == "manual":
self.rt_object.setup_opa_structure(self.pressure)
elif self.pressure_grid == "smaller":
self.rt_object.setup_opa_structure(self.pressure[::3])
elif self.pressure_grid == "clouds":
self.rt_object.setup_opa_structure(self.pressure[::24])
# Set the default of abund_smooth to None
self.abund_smooth = None
@typechecked
def get_model(
self,
model_param: Dict[str, float],
quenching: Optional[str] = None,
spec_res: Optional[float] = None,
wavel_resample: Optional[np.ndarray] = None,
plot_contribution: Optional[Union[bool, str]] = False,
temp_nodes: Optional[Union[int, np.integer]] = None,
) -> ModelBox:
"""
Function for calculating a model spectrum with
        the radiative transfer code ``petitRADTRANS``.
Parameters
----------
model_param : dict
Dictionary with the model parameters. Various
parameterizations can be used for the
pressure-temperature (P-T) profile, abundances
(chemical equilibrium or free abundances), and
the cloud properties. The type of parameterizations
that will be used depend on the parameters provided
in the dictionary of ``model_param``. Below is an
(incomplete) list of the supported parameters.
Mandatory parameters:
- The surface gravity, ``logg``, should always
be included. It is provided in cgs units as
:math:`\\log_{10}{g}`.
Scaling parameters (optional):
- The radius (:math:`R_\\mathrm{J}`), ``radius``,
and parallax (mas), ``parallax``, are optional
parameters that can be included for scaling the
flux from the planet surface to the observer.
            - Instead of ``parallax``, it is also possible to
              provide the distance (pc) with the ``distance``
              parameter.
Chemical abundances (mandatory -- one of the options
should be used):
            - Chemical equilibrium requires the ``metallicity``
              and ``c_o_ratio`` parameters. Optionally, the
``log_p_quench`` (as :math:`\\log_{10}P/\\mathrm{bar}`)
can be included for setting a quench pressure for
CO/CH$_4$/H$_2$O. If this last parameter is used,
then the argument of ``quenching`` should be set
to ``'pressure'``.
            - Free abundances require the parameters that have the
names from ``line_species`` and ``cloud_species``.
These will be used as :math:`\\log_{10}` mass fraction
of the line and cloud species. For example, if
``line_species`` includes ``H2O_HITEMP``
then ``model_param`` should contain the ``H2O_HITEMP``
parameter. For a mass fraction of :math:`10^{-3}` the
dictionary value can be set to -3. Or, if
``cloud_species`` contains ``MgSiO3(c)_cd`` then
``model_param`` should contain the ``MgSiO3(c)``
parameter. So it is provided without the suffix,
``_cd``, for the particle shape and structure.
Pressure-temperature (P-T) profiles (mandatory -- one of
the options should be used):
- Eddington approximation requires the ``tint``
and ``log_delta`` parameters.
- Parametrization from `Mollière et al (2020)
<https://ui.adsabs.harvard.edu/abs/2020A%26A...640A.
131M/abstract>`_ that was used for HR 8799 e. It
              requires ``tint``, ``alpha``, ``log_delta``, ``t1``,
``t2``, and ``t3`` as parameters.
- Arbitrary number of free temperature nodes requires
parameters ``t0``, ``t1``, ``t2``, etc. So counting
from zero up to the number of nodes that are
required. The nodes will be interpolated to a larger
number of points in log-pressure space (set with the
``pressure_grid`` parameter) by using a cubic spline.
Optionally, the ``pt_smooth`` parameter can also be
included in ``model_param``, which is used for
smoothing the interpolated P-T profile with a
Gaussian kernel in :math:`\\log{P/\\mathrm{bar}}`.
A recommended value for the kernel is 0.3 dex,
so ``pt_smooth=0.3``.
- Instead of a parametrization, it is also possible
to provide a manual P-T profile as ``numpy`` array
with the argument of ``pt_manual``.
Cloud models (optional -- one of the options can be used):
- Physical clouds as in `Mollière et al (2020)
<https://ui.adsabs.harvard.edu/abs/2020A%26A...640A.
131M/abstract>`_ require the parameters ``fsed``,
``log_kzz``, and ``sigma_lnorm``. Cloud abundances
are either specified relative to the equilibrium
abundances (when using chemical equilibrium
abundances for the line species) or as free
abundances (when using free abundances for the line
species). For the first case, the relative mass
fractions are specified for example with the
``mgsio3_fraction`` parameter if the list with
``cloud_species`` contains ``MgSiO3(c)_cd``.
- With the physical clouds, instead of including the
mass fraction with the ``_fraction`` parameters,
it is also possible to enforce the clouds (to ensure
an effect on the spectrum) by scaling the opacities
with the ``log_tau_cloud`` parameter. This is the
wavelength-averaged optical depth of the clouds down
to the gas-only photosphere. The abundances are
now specified relative to the first cloud species
that is listed in ``cloud_species``. The ratio
parameters should be provided with the ``_ratio``
suffix. For example, if
``cloud_species=['MgSiO3(c)_cd', 'Fe(c)_cd',
'Al2O3(c)_cd']`` then the ``fe_mgsio3_ratio`` and
``al2o3_mgsio3_ratio`` parameters are required.
- Instead of a single sedimentation parameter,
``fsed``, it is also possible to include two values,
``fsed_1`` and ``fsed_2``. This will calculate a
weighted combination of two cloudy spectra, to mimic
horizontal cloud variations. The weight should be
provided with the ``f_clouds`` parameter (between
0 and 1) in the ``model_param`` dictionary.
- Parametrized cloud opacities with a cloud absorption
opacity, ``log_kappa_abs``, and powerlaw index,
``opa_abs_index``. Furthermore, ``log_p_base`` and
``fsed`` are required parameters. In addition to
absorption, parametrized scattering opacities are
added with the optional ``log_kappa_sca`` and
``opa_sca_index`` parameters. Optionally, the
``lambda_ray`` can be included, which is the
wavelength at which the opacity changes to a
:math:`\\lambda^{-4}` dependence in the Rayleigh
regime. It is also possible to include
``log_tau_cloud``, which can be used for
enforcing clouds in the photospheric region by
scaling the cloud opacities.
- Parametrized cloud opacities with a total cloud
opacity, ``log_kappa_0``, and a single scattering
albedo, ``albedo``. Furthermore, ``opa_index``,
``log_p_base``, and ``fsed``, are required
parameters. This is `cloud model 2` from
`Mollière et al (2020) <https://ui.adsabs.harvard.
edu/abs/2020A%26A...640A.131M/abstract>`_
Optionally, ``log_tau_cloud`` can be used for
enforcing clouds in the photospheric region by
scaling the cloud opacities.
- Gray clouds are simply parametrized with the
``log_kappa_gray`` and ``log_cloud_top``
parameters. These clouds extend from the bottom
of the atmosphere up to the cloud top pressure and
have a constant opacity. Optionally, a single
scattering albedo, ``albedo``, can be specified.
Also ``log_tau_cloud`` can be used for enforcing
clouds in the photospheric region by scaling the
cloud opacities.
Extinction (optional):
- Extinction can optionally be applied to the spectrum
by including the ``ism_ext`` parameter, which is the
              visual extinction, $A_V$. The empirical relation
from `Cardelli et al. (1989) <https://ui.adsabs.
harvard.edu/abs/1989ApJ...345..245C/abstract>`_
is used for calculating the extinction at other
wavelengths.
- When using ``ism_ext``, the reddening, $R_V$, can
also be optionally set with the ``ism_red``
parameter. Otherwise it is set to the standard
value for the diffuse ISM, $R_V = 3.1$.
Radial velocity and broadening (optional):
- Radial velocity shift can be applied by adding the
``rad_vel`` parameter. This shifts the spectrum
by a constant velocity (km/s).
- Rotational broadening can be applied by adding the
``vsini`` parameter, which is the projected spin
velocity (km/s), :math:`v\\sin{i}`. The broadening
is applied with the ``fastRotBroad`` function from
``PyAstronomy`` (see for details the `documentation
<https://pyastronomy.readthedocs.io/en/latest/
              pyaslDoc/aslDoc/rotBroad.html#fastrotbroad-a-
faster-algorithm>`_).
quenching : str, None
Quenching type for CO/CH$_4$/H$_2$O abundances. Either
the quenching pressure (bar) is a free parameter
(``quenching='pressure'``) or the quenching pressure is
calculated from the mixing and chemical timescales
(``quenching='diffusion'``). The quenching is not applied
if the argument is set to ``None``.
spec_res : float, None
Spectral resolution, achieved by smoothing with a Gaussian
kernel. No smoothing is applied when the argument is set
to ``None``.
wavel_resample : np.ndarray, None
Wavelength points (:math:`\\mu`m) to which the spectrum
            will be resampled. The original wavelength points will
be used if the argument is set to ``None``.
plot_contribution : bool, str, None
Filename for the plot with the emission contribution. The
plot is not created if the argument is set to ``False`` or
``None``. If set to ``True``, the plot is shown in an
interface window instead of written to a file.
temp_nodes : int, None
Number of free temperature nodes.
Returns
-------
species.core.box.ModelBox
Box with the petitRADTRANS model spectrum.
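        Examples
        --------
        A hedged sketch of a clear-atmosphere call that uses
        equilibrium chemistry and the Eddington P-T approximation
        (the instance name and parameter values are illustrative
        assumptions, not taken from the original module):
        >>> read_rad = ReadRadtrans(
        ...     line_species=["H2O_HITEMP", "CO_all_iso_HITEMP"],
        ...     wavel_range=(1.0, 5.0),
        ... )
        >>> model_box = read_rad.get_model(
        ...     model_param={
        ...         "tint": 800.0,
        ...         "log_delta": -6.0,
        ...         "metallicity": 0.0,
        ...         "c_o_ratio": 0.55,
        ...         "logg": 4.0,
        ...         "radius": 1.0,
        ...         "parallax": 50.0,
        ...     },
        ...     spec_res=500.0,
        ... )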
"""
# Set chemistry type
if "metallicity" in model_param and "c_o_ratio" in model_param:
chemistry = "equilibrium"
else:
chemistry = "free"
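        # Count the number of abundance nodes that are provided
        # for each line species (free abundance profiles)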
check_nodes = {}
for line_item in self.line_species:
abund_count = 0
for node_idx in range(100):
if f"{line_item}_{node_idx}" in model_param:
abund_count += 1
else:
break
check_nodes[line_item] = abund_count
# Check if there are an equal number of
# abundance nodes for all the line species
nodes_list = list(check_nodes.values())
if not all(value == nodes_list[0] for value in nodes_list):
            raise ValueError(
                "The number of abundance nodes is "
                "not equal for all the line "
                f"species: {check_nodes}"
)
if all(value == 0 for value in nodes_list):
abund_nodes = None
else:
abund_nodes = nodes_list[0]
for line_item in self.line_species:
if abund_nodes is None:
if line_item not in model_param:
raise RuntimeError(
f"The abundance of {line_item} is not "
"found in the dictionary with parameters "
"of 'model_param'. Please add the log10 "
f"mass fraction of {line_item}."
)
else:
for node_idx in range(abund_nodes):
if f"{line_item}_{node_idx}" not in model_param:
raise RuntimeError(
f"The abundance of {line_item} is not "
"found in the dictionary with parameters "
"of 'model_param'. Please add the log10 "
f"mass fraction of {line_item}."
)
# Check quenching parameter
if not hasattr(self, "quenching"):
self.quenching = quenching
if self.quenching is not None and chemistry != "equilibrium":
raise ValueError(
"The 'quenching' parameter can only be used in combination with "
"chemistry='equilibrium'."
)
if self.quenching is not None and self.quenching not in [
"pressure",
"diffusion",
]:
raise ValueError(
"The argument of 'quenching' should be of the following: "
"'pressure', 'diffusion', or None."
)
# Abundance nodes
if chemistry == "free" and abund_nodes is not None:
knot_press_abund = np.logspace(
np.log10(self.pressure[0]), np.log10(self.pressure[-1]), abund_nodes
)
else:
knot_press_abund = None
# C/O and [Fe/H]
if chemistry == "equilibrium":
# Equilibrium chemistry
metallicity = model_param["metallicity"]
c_o_ratio = model_param["c_o_ratio"]
log_x_abund = None
elif chemistry == "free":
# Free chemistry
# TODO Set [Fe/H] = 0 for Molliere P-T profile and
# cloud condensation profiles
metallicity = 0.0
# Get smoothing parameter for abundance profiles
if "abund_smooth" in model_param:
self.abund_smooth = model_param["abund_smooth"]
# Create a dictionary with the mass fractions
if abund_nodes is None:
log_x_abund = {}
for line_item in self.line_species:
log_x_abund[line_item] = model_param[line_item]
_, _, c_o_ratio = calc_metal_ratio(log_x_abund, self.line_species)
else:
log_x_abund = {}
for line_item in self.line_species:
for node_idx in range(abund_nodes):
log_x_abund[f"{line_item}_{node_idx}"] = model_param[
f"{line_item}_{node_idx}"
]
# TODO Set C/O = 0.55 for Molliere P-T profile
# and cloud condensation profiles
c_o_ratio = 0.55
# Create the P-T profile
if self.pressure_grid == "manual":
temp = self.pt_manual[:, 1]
elif (
"tint" in model_param
and "log_delta" in model_param
and "alpha" in model_param
):
temp, _, _ = pt_ret_model(
np.array([model_param["t1"], model_param["t2"], model_param["t3"]]),
10.0 ** model_param["log_delta"],
model_param["alpha"],
model_param["tint"],
self.pressure,
metallicity,
c_o_ratio,
)
elif "tint" in model_param and "log_delta" in model_param:
tau = self.pressure * 1e6 * 10.0 ** model_param["log_delta"]
temp = (0.75 * model_param["tint"] ** 4.0 * (2.0 / 3.0 + tau)) ** 0.25
elif "PTslope_1" in model_param:
num_layer = 6 # could make a variable in the future
layer_pt_slopes = np.ones(num_layer) * np.nan
for index in range(num_layer):
layer_pt_slopes[index] = model_param[f"PTslope_{num_layer - index}"]
try:
from petitRADTRANS.physics import dTdP_temperature_profile
except ImportError as import_error:
raise ImportError(
"Can't import the dTdP profile function from "
"petitRADTRANS, check that your version of pRT "
"includes this function in petitRADTRANS.physics",
import_error,
)
temp = dTdP_temperature_profile(
self.pressure,
num_layer, # could change in the future
layer_pt_slopes,
model_param["T_bottom"],
)
else:
if temp_nodes is None:
temp_nodes = 0
for temp_idx in range(100):
if f"t{temp_idx}" in model_param:
temp_nodes += 1
else:
break
knot_press = np.logspace(
np.log10(self.pressure[0]), np.log10(self.pressure[-1]), temp_nodes
)
knot_temp = []
for temp_idx in range(temp_nodes):
knot_temp.append(model_param[f"t{temp_idx}"])
knot_temp = np.asarray(knot_temp)
if "pt_smooth" in model_param:
pt_smooth = model_param["pt_smooth"]
else:
pt_smooth = None
temp = pt_spline_interp(
knot_press,
knot_temp,
self.pressure,
pt_smooth=pt_smooth,
)
# Set the log quenching pressure, log(P/bar)
if self.quenching == "pressure":
p_quench = 10.0 ** model_param["log_p_quench"]
elif self.quenching == "diffusion":
p_quench = quench_pressure(
self.pressure,
temp,
model_param["metallicity"],
model_param["c_o_ratio"],
model_param["logg"],
model_param["log_kzz"],
)
else:
if "log_p_quench" in model_param:
warnings.warn(
"The 'model_param' dictionary contains the "
"'log_p_quench' parameter but 'quenching=None'. "
"The quenching pressure from the dictionary is "
"therefore ignored."
)
p_quench = None
if (
len(self.cloud_species) > 0
or "log_kappa_0" in model_param
or "log_kappa_gray" in model_param
or "log_kappa_abs" in model_param
):
tau_cloud = None
log_x_base = None
if (
"log_kappa_0" in model_param
or "log_kappa_gray" in model_param
or "log_kappa_abs" in model_param
):
if "log_tau_cloud" in model_param:
tau_cloud = 10.0 ** model_param["log_tau_cloud"]
elif "tau_cloud" in model_param:
tau_cloud = model_param["tau_cloud"]
elif chemistry == "equilibrium":
# Create the dictionary with the mass fractions of the
# clouds relative to the maximum values allowed from
# elemental abundances
cloud_fractions = {}
for item in self.cloud_species:
if f"{item[:-3].lower()}_fraction" in model_param:
cloud_fractions[item] = model_param[
f"{item[:-3].lower()}_fraction"
]
elif f"{item[:-3].lower()}_tau" in model_param:
# Import the chemistry module here because it is slow
from poor_mans_nonequ_chem.poor_mans_nonequ_chem import (
interpol_abundances,
)
# Interpolate the abundances, following chemical equilibrium
abund_in = interpol_abundances(
np.full(self.pressure.size, c_o_ratio),
np.full(self.pressure.size, metallicity),
temp,
self.pressure,
Pquench_carbon=p_quench,
)
# Extract the mean molecular weight
mmw = abund_in["MMW"]
# Calculate the scaled mass fraction of the clouds
cloud_fractions[item] = scale_cloud_abund(
model_param,
self.rt_object,
self.pressure,
temp,
mmw,
"equilibrium",
abund_in,
item,
model_param[f"{item[:-3].lower()}_tau"],
pressure_grid=self.pressure_grid,
)
if "log_tau_cloud" in model_param:
# Set the log mass fraction to zero and use the
# optical depth parameter to scale the cloud mass
# fraction with petitRADTRANS
tau_cloud = 10.0 ** model_param["log_tau_cloud"]
elif "tau_cloud" in model_param:
# Set the log mass fraction to zero and use the
# optical depth parameter to scale the cloud mass
# fraction with petitRADTRANS
tau_cloud = model_param["tau_cloud"]
if tau_cloud is not None:
for i, item in enumerate(self.cloud_species):
if i == 0:
cloud_fractions[item] = 0.0
else:
cloud_1 = item[:-3].lower()
cloud_2 = self.cloud_species[0][:-3].lower()
cloud_fractions[item] = model_param[
f"{cloud_1}_{cloud_2}_ratio"
]
# Create a dictionary with the log mass fractions at the cloud base
log_x_base = log_x_cloud_base(c_o_ratio, metallicity, cloud_fractions)
elif chemistry == "free":
# Add the log10 mass fractions of the clouds to the dictionary
log_x_base = {}
if "log_tau_cloud" in model_param:
# Set the log mass fraction to zero and use the
# optical depth parameter to scale the cloud mass
# fraction with petitRADTRANS
tau_cloud = 10.0 ** model_param["log_tau_cloud"]
elif "tau_cloud" in model_param:
# Set the log mass fraction to zero and use the
# optical depth parameter to scale the cloud mass
# fraction with petitRADTRANS
tau_cloud = model_param["tau_cloud"]
if tau_cloud is None:
for item in self.cloud_species:
                        # Set the log10 of the mass fractions at the
# cloud base equal to the value from the
# parameter dictionary
log_x_base[item[:-3]] = model_param[item]
else:
# Set the log10 of the mass fractions with the
# ratios from the parameter dictionary and
# scale to the actual mass fractions with
# tau_cloud that is used in calc_spectrum_clouds
for i, item in enumerate(self.cloud_species):
if i == 0:
log_x_base[item[:-3]] = 0.0
else:
cloud_1 = item[:-3].lower()
cloud_2 = self.cloud_species[0][:-3].lower()
log_x_base[item[:-3]] = model_param[
f"{cloud_1}_{cloud_2}_ratio"
]
# Calculate the petitRADTRANS spectrum
# for a cloudy atmosphere
if "fsed_1" in model_param and "fsed_2" in model_param:
cloud_dict = model_param.copy()
cloud_dict["fsed"] = cloud_dict["fsed_1"]
(
wavelength,
flux_1,
emission_contr_1,
_,
) = calc_spectrum_clouds(
self.rt_object,
self.pressure,
temp,
c_o_ratio,
metallicity,
p_quench,
log_x_abund,
log_x_base,
cloud_dict,
model_param["logg"],
chemistry=chemistry,
knot_press_abund=knot_press_abund,
abund_smooth=self.abund_smooth,
pressure_grid=self.pressure_grid,
plotting=False,
contribution=True,
tau_cloud=tau_cloud,
cloud_wavel=self.cloud_wavel,
)
cloud_dict = model_param.copy()
cloud_dict["fsed"] = cloud_dict["fsed_2"]
(
wavelength,
flux_2,
emission_contr_2,
_,
) = calc_spectrum_clouds(
self.rt_object,
self.pressure,
temp,
c_o_ratio,
metallicity,
p_quench,
log_x_abund,
log_x_base,
cloud_dict,
model_param["logg"],
chemistry=chemistry,
knot_press_abund=knot_press_abund,
abund_smooth=self.abund_smooth,
pressure_grid=self.pressure_grid,
plotting=False,
contribution=True,
tau_cloud=tau_cloud,
cloud_wavel=self.cloud_wavel,
)
flux = (
model_param["f_clouds"] * flux_1
+ (1.0 - model_param["f_clouds"]) * flux_2
)
emission_contr = (
model_param["f_clouds"] * emission_contr_1
+ (1.0 - model_param["f_clouds"]) * emission_contr_2
)
else:
(
wavelength,
flux,
emission_contr,
_,
) = calc_spectrum_clouds(
self.rt_object,
self.pressure,
temp,
c_o_ratio,
metallicity,
p_quench,
log_x_abund,
log_x_base,
model_param,
model_param["logg"],
chemistry=chemistry,
knot_press_abund=knot_press_abund,
abund_smooth=self.abund_smooth,
pressure_grid=self.pressure_grid,
plotting=False,
contribution=True,
tau_cloud=tau_cloud,
cloud_wavel=self.cloud_wavel,
)
elif chemistry == "equilibrium":
# Calculate the petitRADTRANS spectrum for a clear atmosphere
wavelength, flux, emission_contr = calc_spectrum_clear(
self.rt_object,
self.pressure,
temp,
model_param["logg"],
model_param["c_o_ratio"],
model_param["metallicity"],
p_quench,
None,
pressure_grid=self.pressure_grid,
chemistry=chemistry,
knot_press_abund=knot_press_abund,
abund_smooth=self.abund_smooth,
contribution=True,
)
elif chemistry == "free":
log_x_abund = {}
if abund_nodes is None:
for line_item in self.rt_object.line_species:
log_x_abund[line_item] = model_param[line_item]
else:
for line_item in self.rt_object.line_species:
for node_idx in range(abund_nodes):
log_x_abund[f"{line_item}_{node_idx}"] = model_param[
f"{line_item}_{node_idx}"
]
wavelength, flux, emission_contr = calc_spectrum_clear(
self.rt_object,
self.pressure,
temp,
model_param["logg"],
None,
None,
None,
log_x_abund,
chemistry=chemistry,
knot_press_abund=knot_press_abund,
abund_smooth=self.abund_smooth,
pressure_grid=self.pressure_grid,
contribution=True,
)
if "radius" in model_param:
# Calculate the planet mass from log(g) and radius
model_param["mass"] = logg_to_mass(
model_param["logg"], model_param["radius"]
)
# Scale the flux to the observer
if "parallax" in model_param:
scaling = (model_param["radius"] * constants.R_JUP) ** 2 / (
1e3 * constants.PARSEC / model_param["parallax"]
) ** 2
flux *= scaling
elif "distance" in model_param:
scaling = (model_param["radius"] * constants.R_JUP) ** 2 / (
model_param["distance"] * constants.PARSEC
) ** 2
flux *= scaling
# Apply ISM extinction
if "ism_ext" in model_param:
if "ism_red" in model_param:
ism_reddening = model_param["ism_red"]
else:
# Use default ISM reddening (R_V = 3.1) if ism_red is not provided
ism_reddening = 3.1
flux = apply_ism_ext(
wavelength, flux, model_param["ism_ext"], ism_reddening
)
# Plot 2D emission contribution
if plot_contribution:
# Calculate the total optical depth (line and continuum opacities)
# self.rt_object.calc_opt_depth(10.**model_param['logg'])
# From Paul: The first axis of total_tau is the coordinate
# of the cumulative opacity distribution function (ranging
# from 0 to 1). A correct average is obtained by
# multiplying the first axis with self.w_gauss, then
# summing them. This is then the actual wavelength-mean.
if self.scattering:
# From petitRADTRANS: Only use 0 index for species
# because for lbl or test_ck_shuffle_comp = True
# everything has been moved into the 0th index
w_gauss = self.rt_object.w_gauss[..., np.newaxis, np.newaxis]
optical_depth = np.sum(
w_gauss * self.rt_object.total_tau[:, :, 0, :], axis=0
)
else:
# TODO Is this correct?
w_gauss = self.rt_object.w_gauss[
..., np.newaxis, np.newaxis, np.newaxis
]
optical_depth = np.sum(
w_gauss * self.rt_object.total_tau[:, :, :, :], axis=0
)
# Sum over all species
optical_depth = np.sum(optical_depth, axis=1)
plt.rcParams["font.family"] = "serif"
plt.rcParams["mathtext.fontset"] = "dejavuserif"
plt.figure(figsize=(8.0, 4.0))
gridsp = mpl.gridspec.GridSpec(1, 1)
gridsp.update(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
ax = plt.subplot(gridsp[0, 0])
ax.tick_params(
axis="both",
which="major",
colors="black",
labelcolor="black",
direction="in",
width=1,
length=5,
labelsize=12,
top=True,
bottom=True,
left=True,
right=True,
)
ax.tick_params(
axis="both",
which="minor",
colors="black",
labelcolor="black",
direction="in",
width=1,
length=3,
labelsize=12,
top=True,
bottom=True,
left=True,
right=True,
)
ax.set_xlabel("Wavelength (µm)", fontsize=13)
ax.set_ylabel("Pressure (bar)", fontsize=13)
ax.get_xaxis().set_label_coords(0.5, -0.09)
ax.get_yaxis().set_label_coords(-0.07, 0.5)
ax.set_yscale("log")
ax.xaxis.set_major_locator(MultipleLocator(1.0))
ax.xaxis.set_minor_locator(MultipleLocator(0.2))
press_bar = 1e-6 * self.rt_object.press # (Ba) -> (Bar)
xx_grid, yy_grid = np.meshgrid(wavelength, press_bar)
emission_contr = np.nan_to_num(emission_contr)
ax.pcolormesh(
xx_grid,
yy_grid,
emission_contr,
cmap=plt.cm.bone_r,
shading="gouraud",
)
photo_press = np.zeros(wavelength.shape[0])
for i in range(photo_press.shape[0]):
press_interp = interp1d(optical_depth[i, :], self.rt_object.press)
photo_press[i] = press_interp(1.0) * 1e-6 # cgs to (bar)
ax.plot(wavelength, photo_press, lw=0.5, color="gray")
ax.set_xlim(np.amin(wavelength), np.amax(wavelength))
ax.set_ylim(np.amax(press_bar), np.amin(press_bar))
if isinstance(plot_contribution, str):
plt.savefig(plot_contribution, bbox_inches="tight")
else:
plt.show()
plt.clf()
plt.close()
# Convolve with a broadening kernel for vsin(i)
if "vsini" in model_param:
# fastRotBroad requires a linear wavelength sampling
# while pRT uses a logarithmic wavelength sampling
# so change temporarily to a linear sampling
# with a factor 10 larger number of wavelengths
wavel_linear = np.linspace(
np.amin(wavelength), np.amax(wavelength), wavelength.size * 10
)
flux_linear = spectres_numba(
wavel_linear,
wavelength,
flux,
spec_errs=None,
fill=np.nan,
verbose=True,
)
# Apply the rotational broadening
# The rotBroad function is much slower than
# fastRotBroad when tested on a large array
flux_broad = fastRotBroad(
wvl=wavel_linear,
flux=flux_linear,
epsilon=1.0,
vsini=model_param["vsini"],
effWvl=None,
)
# And change back to the original (logarithmic)
# wavelength sampling, with constant R
flux = spectres_numba(
wavelength,
wavel_linear,
flux_broad,
spec_errs=None,
fill=np.nan,
verbose=True,
)
# Convolve the spectrum with a Gaussian LSF
if spec_res is not None:
flux = convolve_spectrum(wavelength, flux, spec_res)
# Apply a radial velocity shift to the wavelengths
if "rad_vel" in model_param:
# Change speed of light from (m/s) to (km/s)
wavelength *= 1.0 - model_param["rad_vel"] / (constants.LIGHT * 1e-3)
# Resample the spectrum
if wavel_resample is not None:
flux = spectres.spectres(
wavel_resample,
wavelength,
flux,
spec_errs=None,
fill=np.nan,
verbose=True,
)
wavelength = wavel_resample
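        # Create an array with the pressure (bar) and bolometric
        # flux (W m-2) in case the Radtrans object has h_bol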
if hasattr(self.rt_object, "h_bol"):
pressure = 1e-6 * self.rt_object.press # (bar)
f_bol = -4.0 * np.pi * self.rt_object.h_bol
f_bol *= 1e-3 # (erg s-1 cm-2) -> (W m-2)
bol_flux = np.column_stack((pressure, f_bol))
else:
bol_flux = None
return create_box(
boxtype="model",
model="petitradtrans",
wavelength=wavelength,
flux=flux,
parameters=model_param,
quantity="flux",
contribution=emission_contr,
bol_flux=bol_flux,
)
@typechecked
def get_flux(self, model_param: Dict[str, float]) -> Tuple[float, None]:
"""
Function for calculating the filter-weighted flux density
for the ``filter_name``.
Parameters
----------
model_param : dict
Dictionary with the model parameters and values.
Returns
-------
float
Flux (W m-2 um-1).
NoneType
Uncertainty (W m-2 um-1). Always set to ``None``.
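        Examples
        --------
        A hypothetical sketch (the filter name and parameter values
        are assumptions, not taken from the original module):
        >>> read_rad = ReadRadtrans(
        ...     line_species=["H2O_HITEMP", "CO_all_iso_HITEMP"],
        ...     filter_name="MKO/NSFCam.K",
        ... )
        >>> flux, _ = read_rad.get_flux(
        ...     model_param={"tint": 800.0, "log_delta": -6.0,
        ...                  "metallicity": 0.0, "c_o_ratio": 0.55,
        ...                  "logg": 4.0, "radius": 1.0, "parallax": 50.0}
        ... )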
"""
if "log_p_quench" in model_param:
quenching = "pressure"
else:
quenching = None
spectrum = self.get_model(model_param, quenching=quenching)
synphot = SyntheticPhotometry(self.filter_name)
return synphot.spectrum_to_flux(spectrum.wavelength, spectrum.flux)
@typechecked
def get_magnitude(self, model_param: Dict[str, float]) -> Tuple[float, None]:
"""
Function for calculating the magnitude for the ``filter_name``.
Parameters
----------
model_param : dict
Dictionary with the model parameters and values.
Returns
-------
float
Magnitude.
NoneType
Uncertainty. Always set to ``None``.
"""
if "log_p_quench" in model_param:
quenching = "pressure"
else:
quenching = None
spectrum = self.get_model(model_param, quenching=quenching)
synphot = SyntheticPhotometry(self.filter_name)
app_mag, _ = synphot.spectrum_to_magnitude(spectrum.wavelength, spectrum.flux)
return app_mag
|
tomasstolkerREPO_NAMEspeciesPATH_START.@species_extracted@species-main@species@read@[email protected]_END.py
|
{
"filename": "test_url_helpers.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/utils/tests/test_url_helpers.py",
"type": "Python"
}
|
from ...utils.url_helpers import urljoin_keep_path
BASE_URL = 'http://example.com/foo/'
def test_urljoin_keep_path():
assert urljoin_keep_path(BASE_URL, '') == BASE_URL
assert urljoin_keep_path('', BASE_URL) == BASE_URL
assert urljoin_keep_path(BASE_URL, 'bar') == 'http://example.com/foo/bar'
assert urljoin_keep_path(BASE_URL, '/bar') == 'http://example.com/bar'
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@utils@tests@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/sankey/node/hoverlabel/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._namelengthsrc import NamelengthsrcValidator
from ._namelength import NamelengthValidator
from ._font import FontValidator
from ._bordercolorsrc import BordercolorsrcValidator
from ._bordercolor import BordercolorValidator
from ._bgcolorsrc import BgcolorsrcValidator
from ._bgcolor import BgcolorValidator
from ._alignsrc import AlignsrcValidator
from ._align import AlignValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._namelengthsrc.NamelengthsrcValidator",
"._namelength.NamelengthValidator",
"._font.FontValidator",
"._bordercolorsrc.BordercolorsrcValidator",
"._bordercolor.BordercolorValidator",
"._bgcolorsrc.BgcolorsrcValidator",
"._bgcolor.BgcolorValidator",
"._alignsrc.AlignsrcValidator",
"._align.AlignValidator",
],
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@sankey@node@hoverlabel@[email protected]_END.py
|