metadata (dict) | text (string, lengths 0 to 40.6M) | id (string, lengths 14 to 255)
---|---|---|
{
"filename": "test_pyramids.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/skimage/transform/tests/test_pyramids.py",
"type": "Python"
}
|
import math
import warnings
import pytest
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal, assert_equal
from skimage import data
from skimage._shared.utils import _supported_float_type
from skimage.transform import pyramids
image = data.astronaut()
image_gray = image[..., 0]
@pytest.mark.parametrize('channel_axis', [0, 1, -1])
def test_pyramid_reduce_rgb(channel_axis):
image = data.astronaut()
rows, cols, dim = image.shape
image = np.moveaxis(image, source=-1, destination=channel_axis)
out_ = pyramids.pyramid_reduce(image, downscale=2, channel_axis=channel_axis)
out = np.moveaxis(out_, channel_axis, -1)
assert_array_equal(out.shape, (rows / 2, cols / 2, dim))
def test_pyramid_reduce_gray():
rows, cols = image_gray.shape
out1 = pyramids.pyramid_reduce(image_gray, downscale=2, channel_axis=None)
assert_array_equal(out1.shape, (rows / 2, cols / 2))
assert_almost_equal(np.ptp(out1), 1.0, decimal=2)
out2 = pyramids.pyramid_reduce(
image_gray, downscale=2, channel_axis=None, preserve_range=True
)
assert_almost_equal(np.ptp(out2) / np.ptp(image_gray), 1.0, decimal=2)
def test_pyramid_reduce_gray_defaults():
rows, cols = image_gray.shape
out1 = pyramids.pyramid_reduce(image_gray)
assert_array_equal(out1.shape, (rows / 2, cols / 2))
assert_almost_equal(np.ptp(out1), 1.0, decimal=2)
out2 = pyramids.pyramid_reduce(image_gray, preserve_range=True)
assert_almost_equal(np.ptp(out2) / np.ptp(image_gray), 1.0, decimal=2)
def test_pyramid_reduce_nd():
for ndim in [1, 2, 3, 4]:
img = np.random.randn(*((8,) * ndim))
out = pyramids.pyramid_reduce(img, downscale=2, channel_axis=None)
expected_shape = np.asarray(img.shape) / 2
assert_array_equal(out.shape, expected_shape)
@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
def test_pyramid_expand_rgb(channel_axis):
image = data.astronaut()
rows, cols, dim = image.shape
image = np.moveaxis(image, source=-1, destination=channel_axis)
out = pyramids.pyramid_expand(image, upscale=2, channel_axis=channel_axis)
expected_shape = [rows * 2, cols * 2]
expected_shape.insert(channel_axis % image.ndim, dim)
assert_array_equal(out.shape, expected_shape)
def test_pyramid_expand_gray():
rows, cols = image_gray.shape
out = pyramids.pyramid_expand(image_gray, upscale=2)
assert_array_equal(out.shape, (rows * 2, cols * 2))
def test_pyramid_expand_nd():
for ndim in [1, 2, 3, 4]:
img = np.random.randn(*((4,) * ndim))
out = pyramids.pyramid_expand(img, upscale=2, channel_axis=None)
expected_shape = np.asarray(img.shape) * 2
assert_array_equal(out.shape, expected_shape)
@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
def test_build_gaussian_pyramid_rgb(channel_axis):
image = data.astronaut()
rows, cols, dim = image.shape
image = np.moveaxis(image, source=-1, destination=channel_axis)
pyramid = pyramids.pyramid_gaussian(image, downscale=2, channel_axis=channel_axis)
for layer, out in enumerate(pyramid):
layer_shape = [rows / 2**layer, cols / 2**layer]
layer_shape.insert(channel_axis % image.ndim, dim)
assert out.shape == tuple(layer_shape)
def test_build_gaussian_pyramid_gray():
rows, cols = image_gray.shape
pyramid = pyramids.pyramid_gaussian(image_gray, downscale=2, channel_axis=None)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2**layer, cols / 2**layer)
assert_array_equal(out.shape, layer_shape)
def test_build_gaussian_pyramid_gray_defaults():
rows, cols = image_gray.shape
pyramid = pyramids.pyramid_gaussian(image_gray)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2**layer, cols / 2**layer)
assert_array_equal(out.shape, layer_shape)
def test_build_gaussian_pyramid_nd():
for ndim in [1, 2, 3, 4]:
img = np.random.randn(*((8,) * ndim))
original_shape = np.asarray(img.shape)
pyramid = pyramids.pyramid_gaussian(img, downscale=2, channel_axis=None)
for layer, out in enumerate(pyramid):
layer_shape = original_shape / 2**layer
assert_array_equal(out.shape, layer_shape)
@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
def test_build_laplacian_pyramid_rgb(channel_axis):
image = data.astronaut()
rows, cols, dim = image.shape
image = np.moveaxis(image, source=-1, destination=channel_axis)
pyramid = pyramids.pyramid_laplacian(image, downscale=2, channel_axis=channel_axis)
for layer, out in enumerate(pyramid):
layer_shape = [rows / 2**layer, cols / 2**layer]
layer_shape.insert(channel_axis % image.ndim, dim)
assert out.shape == tuple(layer_shape)
def test_build_laplacian_pyramid_defaults():
rows, cols = image_gray.shape
pyramid = pyramids.pyramid_laplacian(image_gray)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2**layer, cols / 2**layer)
assert_array_equal(out.shape, layer_shape)
def test_build_laplacian_pyramid_nd():
for ndim in [1, 2, 3, 4]:
img = np.random.randn(*(16,) * ndim)
original_shape = np.asarray(img.shape)
pyramid = pyramids.pyramid_laplacian(img, downscale=2, channel_axis=None)
for layer, out in enumerate(pyramid):
layer_shape = original_shape / 2**layer
assert_array_equal(out.shape, layer_shape)
@pytest.mark.parametrize('channel_axis', [0, 1, 2, -1, -2, -3])
def test_laplacian_pyramid_max_layers(channel_axis):
for downscale in [2, 3, 5, 7]:
if channel_axis is None:
shape = (32, 8)
shape_without_channels = shape
else:
shape_without_channels = (32, 8)
ndim = len(shape_without_channels) + 1
n_channels = 5
shape = list(shape_without_channels)
shape.insert(channel_axis % ndim, n_channels)
shape = tuple(shape)
img = np.ones(shape)
pyramid = pyramids.pyramid_laplacian(
img, downscale=downscale, channel_axis=channel_axis
)
max_layer = math.ceil(math.log(max(shape_without_channels), downscale))
for layer, out in enumerate(pyramid):
if channel_axis is None:
out_shape_without_channels = out.shape
else:
assert out.shape[channel_axis] == n_channels
out_shape_without_channels = list(out.shape)
out_shape_without_channels.pop(channel_axis)
out_shape_without_channels = tuple(out_shape_without_channels)
if layer < max_layer:
# should not reach all axes as size 1 prior to final level
assert max(out_shape_without_channels) > 1
# total number of images is max_layer + 1
assert_equal(max_layer, layer)
# final layer should be size 1 on all axes
assert out_shape_without_channels == (1, 1)
def test_check_factor():
with pytest.raises(ValueError):
pyramids._check_factor(0.99)
with pytest.raises(ValueError):
pyramids._check_factor(-2)
@pytest.mark.parametrize('dtype', ['float16', 'float32', 'float64', 'uint8', 'int64'])
@pytest.mark.parametrize(
'pyramid_func', [pyramids.pyramid_gaussian, pyramids.pyramid_laplacian]
)
def test_pyramid_dtype_support(pyramid_func, dtype):
with warnings.catch_warnings():
# Ignore arch specific warning on arm64, armhf, ppc64el, riscv64, s390x
# https://github.com/scikit-image/scikit-image/issues/7391
warnings.filterwarnings(
action="ignore",
category=RuntimeWarning,
message="invalid value encountered in cast",
)
img = np.random.randn(32, 8).astype(dtype)
pyramid = pyramid_func(img)
float_dtype = _supported_float_type(dtype)
assert np.all([im.dtype == float_dtype for im in pyramid])
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@skimage@transform@tests@[email protected]_END.py
|
{
"filename": "tfcas.py",
"repo_name": "maxmahlke/classy",
"repo_path": "classy_extracted/classy-main/classy/sources/pds/tfcas.py",
"type": "Python"
}
|
import numpy as np
import pandas as pd
import rocks
from classy import config
from classy import index
# ------
# Module definitions
REFERENCES = {
"CHAPMAN1972": ["1972PhDT.........5C", "Chapman 1972"],
"CHAPMAN&GAFFEY1979A": ["1979aste.book..655C", "Chapman and Gaffey 1979"],
1: ["1979aste.book.1064C", "Chapman and Gaffey 1979"],
2: ["1984Icar...59...25M", "McFadden+ 1984"],
}
WAVE = np.array(
[
0.33,
0.34,
0.355,
0.4,
0.43,
0.47,
0.5,
0.54,
0.57,
0.6,
0.635,
0.67,
0.7,
0.73,
0.765,
0.8,
0.83,
0.87,
0.9,
0.93,
0.95,
0.97,
1.0,
1.03,
1.06,
1.1,
]
)
DATA_KWARGS = {}
# ------
# Module functions
def _build_index(PATH_REPO):
"""Create index of spectra collection."""
# Iterate over index file
entries = _load_tfcas(PATH_REPO / "data/data0/24color.tab")
entries["name"], entries["number"] = zip(*rocks.identify(entries.number))
# All-NaN entry
entries = entries[~entries["name"].isin(["Cerberus"])]
entries["source"] = "24CAS"
entries["host"] = "PDS"
entries["module"] = "tfcas"
for ind, row in entries.iterrows():
entries.loc[ind, "bibcode"] = REFERENCES[row.ref][0]
entries.loc[ind, "shortbib"] = REFERENCES[row.ref][1]
# Split the observations into one file per spectrum
entries["filename"] = entries["number"].apply(
lambda number: PATH_REPO.relative_to(config.PATH_DATA) / f"data/{number}.csv"
)
_create_spectra_files(entries)
index.add(entries)
def _create_spectra_files(entries):
"""Create one file per 24CAS spectrum."""
for _, row in entries.iterrows():
# Convert colours to reflectances
refl = row[[f"REFL_{i}" for i in range(1, 27)]].values
refl_err = row[[f"REFL_{i}_UNC" for i in range(1, 27)]].values
# Convert color indices to reflectance
data = pd.DataFrame(data={"wave": WAVE, "refl": refl, "refl_err": refl_err})
data.to_csv(config.PATH_DATA / row.filename, index=False)
def _load_tfcas(PATH):
"""Load the 24cas data file.
Returns
-------
pd.DataFrame
"""
refl_cols = zip(
[f"REFL_{i}" for i in range(1, 27)], [f"REFL_{i}_UNC" for i in range(1, 27)]
)
refl_cols = [r for tup in refl_cols for r in tup]
data = pd.read_fwf(
PATH,
colspecs=[
(0, 6),
(6, 17),
(17, 23),
(23, 28),
(28, 34),
(34, 39),
(39, 45),
(45, 50),
(50, 56),
(56, 61),
(61, 67),
(67, 72),
(72, 78),
(78, 83),
(83, 89),
(89, 94),
(94, 100),
(100, 105),
(105, 111),
(111, 116),
(116, 122),
(122, 127),
(127, 133),
(133, 138),
(138, 144),
(144, 149),
(149, 155),
(155, 160),
(160, 166),
(166, 171),
(171, 177),
(177, 182),
(182, 188),
(188, 193),
(193, 199),
(199, 204),
(204, 210),
(210, 215),
(215, 221),
(221, 226),
(226, 232),
(232, 237),
(237, 243),
(243, 248),
(248, 254),
(254, 259),
(259, 265),
(265, 270),
(270, 276),
(276, 281),
(281, 287),
(287, 292),
(292, 298),
(298, 303),
(303, 314),
(314, 316),
(316, 318),
],
names=[
"number",
"prov_id",
]
+ refl_cols
+ ["date_obs", "ref", "note"],
)
data = data.replace(-9.99, np.nan)
data = data.replace(9.99, np.nan)
data = data.replace("9999-99-99", np.nan)
return data
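# A toy illustration of the fixed-width parsing used above, on hypothetical
# two-column data rather than the PDS 24color.tab file: pd.read_fwf slices each
# record by the character ranges given in colspecs, just as _load_tfcas does
# with its much longer column specification.
if __name__ == "__main__":
    import io

    sample = "     1 1978-01-01\n   433 1980-06-15\n"
    toy = pd.read_fwf(
        io.StringIO(sample),
        colspecs=[(0, 6), (6, 17)],  # character ranges for each column
        names=["number", "date_obs"],
    )
    print(toy)  # rows: (1, "1978-01-01") and (433, "1980-06-15")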
|
maxmahlkeREPO_NAMEclassyPATH_START.@classy_extracted@classy-main@classy@sources@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/waterfall/insidetextfont/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._weightsrc import WeightsrcValidator
from ._weight import WeightValidator
from ._variantsrc import VariantsrcValidator
from ._variant import VariantValidator
from ._textcasesrc import TextcasesrcValidator
from ._textcase import TextcaseValidator
from ._stylesrc import StylesrcValidator
from ._style import StyleValidator
from ._sizesrc import SizesrcValidator
from ._size import SizeValidator
from ._shadowsrc import ShadowsrcValidator
from ._shadow import ShadowValidator
from ._linepositionsrc import LinepositionsrcValidator
from ._lineposition import LinepositionValidator
from ._familysrc import FamilysrcValidator
from ._family import FamilyValidator
from ._colorsrc import ColorsrcValidator
from ._color import ColorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._weightsrc.WeightsrcValidator",
"._weight.WeightValidator",
"._variantsrc.VariantsrcValidator",
"._variant.VariantValidator",
"._textcasesrc.TextcasesrcValidator",
"._textcase.TextcaseValidator",
"._stylesrc.StylesrcValidator",
"._style.StyleValidator",
"._sizesrc.SizesrcValidator",
"._size.SizeValidator",
"._shadowsrc.ShadowsrcValidator",
"._shadow.ShadowValidator",
"._linepositionsrc.LinepositionsrcValidator",
"._lineposition.LinepositionValidator",
"._familysrc.FamilysrcValidator",
"._family.FamilyValidator",
"._colorsrc.ColorsrcValidator",
"._color.ColorValidator",
],
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@waterfall@insidetextfont@[email protected]_END.py
|
{
"filename": "_parentssrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/treemap/_parentssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ParentssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="parentssrc", parent_name="treemap", **kwargs):
super(ParentssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@treemap@[email protected]_END.py
|
{
"filename": "test_calculations.py",
"repo_name": "macrocosme/shwirl",
"repo_path": "shwirl_extracted/shwirl-master/shwirl/extern/vispy/geometry/tests/test_calculations.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from numpy.testing import assert_allclose
from vispy.testing import assert_raises
from vispy.geometry import resize
def test_resize():
"""Test image resizing algorithms
"""
assert_raises(ValueError, resize, np.zeros(3), (3, 3))
assert_raises(ValueError, resize, np.zeros((3, 3)), (3,))
assert_raises(ValueError, resize, np.zeros((3, 3)), (4, 4), kind='foo')
for kind, tol in (('nearest', 1e-5), ('linear', 2e-1)):
shape = np.array((10, 11, 3))
data = np.random.RandomState(0).rand(*shape)
assert_allclose(data, resize(data, shape[:2], kind=kind),
rtol=1e-5, atol=1e-5)
# this won't actually be that close for bilinear interp
assert_allclose(data, resize(resize(data, 2 * shape[:2], kind=kind),
shape[:2], kind=kind), atol=tol, rtol=tol)
|
macrocosmeREPO_NAMEshwirlPATH_START.@shwirl_extracted@shwirl-master@shwirl@extern@vispy@geometry@tests@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "bek0s/gbkfit",
"repo_path": "gbkfit_extracted/gbkfit-master/src/gbkfit/driver/native/__init__.py",
"type": "Python"
}
|
bek0sREPO_NAMEgbkfitPATH_START.@gbkfit_extracted@gbkfit-master@src@gbkfit@driver@native@[email protected]_END.py
|
|
{
"filename": "reduction_metrics_test.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/src/metrics/reduction_metrics_test.py",
"type": "Python"
}
|
import numpy as np
from keras.src import backend
from keras.src import testing
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.metrics import reduction_metrics
from keras.src.saving import register_keras_serializable
class SumTest(testing.TestCase):
def test_config(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
self.assertEqual(sum_obj.name, "sum")
self.assertEqual(len(sum_obj.variables), 1)
self.assertEqual(sum_obj._dtype, "float32")
# Check save and restore config
sum_obj2 = reduction_metrics.Sum.from_config(sum_obj.get_config())
self.assertEqual(sum_obj2.name, "sum")
self.assertEqual(len(sum_obj2.variables), 1)
self.assertEqual(sum_obj2._dtype, "float32")
def test_unweighted(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
sum_obj.update_state([1, 3, 5, 7])
result = sum_obj.result()
self.assertAllClose(result, 16.0, atol=1e-3)
def test_weighted(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
sum_obj.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
result = sum_obj.result()
self.assertAllClose(result, 4.0, atol=1e-3)
def test_weighted_nd(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
sum_obj.update_state([[1, 3], [5, 7]], sample_weight=[[1, 1], [1, 0]])
result = sum_obj.result()
self.assertAllClose(result, 9.0, atol=1e-3)
def test_weighted_nd_broadcast(self):
sum_obj = reduction_metrics.Sum(name="sum", dtype="float32")
sum_obj.update_state([[1, 3], [5, 7]], sample_weight=[[1, 0]])
result = sum_obj.result()
self.assertAllClose(result, 6.0, atol=1e-3)
class MeanTest(testing.TestCase):
def test_config(self):
mean_obj = reduction_metrics.Mean(name="mean", dtype="float32")
self.assertEqual(mean_obj.name, "mean")
self.assertEqual(len(mean_obj.variables), 2)
self.assertEqual(mean_obj._dtype, "float32")
# Check save and restore config
mean_obj2 = reduction_metrics.Mean.from_config(mean_obj.get_config())
self.assertEqual(mean_obj2.name, "mean")
self.assertEqual(len(mean_obj2.variables), 2)
self.assertEqual(mean_obj2._dtype, "float32")
def test_unweighted(self):
mean_obj = reduction_metrics.Mean(name="mean", dtype="float32")
mean_obj.update_state([1, 3, 5, 7])
result = mean_obj.result()
self.assertAllClose(result, 4.0, atol=1e-3)
def test_weighted(self):
mean_obj = reduction_metrics.Mean(name="mean", dtype="float32")
mean_obj.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
result = mean_obj.result()
self.assertAllClose(result, 2.0, atol=1e-3)
def test_weighted_negative_weights(self):
mean_obj = reduction_metrics.Mean(name="mean", dtype="float32")
mean_obj.update_state([1, 3, 5, 7], sample_weight=[-1, -1, 0, 0])
result = mean_obj.result()
self.assertAllClose(result, 2.0, atol=1e-3)
def test_weighted_nd(self):
mean_obj = reduction_metrics.Mean(name="mean", dtype="float32")
mean_obj.update_state([[1, 3], [5, 7]], sample_weight=[[1, 1], [1, 0]])
result = mean_obj.result()
self.assertAllClose(result, 3.0, atol=1e-3)
def test_weighted_nd_broadcast(self):
mean_obj = reduction_metrics.Mean(name="mean", dtype="float32")
mean_obj.update_state([[1, 3], [5, 7]], sample_weight=[[1, 0]])
result = mean_obj.result()
self.assertAllClose(result, 3.0, atol=1e-3)
def test_weighted_dynamic_shapes(self):
mean_obj = reduction_metrics.Mean(name="mean", dtype="float32")
result = backend.compute_output_spec(
mean_obj, KerasTensor((None, 2)), KerasTensor((None, 2))
)
self.assertAllEqual(result.shape, ())
# How users would register a custom function or class to use with
# MeanMetricWrapper.
@register_keras_serializable(package="test", name="mse")
def mse(y_true, y_pred):
return (y_true - y_pred) ** 2
class MetricWrapperTest(testing.TestCase):
def test_config(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
self.assertEqual(mse_obj.name, "mse")
self.assertEqual(len(mse_obj.variables), 2)
self.assertEqual(mse_obj._dtype, "float32")
# Check save and restore config
mse_obj2 = reduction_metrics.MeanMetricWrapper.from_config(
mse_obj.get_config()
)
self.assertEqual(mse_obj2.name, "mse")
self.assertEqual(len(mse_obj2.variables), 2)
self.assertEqual(mse_obj2._dtype, "float32")
self.assertTrue("fn" in mse_obj2.get_config())
def test_unweighted(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
mse_obj.update_state(y_true, y_pred)
result = mse_obj.result()
self.assertAllClose(0.5, result, atol=1e-5)
def test_weighted(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
sample_weight = np.array([1.0, 1.5, 2.0, 2.5])
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.54285, result, atol=1e-5)
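        # Informal arithmetic behind the expected value, assuming the wrapper
        # reduces as sum(values * weight) / sum(weight) with the per-sample
        # weights broadcast across the 5 columns:
        #   per-row sums of squared errors: 2, 2, 2, 4
        #   weighted sum   = 1.0*2 + 1.5*2 + 2.0*2 + 2.5*4 = 19
        #   sum of weights = (1.0 + 1.5 + 2.0 + 2.5) * 5   = 35
        #   19 / 35 = 0.542857...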
def test_weighted_broadcast(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
y_true = np.array(
[[0, 1, 0, 1, 0], [0, 0, 1, 1, 1], [1, 1, 1, 1, 0], [0, 0, 0, 0, 1]]
)
y_pred = np.array(
[[0, 0, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 0, 1, 0], [1, 1, 1, 1, 1]]
)
sample_weight = np.array([[1.0, 0.0, 0.5, 0.0, 1.0]])
result = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(0.45, result, atol=1e-5)
def test_weighted_dynamic_shape(self):
mse_obj = reduction_metrics.MeanMetricWrapper(
fn=mse, name="mse", dtype="float32"
)
result = backend.compute_output_spec(
mse_obj,
KerasTensor((None, 5)),
KerasTensor((None, 5)),
KerasTensor((None, 5)),
)
self.assertAllEqual(result.shape, ())
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@metrics@[email protected]_END.py
|
{
"filename": "alpha_dropout.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/regularization/alpha_dropout.py",
"type": "Python"
}
|
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.AlphaDropout")
class AlphaDropout(Layer):
"""Applies Alpha Dropout to the input.
    Alpha Dropout is a variant of `Dropout` that keeps the mean and variance of
    its inputs at their original values, in order to preserve the
    self-normalizing property even after dropout is applied.
    Alpha Dropout pairs well with Scaled Exponential Linear Units (SELU) because
    it randomly sets activations to the negative saturation value rather than to zero.
Args:
rate: Float between 0 and 1. The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
noise_shape: 1D integer tensor representing the shape of the
binary alpha dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the alpha dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding alpha dropout) or in inference mode
(doing nothing).
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
self.noise_shape = noise_shape
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self.built = True
def call(self, inputs, training=False):
if training and self.rate > 0:
noise_shape = self._get_concrete_noise_shape(
inputs, self.noise_shape
)
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
alpha_p = -alpha * scale
kept_idx = ops.greater_equal(
ops.random.uniform(noise_shape, seed=self.seed_generator),
self.rate,
)
kept_idx = ops.cast(kept_idx, inputs.dtype)
# Compute affine transformation parameters
a = ((1 - self.rate) * (1 + self.rate * alpha_p**2)) ** -0.5
b = -a * alpha_p * self.rate
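            # With inputs that have zero mean and unit variance (as SELU
            # activations are designed to produce), this affine transform keeps
            # the masked output at zero mean and unit variance as well.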
# Apply mask
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
return a * x + b
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def _get_concrete_noise_shape(self, inputs, noise_shape):
if noise_shape is None:
return ops.shape(inputs)
concrete_inputs_shape = ops.shape(inputs)
concrete_noise_shape = []
for i, value in enumerate(noise_shape):
concrete_noise_shape.append(
concrete_inputs_shape[i] if value is None else value
)
return concrete_noise_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
"noise_shape": self.noise_shape,
}
return {**base_config, **config}
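# A minimal usage sketch, assuming the public Keras API: AlphaDropout is
# typically paired with "selu" activations and "lecun_normal" initializers so
# that the self-normalizing property is preserved end to end.
if __name__ == "__main__":
    import numpy as np

    import keras

    model = keras.Sequential(
        [
            keras.layers.Dense(
                64, activation="selu", kernel_initializer="lecun_normal"
            ),
            keras.layers.AlphaDropout(rate=0.1),
            keras.layers.Dense(10),
        ]
    )
    x = np.random.randn(8, 32).astype("float32")
    y_train = model(x, training=True)  # units are masked and rescaled
    y_infer = model(x, training=False)  # AlphaDropout is a no-op here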
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@regularization@[email protected]_END.py
|
{
"filename": "_orientation.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/_orientation.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OrientationValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="orientation", parent_name="histogram", **kwargs):
super(OrientationValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
values=kwargs.pop("values", ["v", "h"]),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@histogram@[email protected]_END.py
|
{
"filename": "custom_call.filecheck.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/tests/filecheck/custom_call.filecheck.py",
"type": "Python"
}
|
# Copyright 2023 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Tests for mlir.custom_call().
# RUN: %PYTHON %s | FileCheck %s
from absl import app
import jax
from jax.interpreters import mlir
from jax._src.lib.mlir import ir
from jax._src.lib.mlir.dialects import func as func_dialect
import numpy as np
ShapedArray = jax.core.ShapedArray
def print_custom_call(name, arg_avals, result_avals, **kw):
print(f"TEST: {name}")
ctx = mlir.make_ir_context()
loc = ir.Location.unknown(context=ctx)
with ctx, loc:
module = ir.Module.create(loc=ir.Location.unknown())
ip = ir.InsertionPoint(module.body)
arg_types = [mlir.aval_to_ir_type(aval) for aval in arg_avals]
result_types = [mlir.aval_to_ir_type(aval) for aval in result_avals]
ftype = ir.FunctionType.get(arg_types, result_types)
func = func_dialect.FuncOp("func", ftype, ip=ip)
entry_block = func.add_entry_block()
with ir.InsertionPoint(entry_block):
outs = mlir.custom_call(
name, result_types=result_types, operands=entry_block.arguments, **kw
)
func_dialect.ReturnOp(outs)
module.operation.verify()
print(str(module))
def main(_):
aval1 = ShapedArray((2, 3), np.dtype(np.float32))
aval2 = ShapedArray((3, 4), np.dtype(np.int64))
# CHECK-LABEL: TEST: simple
# CHECK: stablehlo.custom_call @simple(%arg0) {api_version = 2 : i32, backend_config = ""} : (tensor<2x3xf32>) -> tensor<3x4xi64>
print_custom_call("simple", [aval1], [aval2])
# CHECK-LABEL: TEST: sideeffect
# CHECK: stablehlo.custom_call @sideeffect(%arg0) {backend_config = "", has_side_effect = true} : (tensor<2x3xf32>) -> tensor<3x4xi64>
print_custom_call("sideeffect", [aval1], [aval2], api_version=1,
has_side_effect=True)
# CHECK-LABEL: TEST: backendconfig
# CHECK: stablehlo.custom_call @backendconfig(%arg0) {backend_config = "hello"} : (tensor<2x3xf32>) -> tensor<3x4xi64>
print_custom_call("backendconfig", [aval1], [aval2], api_version=1,
backend_config=b"hello")
# CHECK-LABEL: TEST: calledcomputations
# CHECK: stablehlo.custom_call @calledcomputations(%arg0) {backend_config = "", called_computations = [@a, @b]} : (tensor<2x3xf32>) -> tensor<3x4xi64>
print_custom_call("calledcomputations", [aval1], [aval2], api_version=1,
called_computations=["a", "b"])
# CHECK-LABEL: TEST: aliases
# CHECK: stablehlo.custom_call @aliases(%arg0, %arg1) {backend_config = "", output_operand_aliases = [#stablehlo.output_operand_alias<output_tuple_indices = [0], operand_index = 1, operand_tuple_indices = []>]} : (tensor<2x3xf32>, tensor<3x4xi64>) -> (tensor<3x4xi64>, tensor<2x3xf32>)
print_custom_call("aliases", [aval1, aval2], [aval2, aval1], api_version=1,
operand_output_aliases={1: 0})
# CHECK-LABEL: TEST: layouts
# CHECK: stablehlo.custom_call @layouts(%arg0, %arg1) {backend_config = "", operand_layouts = [dense<[0, 1]> : tensor<2xindex>, dense<[1, 0]> : tensor<2xindex>], result_layouts = [dense<[1, 0]> : tensor<2xindex>, dense<[0, 1]> : tensor<2xindex>]} : (tensor<2x3xf32>, tensor<3x4xi64>) -> (tensor<3x4xi64>, tensor<2x3xf32>)
print_custom_call("layouts", [aval1, aval2], [aval2, aval1], api_version=1,
operand_layouts=[[0, 1], [1, 0]],
result_layouts=[[1, 0], [0, 1]])
if __name__ == "__main__":
app.run(main)
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@filecheck@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "b-thorne/PySM_public",
"repo_path": "PySM_public_extracted/PySM_public-master/pysm/test/__init__.py",
"type": "Python"
}
|
import os
import os.path
def get_testdata(*filename):
import pysm
# if a test data dir is set by run-tests.py, use it.
# otherwise fall back to the test data in source (e.g. if ran directly with py.test)
reldir = os.path.join(os.path.abspath(os.path.join(os.path.dirname(pysm.__file__), '..')), "test_data")
testdata = os.environ.get('PYSM_TESTDATA_DIR', reldir)
return os.path.join(testdata, *filename)
|
b-thorneREPO_NAMEPySM_publicPATH_START.@PySM_public_extracted@PySM_public-master@pysm@test@[email protected]_END.py
|
{
"filename": "core.py",
"repo_name": "D-arioSpace/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/splatalogue/core.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
Module to search Splatalogue.net via splat, modeled loosely on
ftp://ftp.cv.nrao.edu/NRAO-staff/bkent/slap/idl/
:author: Adam Ginsburg <[email protected]>
"""
import json
from astropy.table import Table
from astropy import units as u
from ..query import BaseQuery
from ..utils import async_to_sync, prepend_docstr_nosections
from . import conf
from . import load_species_table
from .utils import clean_column_headings
from astropy.utils.decorators import deprecated_renamed_argument
__all__ = ['Splatalogue', 'SplatalogueClass']
# example query of SPLATALOGUE directly:
# https://www.cv.nrao.edu/php/splat/c.php?sid%5B%5D=64&sid%5B%5D=108&calcIn=&data_version=v3.0&from=&to=&frequency_units=MHz&energy_range_from=&energy_range_to=&lill=on&tran=&submit=Search&no_atmospheric=no_atmospheric&no_potential=no_potential&no_probable=no_probable&include_only_nrao=include_only_nrao&displayLovas=displayLovas&displaySLAIM=displaySLAIM&displayJPL=displayJPL&displayCDMS=displayCDMS&displayToyaMA=displayToyaMA&displayOSU=displayOSU&displayRecomb=displayRecomb&displayLisa=displayLisa&displayRFI=displayRFI&ls1=ls1&ls5=ls5&el1=el1
# for backward-compatibility
# (As of March 5, this is incomplete, but is enough to make `minimize_table` work)
colname_mapping_feb2024 = {
'Species': 'name',
'Chemical Name': 'chemical_name',
'Resolved QNs': 'resolved_QNs',
'Freq-GHz(rest frame,redshifted)': 'orderedFreq',
'Meas Freq-GHz(rest frame,redshifted)': 'measFreq',
'Log<sub>10</sub> (A<sub>ij</sub>)': 'aij',
'E_U (K)': 'upper_state_energy_K',
}
@async_to_sync
class SplatalogueClass(BaseQuery):
SLAP_URL = conf.slap_url
QUERY_URL = conf.query_url
TIMEOUT = conf.timeout
LINES_LIMIT = conf.lines_limit
versions = ('v1.0', 'v2.0', 'v3.0', 'vall')
# global constant, not user-configurable
ALL_LINE_LISTS = ('LovasNIST', 'SLAIM', 'JPL', 'CDMS', 'ToyaMA', 'OSU',
'TopModel', 'Recombination', 'RFI')
VALID_LINE_STRENGTHS = ('CDMSJPL', 'SijMu2', 'Sij', 'Aij', 'LovasAST')
VALID_ENERGY_LEVELS = {'One': 'EL_cm-1',
'Two': 'EL_K',
'Three': 'EU_cm-1',
'Four': 'EU_K'}
VALID_ENERGY_TYPES = ('el_cm1', 'eu_cm1', 'eu_k', 'el_k')
VALID_INTENSITY_TYPES = ('CDMS/JPL (log)', 'Sij-mu2', 'Aij (log)')
def __init__(self, **kwargs):
"""
Initialize a Splatalogue query class with default arguments set.
Frequency specification is required for *every* query, but any
default keyword arguments (see `query_lines`) can be overridden
here.
"""
super().__init__()
self.data = self._default_kwargs()
self.set_default_options(**kwargs)
def set_default_options(self, **kwargs):
"""
Modify the default options.
See `query_lines`
"""
self.data.update(json.loads(self._parse_kwargs(**kwargs)['body']))
@deprecated_renamed_argument("restr", "species_regex", since="0.4.7")
def get_species_ids(self, species_regex=None, *, reflags=0, recache=False):
"""
Get a dictionary of "species" IDs, where species refers to the molecule
name, mass, and chemical composition.
Parameters
----------
species_regex : str
String to search for among the species names, if specified.
The string will be compiled into a regular expression using the
python `re` module.
reflags : int
Flags to pass to `re`.
recache : bool
Flag whether to refresh the local cache of species IDs
Examples
--------
>>> import re
>>> import pprint # unfortunate hack required for documentation testing
>>> rslt = Splatalogue.get_species_ids(species_regex='Formaldehyde')
>>> pprint.pprint(rslt)
{'03023 H2CO - Formaldehyde': '194',
'03106 H213CO - Formaldehyde': '324',
'03107 HDCO - Formaldehyde': '109',
'03108 H2C17O - Formaldehyde': '982',
'03202 H2C18O - Formaldehyde': '155',
'03203 D2CO - Formaldehyde': '94',
'03204 HD13CO - Formaldehyde': '1219',
'03301 D213CO - Formaldehyde': '1220',
'03315 HDC18O - Formaldehyde': '21141',
'03410 D2C18O - Formaldehyde': '21140'}
>>> rslt = Splatalogue.get_species_ids(species_regex='H2CO')
>>> pprint.pprint(rslt)
{'03023 H2CO - Formaldehyde': '194',
'03109 H2COH+ - Hydroxymethylium ion': '224',
'04406 c-H2COCH2 - Ethylene Oxide': '21',
'06029 NH2CONH2 - Urea': '21166',
'07510 H2NCH2COOH - I v=0 - Glycine': '389',
'07511 H2NCH2COOH - I v=1 - Glycine': '1312',
'07512 H2NCH2COOH - I v=2 - Glycine': '1313',
'07513 H2NCH2COOH - II v=0 - Glycine': '262',
'07514 H2NCH2COOH - II v=1 - Glycine': '1314',
'07515 H2NCH2COOH - II v=2 - Glycine': '1315',
'07517 NH2CO2CH3 v=0 - Methyl Carbamate': '1334',
'07518 NH2CO2CH3 v=1 - Methyl Carbamate': '1335',
'08902 CH3CHNH2COOH - I - α-Alanine': '1321',
'08903 CH3CHNH2COOH - II - α-Alanine': '1322'}
>>> # note the whitespace, preventing H2CO within other
>>> # more complex molecules
>>> Splatalogue.get_species_ids(species_regex=' H2CO ')
{'03023 H2CO - Formaldehyde': '194'}
>>> Splatalogue.get_species_ids(species_regex=' h2co ', reflags=re.IGNORECASE)
{'03023 H2CO - Formaldehyde': '194'}
"""
# loading can be an expensive operation and should not change at
# runtime: do it lazily
if not hasattr(self, '_species_ids'):
self._species_ids = load_species_table.species_lookuptable(recache=recache)
if species_regex is not None:
return self._species_ids.find(species_regex, flags=reflags)
else:
return self._species_ids
def _default_kwargs(self):
kwargs = dict(min_frequency=0 * u.GHz,
max_frequency=100 * u.THz,
chemical_name='',
line_lists=self.ALL_LINE_LISTS,
line_strengths=self.VALID_LINE_STRENGTHS,
energy_levels=self.VALID_ENERGY_LEVELS.keys(),
exclude=('potential', 'atmospheric', 'probable'),
version='v3.0',
only_NRAO_recommended=None,
export=True,
export_limit=self.LINES_LIMIT,
noHFS=False, displayHFS=False, show_unres_qn=False,
show_upper_degeneracy=False, show_molecule_tag=False,
show_qn_code=False, show_lovas_labref=False,
show_lovas_obsref=False, show_orderedfreq_only=False,
show_nrao_recommended=False,)
return json.loads(self._parse_kwargs(**kwargs)['body'])
def _parse_kwargs(self, *, min_frequency=None, max_frequency=None,
chemical_name=None,
chem_re_flags=0, energy_min=None, energy_max=None,
energy_type=None, intensity_lower_limit=None,
intensity_type=None, transition=None, version=None,
exclude=None,
only_astronomically_observed=None,
only_NRAO_recommended=None,
line_lists=None, line_strengths=None, energy_levels=None,
export=None, export_limit=None, noHFS=None,
displayHFS=None, show_unres_qn=None,
show_upper_degeneracy=None, show_molecule_tag=None,
show_qn_code=None, show_lovas_labref=None,
show_lovas_obsref=None, show_orderedfreq_only=None,
show_nrao_recommended=None,
parse_chemistry_locally=True,
export_start=0,
export_stop=250):
"""
The Splatalogue service returns lines with rest frequencies in the
range [min_frequency, max_frequency].
Parameters
----------
min_frequency : `astropy.units`
Minimum frequency (or any spectral() equivalent)
max_frequency : `astropy.units`
Maximum frequency (or any spectral() equivalent)
chemical_name : str
Name of the chemical to search for. Treated as a regular
expression. An empty set ('', (), [], {}) will match *any*
species. Examples:
``'H2CO'`` - 13 species have H2CO somewhere in their formula.
``'Formaldehyde'`` - There are 8 isotopologues of Formaldehyde
(e.g., H213CO).
            ``'formaldehyde'`` - Thioformaldehyde, Cyanoformaldehyde.
            ``'formaldehyde', chem_re_flags=re.I`` - Formaldehyde, Thioformaldehyde,
            and Cyanoformaldehyde.
``' H2CO '`` - Just 1 species, H2CO. The spaces prevent including
others.
parse_chemistry_locally : bool
Attempt to determine the species ID #'s locally before sending the
query? This will prevent queries that have no matching species.
It also performs a more flexible regular expression match to the
species IDs. See the examples in `get_species_ids`
chem_re_flags : int
See the `re` module
energy_min : `None` or float
Energy range to include. See energy_type
energy_max : `None` or float
Energy range to include. See energy_type
energy_type : ``'el_cm1'``, ``'eu_cm1'``, ``'eu_k'``, ``'el_k'``
Type of energy to restrict. L/U for lower/upper state energy,
cm/K for *inverse* cm, i.e. wavenumber, or K for Kelvin
intensity_lower_limit : `None` or float
Lower limit on the intensity. See intensity_type
intensity_type : `None` or ``'sij'``, ``'cdms_jpl'``, ``'aij'``
The type of intensity on which to place a lower limit
transition : str
e.g. 1-0
version : ``'v1.0'``, ``'v2.0'``, ``'v3.0'`` or ``'vall'``
Data version
exclude : list
Types of lines to exclude. Default is:
(``'potential'``, ``'atmospheric'``, ``'probable'``)
Can also exclude ``'known'``.
To exclude nothing, use 'none', not the python object None, since
the latter is meant to indicate 'leave as default'
only_astronomically_observed : bool
Show only astronomically observed species?
only_NRAO_recommended : bool
Show only NRAO recommended species?
line_lists : list
Options:
Lovas, SLAIM, JPL, CDMS, ToyaMA, OSU, Recombination, RFI
line_strengths : list
* CDMS/JPL Intensity : ls1
* Sij : ls3
* Aij : ls4
* Lovas/AST : ls5
energy_levels : list
* E_lower (cm^-1) : "One"
* E_lower (K) : "Two"
* E_upper (cm^-1) : "Three"
* E_upper (K) : "Four"
export : bool
Set up arguments for the export server (as opposed to the HTML
server)?
export_limit : int
Maximum number of lines in output file
noHFS : bool
No HFS Display
displayHFS : bool
Display HFS Intensity
show_unres_qn : bool
Display Unresolved Quantum Numbers
show_upper_degeneracy : bool
Display Upper State Degeneracy
show_molecule_tag : bool
Display Molecule Tag
show_qn_code : bool
Display Quantum Number Code
show_lovas_labref : bool
Display Lab Ref
show_lovas_obsref : bool
Display Obs Ref
show_orderedfreq_only : bool
Display Ordered Frequency ONLY
show_nrao_recommended : bool
Display NRAO Recommended Frequencies
Returns
-------
payload : dict
Dictionary of the parameters to send to the SPLAT page
"""
payload = {"searchSpecies": "",
"speciesSelectBox": [],
"dataVersion": "v3.0",
"userInputFrequenciesFrom": [],
"userInputFrequenciesTo": [],
"userInputFrequenciesUnit": "GHz",
"frequencyRedshift": 0,
"energyFrom": 0,
"energyTo": 0,
"energyRangeType": "el_cm-1",
"lineIntensity": "None",
"lineIntensityLowerLimit": 0,
"excludeAtmosSpecies": False,
"excludePotentialInterstellarSpecies": False,
"excludeProbableInterstellarSpecies": False,
"excludeKnownASTSpecies": False,
"showOnlyAstronomicallyObservedTransitions": False,
"showOnlyNRAORecommendedFrequencies": False,
"lineListDisplayJPL": True,
"lineListDisplayCDMS": True,
"lineListDisplayLovasNIST": True,
"lineListDisplaySLAIM": True,
"lineListDisplayToyaMA": True,
"lineListDisplayOSU": True,
"lineListDisplayRecombination": True,
"lineListDisplayTopModel": True,
"lineListDisplayRFI": True,
"lineStrengthDisplayCDMSJPL": True,
"lineStrengthDisplaySijMu2": False,
"lineStrengthDisplaySij": False,
"lineStrengthDisplayAij": False,
"lineStrengthDisplayLovasAST": True,
"energyLevelOne": True,
"energyLevelTwo": False,
"energyLevelThree": False,
"energyLevelFour": False,
"displayObservedTransitions": False,
"displayG358MaserTransitions": False,
"displayObservationReference": False,
"displayObservationSource": False,
"displayTelescopeLovasNIST": False,
"frequencyErrorLimit": False,
"displayHFSIntensity": False,
"displayUnresolvedQuantumNumbers": False,
"displayUpperStateDegeneracy": False,
"displayMoleculeTag": False,
"displayQuantumNumberCode": False,
"displayLabRef": False,
"displayOrderedFrequencyOnly": False,
"displayNRAORecommendedFrequencies": False,
"displayUniqueSpeciesTag": False,
"displayUniqueLineIDNumber": False,
"exportType": "current",
"exportDelimiter": "tab",
"exportLimit": "allRecords",
"exportStart": 1,
"exportStop": 250}
if min_frequency is not None and max_frequency is not None:
# allow setting payload without having *ANY* valid frequencies set
min_frequency = min_frequency.to(u.GHz, u.spectral())
max_frequency = max_frequency.to(u.GHz, u.spectral())
if min_frequency > max_frequency:
min_frequency, max_frequency = max_frequency, min_frequency
payload['userInputFrequenciesFrom'] = [min_frequency.value]
payload['userInputFrequenciesTo'] = [max_frequency.value]
if chemical_name in ('', {}, (), [], set(), None):
# include all by default, or whatever default was set
payload['speciesSelectBox'] = (self.data['speciesSelectBox']
if hasattr(self, 'data')
else [])
elif chemical_name is not None:
if parse_chemistry_locally:
species_ids = self.get_species_ids(species_regex=chemical_name, reflags=chem_re_flags)
if len(species_ids) == 0:
raise ValueError("No matching chemical species found.")
payload['speciesSelectBox'] = list(species_ids.values())
else:
payload['searchSpecies'] = chemical_name
if energy_min is not None:
payload['energyFrom'] = float(energy_min)
if energy_max is not None:
payload['energyTo'] = float(energy_max)
if energy_type is not None:
if energy_type not in self.VALID_ENERGY_TYPES:
raise ValueError(f'energy_type must be one of {self.VALID_ENERGY_TYPES}')
payload['energyRangeType'] = energy_type
if intensity_lower_limit is not None:
if intensity_type is None:
raise ValueError("If you specify an intensity lower limit, you must also specify its intensity_type.")
elif intensity_type not in self.VALID_INTENSITY_TYPES:
raise ValueError(f'intensity_type must be one of {self.VALID_INTENSITY_TYPES}')
payload['lineIntensity'] = intensity_type
payload['lineIntensityLowerLimit'] = intensity_lower_limit
if version in self.versions:
payload['dataVersion'] = version
elif version is not None:
raise ValueError("Invalid version specified. Allowed versions "
"are {vers}".format(vers=str(self.versions)))
if exclude is not None:
if 'potential' in exclude:
payload['excludePotentialInterstellarSpecies'] = True
if 'atmospheric' in exclude:
payload['excludeAtmosSpecies'] = True
if 'probable' in exclude:
payload['excludeProbableInterstellarSpecies'] = True
if 'known' in exclude:
payload['excludeKnownASTSpecies'] = True
if only_astronomically_observed:
payload['showOnlyAstronomicallyObservedTransitions'] = True
if only_NRAO_recommended:
payload['showOnlyNRAORecommendedFrequencies'] = True
if line_lists is not None:
if type(line_lists) not in (tuple, list):
raise TypeError("Line lists should be a list of linelist "
"names. See Splatalogue.ALL_LINE_LISTS")
for L in self.ALL_LINE_LISTS:
kwd = 'lineListDisplay' + L
payload[kwd] = L in line_lists
if line_strengths is not None:
for LS in line_strengths:
if LS not in self.VALID_LINE_STRENGTHS:
raise ValueError(f"Line strengths must be one of {self.VALID_LINE_STRENGTHS}")
payload['lineStrengthDisplay' + LS] = True
if energy_levels is not None:
for EL in energy_levels:
if EL not in self.VALID_ENERGY_LEVELS:
raise ValueError("Energy levels must be a number spelled out, i.e., "
f"one of {self.VALID_ENERGY_LEVELS}")
payload['energyLevel' + EL] = True
for b in ("displayHFSIntensity", "displayUnresolvedQuantumNumbers",
"displayUpperStateDegeneracy", "displayMoleculeTag",
"displayQuantumNumberCode", "displayLabRef",
"displayOrderedFrequencyOnly", "displayNRAORecommendedFrequencies",
"displayUniqueLineIDNumber", "displayUniqueSpeciesTag"):
if b in locals() and locals()[b]:
payload[b] = True
if export:
payload['exportDelimiter'] = 'tab' # or tab or comma
payload['exportType'] = 'current'
payload['exportStart'] = export_start
payload['exportStop'] = export_stop
if export_limit is not None:
payload['exportLimit'] = export_limit
else:
payload['exportLimit'] = self.LINES_LIMIT
payload = {'body': json.dumps(payload),
'headers': {'normalizedNames': {}, 'lazyUpdate': None}}
return payload
def _validate_kwargs(self, *, min_frequency=None, max_frequency=None,
**kwargs):
"""
Check that min_frequency + max_frequency are specified
"""
if min_frequency is None or max_frequency is None:
raise ValueError("Must specify min/max frequency")
@prepend_docstr_nosections("\n" + _parse_kwargs.__doc__)
def query_lines_async(self, min_frequency=None, max_frequency=None, *,
cache=True, **kwargs):
"""
Returns
-------
response : `requests.Response`
The response of the HTTP request.
"""
# have to chomp this kwd here...
get_query_payload = kwargs.pop('get_query_payload', False)
self._validate_kwargs(min_frequency=min_frequency,
max_frequency=max_frequency, **kwargs)
data_payload = self._parse_kwargs(min_frequency=min_frequency,
max_frequency=max_frequency,
**kwargs)
if hasattr(self, 'data'):
body = self.data.copy()
else:
body = self._default_kwargs()
body.update(json.loads(
self._parse_kwargs(min_frequency=min_frequency,
max_frequency=max_frequency, **kwargs)['body']))
data_payload['body'] = json.dumps(body)
if get_query_payload:
return data_payload
response = self._request(method='POST',
url=self.QUERY_URL,
json=data_payload,
timeout=self.TIMEOUT,
cache=cache)
self.response = response
return response
def _parse_result(self, response, *, verbose=False):
"""
Parse a response into an `~astropy.table.Table`
Parameters
----------
verbose : bool
Has no effect; kept for API compatibility
"""
# these are metadata items not intended to be part of the table
meta_columns = ['orderFreqColName', 'measFreqColName']
meta = {}
jdat = response.json()
result = Table([x for x in jdat if x['species_id'] is not None])
for key in meta_columns:
if key in result.colnames:
meta[key] = result[key][0]
del result[key]
result.meta = meta
return result
def get_fixed_table(self, *, columns=None):
"""
Convenience function to get the table with html column names made human
readable. It returns only the columns identified with the ``columns``
keyword. See the source for the defaults.
"""
if columns is None:
columns = ('Species', 'Chemical Name', 'Resolved QNs',
'Freq-GHz(rest frame,redshifted)',
'Meas Freq-GHz(rest frame,redshifted)',
'Log<sub>10</sub> (A<sub>ij</sub>)',
'E_U (K)')
table = clean_column_headings(self.table[columns])
return table
Splatalogue = SplatalogueClass()
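# A minimal usage sketch, assuming the synchronous query_lines wrapper that
# @async_to_sync generates from query_lines_async (column names in the returned
# table can differ between Splatalogue data versions); u is the astropy units
# module imported at the top of this file.
if __name__ == "__main__":
    lines = Splatalogue.query_lines(
        min_frequency=114 * u.GHz,
        max_frequency=116 * u.GHz,
        # surrounding spaces restrict the regex match to CO itself
        chemical_name=" CO ",
    )
    print(lines.colnames)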
|
D-arioSpaceREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@[email protected]@.PATH_END.py
|
{
"filename": "CHANGELOG.md",
"repo_name": "dmentipl/plonk",
"repo_path": "plonk_extracted/plonk-main/CHANGELOG.md",
"type": "Markdown"
}
|
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
Types of changes:
- `Added` for new features.
- `Changed` for changes in existing functionality.
- `Deprecated` for soon-to-be removed features.
- `Removed` for now removed features.
- `Fixed` for any bug fixes.
- `Security` in case of vulnerabilities.
## [Unreleased]
## [0.7.4] - 2021-10-13
### Changed
- Moved from Travis CI to GitHub actions for tests/CI.
- Moved to src layout.
- Version is now set in `src/plonk/__init__.py` which is read in `setup.cfg`.
- Update MANIFEST.in.
- Renamed master branch to main. Changes reflected in docs.
- Restructure docs, including changing from reStructuredText to Markdown using myst-parser.
## [0.7.3] - 2020-08-28
### Added
- Add add_alias method to Profile.
- Add disc_viscosity, alpha_shakura_sunyaev, epicyclic_frequency, midplane_stokes_number profiles.
- Add utils vector_array_names like dust_array_names.
- Add plotting of multiple dust or vector profiles.
- Add extra test data to complement the test suite.
- Add function to set missing header values on converted Phantom-HDF5 files.
### Changed
- Renamed toomre_Q profile to toomre_q.
- Use cache context manager to generate some Snap attributes so as to not cache some arrays in memory.
- Some profiles have been removed when making non-radial profiles.
- The analysis.total module now uses sink particles in computing summed quantities.
- Moved some quantities from particles to discs as they are not generic but appropriate for disc simulations.
- Rename interpolate arguments: number_of_pixels -> num_pixels and density_weighted -> weighted; weighted is now a named argument (i.e. not just caught with **kwargs) to image, vector, and interpolate.
- Rename `load_ev` to `load_time_series`.
- Rename `load_sim` to `load_simulation`.
- Replaced Snap.set_gravitational_parameter with Snap.set_central_body. A central body is required to calculate orbital dynamics quantities, e.g. eccentricity, on the particles.
- Refactored plonk.load_snap. Much of the code that was in the phantom reader module was, in fact, more general. Now, some of that code lives in Snap.load_snap which requires defining three functions in the reader modules: snap_properties_and_units, snap_array_registry, and snap_sink_registry
- Use figshare to host sample datasets not Anaconda Cloud.
### Deprecated
- `load_ev` is deprecated in favour of `load_time_series`.
- `load_sim` is deprecated in favour of `load_simulation`.
### Fixed
- Fix units labels in Profile.plot.
- Fix units of internal energy.
- Fix bugs in rotation to face-on and edge-on.
- Fix bugs in disc position and inclination angles.
## [0.7.2] - 2020-08-23
### Added
- Add label argument to Profile.plot.
- Add dust_array_names utility function. This makes a list of array names broken into sub-species.
- Add plonk.animate function to provide a common interface to animation functions.
- Always cache arrays during plotting using the caching context manager.
- Added function name to logging messages.
### Changed
- Analysis particles functions with dust quantities are now for the dust "mixture method" (i.e. 1-fluid).
- Remove '_tot' as an array suffix for dust arrays. This was causing confusion as it makes no sense to sum stopping times.
- Rename base_array_name to base_profile_name.
- Change interpolate to return dimensionful quantity.
- Moved utils functions from each sub-package to modules in utils sub-package.
- Moved animation, animation_particles, animation_profiles from plonk namespace to plonk.visualize namespace.
- Renamed animation to animation_images.
### Fixed
- Fix bug in setting default units for dimensionless arrays.
- Fix bugs in Profile related to units.
- Fix bug in std dev shading in Profile.plot.
## [0.7.1] - 2020-08-21
### Added
- Use a TOML config file to configure options.
- Add default_units and set_units on Snap and Profile.
- Sinks are iterable.
- Plot error bars on profiles.
### Changed
- Renamed subsnaps_by_type to subsnaps_as_dict.
- Renamed units_defaults to array_units.
- Name mapping, units, aliases are no longer hard coded and are now in config.toml.
- If no units specified in image/plot/vector functions then use the default units on the Snap if available.
- Renamed some analysis.particles functions.
- Sinks analysis functions take a Sinks object as arguments.
- Profile.plot "std_dev_shading" argument changed to "std", and now is a string not a bool.
### Fixed
- Fixed bug in using Snap for snaps with no sinks.
- Fixed bug in accessing a single Sink if a np.int rather than int was passed in.
- Fixed bug in reading Phantom datasets without knowing units.
## [0.7.0] - 2020-08-17
### Added
- Added plonk.image to make image plots (with interpolation and then matplotlib imshow).
- Added plonk.vector to make vector plots (with interpolation and then matplotlib quiver).
- Added plot_smoothing_length function to plot the smoothing length on particles, or accretion radius on sink particles.
- Added pretty_array_name function to prettify array names.
- Added visualize_sim as a method on Simulation objects.
- Allow getting subsets of Sinks.
- Added ax_kwargs to plotting functions for passing to ax.set.
- Added xlim, ylim on visualize.plot.
- Added units_dict function to return a units dictionary for passing to plotting functions.
- Added bulk load and unload functions on Snap for loading/unloading multiple arrays into/out-of memory.
- Add context manager for caching arrays on Snap.
- Added public methods on Snap: family for accessing particle families, and array for accessing particle arrays. These are already accessible via __getitem__ but this makes the underlying methods available.
- Add function to add missing units to array on Snap.
### Changed
- Removed plonk.particle_plot in favour of plonk.plot.
- Changed plonk.plot to produce particle plots
- Renamed MultiVisualization and plot_snaps to VisualizeSimulation and visualize_sim.
- Changed units system from cgs to SI.
- Simplified animation functions by only allowing one axes per animation.
- Changed default units to more conventional SI units, e.g. Pascal for pressure and Joule for energy.
- Simplified tree and neighbours functions on Snap. Now there is only one tree for a Snap. If you want a tree for just, say, dust particles, then first create a SubSnap and get the tree on that.
- Changed _Sinks into Sinks, i.e. a public class.
- All plotting functions/methods use the same argument for setting units.
- Renamed Snap.available_arrays argument "all" to "verbose".
- Changed Snap.units to Snap.code_units.
- Use pretty_array_name for plots labels.
- Rename Snap.unset to Snap.reset and allow for unloading cached arrays.
- When setting Snap.cache_arrays to False, no longer unload currently cached arrays.
### Fixed
- Fixed writing Snap.to_dataframe units.
## [0.6.2] - 2020-08-11
### Changed
- Use setup.cfg for setuptools, and pyproject.toml (and setup.cfg) for config of tools.
- Version is set in setup.cfg and imported into plonk via importlib_metadata.
- Changed API documentation.
- Moved sph module from utils sub-package to analysis.
### Deprecated
- plonk.particle_plot will be removed.
- plonk.plot will change from image plots to particle plots, and plonk.image and plonk.vector will be added to replace plonk.plot.
- Default units will change from cgs to SI.
### Fixed
- Fixed bug in Profile with getting number of mixture dust species.
- Fixed bugs in animation functions (due to making physical units on by default).
- Fixed issues with colorbar size matching height of plots.
## [0.6.1] - 2020-08-09
### Added
- Snap.sinks attribute has more features.
- Cross section interpolation in a non-xy plane specified by a normal vector to the plane.
- Snap.rotate can be set by an axis vector and angle as opposed to a scipy Rotation object.
- discs module to analysis.
- filters module to analysis to set SubSnaps easily.
- 'id' array on Snap to help track particles.
- Function to plot the smoothing length as a circle.
- Profile method to generate a function from a profile to help create particle filters, for example.
- Simulation method to create a particle array over the whole simulation.
### Changed
- Snap.available_arrays does not reference sink particles; see Snap.sinks.available_arrays.
- Profile.plot units are now consistent with visualize functions.
- Dust profiles in Profile are now distinguished by whether they are mixture (dust/gas) particles or dust-only particles.
### Fixed
- Setting origin in extra quantities.
- All analysis functions have better physical units support.
- Bug in Snap.num_particles_of_type.
## [0.6.0] - 2020-08-05
### Added
- Added plot and particle_plot as methods of the Snap class. This allows for plotting with `snap.plot(quantity='quantity')` as opposed to `plonk.visualize.plot(snap=snap, quantity='quantity')`.
- Axis and colorbars have labels by default now, including units.
- The to_dataframe Snap method now indicates units in the column names, e.g. `position [au]`.
- The available_arrays method of Snap has additional arguments to see all sub-arrays on particles, e.g. `velocity_x` and `dust_fraction_001`.
- Added to examples and quick-start in documentation.
- Added method to re-open a closed Snap file.
### Changed
- Physical units are turned on by default on Snap objects. All particle and sink arrays have units (provided by Pint).
- The units attribute of Snap and Simulation now only has core units, i.e. length, time, mass, and magnetic field.
- Some extra quantities have been renamed.
- Extra quantities are available on Snap objects by default.
- The arguments radius_min and radius_max in Profile have been renamed cmin and cmax to reflect that profiles are not just radial.
### Fixed
- Fixed setting pressure from Phantom equation of states.
## [0.5.3] - 2020-07-28
### Fixed
- Fixed major bug in setting extra dust arrays on a Snap.
## [0.5.2] - 2020-07-28
### Added
- Change log.
- Cartesian profiles in y- and z-direction, in addition to x-direction which was already implemented.
### Changed
- Do not raise exception in extra_quantities and physical_units if already set.
- Scikit-image and tqdm are no longer required dependencies.
- Conda environment renamed from plonk-dev to plonk.
- Refactor Plonk namespace. Fewer modules are directly imported.
## [0.5.1] - 2020-07-11
### Added
- Analysis functions for sink particles.
- Function to animate particle plots.
- Different colours per particle type in particle plots.
- Tqdm progress bar for animation.
### Changed
- Use Matplotlib consistent argument names in particle_plot.
### Fixed
- Fix bug in standard deviation shading in Profile.
## [0.5.0] - 2020-04-20
### Added
- Neighbour finding via kd-tree.
- Compute SPH derivatives using kd-tree.
- IPython tab completion for Snap arrays and Profile profiles.
- Profile can have ndim==1 which gives a linear profile, useful for box calculations.
- Option to turn off caching of particle arrays, so that they are always read from file.
- Write derived arrays to HDF5 file, and read arrays from that file onto a Snap.
- Added logging of warning and other information messages.
### Changed
- Generalize sub-types: dust_type → sub_type; this allows for Phantom boundary particle sub-types.
### Removed
- Remove `Visualization` class in favour of just returning matplotlib's Axes and Figure objects.
## [0.4.1] - 2020-03-24
### Added
- Add scatter plots, i.e. particle plots with variable color and size markers.
- Add `extra_quantities` method to easily calculate extra quantities on the snapshot.
- Allow for setting array units, whether the array is rotatable, and whether it is a dust on derived arrays.
- Profiles are automatically generated from any 1d Snap quantity.
- Access individual dust quantities on profiles via '_001', etc.
- Read Phantom equation of state information to get pressure, sound speed, temperature.
- Add extra Snap quantities, e.g. Stokes number.
- Add extra profiles, e.g. Toomre Q.
- Allow accessing components of Snap quantities via '_x', etc.
- Calculate standard deviation on profiles.
### Changed
- Use verbose names for all snapshot quantities, e.g. 'dust_fraction' not 'dustfrac' and 'velocity_divergence' not 'divv'.
### Removed
- Remove `Evolution` object in favour of pandas DataFrame.
## [0.4.0] - 2020-03-15
### Added
- Add physical units on the `Snap` object.
- Physical units are compatible with all visualization and analysis modules.
## [0.3.1] - 2020-03-06
### Added
- Add many analysis functions.
- Animations of visualizations.
### Changed
- Make it easier to add profiles to Profile
- Make `plonk.visualize.plot` easier to use.
### Fixed
- Fix bug in `Snap.rotate` not rotating sink particles.
## [0.3.0] - 2019-12-07
### Changed
- Interpolation functions are now written in Python and JIT-compiled with Numba.
## [0.2.1] - 2019-11-27
### Added
- Add the JOSS paper describing the code.
## [0.2.0] - 2019-11-06
### Changed
- Use KDEpy for interpolation.
## [0.1.0] - 2019-06-28
- Initial release.
|
dmentiplREPO_NAMEplonkPATH_START.@plonk_extracted@[email protected]@.PATH_END.py
|
{
"filename": "readme.md",
"repo_name": "tomasoshea/chameleon",
"repo_path": "chameleon_extracted/chameleon-main/readme.md",
"type": "Markdown"
}
|
# solar chameleon production
Code to calculate the solar Primakoff production of light scalars / chameleons.
- *filter.py* reads the solar model from the AGSS09 files in the data folder and outputs easy-to-use .dat files
- *solarcham.cpp* contains all the code needed to calculate spectra, energy loss, profiles, etc., in the form of .dat files
- *utils.h* contains constants and helper definitions and is used by *solarcham.cpp*
- the python plotting codes then produce various plots based on the data output by *solarcham.cpp* (a rough loading/plotting sketch follows below)
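As a rough sketch of the final plotting step (assumptions: *solarcham.cpp* has written a two-column spectrum to something like `data/spectrum.dat`; the actual filenames and column layout depend on your run):

```python
import numpy as np
import matplotlib.pyplot as plt

# load a two-column .dat file produced by solarcham.cpp (placeholder path)
omega, flux = np.loadtxt("data/spectrum.dat", unpack=True)

plt.plot(omega, flux)
plt.xlabel("energy")
plt.ylabel("flux")
plt.show()
```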
|
tomasosheaREPO_NAMEchameleonPATH_START.@chameleon_extracted@[email protected]@.PATH_END.py
|
{
"filename": "enumerate_ops.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/data/experimental/ops/enumerate_ops.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Enumerate dataset transformations."""
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@deprecation.deprecated(None, "Use `tf.data.Dataset.enumerate()`.")
@tf_export("data.experimental.enumerate_dataset")
def enumerate_dataset(start=0):
"""A transformation that enumerates the elements of a dataset.
It is similar to python's `enumerate`.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { (7, 8), (9, 10) }
# The nested structure of the `datasets` argument determines the
# structure of elements in the resulting dataset.
a.apply(tf.data.experimental.enumerate_dataset(start=5))
=> { (5, 1), (6, 2), (7, 3) }
b.apply(tf.data.experimental.enumerate_dataset())
=> { (0, (7, 8)), (1, (9, 10)) }
```
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start value for
enumeration.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return dataset.enumerate(start)
return _apply_fn
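# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The deprecation notice above recommends `tf.data.Dataset.enumerate()`. Assuming a
# TensorFlow 2.x runtime, the modern equivalent of `enumerate_dataset(start=5)` is:
if __name__ == "__main__":
  import tensorflow as tf

  ds = tf.data.Dataset.from_tensor_slices([1, 2, 3])
  # Dataset.enumerate yields (index, element) pairs, with the index starting at 5.
  for index, value in ds.enumerate(start=5):
    print(int(index), int(value))  # -> 5 1, 6 2, 7 3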
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@data@experimental@ops@[email protected]_END.py
|
{
"filename": "test_numpy_network.py",
"repo_name": "pynucastro/pynucastro",
"repo_path": "pynucastro_extracted/pynucastro-main/pynucastro/networks/tests/test_numpy_network.py",
"type": "Python"
}
|
import pytest
from numpy.testing import assert_allclose
import pynucastro as pyna
class TestNumpyNetwork:
"""Make sure the vectorized methods give the same results."""
@pytest.fixture(scope="class")
def net(self, reaclib_library):
rate_names = ["c12(p,g)n13",
"c13(p,g)n14",
"n13(,)c13",
"n13(p,g)o14",
"n14(p,g)o15",
"n15(p,a)c12",
"o14(,)n14",
"o15(,)n15",
"he4(aa,g)c12"]
rates = reaclib_library.get_rate_by_name(rate_names)
net = pyna.NumpyNetwork(rates=rates)
return net
@pytest.fixture(scope="class")
def comp(self, net):
c = pyna.Composition(net.unique_nuclei)
c.set_solar_like()
return c
@pytest.fixture(scope="class")
def rho(self):
return 1e5
@pytest.fixture(scope="class")
def temp(self):
return 1e8
def test_yfac_arr(self, net, comp):
expected = [0.0001666666666666666, 0.0001538461538461538,
0.00021978021978021975, 0.0001538461538461538,
0.00014285714285714281, 0.00013333333333333329,
0.0002040816326530612, 0.00019047619047619045,
0.00034299999999999966]
net.clear_arrays()
net.update_yfac_arr(comp)
assert_allclose(net.yfac, expected, rtol=1e-10, atol=1e-100)
def test_prefac_arr(self, net, rho, comp):
expected = [rho, rho, 1.0, rho, rho, rho, 1.0, 1.0, rho*rho / 6]
net.clear_arrays()
net.update_prefac_arr(rho, comp)
assert_allclose(net.prefac, expected, rtol=1e-10, atol=1e-100)
def test_evaluate_rates_arr(self, net, rho, comp, temp):
expected = []
rv = net.evaluate_rates(rho=rho, T=temp, composition=comp)
expected = [rv[r] for r in net.rates]
net.clear_arrays()
net.update_yfac_arr(comp)
with pytest.raises(Exception):
net.evaluate_rates_arr(temp)
net.update_prefac_arr(rho, comp)
rates_arr = net.evaluate_rates_arr(temp)
assert_allclose(rates_arr, expected, rtol=1e-10, atol=1e-100)
def test_evaluate_ydots_arr(self, net, rho, comp, temp):
ydots = net.evaluate_ydots(rho=rho, T=temp, composition=comp)
expected = [ydots[nuc] for nuc in net.unique_nuclei]
net.clear_arrays()
with pytest.raises(Exception):
net.evaluate_ydots_arr(temp)
net.update_yfac_arr(comp)
net.update_prefac_arr(rho, comp)
ydots_arr = net.evaluate_ydots_arr(temp)
assert_allclose(ydots_arr, expected, rtol=1e-10, atol=1e-100)
def test_evaluate_activity_arr(self, net, rho, comp, temp):
activity = net.evaluate_activity(rho=rho, T=temp, composition=comp)
expected = [activity[nuc] for nuc in net.unique_nuclei]
net.clear_arrays()
with pytest.raises(Exception):
net.evaluate_activity_arr(temp)
net.update_yfac_arr(comp)
net.update_prefac_arr(rho, comp)
activity_arr = net.evaluate_activity_arr(temp)
assert_allclose(activity_arr, expected, rtol=1e-10, atol=1e-100)
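# --- Added note (illustration only; not part of the original tests) ---
# The pytest.raises checks above encode the call order that the vectorized API
# expects: after clear_arrays(), both update_yfac_arr(comp) and
# update_prefac_arr(rho, comp) must be called before evaluate_rates_arr,
# evaluate_ydots_arr, or evaluate_activity_arr will work; calling any of the
# evaluate_*_arr methods with either array missing raises.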
|
pynucastroREPO_NAMEpynucastroPATH_START.@pynucastro_extracted@pynucastro-main@pynucastro@networks@tests@[email protected]_END.py
|
{
"filename": "independent_sample.py",
"repo_name": "POSYDON-code/POSYDON",
"repo_path": "POSYDON_extracted/POSYDON-main/posydon/popsyn/independent_sample.py",
"type": "Python"
}
|
"""Generate the initial parameters for a binary population."""
__authors__ = [
"Devina Misra <[email protected]>",
"Jeffrey Andrews <[email protected]>",
"Kyle Akira Rocha <[email protected]>",
"Konstantinos Kovlakas <[email protected]>",
"Simone Bavera <[email protected]>",
"Emmanouil Zapartas <[email protected]>",
"Scott Coughlin <[email protected]>",
]
import numpy as np
from scipy.stats import truncnorm
from posydon.utils import rejection_sampler
def generate_independent_samples(orbital_scheme, **kwargs):
"""Randomly generate a population of binaries at ZAMS.
Parameters
----------
number_of_binaries : int
Number of binaries that require randomly sampled orbital separations
**kwargs : dictionary
kwargs from BinaryPopulation class
Returns
-------
orbital_scheme_set : ndarray of floats
Randomly drawn orbital separations/periods depending on the scheme
eccentricity_set : ndarray of floats
Randomly drawn eccentricities
m1_set : ndarray of floats
Randomly drawn primary masses
m2_set : ndarray of floats
Randomly drawn secondary masses
"""
# Generate eccentricities
eccentricity_set = generate_eccentricities(**kwargs)
# Generate primary masses
m1_set = generate_primary_masses(**kwargs)
# Generate secondary masses
m2_set = generate_secondary_masses(m1_set, **kwargs)
if orbital_scheme == 'separation':
# Generate orbital separations
orbital_scheme_set = generate_orbital_separations(**kwargs)
elif orbital_scheme == 'period':
# Generate orbital periods
orbital_scheme_set = generate_orbital_periods(m1_set, **kwargs)
else:
raise ValueError("Allowed orbital schemes are separation or period.")
return orbital_scheme_set, eccentricity_set, m1_set, m2_set
def generate_orbital_periods(primary_masses,
number_of_binaries=1,
orbital_period_min=0.35,
orbital_period_max=10**3.5,
orbital_period_scheme='Sana+12_period_extended',
**kwargs):
"""Randomaly generate orbital periods for a sample of binaries."""
RNG = kwargs.get('RNG', np.random.default_rng())
# Check inputs
# Sana H., et al., 2012, Science, 337, 444
if orbital_period_scheme == 'Sana+12_period_extended':
# compute periods as if all M1 <= 15Msun (where pi = 0.0)
orbital_periods_M_lt_15 = 10**RNG.uniform(
low=np.log10(orbital_period_min),
high=np.log10(orbital_period_max),
size=number_of_binaries)
# compute periods as if all M1 > 15Msun
def pdf(logp):
pi = 0.55
beta = 1 - pi
A = np.log10(10**0.15)**(-pi)*(np.log10(10**0.15)
- np.log10(orbital_period_min))
B = 1./beta*(np.log10(orbital_period_max)**beta
- np.log10(10**0.15)**beta)
C = 1./(A + B)
pdf = np.zeros(len(logp))
for j, logp_j in enumerate(logp):
# for logP<=0.15 days, the pdf is uniform
if np.log10(orbital_period_min) <= logp_j and logp_j < 0.15:
pdf[j] = C*0.15**(-pi)
# original Sana H., et al., 2012, Science, 337, 444
elif 0.15 <= logp_j and logp_j < np.log10(orbital_period_max):
pdf[j] = C*logp_j**(-pi)
else:
pdf[j] = 0.
return pdf
orbital_periods_M_gt_15 = 10**(rejection_sampler(
size=number_of_binaries,
x_lim=[np.log10(orbital_period_min), np.log10(orbital_period_max)],
pdf=pdf))
orbital_periods = np.where(primary_masses <= 15.0,
orbital_periods_M_lt_15,
orbital_periods_M_gt_15)
else:
raise ValueError("You must provide an allowed orbital period scheme.")
return orbital_periods
def generate_orbital_separations(number_of_binaries=1,
orbital_separation_min=5,
orbital_separation_max=1e5,
log_orbital_separation_mean=None,
log_orbital_separation_sigma=None,
orbital_separation_scheme='log_uniform',
**kwargs):
"""Generate random orbital separations.
Use the scheme defined in this particular instance of BinaryPopulation.
Parameters
----------
number_of_binaries : int
Number of binaries that require randomly sampled orbital separations
orbital_separation_min : float
Minimum orbital separation in solar radii
orbital_separation_max : float
Maximum orbital separation in solar radii
log_orbital_separation_mean : float
Mean of the lognormal distribution.
log_orbital_separation_sigma : float
        Standard deviation of the lognormal distribution.
orbital_separation_scheme : string
Distribution from which the orbital separations are randomly drawn
Returns
-------
orbital_separations : ndarray of floats
Randomly drawn orbital separations
"""
RNG = kwargs.get('RNG', np.random.default_rng())
orbital_separation_scheme_options = ['log_uniform', 'log_normal']
# Check inputs
if orbital_separation_scheme not in orbital_separation_scheme_options:
raise ValueError("You must provide an allowed "
"orbital separation scheme.")
if orbital_separation_scheme == 'log_uniform':
orbital_separations = 10**RNG.uniform(
low=np.log10(orbital_separation_min),
high=np.log10(orbital_separation_max),
size=number_of_binaries)
if orbital_separation_max < orbital_separation_min:
raise ValueError("`orbital_separation_max` must be "
"larger than the orbital_separation_min.")
elif orbital_separation_scheme == 'log_normal':
if (log_orbital_separation_mean is None
or log_orbital_separation_sigma is None):
raise ValueError(
"For the `log_normal separation` scheme you must give "
"`log_orbital_separation_mean`, "
"`log_orbital_separation_sigma`.")
# Set limits for truncated normal distribution
a_low = (np.log10(orbital_separation_min)
- log_orbital_separation_mean) / log_orbital_separation_sigma
a_high = (np.log10(orbital_separation_max)
- log_orbital_separation_mean) / log_orbital_separation_sigma
        # generate orbital separations from a truncated normal distribution
log_orbital_separations = truncnorm.rvs(
a_low, a_high,
loc=log_orbital_separation_mean,
scale=log_orbital_separation_sigma,
size=number_of_binaries,
random_state=RNG)
orbital_separations = 10**log_orbital_separations
else:
pass
return orbital_separations
def generate_eccentricities(number_of_binaries=1,
eccentricity_scheme='thermal',
**kwargs):
"""Generate random eccentricities.
Use the scheme defined in this particular instance of BinaryPopulation.
Parameters
----------
number_of_binaries : int
Number of binaries that require randomly sampled orbital separations
eccentricity_scheme : string
Distribution from which eccentricities are randomly drawn
**kwargs : dictionary
kwargs from BinaryPopulation class
Returns
-------
eccentricities : ndarray of floats
Randomly drawn eccentricities
"""
RNG = kwargs.get('RNG', np.random.default_rng())
eccentricity_scheme_options = ['thermal', 'uniform', 'zero']
if eccentricity_scheme not in eccentricity_scheme_options:
raise ValueError("You must provide an allowed eccentricity scheme.")
if eccentricity_scheme == 'thermal':
eccentricities = np.sqrt(RNG.uniform(size=number_of_binaries))
elif eccentricity_scheme == 'uniform':
eccentricities = RNG.uniform(size=number_of_binaries)
elif eccentricity_scheme == 'zero':
eccentricities = np.zeros(number_of_binaries)
else:
# This should never be reached
pass
return eccentricities
def generate_primary_masses(number_of_binaries=1,
primary_mass_min=7,
primary_mass_max=120,
primary_mass_scheme='Salpeter',
**kwargs):
"""Generate random primary masses.
Use the scheme defined in this particular instance of BinaryPopulation.
Parameters
----------
number_of_binaries : int
Number of binaries that require randomly sampled orbital separations
primary_mass_min : float
Minimum primary mass
primary_mass_max : float
Maximum primary mass
primary_mass_scheme : string
Distribution from which the primary masses are randomly drawn
Returns
-------
primary_masses : ndarray of floats
Randomly drawn primary masses
"""
RNG = kwargs.get('RNG', np.random.default_rng())
primary_mass_scheme_options = ['Salpeter', 'Kroupa1993', 'Kroupa2001']
if primary_mass_scheme not in primary_mass_scheme_options:
raise ValueError("You must provide an allowed primary mass scheme.")
# Salpeter E. E., 1955, ApJ, 121, 161
if primary_mass_scheme == 'Salpeter':
alpha = 2.35
normalization_constant = (1.0-alpha) / (primary_mass_max**(1-alpha)
- primary_mass_min**(1-alpha))
random_variable = RNG.uniform(size=number_of_binaries)
primary_masses = (random_variable*(1.0-alpha)/normalization_constant
+ primary_mass_min**(1.0-alpha))**(1.0/(1.0-alpha))
# Kroupa P., Tout C. A., Gilmore G., 1993, MNRAS, 262, 545
elif primary_mass_scheme == 'Kroupa1993':
alpha = 2.7
normalization_constant = (1.0-alpha) / (primary_mass_max**(1-alpha)
- primary_mass_min**(1-alpha))
random_variable = RNG.uniform(size=number_of_binaries)
primary_masses = (random_variable*(1.0-alpha)/normalization_constant
+ primary_mass_min**(1.0-alpha))**(1.0/(1.0-alpha))
# Kroupa P., 2001, MNRAS, 322, 231
elif primary_mass_scheme == 'Kroupa2001':
alpha = 2.3
normalization_constant = (1.0-alpha) / (primary_mass_max**(1-alpha)
- primary_mass_min**(1-alpha))
random_variable = RNG.uniform(size=number_of_binaries)
primary_masses = (random_variable*(1.0-alpha)/normalization_constant
+ primary_mass_min**(1.0-alpha))**(1.0/(1.0-alpha))
else:
pass
return primary_masses
def generate_secondary_masses(primary_masses,
number_of_binaries=1,
secondary_mass_min=0.35,
secondary_mass_max=120,
secondary_mass_scheme='flat_mass_ratio',
**kwargs):
"""Generate random secondary masses.
Use the scheme defined in this particular instance of BinaryPopulation.
Parameters
----------
primary_masses : ndarray of floats
Previously drawn primary masses
number_of_binaries : int
Number of binaries that require randomly sampled orbital separations
secondary_mass_min : float
Minimum secondary mass
secondary_mass_max : float
Maximum secondary mass
secondary_mass_scheme : string
Distribution from which the secondary masses are randomly drawn
Returns
-------
secondary_masses : ndarray of floats
Randomly drawn secondary masses
"""
RNG = kwargs.get('RNG', np.random.default_rng())
secondary_mass_scheme_options = ['flat_mass_ratio', 'q=1']
# Input parameter checks
if secondary_mass_scheme not in secondary_mass_scheme_options:
raise ValueError("You must provide an allowed secondary mass scheme.")
if np.min(primary_masses) < secondary_mass_min:
raise ValueError("`secondary_mass_min` is "
"larger than some primary masses")
# Generate secondary masses
if secondary_mass_scheme == 'flat_mass_ratio':
mass_ratio_min = secondary_mass_min / primary_masses
mass_ratio_max = np.min([secondary_mass_max / primary_masses,
np.ones(len(primary_masses))], axis=0)
secondary_masses = (
(mass_ratio_max - mass_ratio_min) * RNG.uniform(
size=number_of_binaries) + mass_ratio_min) * primary_masses
if secondary_mass_scheme == 'q=1':
secondary_masses = primary_masses
return secondary_masses
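# --- Hedged usage sketch (illustration only; not part of the original module) ---
# Assuming POSYDON and its dependencies are installed, a minimal draw of five
# binaries with the default schemes could look like this; the seed and scheme
# choice are arbitrary.
if __name__ == "__main__":
    rng = np.random.default_rng(42)
    separations, eccentricities, m1, m2 = generate_independent_samples(
        'separation', number_of_binaries=5, RNG=rng)
    print(separations, eccentricities, m1, m2)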
|
POSYDON-codeREPO_NAMEPOSYDONPATH_START.@POSYDON_extracted@POSYDON-main@posydon@popsyn@[email protected]_END.py
|
{
"filename": "_discretization.py",
"repo_name": "rapidsai/cuml",
"repo_path": "cuml_extracted/cuml-main/python/cuml/cuml/_thirdparty/sklearn/preprocessing/_discretization.py",
"type": "Python"
}
|
# Original authors from Scikit-Learn:
# Henry Lin <[email protected]>
# Tom Dupré la Tour
# License: BSD
# This code originates from the Scikit-Learn library,
# it was since modified to allow GPU acceleration.
# This code is under BSD 3 clause license.
# Authors mentioned above do not endorse or promote this production.
# Copyright (c) 2020-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ....internals import _deprecate_pos_args
from ....internals.memory_utils import using_output_type
from ....common.array_descriptor import CumlArrayDescriptor
from ....internals.array_sparse import SparseCumlArray
from ....thirdparty_adapters import check_array
from ..utils.validation import FLOAT_DTYPES
from ..utils.validation import check_is_fitted
from cuml.internals.mixins import SparseInputTagMixin
from ..utils.skl_dependencies import BaseEstimator, TransformerMixin
from cuml.cluster import KMeans
from cuml.preprocessing import OneHotEncoder
import warnings
from cuml.internals.safe_imports import cpu_only_import
import numbers
from cuml.internals.safe_imports import gpu_only_import
np = gpu_only_import('cupy')
cpu_np = cpu_only_import('numpy')
def digitize(x, bins):
return np.searchsorted(bins, x, side='left')
class KBinsDiscretizer(TransformerMixin,
BaseEstimator,
SparseInputTagMixin):
"""
Bin continuous data into intervals.
Parameters
----------
n_bins : int or array-like, shape (n_features,) (default=5)
The number of bins to produce. Raises ValueError if ``n_bins < 2``.
encode : {'onehot', 'onehot-dense', 'ordinal'}, (default='onehot')
Method used to encode the transformed result.
onehot
Encode the transformed result with one-hot encoding
and return a sparse matrix. Ignored features are always
stacked to the right.
onehot-dense
Encode the transformed result with one-hot encoding
and return a dense array. Ignored features are always
stacked to the right.
ordinal
Return the bin identifier encoded as an integer value.
strategy : {'uniform', 'quantile', 'kmeans'}, (default='quantile')
Strategy used to define the widths of the bins.
uniform
All bins in each feature have identical widths.
quantile
All bins in each feature have the same number of points.
kmeans
Values in each bin have the same nearest center of a 1D k-means
cluster.
Attributes
----------
n_bins_ : int array, shape (n_features,)
Number of bins per feature. Bins whose width are too small
(i.e., <= 1e-8) are removed with a warning.
bin_edges_ : array of arrays, shape (n_features, )
The edges of each bin. Contain arrays of varying shapes ``(n_bins_, )``
Ignored features will have empty arrays.
See Also
--------
cuml.preprocessing.Binarizer : Class used to bin values as ``0`` or
``1`` based on a parameter ``threshold``.
Notes
-----
In bin edges for feature ``i``, the first and last values are used only for
``inverse_transform``. During transform, bin edges are extended to::
np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf])
You can combine ``KBinsDiscretizer`` with
:class:`cuml.compose.ColumnTransformer` if you only want to preprocess
part of the features.
``KBinsDiscretizer`` might produce constant features (e.g., when
``encode = 'onehot'`` and certain bins do not contain any data).
These features can be removed with feature selection algorithms
(e.g., :class:`sklearn.feature_selection.VarianceThreshold`).
Examples
--------
>>> from cuml.preprocessing import KBinsDiscretizer
>>> import cupy as cp
>>> X = [[-2, 1, -4, -1],
... [-1, 2, -3, -0.5],
... [ 0, 3, -2, 0.5],
... [ 1, 4, -1, 2]]
>>> X = cp.array(X)
>>> est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
>>> est.fit(X)
KBinsDiscretizer(...)
>>> Xt = est.transform(X)
>>> Xt
array([[0, 0, 0, 0],
[1, 1, 1, 0],
[2, 2, 2, 1],
[2, 2, 2, 2]], dtype=int32)
Sometimes it may be useful to convert the data back into the original
feature space. The ``inverse_transform`` function converts the binned
data into the original feature space. Each value will be equal to the mean
of the two bin edges.
>>> est.bin_edges_[0]
array([-2., -1., 0., 1.])
>>> est.inverse_transform(Xt)
array([[-1.5, 1.5, -3.5, -0.5],
[-0.5, 2.5, -2.5, -0.5],
[ 0.5, 3.5, -1.5, 0.5],
[ 0.5, 3.5, -1.5, 1.5]])
"""
bin_edges_internal_ = CumlArrayDescriptor()
n_bins_ = CumlArrayDescriptor()
@_deprecate_pos_args(version="21.06")
def __init__(self, n_bins=5, *, encode='onehot', strategy='quantile'):
self.n_bins = n_bins
self.encode = encode
self.strategy = strategy
def get_param_names(self):
return super().get_param_names() + [
"n_bins",
"encode",
"strategy"
]
def fit(self, X, y=None) -> "KBinsDiscretizer":
"""
Fit the estimator.
Parameters
----------
X : numeric array-like, shape (n_samples, n_features)
Data to be discretized.
y : None
Ignored. This parameter exists only for compatibility with
:class:`sklearn.pipeline.Pipeline`.
Returns
-------
self
"""
X = self._validate_data(X, dtype='numeric')
valid_encode = ('onehot', 'onehot-dense', 'ordinal')
if self.encode not in valid_encode:
raise ValueError("Valid options for 'encode' are {}. "
"Got encode={!r} instead."
.format(valid_encode, self.encode))
valid_strategy = ('uniform', 'quantile', 'kmeans')
if self.strategy not in valid_strategy:
raise ValueError("Valid options for 'strategy' are {}. "
"Got strategy={!r} instead."
.format(valid_strategy, self.strategy))
n_features = X.shape[1]
n_bins = self._validate_n_bins(n_features)
n_bins = np.asnumpy(n_bins)
bin_edges = cpu_np.zeros(n_features, dtype=object)
for jj in range(n_features):
column = X[:, jj]
col_min, col_max = column.min(), column.max()
if col_min == col_max:
warnings.warn("Feature %d is constant and will be "
"replaced with 0." % jj)
n_bins[jj] = 1
bin_edges[jj] = np.array([-np.inf, np.inf])
continue
if self.strategy == 'uniform':
bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)
elif self.strategy == 'quantile':
quantiles = np.linspace(0, 100, n_bins[jj] + 1)
bin_edges[jj] = np.asarray(np.percentile(column, quantiles))
# Workaround for https://github.com/cupy/cupy/issues/4451
# This should be removed as soon as a fix is available in cupy
# in order to limit alterations in the included sklearn code
bin_edges[jj][-1] = col_max
elif self.strategy == 'kmeans':
# Deterministic initialization with uniform spacing
uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5
# 1D k-means procedure
km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1,
output_type='cupy')
km = km.fit(column[:, None])
with using_output_type('cupy'):
centers = km.cluster_centers_[:, 0]
# Must sort, centers may be unsorted even with sorted init
centers.sort()
bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]
# Remove bins whose width are too small (i.e., <= 1e-8)
if self.strategy in ('quantile', 'kmeans'):
mask = np.diff(bin_edges[jj], prepend=-np.inf) > 1e-8
bin_edges[jj] = bin_edges[jj][mask]
if len(bin_edges[jj]) - 1 != n_bins[jj]:
warnings.warn('Bins whose width are too small (i.e., <= '
'1e-8) in feature %d are removed. Consider '
'decreasing the number of bins.' % jj)
n_bins[jj] = len(bin_edges[jj]) - 1
self.bin_edges_internal_ = bin_edges
self.n_bins_ = n_bins
if 'onehot' in self.encode:
self._encoder = OneHotEncoder(
categories=np.array([np.arange(i) for i in self.n_bins_]),
sparse_output=self.encode == 'onehot', output_type='cupy')
# Fit the OneHotEncoder with toy datasets
# so that it's ready for use after the KBinsDiscretizer is fitted
self._encoder.fit(np.zeros((1, len(self.n_bins_)), dtype=int))
return self
def _validate_n_bins(self, n_features):
"""Returns n_bins_, the number of bins per feature.
"""
orig_bins = self.n_bins
if isinstance(orig_bins, numbers.Number):
if not isinstance(orig_bins, numbers.Integral):
raise ValueError("{} received an invalid n_bins type. "
"Received {}, expected int."
.format(KBinsDiscretizer.__name__,
type(orig_bins).__name__))
if orig_bins < 2:
raise ValueError("{} received an invalid number "
"of bins. Received {}, expected at least 2."
.format(KBinsDiscretizer.__name__, orig_bins))
return np.full(n_features, orig_bins, dtype=int)
        n_bins = check_array(orig_bins, dtype=int, copy=True,
ensure_2d=False)
if n_bins.ndim > 1 or n_bins.shape[0] != n_features:
raise ValueError("n_bins must be a scalar or array "
"of shape (n_features,).")
bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)
violating_indices = np.where(bad_nbins_value)[0]
if violating_indices.shape[0] > 0:
indices = ", ".join(str(i) for i in violating_indices)
raise ValueError("{} received an invalid number "
"of bins at indices {}. Number of bins "
"must be at least 2, and must be an int."
.format(KBinsDiscretizer.__name__, indices))
return n_bins
def transform(self, X) -> SparseCumlArray:
"""
Discretize the data.
Parameters
----------
X : numeric array-like, shape (n_samples, n_features)
Data to be discretized.
Returns
-------
Xt : numeric array-like or sparse matrix
Data in the binned space.
"""
check_is_fitted(self)
Xt = check_array(X, copy=True, dtype=FLOAT_DTYPES)
n_features = self.n_bins_.shape[0]
if Xt.shape[1] != n_features:
raise ValueError("Incorrect number of features. Expecting {}, "
"received {}.".format(n_features, Xt.shape[1]))
bin_edges = self.bin_edges_internal_
for jj in range(Xt.shape[1]):
# Values which are close to a bin edge are susceptible to numeric
# instability. Add eps to X so these values are binned correctly
# with respect to their decimal truncation. See documentation of
# numpy.isclose for an explanation of ``rtol`` and ``atol``.
rtol = 1.e-5
atol = 1.e-8
eps = atol + rtol * np.abs(Xt[:, jj])
Xt[:, jj] = digitize(Xt[:, jj] + eps, bin_edges[jj][1:])
self.n_bins_ = np.asarray(self.n_bins_)
np.clip(Xt, 0, self.n_bins_ - 1, out=Xt)
Xt = Xt.astype(np.int32)
if self.encode == 'ordinal':
return Xt
Xt = self._encoder.transform(Xt)
return Xt
def inverse_transform(self, Xt) -> SparseCumlArray:
"""
Transform discretized data back to original feature space.
Note that this function does not regenerate the original data
due to discretization rounding.
Parameters
----------
Xt : numeric array-like, shape (n_sample, n_features)
Transformed data in the binned space.
Returns
-------
Xinv : numeric array-like
Data in the original feature space.
"""
check_is_fitted(self)
if 'onehot' in self.encode:
Xt = check_array(Xt, accept_sparse=['csr', 'coo'], copy=True)
Xt = self._encoder.inverse_transform(Xt)
Xinv = check_array(Xt, copy=True, dtype=FLOAT_DTYPES)
n_features = self.n_bins_.shape[0]
if Xinv.shape[1] != n_features:
raise ValueError("Incorrect number of features. Expecting {}, "
"received {}.".format(n_features, Xinv.shape[1]))
for jj in range(n_features):
bin_edges = self.bin_edges_internal_[jj]
bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
idxs = np.asnumpy(Xinv[:, jj])
Xinv[:, jj] = bin_centers[idxs.astype(np.int32)]
return Xinv
@property
def bin_edges_(self):
return self.bin_edges_internal_
|
rapidsaiREPO_NAMEcumlPATH_START.@cuml_extracted@cuml-main@python@cuml@cuml@_thirdparty@sklearn@preprocessing@[email protected]_END.py
|
{
"filename": "monitoring_test.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/tests/monitoring_test.py",
"type": "Python"
}
|
# Copyright 2022 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for jax.monitoring and jax._src.monitoring.
Verify that callbacks are registered/unregistered and invoked correctly to record
events.
"""
from absl.testing import absltest
from jax import monitoring
from jax._src import monitoring as jax_src_monitoring
class MonitoringTest(absltest.TestCase):
def tearDown(self):
monitoring.clear_event_listeners()
super().tearDown()
def test_record_event(self):
events = []
counters = {} # Map event names to frequency.
def increment_event_counter(event):
if event not in counters:
counters[event] = 0
counters[event] += 1
# Test that we can register multiple callbacks.
monitoring.register_event_listener(events.append)
monitoring.register_event_listener(increment_event_counter)
monitoring.record_event("test_unique_event")
monitoring.record_event("test_common_event")
monitoring.record_event("test_common_event")
self.assertListEqual(events, ["test_unique_event",
"test_common_event", "test_common_event"])
self.assertDictEqual(counters, {"test_unique_event": 1,
"test_common_event": 2})
def test_record_event_durations(self):
durations = {} # Map event names to frequency.
def increment_event_duration(event, duration):
if event not in durations:
durations[event] = 0.
durations[event] += duration
monitoring.register_event_duration_secs_listener(increment_event_duration)
monitoring.record_event_duration_secs("test_short_event", 1)
monitoring.record_event_duration_secs("test_short_event", 2)
monitoring.record_event_duration_secs("test_long_event", 10)
self.assertDictEqual(durations, {"test_short_event": 3,
"test_long_event": 10})
def test_unregister_exist_callback_success(self):
original_duration_listeners = jax_src_monitoring.get_event_duration_listeners()
callback = lambda event, durations: None
self.assertNotIn(callback, original_duration_listeners)
monitoring.register_event_duration_secs_listener(callback)
self.assertIn(callback, jax_src_monitoring.get_event_duration_listeners())
# Verify that original listeners list is not modified by register function.
self.assertNotEqual(original_duration_listeners,
jax_src_monitoring.get_event_duration_listeners())
jax_src_monitoring._unregister_event_duration_listener_by_callback(callback)
self.assertEqual(original_duration_listeners,
jax_src_monitoring.get_event_duration_listeners())
def test_unregister_not_exist_callback_fail(self):
callback = lambda event, durations: None
self.assertNotIn(callback,
jax_src_monitoring.get_event_duration_listeners())
with self.assertRaises(AssertionError):
jax_src_monitoring._unregister_event_duration_listener_by_callback(
callback)
def test_unregister_callback_index_in_range_success(self):
original_duration_listeners = jax_src_monitoring.get_event_duration_listeners()
callback = lambda event, durations: None
self.assertNotIn(callback, original_duration_listeners)
monitoring.register_event_duration_secs_listener(callback)
self.assertIn(callback, jax_src_monitoring.get_event_duration_listeners())
# Verify that original listeners list is not modified by register function.
self.assertNotEqual(original_duration_listeners,
jax_src_monitoring.get_event_duration_listeners())
jax_src_monitoring._unregister_event_duration_listener_by_index(-1)
self.assertEqual(original_duration_listeners,
jax_src_monitoring.get_event_duration_listeners())
def test_unregister_callback_index_out_of_range_fail(self):
size = len(jax_src_monitoring.get_event_duration_listeners())
# Verify index >= size raises AssertionError.
with self.assertRaises(AssertionError):
jax_src_monitoring._unregister_event_duration_listener_by_index(size)
# Verify index < -size raises AssertionError.
with self.assertRaises(AssertionError):
jax_src_monitoring._unregister_event_duration_listener_by_index(-size - 1)
def test_get_event_duration_listeners_returns_a_copy(self):
original_duration_listeners = jax_src_monitoring.get_event_duration_listeners()
callback = lambda event, durations: None
original_duration_listeners.append(callback)
self.assertNotIn(callback, jax_src_monitoring.get_event_duration_listeners())
self.assertNotEqual(original_duration_listeners,
jax_src_monitoring.get_event_duration_listeners())
def test_unregister_exist_event_callback_success(self):
original_event_listeners = jax_src_monitoring.get_event_listeners()
callback = lambda event: None
self.assertNotIn(callback, original_event_listeners)
monitoring.register_event_listener(callback)
self.assertIn(callback, jax_src_monitoring.get_event_listeners())
# Verify that original listeners list is not modified by register function.
self.assertNotEqual(original_event_listeners,
jax_src_monitoring.get_event_listeners())
jax_src_monitoring._unregister_event_listener_by_callback(callback)
self.assertEqual(original_event_listeners,
jax_src_monitoring.get_event_listeners())
def test_unregister_not_exist_event_callback_fail(self):
callback = lambda event: None
self.assertNotIn(callback, jax_src_monitoring.get_event_listeners())
with self.assertRaises(AssertionError):
jax_src_monitoring._unregister_event_listener_by_callback(callback)
if __name__ == "__main__":
absltest.main()
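# --- Hedged usage sketch (illustration only; not part of the original tests) ---
# Outside of tests, the public hooks exercised above are used like this:
#
#   from jax import monitoring
#   monitoring.register_event_duration_secs_listener(
#       lambda event, secs: print(f"{event}: {secs:.3f}s"))
#   monitoring.record_event_duration_secs("my_event", 0.25)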
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@[email protected]_END.py
|
{
"filename": "_stream.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choropleth/_stream.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="stream", parent_name="choropleth", **kwargs):
super(StreamValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Stream"),
data_docs=kwargs.pop(
"data_docs",
"""
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
""",
),
**kwargs,
)
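# --- Hedged usage sketch (illustration only; not part of the original validator) ---
# The data_docs above describe `maxpoints` and `token`. Assuming plotly is
# installed, the corresponding graph object is set on a trace like this; the
# token value is a placeholder.
if __name__ == "__main__":
    import plotly.graph_objects as go

    trace = go.Choropleth(
        stream=go.choropleth.Stream(maxpoints=50, token="abc123"))
    print(trace.stream.maxpoints)  # 50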
|
[email protected][email protected]@packages@python@plotly@plotly@validators@choropleth@[email protected]_END.py
|
{
"filename": "_marker.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/scatterpolar/unselected/_marker.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterpolar.unselected"
_path_str = "scatterpolar.unselected.marker"
_valid_props = {"color", "opacity", "size"}
# color
# -----
@property
def color(self):
"""
Sets the marker color of unselected points, applied only when a
selection exists.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity of unselected points, applied only when
a selection exists.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size of unselected points, applied only when a
selection exists.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the marker color of unselected points, applied
only when a selection exists.
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
size
Sets the marker size of unselected points, applied only
when a selection exists.
"""
def __init__(self, arg=None, color=None, opacity=None, size=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterpolar.u
nselected.Marker`
color
Sets the marker color of unselected points, applied
only when a selection exists.
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
size
Sets the marker size of unselected points, applied only
when a selection exists.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterpolar.unselected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.unselected.Marker`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
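# --- Hedged usage sketch (illustration only; not part of the original module) ---
# Assuming plotly is installed, the unselected-marker styling documented above
# is usually supplied through the trace constructor:
if __name__ == "__main__":
    import plotly.graph_objects as go

    trace = go.Scatterpolar(
        r=[1, 2, 3], theta=[0, 45, 90],
        unselected=dict(marker=dict(color="lightgray", opacity=0.3, size=6)))
    print(trace.unselected.marker.opacity)  # 0.3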
|
[email protected][email protected]@packages@python@plotly@plotly@graph_objs@scatterpolar@unselected@[email protected]_END.py
|
{
"filename": "util.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/_src/numpy/util.py",
"type": "Python"
}
|
# Copyright 2020 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Sequence
from functools import partial
from typing import Any
import warnings
from jax._src import api
from jax._src import config
from jax._src import core
from jax._src import dtypes
from jax._src.lax import lax
from jax._src.util import safe_zip, safe_map
from jax._src.typing import Array, ArrayLike, DimSize, DType, DTypeLike, Shape
import numpy as np
zip, unsafe_zip = safe_zip, zip
map, unsafe_map = safe_map, map
_dtype = partial(dtypes.dtype, canonicalize=True)
def promote_shapes(fun_name: str, *args: ArrayLike) -> list[Array]:
"""Apply NumPy-style broadcasting, making args shape-compatible for lax.py."""
if len(args) < 2:
return [lax.asarray(arg) for arg in args]
else:
shapes = [np.shape(arg) for arg in args]
if config.dynamic_shapes.value:
# With dynamic shapes we don't support singleton-dimension broadcasting;
# we instead broadcast out to the full shape as a temporary workaround.
# TODO(mattjj): revise this workaround
res_shape = lax.broadcast_shapes(*shapes) # Can raise an error!
return [_broadcast_to(arg, res_shape) for arg, shp in zip(args, shapes)]
else:
if all(len(shapes[0]) == len(s) for s in shapes[1:]):
return [lax.asarray(arg) for arg in args] # no need for rank promotion, so rely on lax promotion
nonscalar_ranks = {len(shp) for shp in shapes if shp}
if len(nonscalar_ranks) < 2:
return [lax.asarray(arg) for arg in args] # rely on lax scalar promotion
else:
if config.numpy_rank_promotion.value != "allow":
_rank_promotion_warning_or_error(fun_name, shapes)
result_rank = len(lax.broadcast_shapes(*shapes))
return [lax.broadcast_to_rank(arg, result_rank) for arg in args]
def _rank_promotion_warning_or_error(fun_name: str, shapes: Sequence[Shape]):
if config.numpy_rank_promotion.value == "warn":
msg = ("Following NumPy automatic rank promotion for {} on shapes {}. "
"Set the jax_numpy_rank_promotion config option to 'allow' to "
"disable this warning; for more information, see "
"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
warnings.warn(msg.format(fun_name, ' '.join(map(str, shapes))))
elif config.numpy_rank_promotion.value == "raise":
msg = ("Operands could not be broadcast together for {} on shapes {} "
"and with the config option jax_numpy_rank_promotion='raise'. "
"For more information, see "
"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
raise ValueError(msg.format(fun_name, ' '.join(map(str, shapes))))
def promote_dtypes(*args: ArrayLike) -> list[Array]:
"""Convenience function to apply Numpy argument dtype promotion."""
# TODO(dougalm,mattjj): This is a performance bottleneck. Consider memoizing.
if len(args) < 2:
return [lax.asarray(arg) for arg in args]
else:
to_dtype, weak_type = dtypes._lattice_result_type(*args)
to_dtype = dtypes.canonicalize_dtype(to_dtype, allow_extended_dtype=True) # type: ignore[assignment]
if config.sharding_in_types.value:
return [lax._convert_element_type(x, to_dtype, weak_type,
getattr(x, "sharding", None))
for x in args]
else:
return [lax._convert_element_type(x, to_dtype, weak_type) for x in args]
def promote_dtypes_inexact(*args: ArrayLike) -> list[Array]:
"""Convenience function to apply Numpy argument dtype promotion.
Promotes arguments to an inexact type."""
to_dtype, weak_type = dtypes._lattice_result_type(*args)
to_dtype = dtypes.canonicalize_dtype(to_dtype, allow_extended_dtype=True) # type: ignore[assignment]
to_dtype_inexact = dtypes.to_inexact_dtype(to_dtype)
return [lax._convert_element_type(x, to_dtype_inexact, weak_type)
for x in args]
def promote_dtypes_numeric(*args: ArrayLike) -> list[Array]:
"""Convenience function to apply Numpy argument dtype promotion.
Promotes arguments to a numeric (non-bool) type."""
to_dtype, weak_type = dtypes._lattice_result_type(*args)
to_dtype = dtypes.canonicalize_dtype(to_dtype)
to_dtype_numeric = dtypes.to_numeric_dtype(to_dtype)
return [lax._convert_element_type(x, to_dtype_numeric, weak_type)
for x in args]
def promote_dtypes_complex(*args: ArrayLike) -> list[Array]:
"""Convenience function to apply Numpy argument dtype promotion.
Promotes arguments to a complex type."""
to_dtype, weak_type = dtypes._lattice_result_type(*args)
to_dtype = dtypes.canonicalize_dtype(to_dtype)
to_dtype_complex = dtypes.to_complex_dtype(to_dtype)
return [lax._convert_element_type(x, to_dtype_complex, weak_type)
for x in args]
def _complex_elem_type(dtype: DTypeLike) -> DType:
"""Returns the float type of the real/imaginary parts of a complex dtype."""
return np.abs(np.zeros((), dtype)).dtype
def _arraylike(x: ArrayLike) -> bool:
return (isinstance(x, np.ndarray) or isinstance(x, Array) or
hasattr(x, '__jax_array__') or np.isscalar(x))
def check_arraylike(fun_name: str, *args: Any, emit_warning=False, stacklevel=3):
"""Check if all args fit JAX's definition of arraylike."""
assert isinstance(fun_name, str), f"fun_name must be a string. Got {fun_name}"
if any(not _arraylike(arg) for arg in args):
pos, arg = next((i, arg) for i, arg in enumerate(args)
if not _arraylike(arg))
msg = f"{fun_name} requires ndarray or scalar arguments, got {type(arg)} at position {pos}."
if emit_warning:
warnings.warn(msg + " In a future JAX release this will be an error.",
category=DeprecationWarning, stacklevel=stacklevel)
else:
      raise TypeError(msg)
def check_arraylike_or_none(fun_name: str, *args: Any):
assert isinstance(fun_name, str), f"fun_name must be a string. Got {fun_name}"
if any(not (_arraylike(arg) or arg is None) for arg in args):
pos, arg = next((i, arg) for i, arg in enumerate(args)
if not (_arraylike(arg) or arg is None))
msg = "{} requires ndarray, scalar, or None arguments, got {} at position {}."
raise TypeError(msg.format(fun_name, type(arg), pos))
def check_no_float0s(fun_name: str, *args: Any):
"""Check if none of the args have dtype float0."""
if any(dtypes.dtype(arg) == dtypes.float0 for arg in args):
raise TypeError(
f"Called {fun_name} with a float0 array. "
"float0s do not support any operations by design because they "
"are not compatible with non-trivial vector spaces. No implicit dtype "
"conversion is done. You can use np.zeros_like(arr, dtype=np.float) "
"to cast a float0 array to a regular zeros array. \n"
"If you didn't expect to get a float0 you might have accidentally "
"taken a gradient with respect to an integer argument.")
_check_no_float0s = check_no_float0s
def check_for_prngkeys(fun_name: str, *args: Any):
"""Check if args don't match and none of the args have typed prng dtype"""
arg_dtypes = [dtypes.dtype(arg) for arg in args]
if len(set(arg_dtypes)) < 2:
return # Will be caught by extended dtype impl rules.
if any(dtypes.issubdtype(dt, dtypes.prng_key) for dt in arg_dtypes):
if len(arg_dtypes) == 1:
raise TypeError(
f"{fun_name} does not accept dtype {str(arg_dtypes[0])}.")
else:
raise TypeError(
f"{fun_name} does not accept dtypes {', '.join(map(str, arg_dtypes))}."
)
def promote_args(fun_name: str, *args: ArrayLike) -> list[Array]:
"""Convenience function to apply Numpy argument shape and dtype promotion."""
check_arraylike(fun_name, *args)
_check_no_float0s(fun_name, *args)
check_for_prngkeys(fun_name, *args)
return promote_shapes(fun_name, *promote_dtypes(*args))
def promote_args_numeric(fun_name: str, *args: ArrayLike) -> list[Array]:
check_arraylike(fun_name, *args)
_check_no_float0s(fun_name, *args)
check_for_prngkeys(fun_name, *args)
return promote_shapes(fun_name, *promote_dtypes_numeric(*args))
def promote_args_inexact(fun_name: str, *args: ArrayLike) -> list[Array]:
"""Convenience function to apply Numpy argument shape and dtype promotion.
Promotes non-inexact types to an inexact type."""
check_arraylike(fun_name, *args)
_check_no_float0s(fun_name, *args)
check_for_prngkeys(fun_name, *args)
return promote_shapes(fun_name, *promote_dtypes_inexact(*args))
@partial(api.jit, inline=True)
def _broadcast_arrays(*args: ArrayLike) -> list[Array]:
"""Like Numpy's broadcast_arrays but doesn't return views."""
shapes = [np.shape(arg) for arg in args]
if not shapes or all(core.definitely_equal_shape(shapes[0], s) for s in shapes):
return [lax.asarray(arg) for arg in args]
result_shape = lax.broadcast_shapes(*shapes)
return [_broadcast_to(arg, result_shape) for arg in args]
def _broadcast_to(arr: ArrayLike, shape: DimSize | Shape) -> Array:
check_arraylike("broadcast_to", arr)
arr = arr if isinstance(arr, Array) else lax.asarray(arr)
if not isinstance(shape, tuple) and np.ndim(shape) == 0:
shape = (shape,)
# check that shape is concrete
shape = core.canonicalize_shape(shape) # type: ignore[arg-type]
arr_shape = np.shape(arr)
if core.definitely_equal_shape(arr_shape, shape):
return arr
elif len(shape) < len(arr_shape):
raise ValueError(f"Cannot broadcast to shape with fewer dimensions: {arr_shape=} {shape=}")
else:
nlead = len(shape) - len(arr_shape)
shape_tail = shape[nlead:]
compatible = all(core.definitely_equal_one_of_dim(arr_d, [1, shape_d])
for arr_d, shape_d in safe_zip(arr_shape, shape_tail))
if nlead < 0 or not compatible:
msg = "Incompatible shapes for broadcasting: {} and requested shape {}"
raise ValueError(msg.format(arr_shape, shape))
return lax.broadcast_in_dim(arr, shape, tuple(range(nlead, len(shape))))
# The `jit` on `where` exists to avoid materializing constants in cases like
# `np.where(np.zeros(1000), 7, 4)`. In op-by-op mode, we don't want to
# materialize the broadcast forms of scalar arguments.
@api.jit
def _where(condition: ArrayLike, x: ArrayLike, y: ArrayLike) -> Array:
if x is None or y is None:
raise ValueError("Either both or neither of the x and y arguments should "
"be provided to jax.numpy.where, got {} and {}."
.format(x, y))
if not np.issubdtype(_dtype(condition), np.bool_):
condition = lax.ne(condition, lax._zero(condition))
x, y = promote_dtypes(x, y)
if np.ndim(condition) == 0:
# lax.select() handles scalar conditions without broadcasting.
x_arr, y_arr = _broadcast_arrays(x, y)
else:
condition, x_arr, y_arr = _broadcast_arrays(condition, x, y)
try:
is_always_empty = core.is_empty_shape(x_arr.shape)
except:
is_always_empty = False # can fail with dynamic shapes
return lax.select(condition, x_arr, y_arr) if not is_always_empty else x_arr
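# --- Hedged usage sketch (illustration only; not part of the original module) ---
# The rank-promotion handling in promote_shapes above is controlled by the
# public `jax_numpy_rank_promotion` config option. Assuming JAX is installed,
# rejecting implicit rank promotion looks like this:
if __name__ == "__main__":
  import jax
  import jax.numpy as jnp

  jax.config.update("jax_numpy_rank_promotion", "raise")
  try:
    jnp.ones((3, 3)) + jnp.ones((3,))  # ranks 2 and 1 would be promoted
  except ValueError as err:
    print("rank promotion rejected:", err)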
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@_src@[email protected]@.PATH_END.py
|
{
"filename": "diffusion_aspherical.py",
"repo_name": "guillochon/MOSFiT",
"repo_path": "MOSFiT_extracted/MOSFiT-master/mosfit/modules/transforms/diffusion_aspherical.py",
"type": "Python"
}
|
"""Definitions for the `DiffusionAspherical` class."""
import numpy as np
from scipy.interpolate import interp1d
from mosfit.constants import C_CGS, DAY_CGS, FOUR_PI, KM_CGS, M_SUN_CGS
from mosfit.modules.transforms.transform import Transform
# Important: Only define one ``Module`` class per file.
class DiffusionAspherical(Transform):
"""
Photon diffusion transform.
    Uses viewing-angle and axis-ratio scalings from Darbha & Kasen (2020).
"""
N_INT_TIMES = 100
MIN_LOG_SPACING = -3
DIFF_CONST = 2.0 * M_SUN_CGS / (13.7 * C_CGS * KM_CGS)
TRAP_CONST = 3.0 * M_SUN_CGS / (FOUR_PI * KM_CGS ** 2)
_REFERENCES = [
{'bibcode': '1982ApJ...253..785A'},
{'bibcode': '2020ApJ...897..150D'}
]
def process(self, **kwargs):
"""Process module."""
Transform.process(self, **kwargs)
self._kappa = kwargs[self.key('kappa')]
self._kappa_gamma = kwargs[self.key('kappagamma')]
self._m_ejecta = kwargs[self.key('mejecta')]
self._v_ejecta = kwargs[self.key('vejecta')]
self._Aproj = kwargs[self.key('area_proj')]
self._Aref = kwargs[self.key('area_ref')]
self._tau_diff = np.sqrt(self.DIFF_CONST * self._kappa *
self._m_ejecta / self._v_ejecta) / DAY_CGS
self._trap_coeff = (
self.TRAP_CONST * self._kappa_gamma * self._m_ejecta /
(self._v_ejecta ** 2)) / DAY_CGS ** 2
td2, A = self._tau_diff ** 2, self._trap_coeff # noqa: F841
new_lums = np.zeros_like(self._times_to_process)
if len(self._dense_times_since_exp) < 2:
return {self.dense_key('luminosities'): new_lums}
min_te = min(self._dense_times_since_exp)
tb = max(0.0, min_te)
linterp = interp1d(
self._dense_times_since_exp, self._dense_luminosities, copy=False,
assume_sorted=True)
uniq_times = np.unique(self._times_to_process[
(self._times_to_process >= tb) & (
self._times_to_process <= self._dense_times_since_exp[-1])])
lu = len(uniq_times)
num = int(round(self.N_INT_TIMES / 2.0))
lsp = np.logspace(
np.log10(self._tau_diff /
self._dense_times_since_exp[-1]) +
self.MIN_LOG_SPACING, 0, num)
xm = np.unique(np.concatenate((lsp, 1 - lsp)))
int_times = np.clip(
tb + (uniq_times.reshape(lu, 1) - tb) * xm, tb,
self._dense_times_since_exp[-1])
int_te2s = int_times[:, -1] ** 2
        int_lums = linterp(int_times)
int_args = int_lums * int_times * np.exp(
(int_times ** 2 - int_te2s.reshape(lu, 1)) / td2)
int_args[np.isnan(int_args)] = 0.0
uniq_lums = np.trapz(int_args, int_times)
uniq_lums *= -2.0 * np.expm1(-A / int_te2s) / td2
uniq_lums *= (1 + 1.4 * (2 + uniq_times/self._tau_diff/0.59) / (1 +
np.exp(uniq_times/self._tau_diff/0.59)) *
(self._Aproj/self._Aref - 1))
new_lums = uniq_lums[np.searchsorted(uniq_times,
self._times_to_process)]
return {self.key('tau_diffusion'): self._tau_diff,
self.dense_key('luminosities'): new_lums}
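# A minimal sketch (hedged) of the Arnett (1982) diffusion integral that the
# loop above evaluates,
#     L_obs(t) = (2 / t_d^2) * [1 - exp(-A / t^2)]
#                * int_0^t L_in(t') t' exp((t'^2 - t^2) / t_d^2) dt',
# before the Darbha & Kasen (2020) viewing-angle correction is applied. The
# numbers below are illustrative toy values, not MOSFiT defaults, and the
# gamma-ray trapping factor is omitted for brevity.
if __name__ == '__main__':
    t = np.linspace(0.1, 30.0, 300)          # days (toy grid)
    L_in = 1e43 * np.exp(-t / 8.0)           # toy input luminosity [erg / s]
    t_d = 10.0                               # toy diffusion timescale [days]
    integrand = L_in * t * np.exp((t ** 2 - t[-1] ** 2) / t_d ** 2)
    L_obs_end = 2.0 * np.trapz(integrand, t) / t_d ** 2
    print('L_obs({:.1f} d) ~ {:.3e} erg/s'.format(t[-1], L_obs_end))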
|
guillochonREPO_NAMEMOSFiTPATH_START.@MOSFiT_extracted@MOSFiT-master@mosfit@modules@transforms@[email protected]_END.py
|
{
"filename": "test_reference_metric.py",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/tests/test_reference_metric.py",
"type": "Python"
}
|
from UnitTesting.create_test import create_test
def test_Spherical():
module = 'reference_metric'
module_name = 'rfm_Spherical'
function_and_global_dict = {'reference_metric(True)': ['xxmin', 'xxmax', 'UnitVectors', 'ReU', 'ReDD', 'ghatDD', 'ghatUU', 'detgammahat',
'detgammahatdD', 'detgammahatdDD', 'ReUdD', 'ReUdDD', 'ReDDdD', 'ReDDdDD', 'ghatDDdD',
'ghatDDdDD', 'GammahatUDD', 'GammahatUDDdD', 'Cart_to_xx','xx_to_Cart','xxSph','scalefactor_orthog']}
initialization_string_dict = {'reference_metric(True)': '''
import NRPy_param_funcs as par
par.set_parval_from_str("reference_metric::CoordSystem", "Spherical")
'''}
create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict)
def test_SinhSpherical():
module = 'reference_metric'
module_name = 'rfm_SinhSpherical'
function_and_global_dict = {'reference_metric(True)': ['xxmin', 'xxmax', 'UnitVectors', 'ReU', 'ReDD', 'ghatDD', 'ghatUU', 'detgammahat',
'detgammahatdD', 'detgammahatdDD', 'ReUdD', 'ReUdDD', 'ReDDdD', 'ReDDdDD', 'ghatDDdD',
'ghatDDdDD', 'GammahatUDD', 'GammahatUDDdD', 'Cart_to_xx','xx_to_Cart','xxSph','scalefactor_orthog']}
initialization_string_dict = {'reference_metric(True)': '''
import NRPy_param_funcs as par
par.set_parval_from_str("reference_metric::CoordSystem", "SinhSpherical")
'''}
create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict)
def test_SinhSphericalv2():
module = 'reference_metric'
module_name = 'rfm_SinhSphericalv2'
function_and_global_dict = {'reference_metric(True)': ['xxmin', 'xxmax', 'UnitVectors', 'ReU', 'ReDD', 'ghatDD', 'ghatUU', 'detgammahat',
'detgammahatdD', 'detgammahatdDD', 'ReUdD', 'ReUdDD', 'ReDDdD', 'ReDDdDD', 'ghatDDdD',
'ghatDDdDD', 'GammahatUDD', 'GammahatUDDdD', 'Cart_to_xx','xx_to_Cart','xxSph','scalefactor_orthog']}
initialization_string_dict = {'reference_metric(True)': '''
import NRPy_param_funcs as par
par.set_parval_from_str("reference_metric::CoordSystem", "SinhSphericalv2")
'''}
create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict)
def test_NobleSphericalThetaOptionOne():
module = 'reference_metric'
module_name = 'rfm_NobleSphericalThetaOptionOne'
function_and_global_dict = {'reference_metric(False)': ['UnitVectors', 'ReU', 'ReDD', 'ghatDD', 'ghatUU', 'detgammahat',
'detgammahatdD', 'detgammahatdDD', 'ReUdD', 'ReUdDD', 'ReDDdD', 'ReDDdDD', 'ghatDDdD',
'ghatDDdDD', 'GammahatUDD', 'GammahatUDDdD', 'Cart_to_xx','xx_to_Cart','xxSph','scalefactor_orthog']}
initialization_string_dict = {'reference_metric(False)': '''
import NRPy_param_funcs as par
par.set_parval_from_str("reference_metric::CoordSystem", "NobleSphericalThetaOptionOne")
'''}
create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict)
def test_NobleSphericalThetaOptionTwo():
module = 'reference_metric'
module_name = 'rfm_NobleSphericalThetaOptionTwo'
function_and_global_dict = {'reference_metric(False)': ['UnitVectors', 'ReU', 'ReDD', 'ghatDD', 'ghatUU', 'detgammahat',
'detgammahatdD', 'detgammahatdDD', 'ReUdD', 'ReUdDD', 'ReDDdD', 'ReDDdDD', 'ghatDDdD',
'ghatDDdDD', 'GammahatUDD', 'GammahatUDDdD', 'Cart_to_xx','xx_to_Cart','xxSph','scalefactor_orthog']}
initialization_string_dict = {'reference_metric(False)': '''
import NRPy_param_funcs as par
par.set_parval_from_str("reference_metric::CoordSystem", "NobleSphericalThetaOptionTwo")
'''}
create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict)
def test_Cylindrical():
module = 'reference_metric'
module_name = 'rfm_Cylindrical'
function_and_global_dict = {'reference_metric(True)': ['xxmin', 'xxmax', 'UnitVectors', 'ReU', 'ReDD', 'ghatDD', 'ghatUU', 'detgammahat',
'detgammahatdD', 'detgammahatdDD', 'ReUdD', 'ReUdDD', 'ReDDdD', 'ReDDdDD', 'ghatDDdD',
'ghatDDdDD', 'GammahatUDD', 'GammahatUDDdD', 'Cart_to_xx','xx_to_Cart','xxSph','scalefactor_orthog']}
initialization_string_dict = {'reference_metric(True)': '''
import NRPy_param_funcs as par
par.set_parval_from_str("reference_metric::CoordSystem", "Cylindrical")
'''}
create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict)
def test_SinhCylindrical():
module = 'reference_metric'
module_name = 'rfm_SinhCylindrical'
function_and_global_dict = {'reference_metric(True)': ['xxmin', 'xxmax', 'UnitVectors', 'ReU', 'ReDD', 'ghatDD', 'ghatUU', 'detgammahat',
'detgammahatdD', 'detgammahatdDD', 'ReUdD', 'ReUdDD', 'ReDDdD', 'ReDDdDD', 'ghatDDdD',
'ghatDDdDD', 'GammahatUDD', 'GammahatUDDdD', 'Cart_to_xx','xx_to_Cart','xxSph','scalefactor_orthog']}
initialization_string_dict = {'reference_metric(True)': '''
import NRPy_param_funcs as par
par.set_parval_from_str("reference_metric::CoordSystem", "SinhCylindrical")
'''}
create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict)
def test_SinhCylindricalv2():
module = 'reference_metric'
module_name = 'rfm_SinhCylindricalv2'
function_and_global_dict = {'reference_metric(True)': ['xxmin', 'xxmax', 'UnitVectors', 'ReU', 'ReDD', 'ghatDD', 'ghatUU', 'detgammahat',
'detgammahatdD', 'detgammahatdDD', 'ReUdD', 'ReUdDD', 'ReDDdD', 'ReDDdDD', 'ghatDDdD',
'ghatDDdDD', 'GammahatUDD', 'GammahatUDDdD', 'Cart_to_xx','xx_to_Cart','xxSph','scalefactor_orthog']}
initialization_string_dict = {'reference_metric(True)': '''
import NRPy_param_funcs as par
par.set_parval_from_str("reference_metric::CoordSystem", "SinhCylindricalv2")
'''}
create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict)
def test_SymTP():
module = 'reference_metric'
module_name = 'rfm_SymTP'
function_and_global_dict = {'reference_metric(True)': ['xxmin', 'xxmax', 'UnitVectors', 'ReU', 'ReDD', 'ghatDD', 'ghatUU', 'detgammahat',
'detgammahatdD', 'detgammahatdDD', 'ReUdD', 'ReUdDD', 'ReDDdD', 'ReDDdDD', 'ghatDDdD',
'ghatDDdDD', 'GammahatUDD', 'GammahatUDDdD', 'Cart_to_xx','xx_to_Cart','xxSph','scalefactor_orthog']}
initialization_string_dict = {'reference_metric(True)': '''
import NRPy_param_funcs as par
par.set_parval_from_str("reference_metric::CoordSystem", "SymTP")
'''}
create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict)
def test_SinhSymTP():
module = 'reference_metric'
module_name = 'rfm_SinhSymTP'
function_and_global_dict = {'reference_metric(True)': ['xxmin', 'xxmax', 'UnitVectors', 'ReU', 'ReDD', 'ghatDD', 'ghatUU', 'detgammahat',
'detgammahatdD', 'detgammahatdDD', 'ReUdD', 'ReUdDD', 'ReDDdD', 'ReDDdDD', 'ghatDDdD',
'ghatDDdDD', 'GammahatUDD', 'GammahatUDDdD', 'Cart_to_xx','xx_to_Cart','xxSph','scalefactor_orthog']}
initialization_string_dict = {'reference_metric(True)': '''
import NRPy_param_funcs as par
par.set_parval_from_str("reference_metric::CoordSystem", "SinhSymTP")
'''}
create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict)
def test_Cartesian():
module = 'reference_metric'
module_name = 'rfm_Cartesian'
function_and_global_dict = {'reference_metric(True)': ['UnitVectors', 'ReU', 'ReDD', 'ghatDD', 'ghatUU', 'detgammahat',
'detgammahatdD', 'detgammahatdDD', 'ReUdD', 'ReUdDD', 'ReDDdD', 'ReDDdDD', 'ghatDDdD',
'ghatDDdDD', 'GammahatUDD', 'GammahatUDDdD', 'Cart_to_xx','xx_to_Cart','xxSph','scalefactor_orthog']}
initialization_string_dict = {'reference_metric(True)': '''
import NRPy_param_funcs as par
par.set_parval_from_str("reference_metric::CoordSystem", "Cartesian")
'''}
create_test(module, module_name, function_and_global_dict, initialization_string_dict=initialization_string_dict)
if __name__ == '__main__':
import sys
if len(sys.argv) <= 3:
failed_functions = []
for fun in dir():
if fun[0:5] == 'test_':
print('\nTesting ' + str(fun) + '...\n')
try:
exec(fun + '()')
except SystemExit:
failed_functions.append(fun)
if failed_functions != []:
            import os
with open(os.path.join('UnitTesting', 'failed_tests.txt'), 'a') as file:
for function in failed_functions:
file.write(sys.argv[0] + ': ' + str(function) + '\n')
sys.exit(1)
else:
globals()[sys.argv[4]]()
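# Note (hedged reading of the block above): when the UnitTesting framework
# calls this file with enough command-line arguments, only the single test
# named in sys.argv[4] is executed; otherwise every test_* function defined
# above runs in turn and any failures are appended to
# UnitTesting/failed_tests.txt before exiting with a nonzero status.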
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@tests@[email protected]_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/table/cells/font/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="color", parent_name="table.cells.font", **kwargs):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@table@cells@font@[email protected]_END.py
|
{
"filename": "fastframe.py",
"repo_name": "desihub/desisim",
"repo_path": "desisim_extracted/desisim-main/py/desisim/scripts/fastframe.py",
"type": "Python"
}
|
from __future__ import absolute_import, division, print_function
import sys, os
import numpy as np
from astropy.table import Table
import astropy.units as u
import specsim.simulator
from desispec.frame import Frame
import desispec.io
from desispec.resolution import Resolution
import desisim.io
import desisim.simexp
from desisim.util import dateobs2night
import desisim.specsim
#-------------------------------------------------------------------------
def parse(options=None):
import argparse
    parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--simspec", type=str, help="input simspec file")
parser.add_argument("--outdir", type=str, help="output directory")
parser.add_argument("--firstspec", type=int, default=0,
help="first spectrum to simulate")
parser.add_argument("--nspec", type=int, default=5000,
help="number of spectra to simulate")
parser.add_argument("--cframe", action="store_true",
help="directly write cframe")
parser.add_argument("--dwave", type=float, default=0.8, help="output wavelength step, in Angstrom")
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(args=None):
'''
Converts simspec -> frame files; see fastframe --help for usage options
'''
#- TODO: use desiutil.log
if isinstance(args, (list, tuple, type(None))):
args = parse(args)
print('Reading files')
simspec = desisim.io.read_simspec(args.simspec, readphot=False)
if simspec.flavor == 'arc':
print('arc exposure; no frames to output')
return
fibermap = simspec.fibermap
obs = simspec.obsconditions
night = simspec.header['NIGHT']
expid = simspec.header['EXPID']
firstspec = args.firstspec
nspec = min(args.nspec, len(fibermap)-firstspec)
print('Simulating spectra {}-{}'.format(firstspec, firstspec+nspec))
wave = simspec.wave
flux = simspec.flux
ii = slice(firstspec, firstspec+nspec)
if simspec.flavor == 'science':
sim = desisim.simexp.simulate_spectra(wave, flux[ii],
fibermap=fibermap[ii], obsconditions=obs, dwave_out=args.dwave,
psfconvolve=True)
elif simspec.flavor in ['arc', 'flat', 'calib']:
x = fibermap['FIBERASSIGN_X']
y = fibermap['FIBERASSIGN_Y']
fiber_area = desisim.simexp.fiber_area_arcsec2(x, y)
surface_brightness = (flux.T / fiber_area).T
config = desisim.simexp._specsim_config_for_wave(wave, dwave_out=args.dwave)
# sim = specsim.simulator.Simulator(config, num_fibers=nspec)
sim = desisim.specsim.get_simulator(config, num_fibers=nspec,
camera_output=True)
sim.observation.exposure_time = simspec.header['EXPTIME'] * u.s
sbunit = 1e-17 * u.erg / (u.Angstrom * u.s * u.cm ** 2 * u.arcsec ** 2)
xy = np.vstack([x, y]).T * u.mm
sim.simulate(calibration_surface_brightness=surface_brightness[ii]*sbunit,
focal_positions=xy[ii])
else:
raise ValueError('Unknown simspec flavor {}'.format(simspec.flavor))
sim.generate_random_noise()
    for i, results in enumerate(sim.camera_output):
wave = results['wavelength']
scale=1e17
if args.cframe :
phot = scale*(results['observed_flux'] + results['random_noise_electrons']*results['flux_calibration']).T
ivar = 1./scale**2*results['flux_inverse_variance'].T
else :
phot = (results['num_source_electrons'] + \
results['num_sky_electrons'] + \
results['num_dark_electrons'] + \
results['random_noise_electrons']).T
ivar = 1.0 / results['variance_electrons'].T
R = Resolution(sim.instrument.cameras[i].get_output_resolution_matrix())
Rdata = np.tile(R.data.T, nspec).T.reshape(
nspec, R.data.shape[0], R.data.shape[1])
assert np.all(Rdata[0] == R.data)
assert phot.shape == (nspec, len(wave))
for spectro in range(10):
imin = max(firstspec, spectro*500) - firstspec
imax = min(firstspec+nspec, (spectro+1)*500) - firstspec
if imax <= imin:
continue
xphot = phot[imin:imax]
xivar = ivar[imin:imax]
xfibermap = fibermap[ii][imin:imax]
camera = '{}{}'.format(sim.camera_names[i], spectro)
meta = simspec.header.copy()
meta['CAMERA'] = camera
if args.cframe :
units = '1e-17 erg/(s cm2 Angstrom)'
else :
#
# We want to save electrons per angstrom and not electrons per bin
# to be consistent with the extraction code (specter.extract.ex2d).
# And to be FITS-compliant, we call electrons "counts".
#
units = 'count/Angstrom'
dwave=np.gradient(wave)
xphot /= dwave
xivar *= dwave**2
meta['BUNIT']=units
meta['DETECTOR'] = 'SIM'
if camera[0] == 'b':
meta['CCDSIZE'] = '4162,4232'
else:
meta['CCDSIZE'] = '4194,4256'
readnoise = sim.instrument.cameras[i].read_noise.value
meta['OBSRDNA'] = readnoise
meta['OBSRDNB'] = readnoise
meta['OBSRDNC'] = readnoise
meta['OBSRDND'] = readnoise
frame = Frame(wave, xphot, xivar, resolution_data=Rdata[0:imax-imin],
spectrograph=spectro, fibermap=xfibermap, meta=meta)
if args.cframe :
outfile = desispec.io.findfile('cframe', night, expid, camera,
outdir=args.outdir)
else :
outfile = desispec.io.findfile('frame', night, expid, camera,
outdir=args.outdir)
print('writing {}'.format(outfile))
desispec.io.write_frame(outfile, frame, units=units)
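# A minimal usage sketch (hedged): main() accepts either None (parse sys.argv)
# or an explicit argument list. The file names below are purely illustrative
# and would need to point at a real simspec file produced by desisim.
if __name__ == '__main__':
    main(['--simspec', 'simspec-00000000.fits',
          '--outdir', './frames',
          '--nspec', '500'])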
|
desihubREPO_NAMEdesisimPATH_START.@desisim_extracted@desisim-main@py@desisim@[email protected]@.PATH_END.py
|
{
"filename": "test_scripts_installed_correctly.py",
"repo_name": "lucabaldini/ixpeobssim",
"repo_path": "ixpeobssim_extracted/ixpeobssim-main/tests/test_scripts_installed_correctly.py",
"type": "Python"
}
|
import glob
import os
import subprocess as sp
def test_scripts_installed():
cwd = os.path.dirname(__file__)
scripts = glob.glob(os.path.join(cwd, '..', 'bin', '*.py'))
for script in scripts:
if '__init__.py' in script:
continue
name = os.path.basename(script).replace('.py', '')
print('Testing', name)
sp.check_call('{} -h'.format(name).split(' '))
|
lucabaldiniREPO_NAMEixpeobssimPATH_START.@ixpeobssim_extracted@ixpeobssim-main@tests@[email protected]_END.py
|
{
"filename": "_cheatertype.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/carpet/aaxis/_cheatertype.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CheatertypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="cheatertype", parent_name="carpet.aaxis", **kwargs):
super(CheatertypeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["index", "value"]),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@carpet@aaxis@[email protected]_END.py
|
{
"filename": "data_structures.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/frontends/art/data_structures.py",
"type": "Python"
}
|
import glob
import os
import struct
import weakref
import numpy as np
import yt.utilities.fortran_utils as fpu
from yt.data_objects.index_subobjects.octree_subset import OctreeSubset
from yt.data_objects.static_output import Dataset, ParticleFile
from yt.data_objects.unions import ParticleUnion
from yt.frontends.art.definitions import (
amr_header_struct,
constants,
dmparticle_header_struct,
filename_pattern,
fluid_fields,
particle_fields,
particle_header_struct,
seek_extras,
)
from yt.frontends.art.fields import ARTFieldInfo
from yt.frontends.art.io import (
_read_art_level_info,
_read_child_level,
_read_root_level,
a2b,
b2t,
)
from yt.funcs import mylog, setdefaultattr
from yt.geometry.geometry_handler import Index, YTDataChunk
from yt.geometry.oct_container import ARTOctreeContainer
from yt.geometry.oct_geometry_handler import OctreeIndex
from yt.geometry.particle_geometry_handler import ParticleIndex
class ARTIndex(OctreeIndex):
def __init__(self, ds, dataset_type="art"):
self.fluid_field_list = fluid_fields
self.dataset_type = dataset_type
self.dataset = weakref.proxy(ds)
self.index_filename = self.dataset.parameter_filename
self.directory = os.path.dirname(self.index_filename)
self.max_level = ds.max_level
self.float_type = np.float64
super().__init__(ds, dataset_type)
def get_smallest_dx(self):
"""
Returns (in code units) the smallest cell size in the simulation.
"""
# Overloaded
ds = self.dataset
return (ds.domain_width / ds.domain_dimensions / (2**self.max_level)).min()
def _initialize_oct_handler(self):
"""
Just count the number of octs per domain and
allocate the requisite memory in the oct tree
"""
nv = len(self.fluid_field_list)
self.oct_handler = ARTOctreeContainer(
self.dataset.domain_dimensions / 2, # dd is # of root cells
self.dataset.domain_left_edge,
self.dataset.domain_right_edge,
1,
)
# The 1 here refers to domain_id == 1 always for ARTIO.
self.domains = [ARTDomainFile(self.dataset, nv, self.oct_handler, 1)]
self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]
self.total_octs = sum(self.octs_per_domain)
mylog.debug("Allocating %s octs", self.total_octs)
self.oct_handler.allocate_domains(self.octs_per_domain)
domain = self.domains[0]
domain._read_amr_root(self.oct_handler)
domain._read_amr_level(self.oct_handler)
self.oct_handler.finalize()
def _detect_output_fields(self):
self.particle_field_list = list(particle_fields)
self.field_list = [("art", f) for f in fluid_fields]
# now generate all of the possible particle fields
for ptype in self.dataset.particle_types_raw:
for pfield in self.particle_field_list:
pfn = (ptype, pfield)
self.field_list.append(pfn)
def _identify_base_chunk(self, dobj):
"""
Take the passed in data source dobj, and use its embedded selector
to calculate the domain mask, build the reduced domain
subsets and oct counts. Attach this information to dobj.
"""
if getattr(dobj, "_chunk_info", None) is None:
# Get all octs within this oct handler
domains = [dom for dom in self.domains if dom.included(dobj.selector)]
base_region = getattr(dobj, "base_region", dobj)
if len(domains) > 1:
mylog.debug("Identified %s intersecting domains", len(domains))
subsets = [
ARTDomainSubset(base_region, domain, self.dataset) for domain in domains
]
dobj._chunk_info = subsets
dobj._current_chunk = list(self._chunk_all(dobj))[0]
def _chunk_all(self, dobj):
oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
# We pass the chunk both the current chunk and list of chunks,
# as well as the referring data source
yield YTDataChunk(dobj, "all", oobjs, None)
def _chunk_spatial(self, dobj, ngz, sort=None, preload_fields=None):
sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
for og in sobjs:
if ngz > 0:
g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
else:
g = og
yield YTDataChunk(dobj, "spatial", [g], None)
def _chunk_io(self, dobj, cache=True, local_only=False):
"""
Since subsets are calculated per domain,
i.e. per file, yield each domain at a time to
organize by IO. We will eventually chunk out NMSU ART
to be level-by-level.
"""
oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
for subset in oobjs:
yield YTDataChunk(dobj, "io", [subset], None, cache=cache)
class ARTDataset(Dataset):
_index_class: type[Index] = ARTIndex
_field_info_class = ARTFieldInfo
def __init__(
self,
filename,
dataset_type="art",
fields=None,
storage_filename=None,
skip_particles=False,
skip_stars=False,
limit_level=None,
spread_age=True,
force_max_level=None,
file_particle_header=None,
file_particle_data=None,
file_particle_stars=None,
units_override=None,
unit_system="cgs",
default_species_fields=None,
):
self.fluid_types += ("art",)
if fields is None:
fields = fluid_fields
filename = os.path.abspath(filename)
self._fields_in_file = fields
self._file_amr = filename
self._file_particle_header = file_particle_header
self._file_particle_data = file_particle_data
self._file_particle_stars = file_particle_stars
self._find_files(filename)
self.skip_particles = skip_particles
self.skip_stars = skip_stars
self.limit_level = limit_level
self.max_level = limit_level
self.force_max_level = force_max_level
self.spread_age = spread_age
Dataset.__init__(
self,
filename,
dataset_type,
units_override=units_override,
unit_system=unit_system,
default_species_fields=default_species_fields,
)
self.storage_filename = storage_filename
def _find_files(self, file_amr):
"""
Given the AMR base filename, attempt to find the
particle header, star files, etc.
"""
base_prefix, base_suffix = filename_pattern["amr"]
numericstr = file_amr.rsplit("_", 1)[1].replace(base_suffix, "")
possibles = glob.glob(
os.path.join(os.path.dirname(os.path.abspath(file_amr)), "*")
)
for filetype, (prefix, suffix) in filename_pattern.items():
# if this attribute is already set skip it
if getattr(self, "_file_" + filetype, None) is not None:
continue
match = None
for possible in possibles:
if possible.endswith(numericstr + suffix):
if os.path.basename(possible).startswith(prefix):
match = possible
if match is not None:
mylog.info("discovered %s:%s", filetype, match)
setattr(self, "_file_" + filetype, match)
else:
setattr(self, "_file_" + filetype, None)
def __str__(self):
return self._file_amr.split("/")[-1]
def _set_code_unit_attributes(self):
"""
Generates the conversion to various physical units based
on the parameters from the header
"""
# spatial units
z = self.current_redshift
h = self.hubble_constant
boxcm_cal = self.parameters["boxh"]
boxcm_uncal = boxcm_cal / h
box_proper = boxcm_uncal / (1 + z)
aexpn = self.parameters["aexpn"]
# all other units
Om0 = self.parameters["Om0"]
ng = self.parameters["ng"]
boxh = self.parameters["boxh"]
aexpn = self.parameters["aexpn"]
hubble = self.parameters["hubble"]
r0 = boxh / ng
v0 = 50.0 * r0 * np.sqrt(Om0)
rho0 = 2.776e11 * hubble**2.0 * Om0
aM0 = rho0 * (boxh / hubble) ** 3.0 / ng**3.0
velocity = v0 / aexpn * 1.0e5 # proper cm/s
mass = aM0 * 1.98892e33
self.cosmological_simulation = True
setdefaultattr(self, "mass_unit", self.quan(mass, f"g*{ng ** 3}"))
setdefaultattr(self, "length_unit", self.quan(box_proper, "Mpc"))
setdefaultattr(self, "velocity_unit", self.quan(velocity, "cm/s"))
setdefaultattr(self, "time_unit", self.length_unit / self.velocity_unit)
def _parse_parameter_file(self):
"""
Get the various simulation parameters & constants.
"""
self.domain_left_edge = np.zeros(3, dtype="float")
self.domain_right_edge = np.zeros(3, dtype="float") + 1.0
self.dimensionality = 3
self.refine_by = 2
self._periodicity = (True, True, True)
self.cosmological_simulation = True
self.parameters = {}
self.parameters.update(constants)
self.parameters["Time"] = 1.0
# read the amr header
with open(self._file_amr, "rb") as f:
amr_header_vals = fpu.read_attrs(f, amr_header_struct, ">")
n_to_skip = len(("tl", "dtl", "tlold", "dtlold", "iSO"))
fpu.skip(f, n_to_skip, endian=">")
(self.ncell) = fpu.read_vector(f, "i", ">")[0]
# Try to figure out the root grid dimensions
est = int(np.rint(self.ncell ** (1.0 / 3.0)))
# Note here: this is the number of *cells* on the root grid.
# This is not the same as the number of Octs.
# domain dimensions is the number of root *cells*
self.domain_dimensions = np.ones(3, dtype="int64") * est
self.root_grid_mask_offset = f.tell()
self.root_nocts = self.domain_dimensions.prod() // 8
self.root_ncells = self.root_nocts * 8
mylog.debug(
"Estimating %i cells on a root grid side, %i root octs",
est,
self.root_nocts,
)
self.root_iOctCh = fpu.read_vector(f, "i", ">")[: self.root_ncells]
self.root_iOctCh = self.root_iOctCh.reshape(
self.domain_dimensions, order="F"
)
self.root_grid_offset = f.tell()
self.root_nhvar = fpu.skip(f, endian=">")
self.root_nvar = fpu.skip(f, endian=">")
# make sure that the number of root variables is a multiple of
# rootcells
assert self.root_nhvar % self.root_ncells == 0
assert self.root_nvar % self.root_ncells == 0
self.nhydro_variables = (
self.root_nhvar + self.root_nvar
) / self.root_ncells
self.iOctFree, self.nOct = fpu.read_vector(f, "i", ">")
self.child_grid_offset = f.tell()
# lextra needs to be loaded as a string, but it's actually
# array values. So pop it off here, and then re-insert.
lextra = amr_header_vals.pop("lextra")
amr_header_vals["lextra"] = np.frombuffer(lextra, ">f4")
self.parameters.update(amr_header_vals)
amr_header_vals = None
# estimate the root level
float_center, fl, iocts, nocts, root_level = _read_art_level_info(
f, [0, self.child_grid_offset], 1, coarse_grid=self.domain_dimensions[0]
)
del float_center, fl, iocts, nocts
self.root_level = root_level
mylog.info("Using root level of %02i", self.root_level)
# read the particle header
self.particle_types = []
self.particle_types_raw = ()
if not self.skip_particles and self._file_particle_header:
with open(self._file_particle_header, "rb") as fh:
particle_header_vals = fpu.read_attrs(fh, particle_header_struct, ">")
fh.seek(seek_extras)
n = particle_header_vals["Nspecies"]
wspecies = np.fromfile(fh, dtype=">f", count=10)
lspecies = np.fromfile(fh, dtype=">i", count=10)
# extras needs to be loaded as a string, but it's actually
# array values. So pop it off here, and then re-insert.
extras = particle_header_vals.pop("extras")
particle_header_vals["extras"] = np.frombuffer(extras, ">f4")
self.parameters["wspecies"] = wspecies[:n]
self.parameters["lspecies"] = lspecies[:n]
for specie in range(n):
self.particle_types.append("specie%i" % specie)
self.particle_types_raw = tuple(self.particle_types)
ls_nonzero = np.diff(lspecies)[: n - 1]
ls_nonzero = np.append(lspecies[0], ls_nonzero)
self.star_type = len(ls_nonzero)
mylog.info("Discovered %i species of particles", len(ls_nonzero))
info_str = "Particle populations: " + "%9i " * len(ls_nonzero)
mylog.info(info_str, *ls_nonzero)
self._particle_type_counts = dict(
zip(self.particle_types_raw, ls_nonzero, strict=True)
)
for k, v in particle_header_vals.items():
if k in self.parameters.keys():
if not self.parameters[k] == v:
mylog.info(
"Inconsistent parameter %s %1.1e %1.1e",
k,
v,
self.parameters[k],
)
else:
self.parameters[k] = v
self.parameters_particles = particle_header_vals
self.parameters.update(particle_header_vals)
self.parameters["ng"] = self.parameters["Ngridc"]
self.parameters["ncell0"] = self.parameters["ng"] ** 3
# setup standard simulation params yt expects to see
self.current_redshift = self.parameters["aexpn"] ** -1.0 - 1.0
self.omega_lambda = self.parameters["Oml0"]
self.omega_matter = self.parameters["Om0"]
self.hubble_constant = self.parameters["hubble"]
self.min_level = self.parameters["min_level"]
self.max_level = self.parameters["max_level"]
if self.limit_level is not None:
self.max_level = min(self.limit_level, self.parameters["max_level"])
if self.force_max_level is not None:
self.max_level = self.force_max_level
self.hubble_time = 1.0 / (self.hubble_constant * 100 / 3.08568025e19)
self.current_time = self.quan(b2t(self.parameters["t"]), "Gyr")
self.gamma = self.parameters["gamma"]
mylog.info("Max level is %02i", self.max_level)
def create_field_info(self):
super().create_field_info()
if "wspecies" in self.parameters:
# We create dark_matter and stars unions.
ptr = self.particle_types_raw
pu = ParticleUnion("darkmatter", list(ptr[:-1]))
self.add_particle_union(pu)
pu = ParticleUnion("stars", list(ptr[-1:]))
self.add_particle_union(pu)
@classmethod
def _is_valid(cls, filename: str, *args, **kwargs) -> bool:
"""
Defined for the NMSU file naming scheme.
This could differ for other formats.
"""
f = str(filename)
prefix, suffix = filename_pattern["amr"]
if not os.path.isfile(f):
return False
if not f.endswith(suffix):
return False
with open(f, "rb") as fh:
try:
fpu.read_attrs(fh, amr_header_struct, ">")
return True
except Exception:
return False
class ARTParticleFile(ParticleFile):
def __init__(self, ds, io, filename, file_id):
super().__init__(ds, io, filename, file_id, range=None)
self.total_particles = {}
for ptype, count in zip(
ds.particle_types_raw,
ds.parameters["total_particles"],
strict=True,
):
self.total_particles[ptype] = count
with open(filename, "rb") as f:
f.seek(0, os.SEEK_END)
self._file_size = f.tell()
class ARTParticleIndex(ParticleIndex):
def _setup_filenames(self):
# no need for template, all data in one file
template = self.dataset.filename_template
ndoms = self.dataset.file_count
cls = self.dataset._file_class
self.data_files = []
fi = 0
for i in range(int(ndoms)):
df = cls(self.dataset, self.io, template % {"num": i}, fi)
fi += 1
self.data_files.append(df)
class DarkMatterARTDataset(ARTDataset):
_index_class = ARTParticleIndex
_file_class = ARTParticleFile
filter_bbox = False
def __init__(
self,
filename,
dataset_type="dm_art",
fields=None,
storage_filename=None,
skip_particles=False,
skip_stars=False,
limit_level=None,
spread_age=True,
force_max_level=None,
file_particle_header=None,
file_particle_stars=None,
units_override=None,
unit_system="cgs",
):
self.num_zones = 2
self.n_ref = 64
self.particle_types += ("all",)
if fields is None:
fields = particle_fields
filename = os.path.abspath(filename)
self._fields_in_file = fields
self._file_particle = filename
self._file_particle_header = file_particle_header
self._find_files(filename)
self.skip_stars = skip_stars
self.spread_age = spread_age
Dataset.__init__(
self,
filename,
dataset_type,
units_override=units_override,
unit_system=unit_system,
)
self.storage_filename = storage_filename
def _find_files(self, file_particle):
"""
Given the particle base filename, attempt to find the
particle header and star files.
"""
base_prefix, base_suffix = filename_pattern["particle_data"]
aexpstr = file_particle.rsplit("s0", 1)[1].replace(base_suffix, "")
possibles = glob.glob(
os.path.join(os.path.dirname(os.path.abspath(file_particle)), "*")
)
for filetype, (prefix, suffix) in filename_pattern.items():
# if this attribute is already set skip it
if getattr(self, "_file_" + filetype, None) is not None:
continue
match = None
for possible in possibles:
if possible.endswith(aexpstr + suffix):
if os.path.basename(possible).startswith(prefix):
match = possible
if match is not None:
mylog.info("discovered %s:%s", filetype, match)
setattr(self, "_file_" + filetype, match)
else:
setattr(self, "_file_" + filetype, None)
def __str__(self):
return self._file_particle.split("/")[-1]
def _set_code_unit_attributes(self):
"""
Generates the conversion to various physical units based
on the parameters from the header
"""
# spatial units
z = self.current_redshift
h = self.hubble_constant
boxcm_cal = self.parameters["boxh"]
boxcm_uncal = boxcm_cal / h
box_proper = boxcm_uncal / (1 + z)
aexpn = self.parameters["aexpn"]
# all other units
Om0 = self.parameters["Om0"]
ng = self.parameters["ng"]
boxh = self.parameters["boxh"]
aexpn = self.parameters["aexpn"]
hubble = self.parameters["hubble"]
r0 = boxh / ng
rho0 = 2.776e11 * hubble**2.0 * Om0
aM0 = rho0 * (boxh / hubble) ** 3.0 / ng**3.0
velocity = 100.0 * r0 / aexpn * 1.0e5 # proper cm/s
mass = aM0 * 1.98892e33
self.cosmological_simulation = True
self.mass_unit = self.quan(mass, f"g*{ng ** 3}")
self.length_unit = self.quan(box_proper, "Mpc")
self.velocity_unit = self.quan(velocity, "cm/s")
self.time_unit = self.length_unit / self.velocity_unit
def _parse_parameter_file(self):
"""
Get the various simulation parameters & constants.
"""
self.domain_left_edge = np.zeros(3, dtype="float")
self.domain_right_edge = np.zeros(3, dtype="float") + 1.0
self.dimensionality = 3
self.refine_by = 2
self._periodicity = (True, True, True)
self.cosmological_simulation = True
self.parameters = {}
self.parameters.update(constants)
self.parameters["Time"] = 1.0
self.file_count = 1
self.filename_template = self.parameter_filename
# read the particle header
self.particle_types = []
self.particle_types_raw = ()
assert self._file_particle_header
with open(self._file_particle_header, "rb") as fh:
seek = 4
fh.seek(seek)
headerstr = fh.read(45).decode("ascii")
aexpn = np.fromfile(fh, count=1, dtype=">f4")
aexp0 = np.fromfile(fh, count=1, dtype=">f4")
amplt = np.fromfile(fh, count=1, dtype=">f4")
astep = np.fromfile(fh, count=1, dtype=">f4")
istep = np.fromfile(fh, count=1, dtype=">i4")
partw = np.fromfile(fh, count=1, dtype=">f4")
tintg = np.fromfile(fh, count=1, dtype=">f4")
ekin = np.fromfile(fh, count=1, dtype=">f4")
ekin1 = np.fromfile(fh, count=1, dtype=">f4")
ekin2 = np.fromfile(fh, count=1, dtype=">f4")
au0 = np.fromfile(fh, count=1, dtype=">f4")
aeu0 = np.fromfile(fh, count=1, dtype=">f4")
nrowc = np.fromfile(fh, count=1, dtype=">i4")
ngridc = np.fromfile(fh, count=1, dtype=">i4")
nspecs = np.fromfile(fh, count=1, dtype=">i4")
nseed = np.fromfile(fh, count=1, dtype=">i4")
Om0 = np.fromfile(fh, count=1, dtype=">f4")
Oml0 = np.fromfile(fh, count=1, dtype=">f4")
hubble = np.fromfile(fh, count=1, dtype=">f4")
Wp5 = np.fromfile(fh, count=1, dtype=">f4")
Ocurv = np.fromfile(fh, count=1, dtype=">f4")
wspecies = np.fromfile(fh, count=10, dtype=">f4")
lspecies = np.fromfile(fh, count=10, dtype=">i4")
extras = np.fromfile(fh, count=79, dtype=">f4")
boxsize = np.fromfile(fh, count=1, dtype=">f4")
n = nspecs[0]
particle_header_vals = {}
tmp = [
headerstr,
aexpn,
aexp0,
amplt,
astep,
istep,
partw,
tintg,
ekin,
ekin1,
ekin2,
au0,
aeu0,
nrowc,
ngridc,
nspecs,
nseed,
Om0,
Oml0,
hubble,
Wp5,
Ocurv,
wspecies,
lspecies,
extras,
boxsize,
]
for i, arr in enumerate(tmp):
a1 = dmparticle_header_struct[0][i]
a2 = dmparticle_header_struct[1][i]
if a2 == 1:
particle_header_vals[a1] = arr[0]
else:
particle_header_vals[a1] = arr[:a2]
for specie in range(n):
self.particle_types.append("specie%i" % specie)
self.particle_types_raw = tuple(self.particle_types)
ls_nonzero = np.diff(lspecies)[: n - 1]
ls_nonzero = np.append(lspecies[0], ls_nonzero)
self.star_type = len(ls_nonzero)
mylog.info("Discovered %i species of particles", len(ls_nonzero))
info_str = "Particle populations: " + "%9i " * len(ls_nonzero)
mylog.info(info_str, *ls_nonzero)
for k, v in particle_header_vals.items():
if k in self.parameters.keys():
if not self.parameters[k] == v:
mylog.info(
"Inconsistent parameter %s %1.1e %1.1e",
k,
v,
self.parameters[k],
)
else:
self.parameters[k] = v
self.parameters_particles = particle_header_vals
self.parameters.update(particle_header_vals)
self.parameters["wspecies"] = wspecies[:n]
self.parameters["lspecies"] = lspecies[:n]
self.parameters["ng"] = self.parameters["Ngridc"]
self.parameters["ncell0"] = self.parameters["ng"] ** 3
self.parameters["boxh"] = self.parameters["boxsize"]
self.parameters["total_particles"] = ls_nonzero
self.domain_dimensions = np.ones(3, dtype="int64") * 2 # NOT ng
# setup standard simulation params yt expects to see
# Convert to float to please unyt
self.current_redshift = float(self.parameters["aexpn"] ** -1.0 - 1.0)
self.omega_lambda = float(particle_header_vals["Oml0"])
self.omega_matter = float(particle_header_vals["Om0"])
self.hubble_constant = float(particle_header_vals["hubble"])
self.min_level = 0
self.max_level = 0
# self.min_level = particle_header_vals['min_level']
# self.max_level = particle_header_vals['max_level']
# if self.limit_level is not None:
# self.max_level = min(
# self.limit_level, particle_header_vals['max_level'])
# if self.force_max_level is not None:
# self.max_level = self.force_max_level
self.hubble_time = 1.0 / (self.hubble_constant * 100 / 3.08568025e19)
self.parameters["t"] = a2b(self.parameters["aexpn"])
self.current_time = self.quan(b2t(self.parameters["t"]), "Gyr")
self.gamma = self.parameters["gamma"]
mylog.info("Max level is %02i", self.max_level)
def create_field_info(self):
super(ARTDataset, self).create_field_info()
ptr = self.particle_types_raw
pu = ParticleUnion("darkmatter", list(ptr))
self.add_particle_union(pu)
@classmethod
def _is_valid(cls, filename: str, *args, **kwargs) -> bool:
"""
Defined for the NMSU file naming scheme.
This could differ for other formats.
"""
f = str(filename)
prefix, suffix = filename_pattern["particle_data"]
if not os.path.isfile(f):
return False
if not f.endswith(suffix):
return False
if "s0" not in f:
# ATOMIC.DAT, for instance, passes the other tests, but then dies
# during _find_files because it can't be split.
return False
with open(f, "rb") as fh:
try:
amr_prefix, amr_suffix = filename_pattern["amr"]
possibles = glob.glob(
os.path.join(os.path.dirname(os.path.abspath(f)), "*")
)
for possible in possibles:
if possible.endswith(amr_suffix):
if os.path.basename(possible).startswith(amr_prefix):
return False
except Exception:
pass
try:
seek = 4
fh.seek(seek)
headerstr = np.fromfile(fh, count=1, dtype=(str, 45)) # NOQA
aexpn = np.fromfile(fh, count=1, dtype=">f4") # NOQA
aexp0 = np.fromfile(fh, count=1, dtype=">f4") # NOQA
amplt = np.fromfile(fh, count=1, dtype=">f4") # NOQA
astep = np.fromfile(fh, count=1, dtype=">f4") # NOQA
istep = np.fromfile(fh, count=1, dtype=">i4") # NOQA
partw = np.fromfile(fh, count=1, dtype=">f4") # NOQA
tintg = np.fromfile(fh, count=1, dtype=">f4") # NOQA
ekin = np.fromfile(fh, count=1, dtype=">f4") # NOQA
ekin1 = np.fromfile(fh, count=1, dtype=">f4") # NOQA
ekin2 = np.fromfile(fh, count=1, dtype=">f4") # NOQA
au0 = np.fromfile(fh, count=1, dtype=">f4") # NOQA
aeu0 = np.fromfile(fh, count=1, dtype=">f4") # NOQA
nrowc = np.fromfile(fh, count=1, dtype=">i4") # NOQA
ngridc = np.fromfile(fh, count=1, dtype=">i4") # NOQA
nspecs = np.fromfile(fh, count=1, dtype=">i4") # NOQA
nseed = np.fromfile(fh, count=1, dtype=">i4") # NOQA
Om0 = np.fromfile(fh, count=1, dtype=">f4") # NOQA
Oml0 = np.fromfile(fh, count=1, dtype=">f4") # NOQA
hubble = np.fromfile(fh, count=1, dtype=">f4") # NOQA
Wp5 = np.fromfile(fh, count=1, dtype=">f4") # NOQA
Ocurv = np.fromfile(fh, count=1, dtype=">f4") # NOQA
wspecies = np.fromfile(fh, count=10, dtype=">f4") # NOQA
lspecies = np.fromfile(fh, count=10, dtype=">i4") # NOQA
extras = np.fromfile(fh, count=79, dtype=">f4") # NOQA
boxsize = np.fromfile(fh, count=1, dtype=">f4") # NOQA
return True
except Exception:
return False
class ARTDomainSubset(OctreeSubset):
@property
def oct_handler(self):
return self.domain.oct_handler
def fill(self, content, ftfields, selector):
"""
This is called from IOHandler. It takes content
which is a binary stream, reads the requested field
        over this whole domain. It then uses oct_handler fill
        to reorganize values from IO read index order to
        the order they appear in the oct handler.
"""
oct_handler = self.oct_handler
all_fields = self.domain.ds.index.fluid_field_list
fields = [f for ft, f in ftfields]
field_idxs = [all_fields.index(f) for f in fields]
source, tr = {}, {}
cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id)
levels, cell_inds, file_inds = self.oct_handler.file_index_octs(
selector, self.domain_id, cell_count
)
for field in fields:
tr[field] = np.zeros(cell_count, "float64")
data = _read_root_level(
content, self.domain.level_child_offsets, self.domain.level_count
)
ns = (self.domain.ds.domain_dimensions.prod() // 8, 8)
for field, fi in zip(fields, field_idxs, strict=True):
source[field] = np.empty(ns, dtype="float64", order="C")
dt = data[fi, :].reshape(self.domain.ds.domain_dimensions, order="F")
for i in range(2):
for j in range(2):
for k in range(2):
ii = ((k * 2) + j) * 2 + i
# Note: C order because our index converts C to F.
source[field][:, ii] = dt[i::2, j::2, k::2].ravel(order="C")
oct_handler.fill_level(0, levels, cell_inds, file_inds, tr, source)
del source
# Now we continue with the additional levels.
for level in range(1, self.ds.index.max_level + 1):
no = self.domain.level_count[level]
noct_range = [0, no]
source = _read_child_level(
content,
self.domain.level_child_offsets,
self.domain.level_offsets,
self.domain.level_count,
level,
fields,
self.domain.ds.domain_dimensions,
self.domain.ds.parameters["ncell0"],
noct_range=noct_range,
)
oct_handler.fill_level(level, levels, cell_inds, file_inds, tr, source)
return tr
class ARTDomainFile:
"""
Read in the AMR, left/right edges, fill out the octhandler
"""
# We already read in the header in static output,
# and since these headers are defined in only a single file it's
# best to leave them in the static output
_last_mask = None
_last_selector_id = None
def __init__(self, ds, nvar, oct_handler, domain_id):
self.nvar = nvar
self.ds = ds
self.domain_id = domain_id
self._level_count = None
self._level_oct_offsets = None
self._level_child_offsets = None
self._oct_handler = oct_handler
@property
def oct_handler(self):
return self._oct_handler
@property
def level_count(self):
# this is number of *octs*
if self._level_count is not None:
return self._level_count
self.level_offsets
return self._level_count
@property
def level_child_offsets(self):
if self._level_count is not None:
return self._level_child_offsets
self.level_offsets
return self._level_child_offsets
@property
def level_offsets(self):
# this is used by the IO operations to find the file offset,
# and then start reading to fill values
# note that this is called hydro_offset in ramses
if self._level_oct_offsets is not None:
return self._level_oct_offsets
# We now have to open the file and calculate it
f = open(self.ds._file_amr, "rb")
(
nhydrovars,
inoll,
_level_oct_offsets,
_level_child_offsets,
) = self._count_art_octs(
f, self.ds.child_grid_offset, self.ds.min_level, self.ds.max_level
)
# remember that the root grid is by itself; manually add it back in
inoll[0] = self.ds.domain_dimensions.prod() // 8
_level_child_offsets[0] = self.ds.root_grid_offset
self.nhydrovars = nhydrovars
self.inoll = inoll # number of octs
self._level_oct_offsets = _level_oct_offsets
self._level_child_offsets = _level_child_offsets
self._level_count = inoll
return self._level_oct_offsets
def _count_art_octs(self, f, offset, MinLev, MaxLevelNow):
level_oct_offsets = [
0,
]
level_child_offsets = [
0,
]
f.seek(offset)
nchild, ntot = 8, 0
Level = np.zeros(MaxLevelNow + 1 - MinLev, dtype="int64")
iNOLL = np.zeros(MaxLevelNow + 1 - MinLev, dtype="int64")
iHOLL = np.zeros(MaxLevelNow + 1 - MinLev, dtype="int64")
for Lev in range(MinLev + 1, MaxLevelNow + 1):
level_oct_offsets.append(f.tell())
# Get the info for this level, skip the rest
# print("Reading oct tree data for level", Lev)
# print('offset:',f.tell())
Level[Lev], iNOLL[Lev], iHOLL[Lev] = fpu.read_vector(f, "i", ">")
# print('Level %i : '%Lev, iNOLL)
# print('offset after level record:',f.tell())
nLevel = iNOLL[Lev]
ntot = ntot + nLevel
# Skip all the oct hierarchy data
ns = fpu.peek_record_size(f, endian=">")
size = struct.calcsize(">i") + ns + struct.calcsize(">i")
f.seek(f.tell() + size * nLevel)
level_child_offsets.append(f.tell())
# Skip the child vars data
ns = fpu.peek_record_size(f, endian=">")
size = struct.calcsize(">i") + ns + struct.calcsize(">i")
f.seek(f.tell() + size * nLevel * nchild)
# find nhydrovars
nhydrovars = 8 + 2
f.seek(offset)
return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
def _read_amr_level(self, oct_handler):
"""Open the oct file, read in octs level-by-level.
For each oct, only the position, index, level and domain
are needed - its position in the octree is found automatically.
        The most important part is finding all the information to feed
        to oct_handler.add
"""
self.level_offsets
f = open(self.ds._file_amr, "rb")
for level in range(1, self.ds.max_level + 1):
unitary_center, fl, iocts, nocts, root_level = _read_art_level_info(
f,
self._level_oct_offsets,
level,
coarse_grid=self.ds.domain_dimensions[0],
root_level=self.ds.root_level,
)
nocts_check = oct_handler.add(self.domain_id, level, unitary_center)
assert nocts_check == nocts
mylog.debug(
"Added %07i octs on level %02i, cumulative is %07i",
nocts,
level,
oct_handler.nocts,
)
def _read_amr_root(self, oct_handler):
self.level_offsets
# add the root *cell* not *oct* mesh
root_octs_side = self.ds.domain_dimensions[0] / 2
NX = np.ones(3) * root_octs_side
LE = np.array([0.0, 0.0, 0.0], dtype="float64")
RE = np.array([1.0, 1.0, 1.0], dtype="float64")
root_dx = (RE - LE) / NX
LL = LE + root_dx / 2.0
RL = RE - root_dx / 2.0
# compute floating point centers of root octs
root_fc = np.mgrid[
LL[0] : RL[0] : NX[0] * 1j,
LL[1] : RL[1] : NX[1] * 1j,
LL[2] : RL[2] : NX[2] * 1j,
]
root_fc = np.vstack([p.ravel() for p in root_fc]).T
oct_handler.add(self.domain_id, 0, root_fc)
assert oct_handler.nocts == root_fc.shape[0]
mylog.debug(
"Added %07i octs on level %02i, cumulative is %07i",
root_octs_side**3,
0,
oct_handler.nocts,
)
def included(self, selector):
return True
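# A minimal usage sketch (hedged): these classes register themselves with yt's
# frontend machinery, so end users normally just call yt.load() on an NMSU-ART
# AMR output. The file name below is hypothetical.
if __name__ == "__main__":
    import yt
    ds = yt.load("10MpcBox_csf512_a0.300.d")  # hypothetical ART AMR file
    print(ds.domain_dimensions, ds.current_redshift)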
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@frontends@art@[email protected]_END.py
|
{
"filename": "_textcasesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/hoverlabel/font/_textcasesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcasesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="textcasesrc", parent_name="surface.hoverlabel.font", **kwargs
):
super(TextcasesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@hoverlabel@font@[email protected]_END.py
|
{
"filename": "_legendgroup.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/histogram2dcontour/_legendgroup.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendgroupValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="legendgroup", parent_name="histogram2dcontour", **kwargs
):
super(LegendgroupValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@histogram2dcontour@[email protected]_END.py
|
{
"filename": "_ticktextsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmap/colorbar/_ticktextsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="ticktextsrc", parent_name="choroplethmap.colorbar", **kwargs
):
super(TicktextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmap@colorbar@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/sparse/linalg/_eigen/tests/__init__.py",
"type": "Python"
}
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@sparse@linalg@_eigen@tests@[email protected]_END.py
|
|
{
"filename": "test_injection_sims.py",
"repo_name": "deepskies/deeplenstronomy",
"repo_path": "deeplenstronomy_extracted/deeplenstronomy-master/exploded_setup_old/test/test_ImSim/test_injection_sims.py",
"type": "Python"
}
|
from deeplenstronomy.ImSim import inject_simulations
from deeplenstronomy.PopSim.population import Population
import pytest
import numpy as np
import numpy.testing as npt
class TestPopulation(object):
def setup(self):
pass
def test_add_arc(self):
pop = Population()
kwargs_params, kwargs_model = pop.draw_model(with_lens_light=True, with_quasar=True)
image = np.zeros((10, 10))
kwargs_band = {'read_noise': 10,
'pixel_scale': 0.263,
'ccd_gain': 4.5,
'exposure_time': 90.,
'magnitude_zero_point': 30,
'num_exposures': 10,
'psf_type': 'GAUSSIAN',
'seeing': 1.0,
'sky_brightness': 21}
added_image = inject_simulations.add_arc(image, kwargs_band, kwargs_params, kwargs_model, kwargs_numerics={})
assert np.sum(added_image) > 0
if __name__ == '__main__':
pytest.main()
|
deepskiesREPO_NAMEdeeplenstronomyPATH_START.@deeplenstronomy_extracted@deeplenstronomy-master@exploded_setup_old@test@test_ImSim@[email protected]_END.py
|
{
"filename": "planet_pos.py",
"repo_name": "Hoeijmakers/StarRotator",
"repo_path": "StarRotator_extracted/StarRotator-master/lib/planet_pos.py",
"type": "Python"
}
|
import numpy as np
from scipy.optimize import newton
def calc_orbit_times(time_stamp, transitC, exposure_time, orb_p):
"""
Calculates the start, end and center of the transit from the provided times
input:
time_stamp: type: float, time
transitC: type:float, transit center - 2400000.
    exposure_time: type: float, exposure time in seconds
orb_p: type: float, orbital period in days
output:
        orbit_center: type: float, time of the current exposure relative to the transit
        center, in days (modulo the orbital period); orbit_start and orbit_end are currently disabled
"""
    #This would allow treating the start and end of each exposure explicitly. It might be
    #interesting for long exposures, but is left for later.
orbit_center = (time_stamp - transitC + 2400000.)% orb_p
#orbit_start = (time_stamp-exposure_time/(2.*24.*3600.)- transitC + 2400000.) % orb_p
#orbit_end = (time_stamp+exposure_time/(2.*24.*3600.)- transitC + 2400000.) % orb_p
return orbit_center# orbit_start, orbit_end,
def func_kepler(ecc_anom,mean_anom,ecc):
"""
Find the eccentric anomaly using the mean anomaly and eccentricity
via M = E - e sin(E)
input:
ecc_anom: type: list, eccentric anomaly
mean_anom: type: list, mean anomaly
ecc: type: float, eccentricity
output:
anome: type: list, eccentric anomaly
"""
anom=ecc_anom-ecc*np.sin(ecc_anom)-mean_anom
return anom
def calc_true_anom(ecc,phases,omega_bar):
"""
Calculates the true anomaly and the mean anomaly of the system
input:
ecc: type: float, eccentricity
phases: type: np.array, phases, either given by the user or calculated from time stamps
omega_bar: type: float, angle
output:
"""
import numpy as np
#Circular orbit
    if np.isclose(ecc, 0., atol=1e-4):
#True anomaly
true_anom=2.*np.pi*phases
#Eccentric anomaly
ecc_anom=None
#Eccentric orbit
else:
#True anomaly of the planet at mid-transit (in rad):
        # - angle counted from 0 at the periastron, to the star/Earth LOS
# - >0 counterclockwise, possibly modulo 2pi
true_anom_mt=(np.pi*0.5)-omega_bar
#True anomaly at the time of the transit
# - corresponds to 'dt_transit' (in years), time from periapsis to transit center
# - atan(X) is in -PI/2 ; PI/2
ecc_anom_mt=2.*np.arctan(np.tan(true_anom_mt*0.5)*np.sqrt((1.-ecc)/(1.+ecc)))
mean_anom_mt=ecc_anom_mt-ecc*np.sin(ecc_anom_mt)
if (mean_anom_mt<0.):
mean_anom_mt=mean_anom_mt+2.*np.pi
#Mean anomaly
# - time origin of t_mean at the periapsis (t_mean=0 <-> M=0 <-> E=0)
# - M(t_mean)=M(dt_transit)+M(t_simu)
mean_anom=2.*np.pi*phases+mean_anom_mt
#Eccentric anomaly :
# - M = E - e sin(E)
# - >0 counterclockwise
# - angle, with origin at the ellipse center, between the major axis toward the periapsis
# and the line crossing the circle with radius 'a_Rs' at its intersection with the
#perpendicular to the major axis through the planet position
ecc_anom=newton(func_kepler,mean_anom,args=(mean_anom,ecc,))
#True anomaly of the planet at current time
true_anom=2.*np.arctan(np.sqrt((1.+ecc)/(1.-ecc))*np.tan(ecc_anom/2.))
return true_anom,ecc_anom
def calc_planet_pos(sma_Rs,ecc,omega,inclin,l_spinorbit,Rp_Rs,orb_p,phase,exposure_times=0.):
"""
    Takes the stellar and planet parameters as input and calculates the path of the planet
    in front of the Cartesian stellar coordinate system
input:
sma_Rs: type: float, scaled semi major axis in solar radii
ecc: type: float, eccentricity
omega: type: float, Angle between the ascending node and the periastron, in the orbital
plane (>0 counterclockwise)
inclin: type: float, Inclination from the line of sight toward the normal to the orbital
plane
l_spinorbit: Orbit obliquity in degrees.
Rp_Rs: type: float, ratio planet to star radii
orb_p: type: float, orbital period in days
phase: type: np.array, grid of phases
    exposure_times: type: np.array, exposure times in seconds (not used by this routine), default 0.
output: x_pl, y_pl, z_pl: type: np.arrays of floats, containing the position of the planet in
units of stellar radii
"""
import numpy as np
inclin_bar = inclin*np.pi/180.
omega_bar = omega*np.pi/180.
obs_n = len(phase) #number of steps
positions= np.empty([3,obs_n], dtype=float)
#calc anomalies
true_anom,ecc_anom = calc_true_anom(ecc,phase,omega_bar)
#circular orbit
    if np.isclose(ecc, 0., atol=1e-4):
x_pl = sma_Rs*np.sin(np.asarray(true_anom))
y_pl = -sma_Rs*np.cos(np.asarray(true_anom))*np.cos(inclin_bar)
z_pl = sma_Rs*np.cos(np.asarray(true_anom))*np.sin(inclin_bar)
#eccentric orbit
else:
#planet position in the orbital plane in Rstar
X0_p = sma_Rs*(np.cos(ecc_anom)-ecc)
Y0_p = sma_Rs*np.sqrt(1.-ecc*ecc)*np.sin(ecc_anom)
#turn plane towards observer
X1_p = X0_p*np.sin(omega_bar) + Y0_p*np.cos(omega_bar)
Y1_p = -X0_p*np.cos(omega_bar) + Y0_p*np.sin(omega_bar)
#translate to planet pos
x_pl = Y1_p
y_pl = -X1_p*np.cos(inclin_bar)
z_pl = X1_p*np.sin(inclin_bar)
return(x_pl*np.cos(np.radians(l_spinorbit))-y_pl*np.sin(np.radians(l_spinorbit)),
x_pl*np.sin(np.radians(l_spinorbit))+y_pl*np.cos(np.radians(l_spinorbit)), z_pl)
#CONVERT x_p and y_p to perpendicular x,y wrt stellar spin axis, see eqs 4,5 of Cegla+ 2016.
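# A minimal usage sketch (hedged): trace the planet across the stellar disk for
# a circular orbit. All parameter values are illustrative, not tied to any
# particular system.
if __name__ == '__main__':
    import numpy as np
    phases = np.linspace(-0.05, 0.05, 11)  # orbital phases around mid-transit
    x, y, z = calc_planet_pos(sma_Rs=8.8, ecc=0.0, omega=90.0, inclin=86.7,
                              l_spinorbit=0.0, Rp_Rs=0.12, orb_p=3.5,
                              phase=phases)
    print(np.round(x, 3), np.round(y, 3), np.round(z, 3))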
|
HoeijmakersREPO_NAMEStarRotatorPATH_START.@StarRotator_extracted@StarRotator-master@lib@[email protected]_END.py
|
{
"filename": "MakeMovie.py",
"repo_name": "saopicc/DDFacet",
"repo_path": "DDFacet_extracted/DDFacet-master/DDFacet/MakeMovie.py",
"type": "Python"
}
|
#!/usr/bin/env python
'''
DDFacet, a facet-based radio imaging package
Copyright (C) 2013-2016 Cyril Tasse, l'Observatoire de Paris,
SKA South Africa, Rhodes University
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DDFacet.compatibility import range
import optparse
import sys
import pickle
def read_options():
desc="""DDFacet """
opt = optparse.OptionParser(usage='Usage: %prog --Parset=somename.MS <options>',version='%prog version 1.0',description=desc)
group = optparse.OptionGroup(opt, "* Data-related options", "Won't work if not specified.")
group.add_option('--Parset',help='Input Parset [no default]',default='')
opt.add_option_group(group)
group = optparse.OptionGroup(opt, "* Data selection options")
    group.add_option('--PointingID',help='PointingID in case of a multiple-pointing dataset [no default]',default=0)
group.add_option('--pngBaseDir',help='PNG directory [no default]',default='png')
group.add_option('--EnablePlot',help='Enable matplotlib',default=0)
#group.add_option('--TimeCode',help='TimeCode',default=[0,-1,1])
group.add_option('--Incr',help='Increment in time-steps',default=10)
opt.add_option_group(group)
options, arguments = opt.parse_args()
    with open("last_param.obj","wb") as f:
        pickle.dump(options,f)
    return options
def main(options=None):
    if options is None:
        with open("last_param.obj",'rb') as f:
            options = pickle.load(f)
if options.EnablePlot==0:
import matplotlib
matplotlib.use('agg')
TCode=(0,-1,int(options.Incr))
import ClassMovieMachine
MM=ClassMovieMachine.MovieMachine(ParsetFile=options.Parset,PointingID=options.PointingID,pngBaseDir=options.pngBaseDir,TimeCode=TCode)
MM.MainLoop()
if __name__=="__main__":
options=read_options()
main(options)
|
saopiccREPO_NAMEDDFacetPATH_START.@DDFacet_extracted@DDFacet-master@[email protected]@.PATH_END.py
|
{
"filename": "_side.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2d/colorbar/title/_side.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="side", parent_name="histogram2d.colorbar.title", **kwargs
):
super(SideValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["right", "top", "bottom"]),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2d@colorbar@title@[email protected]_END.py
|
{
"filename": "training_utils.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/keras/engine/training_utils.py",
"type": "Python"
}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related utilities."""
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest
def slice_arrays(arrays, indices, contiguous=True):
"""Slices batches out of provided arrays (workaround for eager tensors).
Unfortunately eager tensors don't have the same slicing behavior as
Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),
hence we cannot use `generic_utils.slice_arrays` directly
and we have to implement this workaround based on `concat`. This has a
performance cost.
Args:
arrays: Single array or list of arrays.
indices: List of indices in the array that should be included in the output
batch.
contiguous: Boolean flag indicating whether the indices are contiguous.
Returns:
Slice of data (either single array or list of arrays).
"""
converted_to_list = False
if not isinstance(arrays, list):
converted_to_list = True
arrays = [arrays]
if any(tensor_util.is_tf_type(x) for x in arrays):
if not contiguous:
entries = [[x[i:i + 1] for i in indices] for x in arrays]
slices = [array_ops.concat(x, axis=0) for x in entries]
else:
slices = [x[indices[0]:indices[-1] + 1] for x in arrays]
else:
slices = generic_utils.slice_arrays(arrays, indices)
if converted_to_list:
slices = slices[0]
return slices
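# Editorial usage sketch (not part of the original TensorFlow source): with plain NumPy
# arrays the call falls through to generic_utils.slice_arrays; the shapes below are
# arbitrary demonstration values.
if __name__ == "__main__":
    x = np.arange(10).reshape(5, 2)
    y = np.arange(5)
    batch_x, batch_y = slice_arrays([x, y], [0, 2, 4], contiguous=False)
    print(batch_x.shape, batch_y.shape)  # expected: (3, 2) (3,)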
def handle_partial_sample_weights(outputs, sample_weights, sample_weight_modes,
check_all_flat=False):
"""Adds 1.0 as sample weights for the outputs for which there is no weight.
Args:
outputs: List of model outputs.
sample_weights: List of sample weight inputs.
sample_weight_modes: List of sample weight modes or None.
check_all_flat: Ensure that inputs are not nested structures. This is not
a free check, so we may not want to run it eagerly every iteration.
Returns:
Tuple of sample weights, one sample weight for every output, and booleans
describing the raw sample weights.
"""
any_sample_weight = sample_weights is not None and any(
w is not None for w in sample_weights)
partial_sample_weight = any_sample_weight and any(
w is None for w in sample_weights)
if not any_sample_weight:
return None, any_sample_weight, partial_sample_weight
if not partial_sample_weight:
return sample_weights, any_sample_weight, partial_sample_weight
if check_all_flat:
nest.assert_same_structure(
list_to_tuple(sample_weights),
list_to_tuple(nest.flatten(sample_weights)))
nest.assert_same_structure(
list_to_tuple(outputs),
list_to_tuple(nest.flatten(outputs)))
if sample_weight_modes is not None:
nest.assert_same_structure(
sample_weight_modes, nest.flatten(sample_weight_modes))
new_sample_weights = []
for i, sw in enumerate(sample_weights):
if sw is None:
as_numpy = isinstance(outputs[i], np.ndarray)
output = outputs[i]
output_shape = output.shape if as_numpy else array_ops.shape(output)
is_temporal = (
sample_weight_modes is not None and
sample_weight_modes[i] == 'temporal')
sw_shape = (output_shape[0],
output_shape[1]) if is_temporal else (output_shape[0],)
new_sample_weights.append(
np.ones(sw_shape) if as_numpy else array_ops.ones(sw_shape))
else:
new_sample_weights.append(sw)
return (list_to_tuple(new_sample_weights),
any_sample_weight, partial_sample_weight)
class RespectCompiledTrainableState(object):
"""Set and restore trainable state if it has changed since compile.
The keras API guarantees that the value of each Layer's `trainable` property
at `Model.compile` time will be used when training that model. In order to
respect this requirement, it may be necessary to set the trainable value of
layers to their compile time values before beginning a training endpoint and
  restore the values before returning from said endpoint. This scope checks if
any layer's trainable state has changed since Model compile, and performs this
set and un-set bookkeeping.
However, the trainable state of a layer changes quite infrequently, if ever,
for many kinds of workflows. Moreover, updating every layer in a model is an
expensive operation. As a result, we will only explicitly set and unset the
trainable state of a model if a trainable value has changed since compile.
"""
def __init__(self, model):
self._model = model
self._current_trainable_state = None
self._compiled_trainable_state = None
self._should_set_trainable = False
def __enter__(self):
self._current_trainable_state = self._model._get_trainable_state() # pylint: disable=protected-access
self._compiled_trainable_state = self._model._compiled_trainable_state # pylint: disable=protected-access
# Check to see if any layer's trainable state has changed since `compile`.
for layer, trainable in self._compiled_trainable_state.items():
if (layer in self._current_trainable_state and
trainable != self._current_trainable_state[layer]):
self._should_set_trainable = True
break
# If so, restore the model to its compiled state.
if self._should_set_trainable:
self._model._set_trainable_state(self._compiled_trainable_state) # pylint: disable=protected-access
def __exit__(self, type_arg, value_arg, traceback_arg):
# If we set the values to their compiled state in __enter__, we need to
# restore the original values before leaving the scope.
if self._should_set_trainable:
self._model._set_trainable_state(self._current_trainable_state) # pylint: disable=protected-access
return False # False values do not suppress exceptions
# Allow use of methods not exposed to the user.
# pylint: disable=protected-access
def get_input_shape_and_dtype(layer):
"""Retrieves input shape and input dtype of layer if applicable.
Args:
layer: Layer (or model) instance.
Returns:
Tuple (input_shape, input_dtype). Both could be None if the layer
does not have a defined input shape.
Raises:
ValueError: in case an empty Sequential or Functional model is passed.
"""
def _is_graph_model(layer):
return ((hasattr(layer, '_is_graph_network') and layer._is_graph_network) or
layer.__class__.__name__ == 'Sequential')
# In case of nested models: recover the first layer
# of the deepest model to infer input shape and dtype.
# Subclassed Models may not have been built so can't be checked.
while _is_graph_model(layer):
if not layer.layers:
raise ValueError('An empty Model cannot be used as a Layer.')
layer = layer.layers[0]
if getattr(layer, '_batch_input_shape', None):
return layer._batch_input_shape, layer.dtype
return None, None
# pylint: enable=protected-access
def get_static_batch_size(layer):
"""Gets the static batch size of a Layer.
Args:
layer: a `Layer` instance.
Returns:
The static batch size of a Layer.
"""
batch_input_shape, _ = get_input_shape_and_dtype(layer)
if batch_input_shape is not None:
return tensor_shape.Dimension(batch_input_shape[0]).value
return None
def list_to_tuple(maybe_list):
"""Datasets will stack the list of tensor, so switch them to tuples."""
if isinstance(maybe_list, list):
return tuple(maybe_list)
return maybe_list
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@keras@engine@[email protected]_END.py
|
{
"filename": "eis.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/mms/eis/eis.py",
"type": "Python"
}
|
from pyspedas.projects.mms.mms_load_data import mms_load_data
from pyspedas.projects.mms.eis.mms_eis_omni import mms_eis_omni
from pyspedas.projects.mms.eis.mms_eis_spin_avg import mms_eis_spin_avg
from pyspedas.projects.mms.eis.mms_eis_set_metadata import mms_eis_set_metadata
from pyspedas.projects.mms.mms_config import CONFIG
from pytplot import tnames
def mms_load_eis(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy', level='l2', datatype='extof',
varformat=None, varnames=[], get_support_data=True, suffix='', time_clip=False, no_update=False,
available=False, notplot=False, latest_version=False, major_version=False, min_version=None, cdf_version=None,
spdf=False, always_prompt=False):
"""
Load data from the MMS Energetic Ion Spectrometer (EIS)
Parameters
----------
trange : list of str
time range of interest [start time, end time] with the format
        ['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
Default: ['2015-10-16', '2015-10-17']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
Default: '1'
data_rate : str or list of str
instrument data rates for EIS include ['brst', 'srvy'].
Default: 'srvy'
level : str
indicates level of data processing.
Default: 'l2'
datatype : str or list of str
        Valid datatypes for EIS are: ['extof', 'phxtof', 'electronenergy']
Default: 'extof'
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot.
        Default: True
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
        Default: False
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted.
Default: None (all variables are loaded)
varnames: list of str
List of variable names to load. If list is empty or not specified,
all data variables are loaded.
Default: []
suffix: str
The tplot variable names will be given this suffix.
        Default: '' (no suffix)
notplot: bool
If True, then data are returned in a hash table instead of
being stored in tplot variables (useful for debugging, and
access to multidimensional data products)
Default: False
available: bool
If True, simply return the available data files (without downloading)
for the requested parameters
Default: False
no_update: bool
        Set this flag to preserve the original data. If not set and newer
        data is found, the existing data will be overwritten.
Default: False
cdf_version: str
Specify a specific CDF version # to load (e.g., cdf_version='4.3.0')
Default: None
min_version: str
Specify a minimum CDF version # to load
Default: None
latest_version: bool
Only grab the latest CDF version in the requested time interval
Default: False
major_version: bool
Only open the latest major CDF version (e.g., X in vX.Y.Z) in the requested time interval
Default: False
always_prompt: bool
Set this keyword to always prompt for the user's username and password;
useful if you accidentally save an incorrect password, or if your SDC password has changed
Default: False
spdf: bool
If True, download the data from the SPDF instead of the SDC
Default: False
Returns
-------
list of str
List of tplot variables created.
Example
-------
>>> import pyspedas
>>> from pytplot import tplot
>>> eis_vars = pyspedas.projects.mms.mms_load_eis(trange=['2015-10-16', '2015-10-17'], probe='1', datatype=['phxtof', 'extof'])
>>> # plot the non-spin averaged flux
>>> tplot(['mms1_epd_eis_srvy_l2_extof_proton_flux_omni', 'mms1_epd_eis_srvy_l2_phxtof_proton_flux_omni'])
>>> # plot the spin averaged flux
>>> tplot(['mms1_epd_eis_srvy_l2_extof_proton_flux_omni_spin', 'mms1_epd_eis_srvy_l2_phxtof_proton_flux_omni_spin'])
"""
tvars = mms_load_data(trange=trange, notplot=notplot, probe=probe, data_rate=data_rate, level=level, instrument='epd-eis',
datatype=datatype, varformat=varformat, varnames=varnames, get_support_data=get_support_data, prefix='', suffix=suffix,
time_clip=time_clip, no_update=no_update, available=available, latest_version=latest_version,
major_version=major_version, min_version=min_version, cdf_version=cdf_version, spdf=spdf, always_prompt=always_prompt)
if tvars == [] or available or notplot or CONFIG['download_only'] or tvars is None:
return tvars
if not isinstance(probe, list): probe = [probe]
if not isinstance(data_rate, list): data_rate = [data_rate]
if not isinstance(level, list): level = [level]
if not isinstance(datatype, list): datatype = [datatype]
# the probes will need to be strings beyond this point
if isinstance(probe, list):
probe = [str(p) for p in probe]
for probe_id in probe:
for datatype_id in datatype:
for level_id in level:
for data_rate_id in data_rate:
if datatype_id == 'electronenergy':
e_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='electron', datatype=datatype_id, data_rate=data_rate_id, level=level_id, suffix=suffix)
# create non-spin averaged omni-directional spectra
e_omni_spectra = mms_eis_omni(probe_id, species='electron', data_rate=data_rate_id, level=level_id, datatype=datatype_id)
# create spin averaged omni-directional spectra
e_omni_spectra_spin = mms_eis_omni(probe_id, species='electron', data_rate=data_rate_id, level=level_id, datatype=datatype_id, suffix=suffix+'_spin')
# add the vars to the output
if e_spin_avg_var is not None:
for tvar in e_spin_avg_var:
tvars.append(tvar)
if e_omni_spectra is not None:
tvars.append(e_omni_spectra)
if e_omni_spectra_spin is not None:
tvars.append(e_omni_spectra_spin)
elif datatype_id == 'extof':
# 9Feb2021, egrimes added 'helium' species for updates coming soon to the CDFs
p_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='proton', datatype=datatype_id, data_rate=data_rate_id, level=level_id, suffix=suffix)
o_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='oxygen', datatype=datatype_id, data_rate=data_rate_id, level=level_id, suffix=suffix)
a_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='alpha', datatype=datatype_id, data_rate=data_rate_id, level=level_id, suffix=suffix)
h_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='helium', datatype=datatype_id, data_rate=data_rate_id, level=level_id, suffix=suffix)
# create non-spin averaged omni-directional spectra
p_omni_spectra = mms_eis_omni(probe_id, species='proton', data_rate=data_rate_id, level=level_id, datatype=datatype_id, suffix=suffix)
o_omni_spectra = mms_eis_omni(probe_id, species='oxygen', data_rate=data_rate_id, level=level_id, datatype=datatype_id, suffix=suffix)
a_omni_spectra = mms_eis_omni(probe_id, species='alpha', data_rate=data_rate_id, level=level_id, datatype=datatype_id, suffix=suffix)
h_omni_spectra = mms_eis_omni(probe_id, species='helium', data_rate=data_rate_id, level=level_id, datatype=datatype_id, suffix=suffix)
# create spin averaged omni-directional spectra
p_omni_spectra_spin = mms_eis_omni(probe_id, species='proton', data_rate=data_rate_id, level=level_id, datatype=datatype_id, suffix=suffix+'_spin')
o_omni_spectra_spin = mms_eis_omni(probe_id, species='oxygen', data_rate=data_rate_id, level=level_id, datatype=datatype_id, suffix=suffix+'_spin')
a_omni_spectra_spin = mms_eis_omni(probe_id, species='alpha', data_rate=data_rate_id, level=level_id, datatype=datatype_id, suffix=suffix+'_spin')
h_omni_spectra_spin = mms_eis_omni(probe_id, species='helium', data_rate=data_rate_id, level=level_id, datatype=datatype_id, suffix=suffix+'_spin')
# add the vars to the output
if p_spin_avg_var is not None:
for tvar in p_spin_avg_var:
tvars.append(tvar)
if o_spin_avg_var is not None:
for tvar in o_spin_avg_var:
tvars.append(tvar)
if a_spin_avg_var is not None:
for tvar in a_spin_avg_var:
tvars.append(tvar)
if h_spin_avg_var is not None:
for tvar in h_spin_avg_var:
tvars.append(tvar)
if p_omni_spectra is not None:
tvars.append(p_omni_spectra)
if o_omni_spectra is not None:
tvars.append(o_omni_spectra)
if a_omni_spectra is not None:
tvars.append(a_omni_spectra)
if h_omni_spectra is not None:
tvars.append(h_omni_spectra)
if p_omni_spectra_spin is not None:
tvars.append(p_omni_spectra_spin)
if o_omni_spectra_spin is not None:
tvars.append(o_omni_spectra_spin)
if a_omni_spectra_spin is not None:
tvars.append(a_omni_spectra_spin)
if h_omni_spectra_spin is not None:
tvars.append(h_omni_spectra_spin)
elif datatype_id == 'phxtof':
# 9Feb2021, egrimes commented out oxygen calculations to match IDL updates
p_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='proton', datatype=datatype_id, data_rate=data_rate_id, level=level_id, suffix=suffix)
# o_spin_avg_var = mms_eis_spin_avg(probe=probe_id, species='oxygen', datatype=datatype_id, level=level_id, data_rate=data_rate_id, suffix=suffix)
# create non-spin averaged omni-directional spectra
p_omni_spectra = mms_eis_omni(probe_id, species='proton', data_rate=data_rate_id, datatype=datatype_id, level=level_id, suffix=suffix)
# o_omni_spectra = mms_eis_omni(probe_id, species='oxygen', data_rate=data_rate_id, level=level_id, datatype=datatype_id, suffix=suffix)
# create spin averaged omni-directional spectra
p_omni_spectra_spin = mms_eis_omni(probe_id, species='proton', data_rate=data_rate_id, datatype=datatype_id, level=level_id, suffix=suffix+'_spin')
# o_omni_spectra_spin = mms_eis_omni(probe_id, species='oxygen', data_rate=data_rate_id, level=level_id, datatype=datatype_id, suffix=suffix+'_spin')
# add the vars to the output
if p_spin_avg_var is not None:
for tvar in p_spin_avg_var:
tvars.append(tvar)
# if o_spin_avg_var is not None:
# for tvar in o_spin_avg_var:
# tvars.append(tvar)
if p_omni_spectra is not None:
tvars.append(p_omni_spectra)
# if o_omni_spectra is not None:
# tvars.append(o_omni_spectra)
if p_omni_spectra_spin is not None:
tvars.append(p_omni_spectra_spin)
# if o_omni_spectra_spin is not None:
# tvars.append(o_omni_spectra_spin)
mms_eis_set_metadata(tnames(tvars), data_rate=data_rate_id, datatype=datatype_id, suffix=suffix)
return tnames(tvars)
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@mms@[email protected]@.PATH_END.py
|
{
"filename": "install.md",
"repo_name": "cdslaborg/paramonte",
"repo_path": "paramonte_extracted/paramonte-main/install.md",
"type": "Markdown"
}
|
## The ParaMonte library build mechanisms
There are three ways to build the ParaMonte library:
1. Building via the [install.sh](https://github.com/cdslaborg/paramonte/blob/main/install.sh) Bash script located in the root
    directory of the [ParaMonte GitHub repository](https://github.com/cdslaborg/paramonte) in a **Unix Bash terminal**.
    See more on the relevant installation instructions in [install.sh.md](./install.sh.md).
    This approach works on all platforms that support Bash terminals including:
    + **Git Bash** or other **MinGW** terminals on **Windows**
    + **MSYS2** on **Windows**
    + **WSL** on **Windows**
    + **macOS**
    + **Linux**
2. Building via the [install.bat](https://github.com/cdslaborg/paramonte/blob/main/install.bat) Batch script located in the root
    directory of the [ParaMonte GitHub repository](https://github.com/cdslaborg/paramonte) in a **Windows CMD terminal**.
    See more on the relevant installation instructions in [install.bat.md](./install.bat.md).
3. Building via the CMake script directly in any Windows or Unix terminal that supports CMake.
See more on the relevant installation instructions in [CMakeLists.md](./CMakeLists.md).
The Bash and Batch install scripts in the first two build mechanisms above are merely
convenient wrappers around the lower-level CMake scripts in the third building mechanism.
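As a minimal illustration (an editorial sketch, not part of the ParaMonte documentation), the first mechanism amounts to
running the Bash script from the root of a cloned repository. The snippet below merely drives that call from Python and
assumes that `bash`, `cmake`, and a local clone named `paramonte` are already available; it passes no options, so the
script's own defaults apply.
```python
import subprocess
from pathlib import Path

repo_root = Path("paramonte")              # hypothetical path to a cloned ParaMonte repository
subprocess.run(["bash", "./install.sh"],   # build mechanism 1: the Bash install script
               cwd=repo_root, check=True)
```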
|
cdslaborgREPO_NAMEparamontePATH_START.@paramonte_extracted@[email protected]@.PATH_END.py
|
{
"filename": "anisotropygammas.py",
"repo_name": "vhaasteren/piccard",
"repo_path": "piccard_extracted/piccard-master/piccard/anisotropygammas.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
Copyright (c) 2013 Chiara Mingarelli
Contributed code for anisotropic gravitational-wave background by Chiara
Mingarelli. Work that uses the anisotropic background functionality should
reference Mingarelli and Vecchio 2013, arXiv:1306.5394
Contributed work on anisotropic gravitational-wave background by Steve Taylor.
Work that uses the anisotropic background functionality should reference Taylor
and Gair 2013, arXiv:1306.5395
"""
import math
import matplotlib.pyplot as plt
import numpy as np
from math import factorial, sqrt, sin, cos, tan, acos, atan, pi, log
from cmath import exp
import scipy
from scipy.integrate import quad, dblquad
from scipy import special as sp
import random
norm = 3./(8*pi)
c00=sqrt(4*pi)
def calczeta(phi1, phi2, theta1, theta2):
"""
Calculate the angular separation between position (phi1, theta1) and
(phi2, theta2)
"""
zeta = 0.0
if phi1 == phi2 and theta1 == theta2:
zeta = 0.0
else:
argument =sin(theta1)*sin(theta2)*cos(phi1-phi2) + cos(theta1)*cos(theta2)
if argument < -1:
zeta = np.pi
elif argument > 1:
zeta = 0.0
else:
zeta=acos(argument)
return zeta
def Gamma00(zeta):
"""
l=0, m=0. Isotropic solution.
Pulsar term doubling at zero.
Normalised so that c00*Gamma00=1 when zeta=0.
"""
b=(1.-cos(zeta))
if zeta==0: return 2*norm*c00*0.25*sqrt(pi*4)*(1+(cos(zeta)/3.))
newans= 0.25*c00*norm*sqrt(pi*4)*(1+(cos(zeta)/3.)+4*b*log(sin(zeta/2)))
return newans
def dipole_Gammas(m,zeta):
a=1.+cos(zeta)
b=1.-cos(zeta)
if m==0:
if zeta==0: return -2*0.5*norm*(sqrt(pi/3.))*a
ans01=-0.5*norm*(sqrt(pi/3.))*(a+3*b*(a+4*log(sin(zeta/2.))))
return ans01
if m==1:
if zeta==0: return 0.
if zeta==pi: return 0.
ans11=norm*0.5*sqrt(pi/6.)*sin(zeta)*(1.+3*b*(1+(4./a)*log(sin(zeta/2.))))
return ans11
if m==-1:
if zeta==0: return 0.
if zeta==pi: return 0.
ans11_m=-1*norm*0.5*sqrt(pi/6.)*sin(zeta)*(1+3*b*(1+(4./a)*log(sin(zeta/2.))))
return ans11_m
def quadrupole_Gammas(m,zeta):
a=1.+cos(zeta)
b=1.-cos(zeta)
if zeta == 0 and m!=0: return 0.
if zeta == pi and m!=0: return 0.
if m==2:
ans22=-1*norm*sqrt(5*pi/6.)/4.*b/a*(a*(cos(zeta)**2+4*cos(zeta)-9.)-24*b*log(sin(zeta/2.)))
return ans22
if m==1:
ans21=norm*(0.25*sqrt(2*pi/15.))*sin(zeta)*(5*cos(zeta)**2+15.*cos(zeta)-21.-60*(b/a)*log(sin(zeta/2)))
return ans21
if m==0:
if zeta==0: return 2*0.25*norm*(4./3)*(sqrt(pi/5))*cos(zeta)
ans20=norm*(1./3)*sqrt(pi/5)*(cos(zeta)+(15./4)*(1-cos(zeta))*(cos(zeta)**2+4*cos(zeta)+3.+8.*log(sin(zeta/2.))))
return ans20
if m==-1:
return -1*norm*(0.25*sqrt(2*pi/15.))*sin(zeta)*(5*cos(zeta)**2+15.*cos(zeta)-21.-60*(b/a)*log(sin(zeta/2)))
if m==-2:
return -1*norm*sqrt(5*pi/6.)/4.*b/a*(a*(cos(zeta)**2+4*cos(zeta)-9.)-24*b*log(sin(zeta/2.)))
# Part2: rotation functions (from T. Sidery's Ylm.py file, corrected)
def dlmk(l,m,k,theta1):
"""
returns value of d^l_mk as defined in allen, ottewill 97.
Called by Dlmk
"""
if m>=k:
factor = sqrt(factorial(l-k)*factorial(l+m)/factorial(l+k)/factorial(l-m))
part2 = (cos(theta1/2))**(2*l+k-m)*(-sin(theta1/2))**(m-k)/factorial(m-k)
part3 = sp.hyp2f1(m-l,-k-l,m-k+1,-(tan(theta1/2))**2)
return factor*part2*part3
else:
return (-1)**(m-k)*dlmk(l,k,m,theta1)
def Dlmk(l,m,k,phi1,phi2,theta1,theta2):
"""
returns value of D^l_mk as defined in allen, ottewill 97.
"""
return exp(complex(0.,-m*phi1))*dlmk(l,m,k,theta1)*exp(complex(0.,-k*gamma(phi1,phi2,theta1,theta2)))
def gamma(phi1,phi2,theta1,theta2):
"""
calculate third rotation angle
inputs are angles from 2 pulsars
returns the angle.
"""
if phi1 == phi2 and theta1 == theta2:
gamma = 0 # psrA=psrB. Skipping this rotation is same as (gamma=0)
else:
gamma = atan( sin(theta2)*sin(phi2-phi1)/(cos(theta1)*sin(theta2)*cos(phi1-phi2) - sin(theta1)*cos(theta2)))
if (cos(gamma)*cos(theta1)*sin(theta2)*cos(phi1-phi2) + sin(gamma)*sin(theta2)*sin(phi2-phi1) - cos(gamma)*sin(theta1)*cos(theta2)) >= 0:
return gamma
else:
return pi + gamma
# Part 3: Rotated Gammas: Dipole
def rotated_dipole(m,phi1,phi2,theta1,theta2):
"""
Rotates dipole, i.e. l=1 overlap reduction functions, into the cosmic rest-frame
"""
l=1
zeta = calczeta(phi1, phi2, theta1, theta2)
dipole_gammas=[dipole_Gammas(-1,zeta),dipole_Gammas(0,zeta),dipole_Gammas(1,zeta)]
rotated_gamma=0
for i in range(2*l+1):
rotated_gamma += Dlmk(l,m,i-l,phi1,phi2,theta1,theta2).conjugate()*dipole_gammas[i] #as per eq 73 in Allen&Ottewill'97
return rotated_gamma
def rotated_quadrupole(m,phi1,phi2,theta1,theta2):
"""
    Rotates quadrupole, i.e. l=2 overlap reduction functions, into the cosmic rest-frame
"""
l=2
zeta = calczeta(phi1, phi2, theta1, theta2)
quad_gammas=[quadrupole_Gammas(-2,zeta),quadrupole_Gammas(-1,zeta),quadrupole_Gammas(0,zeta),quadrupole_Gammas(1,zeta),quadrupole_Gammas(2,zeta)]
rotated_gamma=0
for i in range(2*l+1):
rotated_gamma += Dlmk(l,m,i-l,phi1,phi2,theta1,theta2).conjugate()*quad_gammas[i]
return rotated_gamma
def any_Gamma_comp(phi,theta,m,l,phi1,phi2,theta1,theta2):
"""
Evaluation of any gamma in the *computational frame*. phi and theta are the variables being integrated over
whereas phi1,phi2,theta1,theta2 are the coordinates of the pulsar pairs and are just used to
compute zeta. Normalisation such that c00*\Gamma00=1 at zeta=0.
"""
zeta = calczeta(phi1, phi2, theta1, theta2)
ylm=sp.sph_harm(m,l,phi,theta) #anisotropy
numerator=-0.25*sin(theta)*(1.-cos(theta))*(sin(zeta)*sin(zeta)*sin(phi)*sin(phi)-sin(zeta)*sin(zeta)*cos(theta)*cos(theta)*cos(phi)*cos(phi)-cos(zeta)*cos(zeta)*sin(theta)*sin(theta)+2*sin(zeta)*cos(zeta)*sin(theta)*cos(theta)*cos(phi))
deno=1.+sin(zeta)*sin(theta)*cos(phi)+cos(zeta)*cos(theta)
integrand=norm*numerator*ylm/deno
if zeta==0: return 2*integrand.real
return integrand.real #this answer is necessarily real-valued, as it is calculated in comp. frame. with no pulsar term
def int_Gamma_lm(m,l,phi1,phi2,theta1,theta2):
"""
Integrates any_Gamma_comp function from 0..pi and 0..2pi. Special cases with analytical solutions
(l=0,1,2) are handled separately to not waste computing time.
"""
zeta = calczeta(phi1, phi2, theta1, theta2)
if l==0:
return Gamma00(zeta)
if l==1:
return dipole_Gammas(m,zeta)
if l==2:
return quadrupole_Gammas(m,zeta)
else: result=dblquad(any_Gamma_comp,0,pi,lambda x: 0,lambda x: 2*pi,args=(m,l,phi1,phi2,theta1,theta2))[0]
return result
def rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml):
"""
This function takes any gamma in the computational frame and rotates it to the
    cosmic frame. Special cases exist for the dipole and quadrupole, as these have
been found analytically.
"""
rotated_gamma = 0
for i in range(2*l+1):
rotated_gamma += Dlmk(l,m,i-l,phi1,phi2,theta1,theta2).conjugate()*gamma_ml[i]
return rotated_gamma
def real_rotated_Gammas(m,l,phi1,phi2,theta1,theta2,gamma_ml):
"""
This function returns the real-valued form of the Overlap Reduction Functions,
see Eqs 47 in Mingarelli et al, 2013.
"""
if m>0:
ans=(1./sqrt(2))*(rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml)+(-1)**m*rotated_Gamma_ml(-m,l,phi1,phi2,theta1,theta2,gamma_ml))
return ans.real
if m==0:
return rotated_Gamma_ml(0,l,phi1,phi2,theta1,theta2,gamma_ml).real
if m<0:
ans=(1./sqrt(2)/complex(0.,1))*(rotated_Gamma_ml(-m,l,phi1,phi2,theta1,theta2,gamma_ml)-(-1)**m*rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml))
return ans.real
#testing
if __name__ == "__main__":
l=1
phi1 = 0.3
phi2 = 0.7
theta1 = 0.2
theta2 = 1.0
p1 = np.array([sin(theta1)*cos(phi1), sin(theta1)*sin(phi1),cos(theta1)])
p2 = np.array([sin(theta2)*cos(phi2), sin(theta2)*sin(phi2),cos(theta2)])
rot_Gs=[]
plus_gamma_ml = [] #this will hold the list of gammas evaluated at a specific value of phi1,2, and theta1,2.
neg_gamma_ml = []
gamma_ml = []
#pre-calculate all the gammas so this gets done only once. Need all the values to execute rotation codes.
for i in range(l+1):
intg_gamma=int_Gamma_lm(i,l,phi1,phi2,theta1,theta2)
neg_intg_gamma=(-1)**(i)*intg_gamma # just (-1)^m Gamma_ml since this is in the computational frame
plus_gamma_ml.append(intg_gamma) #all of the gammas from Gamma^-m_l --> Gamma ^m_l
neg_gamma_ml.append(neg_intg_gamma) #get the neg m values via complex conjugates
neg_gamma_ml=neg_gamma_ml[1:] #this makes sure we don't have 0 twice
rev_neg_gamma_ml=neg_gamma_ml[::-1] #reverse direction of list, now runs from -m .. 0
gamma_ml=rev_neg_gamma_ml+plus_gamma_ml
#print gamma_ml #just 1 list from -m..m, this concatenates the lists.
for m in range(-l,l+1):
rot_Gs.append(real_rotated_Gammas(m,l,phi1,phi2,theta1,theta2,gamma_ml))
result_file = open("myFile"+str(l)+".txt", "a") # the a+ allows you to create the file and write to it.
result_file.write('{0} {1} {2} \n'.format(m, l, rot_Gs[m+l])) #writes data to 0th, 1st and 2nd column, resp.
result_file.close()
|
vhaasterenREPO_NAMEpiccardPATH_START.@piccard_extracted@piccard-master@[email protected]@.PATH_END.py
|
{
"filename": "mem.py",
"repo_name": "vortex-exoplanet/VIP",
"repo_path": "VIP_extracted/VIP-master/vip_hci/config/mem.py",
"type": "Python"
}
|
#! /usr/bin/env python
"""
System memory related functions
"""
__author__ = "Carlos Alberto Gomez Gonzalez"
__all__ = ["check_enough_memory", "get_available_memory"]
from psutil import virtual_memory
def get_available_memory(verbose=True):
"""
Get the available memory in bytes.
Parameters
----------
verbose : bool, optional
Print out the total/available memory
Returns
-------
available_memory : int
The available memory in bytes.
"""
mem = virtual_memory()
if verbose:
print("System total memory = {:.3f} GB".format(mem.total / 1e9))
print("System available memory = {:.3f} GB".format(mem.available / 1e9))
return mem.available
def check_enough_memory(
input_bytes, factor=1, raise_error=True, error_msg="", verbose=True
):
"""
Check if ``input_bytes`` are larger than system's available memory times
``factor``. This function is used to check the inputs (largest ones such as
multi-dimensional cubes) of algorithms and avoid system/Python crashes or
heavy swapping.
Parameters
----------
input_bytes : float
The size in bytes of the inputs of a given function.
factor : float, optional
Scales how much memory is needed in terms of the size of input_bytes.
raise_error : bool, optional
If True, a RuntimeError is raised when the condition is not met.
error_msg : str, optional
[raise_error=True] To be appended to the message of the RuntimeError.
verbose : bool, optional
If True, information about the available memory is printed out.
"""
available_memory = get_available_memory(verbose=verbose)
if input_bytes > factor * available_memory:
if raise_error:
raise RuntimeError(
"Input is larger than available system memory" + error_msg
)
return False
else:
return True
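# Editorial usage sketch (not part of the original VIP source): gate a large cube
# allocation on the available system memory; the cube shape is an arbitrary assumption.
if __name__ == "__main__":
    import numpy as np
    cube_shape = (100, 1024, 1024)              # hypothetical image cube
    cube_nbytes = int(np.prod(cube_shape)) * 8  # float64 bytes
    if check_enough_memory(cube_nbytes, factor=2.5, raise_error=False, verbose=True):
        cube = np.zeros(cube_shape, dtype=np.float64)
        print("Allocated a cube of {:.2f} GB".format(cube.nbytes / 1e9))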
|
vortex-exoplanetREPO_NAMEVIPPATH_START.@VIP_extracted@VIP-master@vip_hci@[email protected]@.PATH_END.py
|
{
"filename": "_textsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter/_textsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="textsrc", parent_name="scatter", **kwargs):
super(TextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter@[email protected]_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/violin/_line.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "violin"
_path_str = "violin.line"
_valid_props = {"color", "width"}
# color
# -----
@property
def color(self):
"""
Sets the color of line bounding the violin(s).
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of line bounding the violin(s).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the color of line bounding the violin(s).
width
Sets the width (in px) of line bounding the violin(s).
"""
def __init__(self, arg=None, color=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.violin.Line`
color
Sets the color of line bounding the violin(s).
width
Sets the width (in px) of line bounding the violin(s).
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.violin.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.violin.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
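# Editorial usage sketch (not part of the original Plotly source): construct a violin
# outline Line with an explicit color and width and inspect its JSON representation.
if __name__ == "__main__":
    line = Line(color="rgb(40, 40, 40)", width=1.5)
    print(line.to_plotly_json())  # e.g. {'color': 'rgb(40, 40, 40)', 'width': 1.5}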
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@violin@[email protected]_END.py
|
{
"filename": "StreamKicks.ipynb",
"repo_name": "jobovy/stream-stream",
"repo_path": "stream-stream_extracted/stream-stream-main/py/StreamKicks.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import os, os.path
import numpy
from galpy.df import impulse_deltav_plummer, impulse_deltav_plummerstream
import seaborn as sns
from galpy.util import bovy_plot, bovy_conversion
from stream2_util import R0, V0
from matplotlib import pyplot
from matplotlib.ticker import NullFormatter
nullfmt= NullFormatter()
%pylab inline
save_figures= False
```
Populating the interactive namespace from numpy and matplotlib
## Simple analytic calculations of stream-stream interactions
```python
# Setup
GM=10.**-2./bovy_conversion.mass_in_1010msol(V0,R0)
rs= 0.625/R0
b= rs
stream_phi= numpy.linspace(-numpy.pi/2.,numpy.pi/2.,201)
stream_r= 10./R0
stream_v= 220./V0
x_gc= stream_r*stream_phi
v_gc= numpy.tile([0.000001,stream_v,0.000001],(201,1))
w= numpy.array([0.,132.,176])/V0
wmag= numpy.sqrt(numpy.sum(w**2.))
```
```python
deltav_curved_r = impulse_deltav_plummer(v_gc,x_gc,-b,w,GM,rs)
```
```python
dt= 2./wmag/V0*bovy_conversion.freq_in_kmskpc(V0,R0)
deltav_curved_stream_r_1kpc = impulse_deltav_plummerstream(v_gc,x_gc,-b,w,
lambda t: GM/dt,rs,-dt/2.,dt/2.)
dt= 4./wmag/V0*bovy_conversion.freq_in_kmskpc(V0,R0)
deltav_curved_stream_r_2kpc = impulse_deltav_plummerstream(v_gc,x_gc,-b,w,
lambda t: GM/dt,rs,-dt/2.,dt/2.)
dt= 10./wmag/V0*bovy_conversion.freq_in_kmskpc(V0,R0)
deltav_curved_stream_r_5kpc = impulse_deltav_plummerstream(v_gc,x_gc,-b,w,
lambda t: GM/dt,rs,-dt/2.,dt/2.)
dt= 40./wmag/V0*bovy_conversion.freq_in_kmskpc(V0,R0)
deltav_curved_stream_r_20kpc = impulse_deltav_plummerstream(v_gc,x_gc,-b,w,
lambda t: GM/dt,rs,-dt/2.,dt/2.)
```
```python
deltav_curved_streamplus_r = impulse_deltav_plummer(v_gc,x_gc,-b,w,GM/2.,rs)\
+deltav_curved_stream_r_2kpc/2.
```
```python
bovy_plot.bovy_print(axes_labelsize=18.,xtick_labelsize=14.,ytick_labelsize=14.)
figsize(4,6)
subplot(2,1,1)
bovy_plot.bovy_plot(stream_phi/numpy.pi*180.,
deltav_curved_r[:,0]*V0,
color='k',lw=3.,
xrange=[-89.,89.],
yrange=[-3.75,1.],
gcf=True,zorder=0,
# xlabel=r'$\mathrm{angle\ along\ stream\,(deg)}$',
ylabel=r'$\delta v_x\,(\mathrm{km\,s}^{-1})$')
#bovy_plot.bovy_text(r'$v_x$',top_left=True,size=17.)
plot(stream_phi/numpy.pi*180.,deltav_curved_stream_r_1kpc[:,0]*V0,
color=sns.color_palette("colorblind")[2],zorder=4,lw=3.)
plot(stream_phi/numpy.pi*180.,deltav_curved_stream_r_5kpc[:,0]*V0,
color=sns.color_palette("colorblind")[1],zorder=3,lw=3.)
plot(stream_phi/numpy.pi*180.,deltav_curved_stream_r_20kpc[:,0]*V0,
color=sns.color_palette("colorblind")[3],zorder=2,lw=3.)
plot(stream_phi/numpy.pi*180.,deltav_curved_streamplus_r[:,0]*V0,
color=sns.color_palette("colorblind")[0],zorder=1,lw=3.)
# label
plot([stream_phi[41]/numpy.pi*180.,-60.],[deltav_curved_stream_r_20kpc[41,0]*V0,-2.],
lw=1.,color=sns.color_palette("colorblind")[3],zorder=0)
bovy_plot.bovy_text(-82.,-2.5,r'$\mathrm{20\ kpc\ arms}$',
size=17.,color=sns.color_palette("colorblind")[3])
plot([stream_phi[131]/numpy.pi*180.,40.],[deltav_curved_stream_r_5kpc[131,0]*V0,-2.],
lw=1.,color=sns.color_palette("colorblind")[1],zorder=0)
bovy_plot.bovy_text(22.,-2.5,r'$\mathrm{5\ kpc\ arms}$',
size=17.,color=sns.color_palette("colorblind")[1])
pyplot.gca().xaxis.set_major_formatter(nullfmt)
subplot(2,1,2)
bovy_plot.bovy_plot(stream_phi/numpy.pi*180.,
deltav_curved_r[:,1]*V0,
color='k',lw=3.,
xrange=[-89.,89.],
yrange=[-2.5,2.5],
gcf=True,zorder=0,
xlabel=r'$\mathrm{angle\ along\ stream\,(deg)}$',
ylabel=r'$\delta v_y\,(\mathrm{km\,s}^{-1})$')
#bovy_plot.bovy_text(r'$v_y$',top_left=True,size=17.)
plot(stream_phi/numpy.pi*180.,deltav_curved_stream_r_1kpc[:,1]*V0,
color=sns.color_palette("colorblind")[2],zorder=4,lw=3.)
plot(stream_phi/numpy.pi*180.,deltav_curved_stream_r_5kpc[:,1]*V0,
color=sns.color_palette("colorblind")[1],zorder=3,lw=3.)
plot(stream_phi/numpy.pi*180.,deltav_curved_stream_r_20kpc[:,1]*V0,
color=sns.color_palette("colorblind")[3],zorder=2,lw=3.)
plot(stream_phi/numpy.pi*180.,deltav_curved_streamplus_r[:,1]*V0,
color=sns.color_palette("colorblind")[0],zorder=1,lw=3.)
# label
plot([stream_phi[95]/numpy.pi*180.,30.],[deltav_curved_r[95,1]*V0,2.],
lw=1.,color='k',zorder=0)
bovy_plot.bovy_text(32.,1.45,r'$\mathrm{halo},$'+'\n'+r'$\mathrm{no\ arms}$',
size=17.,color='k')
plot([stream_phi[90]/numpy.pi*180.,20.],[deltav_curved_stream_r_1kpc[90,1]*V0,.85],
lw=1.,color=sns.color_palette("colorblind")[2],zorder=1)
bovy_plot.bovy_text(22.,0.65,r'$\mathrm{1\ kpc\ arms}$',
size=17.,color=sns.color_palette("colorblind")[2])
plot([stream_phi[112]/numpy.pi*180.,-13.],[deltav_curved_streamplus_r[112,1]*V0,-1.3],
lw=1.,color=sns.color_palette("colorblind")[0],zorder=5)
bovy_plot.bovy_text(-77.,-1.35,r'$\mathrm{halo}\ +$'+'\n'+r'$\mathrm{2\ kpc\ arms}$',
size=17.,color=sns.color_palette("colorblind")[0])
tight_layout()
if save_figures:
bovy_plot.bovy_end_print(os.path.join(os.getenv('PAPERSDIR'),'2015-stream-stream','fig1.pdf'))
```

```python
```
|
jobovyREPO_NAMEstream-streamPATH_START.@stream-stream_extracted@stream-stream-main@[email protected]@.PATH_END.py
|
{
"filename": "anthropic.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/chat/anthropic.ipynb",
"type": "Jupyter Notebook"
}
|
---
sidebar_label: Anthropic
---
# ChatAnthropic
This notebook provides a quick overview for getting started with Anthropic [chat models](/docs/concepts/chat_models). For detailed documentation of all ChatAnthropic features and configurations head to the [API reference](https://python.langchain.com/api_reference/anthropic/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html).
Anthropic has several chat models. You can find information about their latest models and their costs, context windows, and supported input types in the [Anthropic docs](https://docs.anthropic.com/en/docs/models-overview).
:::info AWS Bedrock and Google VertexAI
Note that certain Anthropic models can also be accessed via AWS Bedrock and Google VertexAI. See the [ChatBedrock](/docs/integrations/chat/bedrock/) and [ChatVertexAI](/docs/integrations/chat/google_vertex_ai_palm/) integrations to use Anthropic models via these services.
:::
## Overview
### Integration details
| Class | Package | Local | Serializable | [JS support](https://js.langchain.com/docs/integrations/chat/anthropic) | Package downloads | Package latest |
| :--- | :--- | :---: | :---: | :---: | :---: | :---: |
| [ChatAnthropic](https://python.langchain.com/api_reference/anthropic/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html) | [langchain-anthropic](https://python.langchain.com/api_reference/anthropic/index.html) | ❌ | beta | ✅ |  |  |
### Model features
| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | Native async | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| ✅ | ✅ | ❌ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | ❌ |
## Setup
To access Anthropic models you'll need to create an Anthropic account, get an API key, and install the `langchain-anthropic` integration package.
### Credentials
Head to https://console.anthropic.com/ to sign up for Anthropic and generate an API key. Once you've done this set the ANTHROPIC_API_KEY environment variable:
```python
import getpass
import os
if "ANTHROPIC_API_KEY" not in os.environ:
os.environ["ANTHROPIC_API_KEY"] = getpass.getpass("Enter your Anthropic API key: ")
```
If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:
```python
# os.environ["LANGSMITH_API_KEY"] = getpass.getpass("Enter your LangSmith API key: ")
# os.environ["LANGSMITH_TRACING"] = "true"
```
### Installation
The LangChain Anthropic integration lives in the `langchain-anthropic` package:
```python
%pip install -qU langchain-anthropic
```
## Instantiation
Now we can instantiate our model object and generate chat completions:
```python
from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(
model="claude-3-5-sonnet-20240620",
temperature=0,
max_tokens=1024,
timeout=None,
max_retries=2,
# other params...
)
```
## Invocation
```python
messages = [
(
"system",
"You are a helpful assistant that translates English to French. Translate the user sentence.",
),
("human", "I love programming."),
]
ai_msg = llm.invoke(messages)
ai_msg
```
AIMessage(content="J'adore la programmation.", response_metadata={'id': 'msg_018Nnu76krRPq8HvgKLW4F8T', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 29, 'output_tokens': 11}}, id='run-57e9295f-db8a-48dc-9619-babd2bedd891-0', usage_metadata={'input_tokens': 29, 'output_tokens': 11, 'total_tokens': 40})
```python
print(ai_msg.content)
```
J'adore la programmation.
## Chaining
We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:
```python
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are a helpful assistant that translates {input_language} to {output_language}.",
),
("human", "{input}"),
]
)
chain = prompt | llm
chain.invoke(
{
"input_language": "English",
"output_language": "German",
"input": "I love programming.",
}
)
```
AIMessage(content="Here's the German translation:\n\nIch liebe Programmieren.", response_metadata={'id': 'msg_01GhkRtQZUkA5Ge9hqmD8HGY', 'model': 'claude-3-5-sonnet-20240620', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 23, 'output_tokens': 18}}, id='run-da5906b4-b200-4e08-b81a-64d4453643b6-0', usage_metadata={'input_tokens': 23, 'output_tokens': 18, 'total_tokens': 41})
## Content blocks
One key difference to note between Anthropic models and most others is that the contents of a single Anthropic AI message can either be a single string or a **list of content blocks**. For example when an Anthropic model invokes a tool, the tool invocation is part of the message content (as well as being exposed in the standardized `AIMessage.tool_calls`):
```python
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
"""Get the current weather in a given location"""
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
llm_with_tools = llm.bind_tools([GetWeather])
ai_msg = llm_with_tools.invoke("Which city is hotter today: LA or NY?")
ai_msg.content
```
[{'text': "To answer this question, we'll need to check the current weather in both Los Angeles (LA) and New York (NY). I'll use the GetWeather function to retrieve this information for both cities.",
'type': 'text'},
{'id': 'toolu_01Ddzj5PkuZkrjF4tafzu54A',
'input': {'location': 'Los Angeles, CA'},
'name': 'GetWeather',
'type': 'tool_use'},
{'id': 'toolu_012kz4qHZQqD4qg8sFPeKqpP',
'input': {'location': 'New York, NY'},
'name': 'GetWeather',
'type': 'tool_use'}]
```python
ai_msg.tool_calls
```
[{'name': 'GetWeather',
'args': {'location': 'Los Angeles, CA'},
'id': 'toolu_01Ddzj5PkuZkrjF4tafzu54A'},
{'name': 'GetWeather',
'args': {'location': 'New York, NY'},
'id': 'toolu_012kz4qHZQqD4qg8sFPeKqpP'}]
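If you need the blocks themselves rather than the parsed tool calls, a small illustrative snippet (assuming the `ai_msg` from the cell above) can separate the plain-text blocks from the tool-use blocks:
```python
text_blocks = [b["text"] for b in ai_msg.content if isinstance(b, dict) and b.get("type") == "text"]
tool_use_blocks = [b for b in ai_msg.content if isinstance(b, dict) and b.get("type") == "tool_use"]

print(text_blocks)
print([b["name"] for b in tool_use_blocks])
```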
## API reference
For detailed documentation of all ChatAnthropic features and configurations head to the API reference: https://python.langchain.com/api_reference/anthropic/chat_models/langchain_anthropic.chat_models.ChatAnthropic.html
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@[email protected]@.PATH_END.py
|
{
"filename": "base.py",
"repo_name": "EranOfek/AstroPack",
"repo_path": "AstroPack_extracted/AstroPack-main/matlab/util/scripts/base.py",
"type": "Python"
}
|
#----------------------------------------------------------------------------
# Project: ULTRASAT
# Module: Common
# File: base.py
# Title: General utilities
# Author: Chen Tishler, 01/2022
# @Dan - UT & doc
#----------------------------------------------------------------------------
#
# ===========================================================================
#
# base.py - Base classes and definitions
#
#
# Classes in this file:
#
# Color
# Base
# Config
# IniFile
# Component
# Logger
#
#
# ===========================================================================
import os, sys, time, configparser
from datetime import datetime
#import psutil
from colorama import Fore, Back, Style
import colorama
colorama.init()
# ===========================================================================
# Global FileComm object used from msg_log() below
# It will be set in gcsifc.py
global_gui_com = None # Assign object of type FileComm()
def set_global_gui_com(com):
"""
    Set the global GUI communication object (a FileComm instance) used by msg_log()
"""
global global_gui_com
global_gui_com = com
# ===========================================================================
#
# ===========================================================================
class Color:
"""
Common RGB colors
"""
black = 0x000000
white = 0xffffff
red = 0x0000ff
green = 0x008000
blue = 0xff0000
gray = 0x808080
silver = 0xC0C0C0
yellow = 0xffff00
fuchsia = 0xff00ff
maroon = 0x000080
cyan = 0x00FFFF
aqua = 0x00FFFF
lime = 0x00FF00
olive = 0x808000
purple = 0x800080
teal = 0x008080
navy = 0x000080
def get_colorama_fore(color):
fore = ''
if color == Color.black:
fore = Fore.BLACK
elif color == Color.white:
fore = Fore.WHITE
elif color == Color.blue:
fore = Fore.BLUE
elif color == Color.red:
fore = Fore.RED
elif color == Color.green:
fore = Fore.GREEN
elif color == Color.yellow:
            fore = Fore.YELLOW
elif color == Color.purple:
fore = Fore.MAGENTA
else:
fore = Fore.BLUE
return fore
def get_colorama_back(color):
back = ''
if color == Color.black:
back = Back.BLACK
elif color == Color.white:
back = Back.WHITE
elif color == Color.blue:
back = Back.BLUE
elif color == Color.red:
back = Back.RED
elif color == Color.green:
back = Back.GREEN
elif color == Color.yellow:
back = Back.YELLOW
elif color == Color.purple:
back = Back.MAGENTA
return back
def colored_text(text, fore, back=None):
fore = Color.get_colorama_fore(fore)
back = Color.get_colorama_back(back)
s = fore + back + text + Style.RESET_ALL
return s
# ===========================================================================
#
# ===========================================================================
class LogLevel:
"""
See AstroPack.git/matlab/base/LogLevel.m
"""
Non = 0 # Log disabled, use it with setLogLevel()
Fatal = 1 # Fatal error, must terminate
Error = 2 # Error
Assert = 3 # Assert error
Warning = 4 # Warning
Info = 5 # General info
Verbose = 6 # Verbose info
Debug = 7 # Detailed debug
DebugEx = 8 # Very detailed debug
Perf = 9 # Performance timing
Test = 10 # Unit-Test
All = 11 # All, use it with setLogLevel()
log_level_str = {
LogLevel.Non: 'NON',
LogLevel.Fatal: 'FTL',
LogLevel.Error: 'ERR',
LogLevel.Assert: 'ASR',
LogLevel.Warning: 'WRN',
LogLevel.Info: 'INF',
LogLevel.Verbose: 'VRB',
LogLevel.Debug: 'DBG',
LogLevel.DebugEx: 'DBX',
LogLevel.Perf: 'PRF',
LogLevel.Test: 'TST',
LogLevel.All: 'ALL'
}
#
log_path = ''
class Logger:
"""
Simple logger class.
When file size is bigger than max_size, the file is renamed to '.old' and
a new file is started
"""
def __init__(self, path=None, fname=None):
super().__init__()
self.path = path
self.filename = None
self.filename_ex = None
self.base_filename = None
self.logfile = None
self.logfile_ex = None
self.gui_com = None
self.use_dt = True
self.use_pid = True
self.pid = os.getpid()
#self.process_name = psutil.Process(self.pid).name()
self.last_date = ''
# Special options
self.date_path = False
self.max_size = 0
self.rename_to_time = False
        if fname:
            self.init_log(path=path, fname=fname)
def init_log(self, path=None, fname=None):
"""
Initialize.
"""
# Log message to file
if path:
self.path = path
else:
if sys.platform == "win32":
self.path = 'c:/soc/log/'
else:
self.path = '/tmp/soc/log/'
#self.path = '/var/log/soc/incoming_alerts/lvc/log/'
if not os.path.exists(self.path):
os.makedirs(self.path)
if not fname:
fname = 'soc_default.log'
self.filename = os.path.join(self.path, fname)
self.logfile = open(self.filename, 'a')
if self.use_pid:
self.filename = os.path.join(self.path, str(os.getpid()) + '.log')
self.filename_ex = os.path.join(self.path, str(os.getpid()) + '_ex.log')
else:
self.filename = os.path.join(self.path, fname + '.log')
self.filename_ex = os.path.join(self.path, fname + '_ex.log')
self.logfile = open(self.filename, 'a')
self.logfile_ex = open(self.filename_ex, 'a')
def msg_log(self, msg, type=None, comp=None, use_dt=None, dt=None, gui=None, gui_com=None, color=0, bkg=0xffffff, ex=None):
# Write message to logfile.
if not self.filename:
self.init_log()
if not use_dt:
use_dt = self.use_dt
#
msg = self.get_msg_log_text(msg=msg, type=type, comp=comp, use_dt=use_dt, dt=dt, ex=ex)
# @Todo
if self.date_path:
fdate = datetime.now().strftime('%Y/%m/%d')
if fdate != self.last_date:
fpath = os.path.join(self.path, fdate)
if not os.path.exists(fpath):
os.makedirs(fpath)
self.filename = os.path.join(fpath, self.base_filename)
if color and color != 0:
print(Color.get_colorama_fore(color) + msg + Style.RESET_ALL)
else:
print(msg)
if self.logfile:
self.logfile.write(msg)
self.logfile.write("\n")
self.logfile.flush()
# Check file size limit, then rename to '.old' and create new file
if self.max_size > 0:
size = self.logfile.tell()
if size > self.max_size:
self.logfile.close()
if self.rename_to_time:
old_filename = self.filename[:-3] + '.' + datetime.now().strftime('%y_%m_%d__%H_%M_%S')
else:
old_filename = self.filename[:-3] + '.old'
if os.path.exists(old_filename):
os.remove(old_filename)
os.rename(self.filename, old_filename)
self.logfile = open(self.filename, 'a')
#
if ex and self.logfile_ex:
self.logfile_ex.write(msg)
self.logfile_ex.write("\n")
self.logfile_ex.flush()
# Send log message go GUI
if gui:
if not self.gui_com:
self.gui_com = global_gui_com
if gui_com:
params = {'Text': msg, 'Color': color, 'Bkg': bkg}
gui_com.send_yml_cmd('Log', params)
def get_msg_log_text(self, msg, type='', comp='', use_dt=True, dt=None, ex=None):
# Prepare message log text from its fields.
if not msg:
use_dt = False
if type:
msg = '[{}] {}'.format(type, msg)
if comp:
msg = '[{}] {}'.format(comp, msg)
if ex:
try:
msg += ' - exception: ' + str(ex)
            except Exception:
msg += ' - exception: (no message property)'
if use_dt:
if not dt:
dt = datetime.now()
            if self.use_pid:
                msg = ('[%05d] ' % self.pid) + dt.strftime('%d/%m/%y %H:%M:%S.%f')[:-3] + ' ' + msg
            else:
                msg = dt.strftime('%d/%m/%y %H:%M:%S.%f')[:-3] + ' ' + msg
return msg
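# Usage sketch (illustrative): enable size-based rotation so that msg_log()
# renames a full log file to '.old' (or to a timestamped name when
# rename_to_time is True) and reopens a fresh file.
#   lg = Logger()
#   lg.init_log(fname='service.log')
#   lg.max_size = 1024 * 1024      # rotate once the file grows past ~1 MB
#   lg.msg_log('service started')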
def get_log_level_str(level):
"""
:param level:
:return:
"""
if not level or level == '':
level = LogLevel.Info
if level in log_level_str:
text = log_level_str[level]
else:
text = str(level)
return text
# ===========================================================================
# msg_log(...)
# ===========================================================================
default_logger = None
def init_log(path=None, fname=None):
if not fname:
fname = 'log'
global default_logger
if not default_logger:
default_logger = Logger(path=path, fname=fname)
return default_logger
def msg_log(msg, logger=None, type=None, comp=None, use_dt=None, dt=None, gui=None, gui_com=None, color=0, bkg=0xffffff, ex=None):
#
global default_logger
if not logger:
if not default_logger:
init_log()
logger = default_logger
logger.msg_log(msg, type=type, comp=comp, use_dt=use_dt, dt=dt, gui=gui, gui_com=gui_com, color=color, bkg=bkg, ex=ex)
# ===========================================================================
#
# ===========================================================================
class Base:
"""
Base class for all objects.
"""
def __init__(self):
self.name = ''
self.base_gui_com = None
self.log_to_gui = False
self.root_path = os.getenv('ULTRASAT_PATH')
self.config_path = os.path.join(self.root_path, 'config')
def log(self, msg, type='', comp='', color=0, bkg=0, columns=None, use_dt=True, dt=None, ex=None):
"""
Write log message with name and optional colors
:param msg:
:param type:
:param comp:
:param color:
:param bkg:
:param columns:
:param use_dt:
:param dt:
:param ex:
:return:
"""
if comp == '':
comp = self.name
msg_log(msg, comp=comp, type=type, gui=self.log_to_gui, gui_com=self.base_gui_com, color=color, bkg=bkg, use_dt=use_dt, dt=dt, ex=ex)
# ===========================================================================
#
# ===========================================================================
class Config(Base):
"""
Configuration class, based on YML files.
Required packages: yaml
"""
def __init__(self):
super().__init__()
self.name = 'Config'
self.filename = ''
self.yml = None
self.data = None
def load(self, filename=''):
"""
Load configuration file.
:param filename:
:return:
"""
if filename == '':
path = os.getenv('ULTRASAT_PATH')
if not path or path == '':
path = 'd:/ultrasat/ultrasat.git/python/prj/src/gcs/'
if sys.platform == "win32":
filename = os.path.join(path, 'python/prj/src/gcs/gcs_conf_win.yml')
else:
filename = os.path.join(path, 'python/prj/src/gcs/gcs_conf.yml')
print('Config.load: %s' % filename)
self.filename = filename
self.data = yaml_utils.yaml_file_to_obj(filename)
config_ = None
@staticmethod
def get_config():
"""
Create/return singleton configuration object.
"""
        if not Config.config_:
            Config.config_ = Config()
            Config.config_.load()
        return Config.config_
# ===========================================================================
#
# ===========================================================================
class IniFile(Base):
"""
Simple INI file read/write.
"""
    def __init__(self, filename=''):
        super().__init__()
        self.filename = ''
        self.ini = None
if filename != '':
self.load(filename)
def load(self, filename: str):
"""
Load INI file to self.ini
:param filename:
:return:
"""
self.filename = filename
self.ini = configparser.ConfigParser()
self.ini.read(self.filename)
#print(config['DEFAULT']['path']) # -> "/path/name/"
#config['DEFAULT']['path'] = '/var/shared/' # update
#config['DEFAULT']['default_message'] = 'Hey! help me!!' # create
def save(self):
"""
Save to file.
"""
with open(self.filename, 'w') as f: # save
self.ini.write(f)
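# Usage sketch (illustrative): read an INI file, update a value and write it back.
#   ini = IniFile('settings.ini')
#   ini.ini['DEFAULT']['path'] = '/var/shared/'
#   ini.save()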
# ===========================================================================
#
# ===========================================================================
class Component(Base):
"""
Parent class for all components.
"""
def __init__(self):
super().__init__()
self.name = 'Component' # Name string
self.owner = None # Indicates the component that is responsible for streaming and freeing this component
self.uuid = None # Global unique ID, generated with java.util.UUID.randomUUID()
self.tag = None # Optional tag (i.e. for events handling)
        self.config = None # Configuration, default is system configuration
        self.logger = None # MsgLogger, default is system logger
self.is_utc = True #
self.debug_mode = False # DebugMode
# By default use system log and configuration
#self.logger = MsgLogger.getSingleton()
#self.config = Configuration.getSingleton()
def make_uuid(self):
"""
(re)Generate unique ID for each element in object
"""
self.uuid = system.new_uuid()
return self.uuid
def need_uuid(self):
"""
Generate unique ID only if not empty.
"""
        if not self.uuid:
self.make_uuid()
return self.uuid
# ===========================================================================
#
# ===========================================================================
class Stopwatch:
"""
"""
def __init__(self, enable=True, interval=0, delay=0, auto_restart=True, first_arrived=False):
"""
:param enable:
:param interval:
:param delay:
:param auto_restart:
:param first_arrived:
"""
self.start_time = 0
self.stop_time = 0
self.elapsed_time = 0
self.interval = 0
self.enabled = False
self.first_arrived = first_arrived
self.auto_restart = auto_restart
if enable:
self.start(interval=interval, delay=delay)
def start(self, interval=0, delay=0):
"""
Start.
:param interval: Interval in seconds
:param delay: Optional delay in seconds
:return:
"""
if interval > 0:
self.interval = interval
if delay > 0:
self.start_time = self.get_time() + delay
else:
self.start_time = self.get_time()
self.enabled = True
def stop(self):
"""
        Stop, return elapsed time if it was enabled, or None
:return:
"""
if self.enabled:
self.stop_time = self.get_time()
self.elapsed_time = self.stop_time - self.start_time
self.enabled = False
return self.elapsed_time
else:
return None
def elapsed(self):
"""
Get elapsed time since last start.
:return: elapsed time in seconds
"""
if self.enabled:
self.elapsed_time = self.get_time() - self.start_time
return self.elapsed_time
else:
return 0
def arrived(self, once=True, restart=False, stop=False):
"""
Check if time has arrived since last start.
:param once:
:param restart:
:param stop:
:return:
"""
result = False
if self.first_arrived:
self.first_arrived = False
result = True
elif self.enabled:
elapsed = self.elapsed()
if elapsed > self.interval:
result = True
if restart or self.auto_restart:
self.start()
elif stop or once:
self.stop()
return result
def get_time(self):
"""
Get current time in seconds, as time.time()
@Todo - Discuss with Dan time simulator @Dan
:return: time in seconds as time.time()
"""
return time.time()
# ===========================================================================
#
# ===========================================================================
def debug_stopwatch():
"""
Debug StopWatch
:return:
"""
sw1 = Stopwatch(interval=1, enable=True)
sw2 = Stopwatch(interval=2, enable=True)
sw3 = Stopwatch()
while True:
if sw1.arrived(restart=True):
print('sw1 arrived: {}'.format(sw1.elapsed_time))
elapsed = sw2.elapsed()
if elapsed > 2:
print('sw2 elapsed: {}'.format(elapsed))
sw2.start()
sw3.start(interval=0.5)
if sw3.arrived(stop=True):
            print('sw3 arrived: {}'.format(sw3.elapsed_time))
# ===========================================================================
#
# ===========================================================================
def debug():
# Log
init_log()
msg_log('log file: ' + default_logger.filename)
#debug_stopwatch()
# Base
b1 = Base()
b1.log('Log message black', color=Color.black)
b1.log('Log message blue', color=Color.blue)
b1.log('Log message red', color=Color.red)
# Config
conf = Config()
conf.load()
c2 = conf.get_config()
print(c2.data.__dict__)
assert(c2.data.Interface.MsgInPath != '')
# IniFile
ini = IniFile('./test.ini')
assert(ini.ini['Test1']['Param1'] != '')
# Component
comp = Component()
assert(comp.need_uuid() != '')
# Logger
lg = Logger('./test_logger.log')
lg.msg_log('log msg 1')
lg.msg_log('log msg 2')
return True
if __name__ == '__main__':
debug()
|
EranOfekREPO_NAMEAstroPackPATH_START.@AstroPack_extracted@AstroPack-main@matlab@util@[email protected]@.PATH_END.py
|
{
"filename": "calibration.py",
"repo_name": "ACCarnall/bagpipes",
"repo_path": "bagpipes_extracted/bagpipes-master/bagpipes/fitting/calibration.py",
"type": "Python"
}
|
import numpy as np
from numpy.polynomial.chebyshev import chebval, chebfit
class calib_model(object):
""" A class for modelling spectrophotometric calibration.
Parameters
----------
calib_dict : dictionary
Contains the desired parameters for the calibration model.
spectrum : array_like
The spectral data to which the calibration model is applied.
spectral_model : array_like
The physical model which is being fitted to the data.
"""
def __init__(self, calib_dict, spectrum, spectral_model):
self.param = calib_dict
self.y = spectrum[:, 1]
self.y_err = spectrum[:, 2]
self.y_model = spectral_model[:, 1]
self.wavs = spectrum[:, 0]
# Transform the spectral wavelengths to the interval (-1, 1).
x = spectrum[:, 0]
self.x = 2.*(x - (x[0] + (x[-1] - x[0])/2.))/(x[-1] - x[0])
# Call the appropriate method to calculate the calibration.
getattr(self, self.param["type"])()
def polynomial_bayesian(self):
""" Bayesian fitting of Chebyshev calibration polynomial. """
coefs = []
while str(len(coefs)) in list(self.param):
coefs.append(self.param[str(len(coefs))])
self.poly_coefs = np.array(coefs)
self.model = chebval(self.x, coefs)
def double_polynomial_bayesian(self):
""" Bayesian fitting of Chebyshev calibration polynomial. """
x_blue = self.wavs[self.wavs < self.param["wav_cut"]]
x_red = self.wavs[self.wavs > self.param["wav_cut"]]
self.x_blue = 2.*(x_blue - (x_blue[0] + (x_blue[-1] - x_blue[0])/2.))
self.x_blue /= (x_blue[-1] - x_blue[0])
self.x_red = 2.*(x_red - (x_red[0] + (x_red[-1] - x_red[0])/2.))
self.x_red /= (x_red[-1] - x_red[0])
blue_coefs = []
red_coefs = []
while "blue" + str(len(blue_coefs)) in list(self.param):
blue_coefs.append(self.param["blue" + str(len(blue_coefs))])
while "red" + str(len(red_coefs)) in list(self.param):
red_coefs.append(self.param["red" + str(len(red_coefs))])
self.blue_poly_coefs = np.array(blue_coefs)
self.red_poly_coefs = np.array(red_coefs)
model = np.zeros_like(self.x)
model[self.wavs < self.param["wav_cut"]] = chebval(self.x_blue,
blue_coefs)
model[self.wavs > self.param["wav_cut"]] = chebval(self.x_red,
red_coefs)
self.model = model
def polynomial_max_like(self):
order = int(self.param["order"])
mask = (self.y == 0.)
ratio = self.y_model/self.y
errs = np.abs(self.y_err*self.y_model/self.y**2)
ratio[mask] = 0.
errs[mask] = 9.9*10**99
coefs = chebfit(self.x, ratio, order, w=1./errs)
self.poly_coefs = np.array(coefs)
self.model = chebval(self.x, coefs)
def multi_polynomial_max_like(self):
slice_order = int(self.param["slice_order"])
n_slices = int(self.param["n_slices"])
sect_length = (self.x[-1] - self.x[0])/n_slices
poly = np.zeros_like(self.x)
for i in range(n_slices):
mask = (self.x >= self.x[0] + sect_length*i) & (self.x < self.x[0] + sect_length*(i+1))
if i == n_slices - 1:
mask = (self.x >= self.x[0] + sect_length*i) & (self.x <= self.x[0] + sect_length*(i+1))
ratio = self.y_model[mask]/self.y[mask]
errs = np.abs(self.y_err[mask]*self.y_model[mask]/self.y[mask]**2)
coefs = chebfit(self.x[mask], ratio, slice_order, w=1./errs)
model = chebval(self.x[mask], coefs)
poly[mask] = model
self.model = poly
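# Minimal usage sketch: fit a second-order maximum-likelihood calibration
# polynomial to a synthetic spectrum. The arrays below are placeholders
# purely for illustration; in bagpipes the spectrum and spectral_model
# come from the fitting machinery.
if __name__ == "__main__":
    wavs = np.linspace(4000., 7000., 500)
    flux = np.ones_like(wavs)
    errs = 0.01*np.ones_like(wavs)
    spectrum = np.c_[wavs, flux, errs]          # columns: wavelength, flux, flux error
    spectral_model = np.c_[wavs, 1.1*flux]      # columns: wavelength, model flux
    calib = calib_model({"type": "polynomial_max_like", "order": 2},
                        spectrum, spectral_model)
    # calib.model holds the multiplicative calibration sampled on the wavelength grid
    print(calib.poly_coefs, calib.model[:5])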
|
ACCarnallREPO_NAMEbagpipesPATH_START.@bagpipes_extracted@bagpipes-master@bagpipes@[email protected]@.PATH_END.py
|
{
"filename": "_showgrid.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/xaxis/_showgrid.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowgridValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="showgrid", parent_name="layout.xaxis", **kwargs):
super(ShowgridValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "ticks"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@xaxis@[email protected]_END.py
|
{
"filename": "asfgrid.py",
"repo_name": "hposborn/MonoTools",
"repo_path": "MonoTools_extracted/MonoTools-main/MonoTools/stellar/isoclassify/isoclassify/direct/asfgrid.py",
"type": "Python"
}
|
#! /usr/bin/env python
# --------------------------------------------------------------
# The asfgrid is a python module to compute asteroseismic
# parameters for a star with given stellar parameters and vice versa.
# Copyright (C) 2015 Sanjib Sharma, Dennis Stello
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------
"""
A module to compute asteroseismic parameters for
a star with given stellar parameters and vice versa.
Author Sanjib Sharma <bugsanjib at gmail com>
Copyright (c) 2015 Sanjib Sharma, Dennis Stello
License: AGPL see <http://www.gnu.org/licenses/>.
Data files should be in current directory.
To run as a script
$./asfgrid.py --help
To use as module
::
>>> import asfgrid
>>> evstate=[1,1]
>>> logz=[-1.97,-1.98]
>>> teff=[4659.8,4903.2]
>>> dnu=[8.81,13.1]
>>> numax=[92.36,157.3]
>>> s=asfgrid.Seism()
>>> mass,radius=s.get_mass_radius(evstate,logz,teff,dnu,numax)
>>> print mass,radius
>>> logg=s.mr2logg(mass,radius)
>>> dnu,numax,fdnu=s.get_dnu_numax(evstate,logz,teff,mass,mass,logg)
>>> print dnu,numax
"""
import sys
import ebf
import numpy as np
import scipy.interpolate
__version__ = "0.0.5"
def _tocsv(filename,data,basekey=None,keylist=None,delimiter=', '):
"""
Write a dict or npstruct to a csv file
"""
if type(data) == dict:
with open(filename,'w') as fp:
if keylist==None:
keylist=data.keys()
if basekey == None:
nsize=data[keylist[0]].size
else:
nsize=data[basekey].size
keylist=[key for key in keylist if data[key].size==nsize]
# s=str(keylist)
# s=s[1:-1].replace("'",'')+'\n'
s=delimiter.join([str(key) for key in keylist])+'\n'
fp.write(s)
for i in range(data[keylist[0]].size):
s=', '.join([str(data[key][i]) for key in keylist])+'\n'
fp.write(s)
else:
with open(filename,'w') as fp:
if keylist==None:
s=str(data.dtype.names)
s=s[1:-1].replace("'",'')+'\n'
fp.write(s)
for temp in data:
s=str(temp)
s=s[1:-1].replace("'",'')+'\n'
fp.write(s)
else:
s=str(keylist)
s=s[1:-1].replace("'",'')+'\n'
fp.write(s)
for i in range(data[keylist[0]].size):
s=', '.join([str(data[key][i]) for key in keylist])+'\n'
fp.write(s)
print 'Written file:',filename
class _IGrid():
def __init__(self,data1,keys):
data=np.resize(data1,data1.size)
data.sort(order=keys)
self.keys=keys
self.vnames=[temp for temp in data.dtype.names if temp not in self.keys]
self.points=[np.unique(data[key]) for key in self.keys]
self.values={}
for vname in self.vnames:
self.values[vname]=data[vname].reshape([point.size for point in self.points])
self.points1=tuple([data[key] for key in self.keys])
self.values1={}
for vname in self.vnames:
self.values1[vname]=data[vname]
def homogenize_arrays(self,xi):
xj=[np.asarray(t) for t in xi]
temp=xj[0]
for t in xj:
temp=temp+t
xj=[np.zeros_like(temp)+t for t in xj]
return xj
def get_values(self,vname,xi,fill_value='nearest'):
fill_value1=np.nan
if type(xi) == list:
xi=np.array(self.homogenize_arrays(xi)).transpose()
t1=scipy.interpolate.interpn(self.points,self.values[vname],xi,bounds_error=False,fill_value=fill_value1)
if fill_value == 'nearest':
ind=np.where(np.isfinite(t1)==False)[0]
if ind.size>0:
print 'outside interp range',ind.size,' out of ',t1.size
if (xi.ndim == 1)&(len(self.keys)>1):
xi=xi.reshape([1,xi.size])
t1[ind]=scipy.interpolate.griddata(self.points1,self.values1[vname],xi[ind],method='nearest')
return t1
class Seism():
def __init__(self,datadir=None,z_solar=0.019):
# Change this to appropriate path
if datadir is None:
self.datadir=''
# self.datadir='/work1/sharma/Projects/kepler/data/dnu_grid6/'
else:
self.datadir=datadir
# set solar reference values
# self.radius= 6.958e8
# self.mass=1.99e30
# sun logg=np.log10(100.0*6.67259e-11*1.989e30/(6.958e8*6.958e8))
self.logg_solar=4.43796037457 # cgs unit
self.teff_solar=5777.0 # kelvin
self.numax_solar=3090.0 # micro Hz 3090+-30
# cannot change this
self.dnu_solar=135.1 # micro Hz 135.1
self.z_solar=z_solar # solar metallicity value
data1=ebf.read(self.datadir+'grid_interp1.ebf','/data')
data2=ebf.read(self.datadir+'grid_interp2.ebf','/data')
self.igrid1=_IGrid(data1,['evstate','logz','mass','logg_teff'])
self.igrid2=_IGrid(data2,['evstate','logz','mass_nu','logg_teff'])
def logg2r(self,logg,mass):
"""
From logg and mass compute radius
"""
return np.power(10.0,((self.logg_solar-logg)*0.5))*np.sqrt(mass)
def logg2m(self,logg,radius):
"""
From logg and radius compute mass
"""
return np.power(10.0,logg-self.logg_solar)*radius*radius
def logg2numax(self,logg,teff):
"""
From logg and teff compute numax with numax_solar=3090.0 microHz
"""
return (self.numax_solar)*np.power(10.0,logg-self.logg_solar)/(np.power(teff/self.teff_solar,0.5))
def numax2logg(self,numax,teff):
"""
        From numax and teff compute logg, assuming numax_solar=3090.0 microHz
"""
return np.log10((numax/self.numax_solar)*np.sqrt(teff/self.teff_solar))+self.logg_solar
def mr2rho(self,mass,sradius):
"""
From mass and radius compute rho_rho_solar
"""
return mass/np.power(sradius,3)
def mr2logg(self,mass,radius):
"""
From mass and radius compute logg
"""
return self.logg_solar+np.log10(mass/(radius*radius))
def kappa_m(self,dnu,numax):
"""
From dnu and numax compute kappa_m
"""
return np.power(numax/3090.0,3.0)*np.power(dnu/135.1,-4.0)
def kappa_r(self,dnu,numax):
"""
Not in original
From dnu and numax compute kappa_r
"""
return (numax/3090.0)*np.power(dnu/135.1,-2.0)
def mass_sc(self,dnu,numax,teff):
"""
From dnu, numax and teff compute mass according to scaling relation
Assumes dnu_solar=135.1 microHz and numax_solar=3090.0 microHz
"""
return np.power(numax/3090.0,3.0)*np.power(dnu/135.1,-4.0)*np.power(teff/self.teff_solar,1.5)
def _mass_dnu(self,dnu,logg):
"""
From dnu, logg compute mass according to scaling relation
Assumes dnu_solar=135.1 microHz
"""
return np.power(10.0,3*(logg-self.logg_solar))*np.power(dnu/(135.1),-4.0)
def _quantf(self,logg,teff):
"""
From logg and teff compute a quantity for interpolation that
is almost monotonic with age
"""
return np.log10(teff)+0.5*(np.tanh((logg-4.5)/0.25)+1)*logg*0.1
def get_dnu_numax(self,evstate,logz,teff,mini,mass,logg,fill_value='nearest',isfeh=False):
"""
        Get average seismic parameters (dnu, numax) by interpolation on
        a grid for a given (evstate, logz, teff, mini, mass, logg).
        Assumption numax_solar=3090.0 microHz.
        Args:
            evstate (array): 1) Pre RGB 2) Post RGB
            logz or feh (array): log(Z) log metallicity or [Fe/H]=log(Z/Z_solar)
                       (treated as [Fe/H] when isfeh is True)
            teff (array): temperature
            mini (array): initial mass
            mass (array): actual mass with mass loss (mass <= mini).
            logg (array): log(gravity)
            fill_value : Default is 'nearest', to use nearest grid points
                       in case of input values being out of grid range.
                       Alternatively, one can use None
Returns:
dnu (array): Large frequency separation (micro Hz)
numax (array): Central frequency of max amplitude (micro Hz)
fdnu (array): the correction factor for Delta nu
"""
evstate=np.asarray(evstate)
logz=np.asarray(logz)
if isfeh is True:
logz=logz+np.log10(self.z_solar)
teff=np.asarray(teff)
mini=np.asarray(mini)
mass=np.asarray(mass)
logg=np.asarray(logg)
numax=self.logg2numax(logg,teff)
sradius=self.logg2r(logg,mass)
logg1=self.mr2logg(mini,sradius)
factor=self._get_fdnu(evstate,logz,teff,mini,logg1,fill_value= fill_value)
dnu=135.1*factor*np.power(mass,0.5)/np.power(sradius,1.5)
if (factor.size == 1)&(numax.ndim == 0):
return dnu[0],numax,factor[0]
else:
return dnu,numax,factor
def _get_fdnu(self,evstate,logz,teff,mass,logg,fill_value='nearest'):
evstate=np.asarray(evstate)
logz=np.asarray(logz)
teff=np.asarray(teff)
mass=np.asarray(mass)
logg=np.asarray(logg)
return self._from_mlogg('fdnu',evstate,logz,teff,mass,logg,fill_value= fill_value)
def _from_mlogg(self,quant,evstate,logz,teff,mini,logg,fill_value='nearest'):
"""
The driver function to perform interpolation on the grid
for a given (evstate, logz, teff, mini, logg)
Args:
quant (str): name of quantity for which answer is needed.
For example 'fdnu', 'age', etc
evstate (array): 1) Pre RGB 2) Post RGB
            logz (array): log(Z) log metallicity ([Fe/H]=log(Z/Z_solar))
teff (array): temperature
mini (array): initial mass
logg (array): log(gravity)
"""
logz=np.asarray(logz)
logg_teff=self._quantf(logg,teff)
return self.igrid1.get_values(quant,[evstate,logz,mini,logg_teff],fill_value= fill_value)
def _from_freq(self,quant,evstate,logz,teff,dnu,numax,fill_value='nearest'):
"""
The driver function to perform interpolation on the grid
for a given (evstate, logz, teff, dnu, numax).
Args:
quant (str): name of quantity for which answer is needed.
For example 'fdnu', 'age', etc
evstate (array): 1) Pre RGB 2) Post RGB
            logz (array): log(Z) log metallicity ([Fe/H]=log(Z/Z_solar))
teff (array): temperature
dnu (array): Large frequency separation (micro Hz)
numax (array): Central frequency of max amplitude (micro Hz)
"""
logz=np.asarray(logz)
logg=self.numax2logg(numax,teff)
mass_dnu=self._mass_dnu(dnu,logg)
logg_teff=self._quantf(logg,teff)
return self.igrid2.get_values(quant,[evstate,logz,mass_dnu,logg_teff],fill_value= fill_value)
def get_mass_radius(self,evstate,logz,teff,dnu,numax,fill_value='nearest',isfeh=False):
"""
Get mass and radius of stars by interpolation on a grid
for a given (evstate, logz, teff, dnu, numax).
Assumption numax_solar=3090.0 microHz.
Args:
evstate (array): 1) Pre RGB 2) Post RGB
            logz or feh (array): log(Z) log metallicity or [Fe/H]=log(Z/Z_solar)
teff (array): temperature
dnu (array): Large frequency separation (micro Hz)
numax (array): Central frequency of max amplitude (micro Hz)
            fill_value : Default is 'nearest', to use nearest grid points
in case of input values being out of grid range.
Alternatively, one can use None
isfeh : A flag with default value being False. If set to
True, the second argument is considered to be
[Fe/H]
"""
evstate=np.asarray(evstate)
logz=np.asarray(logz)
if isfeh is True:
logz=logz+np.log10(self.z_solar)
teff=np.asarray(teff)
dnu=np.asarray(dnu)
numax=np.asarray(numax)
mass=self._from_freq('mass',evstate,logz,teff,dnu,numax,fill_value= fill_value)
logg=self.numax2logg(numax,teff)
sradius=self.logg2r(logg,mass)
if (mass.size == 1)&(evstate.ndim == 0):
return mass[0],sradius[0]
else:
return mass,sradius
def _usage():
print "NAME:"
print "\t asfgrid 0.0.4 - computes asteroseismic freuqncies or masses"
print "\t Copyright (c) 2015 Sanjib Sharma and Dennis Stello "
print "\t License: AGPL see <http://www.gnu.org/licenses/>."
print "\t Reference: Sharma et al. 2016, ApJ,822,15 \n"
print "USAGE:"
print "\t asfgrid inputfile \n"
print "DESCRIPTION:"
print "\t Outfile name is constructed from filename with suffix .out "
print "\t Input file should be ascii as follows"
print "\t evstate logz teff dnu numax"
print "\t 1 -1.97 4659.8 8.81 92.36"
print "\t 1 -1.98 4903.2 13.1 157.3 \n"
print "\t First line must contain column names"
print "\t Column names can be in any order but need to follow names"
print "\t given below"
print "OPTIONS:"
print "\t Possible input outputs are"
print "\t 1) (evstate, logz, teff, dnu, numax) ->(mass,radius)"
print "\t 2) (evstate, logz, teff, mass, logg) ->(dnu,numax,fdnu)"
print "\t 3) (evstate, logz, teff, mass, logg, mini)->(dnu,numax,fdnu)"
print "\t 4) (evstate, feh, teff, dnu, numax) ->(mass,radius)"
print "\t 5) (evstate, feh, teff, mass, logg) ->(dnu,numax,fdnu)"
print "\t 6) (evstate, feh, teff, mass, logg, mini) ->(dnu,numax,fdnu)"
print "\t Third and sixth option allows for mass loss if mass<mini \n"
print "VARIABLES:"
print "\t evstate (array): 1=Pre RGB tip, 2=Post RGB tip"
print "\t logz or feh (array): Log(Z) log metallcity or [Fe/H]=log(Z/Z_solar)."
print "\t If input feh, program assumes Z_solar=0.019"
print "\t to convert to LogZ in the grid."
print "\t teff (array): Effective temperature."
print "\t mass (array): Actual mass; "
print "\t when written as output,"
print "\t mass obtained using the dnu-scaling relation"
print "\t corrected with fdnu. "
print "\t radius (array): Radius; corresponds to the radius obtained using"
print "\t the dnu-scaling relation corrected with fdnu. "
print "\t mini (array): Initial mass. Useful for cases with mass loss,"
print "\t when actual mass is <= mini. "
print "\t logg (array): Log(gravity)."
print "\t dnu (array): Observed large frequency separation (micro Hz);"
print "\t when written as output, it corresponds to the"
print "\t radial mode frequency-based dnu from the grid. "
print "\t numax (array): Observed frequency of max power (micro Hz);"
print "\t when written as output, it corresponds to the"
print "\t scaling-based numax from the grid. "
print "\t fdnu (array): Correction factor for Delta nu scaling relation."
if __name__ == '__main__':
if len(sys.argv) == 1:
_usage()
elif len(sys.argv) == 2:
if sys.argv[1] == '-help':
_usage()
elif sys.argv[1] == '--help':
_usage()
else:
filename=sys.argv[1]
data1=np.genfromtxt(filename,names=True)
if data1.ndim ==0:
data1=np.array([data1])
data={}
for key in data1.dtype.names:
data[key]=data1[key]
keylist=list(data1.dtype.names)
s1=set(data.keys())
status1=1
status2=1
if 'feh' in s1:
data['logz']=data['feh']+np.log10(0.019)
s1=set(data.keys())
for key in ['evstate','logz','teff']:
if key not in s1:
print 'Following columns should be present in input file'
print 'evstate, logz (or feh) and teff'
status1=0
status2=0
for key in ['mass','logg']:
if key not in s1:
status1=0
for key in ['dnu','numax']:
if key not in s1:
status2=0
if (status1+status2) !=1:
print 'In addition to [evstate, logz, teff],'
print 'only one of following should be present'
print '[mini, mass, logg] [mass, logg] or [dnu,numax]'
print('Ambiguous input')
else:
s=Seism()
if status1 == 1:
if 'mini' in data.keys():
print '(evstate, logz, teff, mass, logg, mini) -> (dnu, numax,fdnu)'
data['dnu'],data['numax'],data['fdnu']=s.get_dnu_numax(data['evstate'],data['logz'],data['teff'],data['mini'],data['mass'],data['logg'])
keylist=keylist+['dnu','numax','fdnu']
else:
print '(evstate, logz, teff, mass, logg) -> (dnu, numax,fdnu)'
data['dnu'],data['numax'],data['fdnu']=s.get_dnu_numax(data['evstate'],data['logz'],data['teff'],data['mass'],data['mass'],data['logg'])
keylist=keylist+['dnu','numax','fdnu']
_tocsv(filename+'.out',data,keylist=keylist)
elif status2 == 1:
print '(evstate, logz, teff, dnu, numax) -> (mass,radius)'
data['mass'],data['radius']=s.get_mass_radius(data['evstate'],data['logz'],data['teff'],data['dnu'],data['numax'])
keylist=keylist+['mass','radius']
_tocsv(filename+'.out',data,keylist=keylist)
|
hposbornREPO_NAMEMonoToolsPATH_START.@MonoTools_extracted@MonoTools-main@MonoTools@stellar@isoclassify@isoclassify@[email protected]@.PATH_END.py
|
{
"filename": "_weight.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/volume/colorbar/tickfont/_weight.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="weight", parent_name="volume.colorbar.tickfont", **kwargs
):
super(WeightValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
extras=kwargs.pop("extras", ["normal", "bold"]),
max=kwargs.pop("max", 1000),
min=kwargs.pop("min", 1),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@volume@colorbar@tickfont@[email protected]_END.py
|
{
"filename": "CONTRIBUTING.md",
"repo_name": "microsoft/vscode",
"repo_path": "vscode_extracted/vscode-main/extensions/json-language-features/CONTRIBUTING.md",
"type": "Markdown"
}
|
## Setup
- Clone [microsoft/vscode](https://github.com/microsoft/vscode)
- Run `npm i` at `/`; this will install
  - Dependencies for `/extensions/json-language-features/`
  - Dependencies for `/extensions/json-language-features/server/`
- devDependencies such as `gulp`
- Open `/extensions/json-language-features/` as the workspace in VS Code
- In `/extensions/json-language-features/` run `npm run compile` (or `npm run watch`) to build the client and server
- Run the [`Launch Extension`](https://github.com/microsoft/vscode/blob/master/extensions/json-language-features/.vscode/launch.json) debug target in the Debug View. This will:
- Launch a new VS Code instance with the `json-language-features` extension loaded
- Open a `.json` file to activate the extension. The extension will start the JSON language server process.
- Add `"json.trace.server": "verbose"` to the settings to observe the communication between client and server in the `JSON Language Server` output.
- Debug the extension and the language server client by setting breakpoints in `json-language-features/client/`
- Debug the language server process by using `Attach to Node Process` command in the VS Code window opened on `json-language-features`.
  - Pick the process that contains `jsonServerMain` in the command line. Hover over the `code-insiders` or `code` processes to see the full process command line.
- Set breakpoints in `json-language-features/server/`
- Run `Reload Window` command in the launched instance to reload the extension
### Contribute to vscode-json-languageservice
[microsoft/vscode-json-languageservice](https://github.com/microsoft/vscode-json-languageservice) is the library that implements the language smarts for JSON.
The JSON language server forwards most of the requests to the service library.
If you want to fix JSON issues or make improvements, you should make changes at [microsoft/vscode-json-languageservice](https://github.com/microsoft/vscode-json-languageservice).
However, within this extension, you can run a development version of `vscode-json-languageservice` to debug code or test language features interactively:
#### Linking `vscode-json-languageservice` in `json-language-features/server/`
- Clone [microsoft/vscode-json-languageservice](https://github.com/microsoft/vscode-json-languageservice)
- Run `npm i` in `vscode-json-languageservice`
- Run `npm link` in `vscode-json-languageservice`. This will compile and link `vscode-json-languageservice`
- In `json-language-features/server/`, run `npm link vscode-json-languageservice`
#### Testing the development version of `vscode-json-languageservice`
- Open both `vscode-json-languageservice` and this extension in two windows, or in a single window using the [multi-root workspace](https://code.visualstudio.com/docs/editor/multi-root-workspaces) feature.
- Run `npm run watch` at `json-language-features/server/` to recompile this extension with the linked version of `vscode-json-languageservice`
- Make some changes in `vscode-json-languageservice`
- Now when you run `Launch Extension` debug target, the launched instance will use your development version of `vscode-json-languageservice`. You can interactively test the language features.
|
microsoftREPO_NAMEvscodePATH_START.@vscode_extracted@vscode-main@extensions@[email protected]@.PATH_END.py
|
{
"filename": "test_svm.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/unit_tests/retrievers/test_svm.py",
"type": "Python"
}
|
import pytest
from langchain_core.documents import Document
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.retrievers.svm import SVMRetriever
class TestSVMRetriever:
@pytest.mark.requires("sklearn")
def test_from_texts(self) -> None:
input_texts = ["I have a pen.", "Do you have a pen?", "I have a bag."]
svm_retriever = SVMRetriever.from_texts(
texts=input_texts, embeddings=FakeEmbeddings(size=100)
)
assert len(svm_retriever.texts) == 3
@pytest.mark.requires("sklearn")
def test_from_documents(self) -> None:
input_docs = [
Document(page_content="I have a pen.", metadata={"foo": "bar"}),
Document(page_content="Do you have a pen?"),
Document(page_content="I have a bag."),
]
svm_retriever = SVMRetriever.from_documents(
documents=input_docs, embeddings=FakeEmbeddings(size=100)
)
assert len(svm_retriever.texts) == 3
@pytest.mark.requires("sklearn")
def test_metadata_persists(self) -> None:
input_docs = [
Document(page_content="I have a pen.", metadata={"foo": "bar"}),
Document(page_content="How about you?", metadata={"foo": "baz"}),
Document(page_content="I have a bag.", metadata={"foo": "qux"}),
]
svm_retriever = SVMRetriever.from_documents(
documents=input_docs, embeddings=FakeEmbeddings(size=100)
)
query = "Have anything?"
output_docs = svm_retriever.invoke(query)
for doc in output_docs:
assert "foo" in doc.metadata
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@unit_tests@retrievers@[email protected]_END.py
|
{
"filename": "test_vald.py",
"repo_name": "AWehrhahn/SME",
"repo_path": "SME_extracted/SME-master/test/test_vald.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from os.path import dirname, join
import numpy as np
import pytest
from pysme.abund import Abund
from pysme.linelist.linelist import LineList
from pysme.linelist.vald import ValdFile
species = "Fe 1"
wlcent = 5502.9931
excit = 0.9582
gflog = -3.047
gamrad = 7.19
gamqst = -6.22
gamvw = 239.249
linedata = [species, wlcent, excit, gflog, gamrad, gamqst, gamvw]
def test_line_init():
"""Test that property values equal line data passed to __init__()."""
line = LineList(linedata)
assert isinstance(line, LineList)
assert line.species[0] == species
assert line.wlcent[0] == wlcent
assert line.excit[0] == excit
assert line.gflog[0] == gflog
assert line.gamrad[0] == gamrad
assert line.gamqst[0] == gamqst
assert line.gamvw[0] == gamvw
def test_linelist_add_and_len():
"""Test that len() returns the number of lines (including 0) in list."""
linelist = LineList()
assert isinstance(linelist, LineList)
assert len(linelist) == 0
for iline in range(3):
print(len(linelist._lines))
print(linelist._lines)
assert len(linelist) == iline
linelist.add(*linedata)
def test_linelist_properties():
"""Test that properties are lists with one item per line.
Test that property value equal line data passed to add().
"""
linelist = LineList()
linelist.add(*linedata)
proplist = [
linelist.species,
linelist.wlcent,
linelist.excit,
linelist.gflog,
linelist.gamrad,
linelist.gamqst,
linelist.gamvw,
]
for iprop, prop in enumerate(proplist):
print(linelist._lines)
assert isinstance(prop, np.ndarray)
assert len(prop) == 1
assert prop[0] == linedata[iprop]
def test_valdfile():
"""Test class to read a VALD line data file."""
testdir = dirname(__file__)
linelist = ValdFile(join(testdir, "testcase1.lin"))
assert len(linelist) == 44
assert linelist.lineformat == "short"
assert linelist.medium == "air"
assert linelist[0].species == "V 1"
with pytest.raises(IOError):
ValdFile(join(testdir, "testcase1.npy"))
def test_medium():
"""Test class to read a VALD line data file."""
testdir = dirname(__file__)
vf = ValdFile(join(testdir, "testcase1.lin"), medium="vac")
assert vf.medium == "vac"
def test_short_format():
linelist = ValdFile(join(dirname(__file__), "testcase1.lin"))
assert linelist.lineformat == "short"
assert len(linelist) == 44
assert linelist.atomic is not None
assert linelist.species is not None
with pytest.raises(AttributeError):
_ = linelist.lulande
with pytest.raises(AttributeError):
_ = linelist.extra
assert isinstance(linelist.abund, Abund)
assert isinstance(linelist.atmo, str)
def test_long_format():
linelist = ValdFile(join(dirname(__file__), "testcase3.lin"))
assert linelist.lineformat == "long"
assert len(linelist) == 67
assert linelist.atomic is not None
assert linelist.species is not None
assert linelist.lulande is not None
assert linelist.extra is not None
assert linelist.reference is not None
assert linelist.error is not None
assert linelist.term_lower is not None
assert linelist.term_upper is not None
assert isinstance(linelist.abund, Abund)
assert isinstance(linelist.atmo, str)
|
AWehrhahnREPO_NAMESMEPATH_START.@SME_extracted@SME-master@test@[email protected]_END.py
|
{
"filename": "list_correction.py",
"repo_name": "benrendle/AIMS",
"repo_path": "AIMS_extracted/AIMS-master/Binary_Grid_Generation/list_correction.py",
"type": "Python"
}
|
# Generates the full file locations for the final output table and saves this
# out to the desired location.
########################################
def get_lines(filename):
lines = []
with open(filename) as f:
for line in f:
lines.append(line)
return lines
##########################################
input_file = "master" # file containing the initial table
output_file = "/home/bmr135/git_AIMS/AIMS/AIMS_BEN/CLES_RGB_v3" # output file
lines = get_lines(input_file)
names = []
directs = []
to_freqs = []
for line in lines:
names.append(line[:])
directs.append(line[:20])
# to_freqs.append(line[:34])
commands = []
for i in range(len(names)):
# stitch together sections of the file name to create the desired file location
commands.append(directs[i] + "/AIMSG/" + names[i]) # CLES
# commands.append(directs[i] + "/" + names[i]) # MESA
with open(output_file,"w") as f:
# write data to output file with extension to location of frequency files included
f.write("/home/bmr135/GridGiantsClesV0.3/models_grad_rad_under/ .freq \n") # CLES
# f.write("/home/miglioa/GridNGC6819_Fep0.25/LOGS/ .sgyre_l0 \n") # MESA
for command in commands:
f.write(command)
|
benrendleREPO_NAMEAIMSPATH_START.@AIMS_extracted@AIMS-master@Binary_Grid_Generation@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "BRML/climin",
"repo_path": "climin_extracted/climin-master/climin/__init__.py",
"type": "Python"
}
|
from __future__ import absolute_import
# Control breaking does not work on Windows after e.g. scipy.stats is
# imported because certain Fortran libraries register their own signal handler
# See:
# http://stackoverflow.com/questions/15457786/ctrl-c-crashes-python-after-importing-scipy-stats
# and https://github.com/numpy/numpy/issues/6923
import sys
import os
import imp
import ctypes
if sys.platform == 'win32':
# For setups where Intel Fortran compiler version >= 16.0 (This is the case
# for Anaconda version 4.1.5 which comes with numpy version 1.10.4) is used,
# the following flag allows to disable the additionally introduced signal
    # handler, older versions make no use of this environment variable
env = 'FOR_DISABLE_CONSOLE_CTRL_HANDLER'
if env not in os.environ:
os.environ[env] = '1'
# In setups with an older version, ensuring that the respective dlls are
# loaded from the numpy core and not somewhere else (e.g. the Windows System
# folder) helps
basepath = imp.find_module('numpy')[1]
# dll loading fails when Intel Fortran compiler version >= 16.0, therefore
# use try/catch
try:
ctypes.CDLL(os.path.join(basepath, 'core', 'libmmd.dll'))
ctypes.CDLL(os.path.join(basepath, 'core', 'libifcoremd.dll'))
except Exception:
pass
from .adadelta import Adadelta
from .adam import Adam
from .asgd import Asgd
from .bfgs import Bfgs, Lbfgs, Sbfgs
from .cg import ConjugateGradient, NonlinearConjugateGradient
from .gd import GradientDescent
from .nes import Xnes
from .rmsprop import RmsProp
from .rprop import Rprop
from .smd import Smd
|
BRMLREPO_NAMEcliminPATH_START.@climin_extracted@climin-master@climin@[email protected]_END.py
|
{
"filename": "test_packaging.py",
"repo_name": "lucabaldini/ixpeobssim",
"repo_path": "ixpeobssim_extracted/ixpeobssim-main/tests/test_packaging.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# Copyright (C) 2020, the ixpeobssim team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function, division
"""Unit test for packaging information.
"""
import unittest
import numpy
import scipy
import astropy
import matplotlib
from ixpeobssim.utils.packaging_ import xPackageVersion, parse_version_string, retrieve_version
class testPackaging(unittest.TestCase):
"""Unit test for the extraction of packaging information.
"""
def test_main_packages(self):
"""Print the version of our major dependencies.
"""
for package in (numpy, scipy, astropy, matplotlib):
print(package.__name__, retrieve_version(package))
def test_strings(self):
"""Make sure we parse version string without patch and with post suffix.
"""
print(parse_version_string('1.2'))
print(parse_version_string('4.0.1.post1'))
def test_xspec_patch(self):
"""See issue https://bitbucket.org/ixpesw/ixpeobssim/issues/507/
"""
with self.assertRaises(SystemExit):
parse_version_string('12.12.0g')
def test_comparisons(self):
"""
"""
self.assertTrue(xPackageVersion(1, 4, 0) < '1.6.0')
self.assertTrue(xPackageVersion(1, 4, 0) <= '1.6.0')
self.assertTrue(xPackageVersion(1, 4, 0) != '1.6.0')
self.assertTrue(xPackageVersion(1, 4, 0) != '1.6')
self.assertTrue(xPackageVersion(1, 4, 0) != '1.6.0.post1')
self.assertTrue(xPackageVersion(1, 4, 0) == '1.4.0')
self.assertTrue(xPackageVersion(1, 4, 0) <= '1.4.0')
self.assertTrue(xPackageVersion(1, 6, 0) >= '1.4.0')
self.assertTrue(xPackageVersion(1, 6, 0) >= '0.4')
self.assertTrue(xPackageVersion(1, 6) >= '0.4.0')
if __name__ == '__main__':
unittest.main()
|
lucabaldiniREPO_NAMEixpeobssimPATH_START.@ixpeobssim_extracted@ixpeobssim-main@tests@[email protected]_END.py
|
{
"filename": "statistic.py",
"repo_name": "dazhiUBC/SCUBA2_MF",
"repo_path": "SCUBA2_MF_extracted/SCUBA2_MF-main/statistic.py",
"type": "Python"
}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
fid_snr = np.load('fide_snr_850.npy')
de = pd.read_csv('850/deboosting_values_snr.dat',delimiter = ' ')
com = pd.read_csv('850/completeness_values_snr.dat',delimiter = ' ')
plt.plot(com['input_snr'],com['fraction'],color='tab:green',label = 'Completeness')
plt.plot(de['observed_snr'], de['deboost_value_mean'],color='tab:red',label = 'Deboosting')
plt.fill_between(de['observed_snr'],de['deboost_value_mean']+de['std'],de['deboost_value_mean']-de['std'],color='tab:red',alpha=0.1)
plt.plot(fid_snr[0],fid_snr[1],label = 'Fidelity',color='tab:blue')
plt.xlim(3.5,16)
plt.ylim(0.31,1.08)
plt.xlabel(r'SNR$_{rec}$')
plt.ylabel('Factor')
plt.legend()
plt.savefig('deboost_new.pdf',bbox_inches='tight')
|
dazhiUBCREPO_NAMESCUBA2_MFPATH_START.@SCUBA2_MF_extracted@[email protected]@.PATH_END.py
|
{
"filename": "demo12.py",
"repo_name": "GalSim-developers/GalSim",
"repo_path": "GalSim_extracted/GalSim-main/examples/demo12.py",
"type": "Python"
}
|
# Copyright (c) 2012-2023 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
"""
Demo #12
The twelfth script in our tutorial about using GalSim in python scripts: examples/demo*.py.
(This file is designed to be viewed in a window 100 characters wide.)
This script introduces wavelength-dependent profiles. Three kinds of chromatic profiles are
demonstrated here:
1) A chromatic object representing a DeVaucouleurs galaxy with an early-type SED at redshift 0.8.
The galaxy is drawn using the six LSST filters, which demonstrate that the galaxy is a g-band
dropout.
2) A two-component bulge+disk galaxy, in which the bulge and disk have different SEDs.
3) A wavelength-dependent atmospheric PSF, which includes the effect of differential chromatic
refraction and the wavelength dependence of Kolmogorov-turbulence-induced seeing. This PSF
is convolved with a simple Exponential galaxy.
In all three cases, six images are created, which correspond to each of the LSST filters:
u, g, r, i, z, and Y. We also provide suggested parameters for viewing in ds9.
New features introduced in this demo:
- SED = galsim.SED(wave, flambda, wave_type, flux_type)
- SED2 = SED.atRedshift(redshift)
- bandpass = galsim.Bandpass(filename, wave_type)
- bandpass2 = bandpass.truncate(relative_throughput)
- bandpass3 = bandpass2.thin(rel_err)
- gal = GSObject * SED
- obj = galsim.Add([list of ChromaticObjects])
- ChromaticObject.drawImage(bandpass)
- PSF = galsim.ChromaticAtmosphere(GSObject, base_wavelength, zenith_angle)
"""
import sys
import os
import logging
import galsim
def main(argv):
# Where to find and output data
path, filename = os.path.split(__file__)
outpath = os.path.abspath(os.path.join(path, "output/"))
    # Make output directory if not already present.
    if not os.path.isdir(outpath):
        os.mkdir(outpath)
datapath = galsim.meta_data.share_dir
# In non-script code, use getLogger(__name__) at module scope instead.
logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger("demo12")
# initialize (pseudo-)random number generator
random_seed = galsim.BaseDeviate(1234567).raw()
# read in SEDs
SED_names = ['CWW_E_ext', 'CWW_Sbc_ext', 'CWW_Scd_ext', 'CWW_Im_ext']
SEDs = {}
for SED_name in SED_names:
SED_filename = os.path.join(datapath, 'SEDs/{0}.sed'.format(SED_name))
# Here we create some galsim.SED objects to hold star or galaxy spectra. The most
# convenient way to create realistic spectra is to read them in from a two-column ASCII
# file, where the first column is wavelength and the second column is flux. Wavelengths in
# the example SED files are in Angstroms, flux in flambda. We use a set of files that are
# distributed with GalSim in the share/ directory.
SED = galsim.SED(SED_filename, wave_type='Ang', flux_type='flambda')
# The normalization of SEDs affects how many photons are eventually drawn into an image.
# One way to control this normalization is to specify the flux density in photons per nm
# at a particular wavelength. For example, here we normalize such that the photon density
# is 1 photon per nm at 500 nm.
SEDs[SED_name] = SED.withFluxDensity(target_flux_density=1.0, wavelength=500)
logger.debug('Successfully read in SEDs')
# read in the LSST filters
filter_names = 'ugrizy'
filters = {}
for filter_name in filter_names:
filter_filename = os.path.join(datapath, 'bandpasses/LSST_{0}.dat'.format(filter_name))
# Here we create some galsim.Bandpass objects to represent the filters we're observing
# through. These include the entire imaging system throughput including the atmosphere,
# reflective and refractive optics, filters, and the CCD quantum efficiency. These are
# also conveniently read in from two-column ASCII files where the first column is
# wavelength and the second column is dimensionless throughput. The example filter files
# units of nanometers for the wavelength type, so we specify that using the required
# `wave_type` argument.
filters[filter_name] = galsim.Bandpass(filter_filename, wave_type='nm')
# For speed, we can thin out the wavelength sampling of the filter a bit.
# In the following line, `rel_err` specifies the relative error when integrating over just
# the filter (however, this is not necessarily the relative error when integrating over the
# filter times an SED).
filters[filter_name] = filters[filter_name].thin(rel_err=1e-4)
logger.debug('Read in filters')
pixel_scale = 0.2 # arcseconds
#-----------------------------------------------------------------------------------------------
# Part A: chromatic de Vaucouleurs galaxy
# Here we create a chromatic version of a de Vaucouleurs profile by multipying a GSObject by an
# SED. This is how one generally constructs _separable_ ChromaticObjects in GalSim, that is,
# those objects whose spatial dependence and wavelength dependence factor.
logger.info('')
logger.info('Starting part A: chromatic De Vaucouleurs galaxy')
redshift = 0.8
mono_gal = galsim.DeVaucouleurs(half_light_radius=0.5)
SED = SEDs['CWW_E_ext'].atRedshift(redshift)
gal = mono_gal * SED
# You can still shear, shift, and dilate the resulting chromatic object.
gal = gal.shear(g1=0.5, g2=0.3).dilate(1.05).shift((0.0, 0.1))
logger.debug('Created separable ChromaticObject')
# convolve with PSF to make final profile
PSF = galsim.Moffat(fwhm=0.6, beta=2.5)
final = galsim.Convolve([gal, PSF])
logger.debug('Created final profile')
# draw profile through LSST filters
for i, filter_name in enumerate(filter_names):
filter_ = filters[filter_name]
img = galsim.ImageF(64, 64, scale=pixel_scale)
final.drawImage(filter_, image=img)
# To match the yaml, we need a new rng for each file
rng = galsim.BaseDeviate(random_seed+1+i)
gaussian_noise = galsim.GaussianNoise(rng, sigma=0.02)
img.addNoise(gaussian_noise)
logger.debug('Created {0}-band image'.format(filter_name))
out_filename = os.path.join(outpath, 'demo12a_{0}.fits'.format(filter_name))
galsim.fits.write(img, out_filename)
logger.debug('Wrote {0}-band image to disk'.format(filter_name))
logger.info('Added flux for {0}-band image: {1}'.format(filter_name, img.added_flux))
logger.info('You can display the output in ds9 with a command line that looks something like:')
logger.info('ds9 output/demo12a_*.fits -match scale -zoom 2 -match frame image &')
#-----------------------------------------------------------------------------------------------
# Part B: chromatic bulge+disk galaxy
logger.info('')
logger.info('Starting part B: chromatic bulge+disk galaxy')
redshift = 0.8
# make a bulge ...
mono_bulge = galsim.DeVaucouleurs(half_light_radius=0.5)
bulge_SED = SEDs['CWW_E_ext'].atRedshift(redshift)
bulge = mono_bulge * bulge_SED
bulge = bulge.shear(g1=0.12, g2=0.07)
logger.debug('Created bulge component')
# ... and a disk ...
mono_disk = galsim.Exponential(half_light_radius=2.0)
disk_SED = SEDs['CWW_Im_ext'].atRedshift(redshift)
disk = mono_disk * disk_SED
disk = disk.shear(g1=0.4, g2=0.2)
logger.debug('Created disk component')
# ... and then combine them.
bdgal = 1.1 * (0.8*bulge+4*disk) # you can add and multiply ChromaticObjects just like GSObjects
bdfinal = galsim.Convolve([bdgal, PSF])
# Note that at this stage, our galaxy is chromatic but our PSF is still achromatic. Part C)
# below will dive into chromatic PSFs.
logger.debug('Created bulge+disk galaxy final profile')
# draw profile through LSST filters
for i, filter_name in enumerate(filter_names):
filter_ = filters[filter_name]
img = galsim.ImageF(64, 64, scale=pixel_scale)
bdfinal.drawImage(filter_, image=img)
rng = galsim.BaseDeviate(random_seed+1+i)
gaussian_noise = galsim.GaussianNoise(rng, sigma=0.02)
img.addNoise(gaussian_noise)
logger.debug('Created {0}-band image'.format(filter_name))
out_filename = os.path.join(outpath, 'demo12b_{0}.fits'.format(filter_name))
galsim.fits.write(img, out_filename)
logger.debug('Wrote {0}-band image to disk'.format(filter_name))
logger.info('Added flux for {0}-band image: {1}'.format(filter_name, img.added_flux))
logger.info('You can display the output in ds9 with a command line that looks something like:')
logger.info('ds9 -rgb -blue -scale limits -0.2 0.8 output/demo12b_r.fits -green -scale limits'
+' -0.25 1.0 output/demo12b_i.fits -red -scale limits -0.25 1.0 output/demo12b_z.fits'
+' -zoom 2 &')
#-----------------------------------------------------------------------------------------------
# Part C: chromatic PSF
logger.info('')
logger.info('Starting part C: chromatic PSF')
redshift = 0.0
mono_gal = galsim.Exponential(half_light_radius=0.5)
SED = SEDs['CWW_Im_ext'].atRedshift(redshift)
# Here's another way to set the normalization of the SED. If we want 50 counts to be drawn
# when observing an object with this SED through the LSST g-band filter, for instance, then we
# can do:
SED = SED.withFlux(50.0, filters['g'])
# The flux drawn through other bands, which sample different parts of the SED and have different
# throughputs, will, of course, be different.
gal = mono_gal * SED
gal = gal.shear(g1=0.5, g2=0.3)
logger.debug('Created chromatic galaxy')
# For a ground-based PSF, two chromatic effects are introduced by the atmosphere:
# (i) differential chromatic refraction (DCR), and (ii) wavelength-dependent seeing.
#
# DCR shifts the position of the PSF as a function of wavelength. Blue light is shifted
# toward the zenith slightly more than red light.
#
# Kolmogorov turbulence in the atmosphere leads to a seeing size (e.g., FWHM) that scales with
# wavelength to the (-0.2) power.
#
# The ChromaticAtmosphere function will attach both of these effects to a fiducial PSF at
# some fiducial wavelength.
# First we define a monochromatic PSF to be the fiducial PSF.
PSF_500 = galsim.Moffat(beta=2.5, fwhm=0.5)
# Then we use ChromaticAtmosphere to manipulate this fiducial PSF as a function of wavelength.
# ChromaticAtmosphere also needs to know the wavelength of the fiducial PSF, and the location
# and orientation of the object with respect to the zenith. This final piece of information
# can be specified in several ways (see the ChromaticAtmosphere docstring for all of them).
# Here are a couple ways: let's pretend our object is located near M101 on the sky, we observe
# it 1 hour before it transits and we're observing from Mauna Kea.
ra = galsim.Angle.from_hms("14:03:13") # hours : minutes : seconds
dec = galsim.Angle.from_dms("54:20:57") # degrees : minutes : seconds
m101 = galsim.CelestialCoord(ra, dec)
latitude = 19.8207 * galsim.degrees # latitude of Mauna Kea
HA = -1.0 * galsim.hours # Hour angle = one hour before transit
    # Then we can compute the zenith angle and parallactic angle (which is the position angle
# of the zenith measured from North through East) of this object:
za, pa = galsim.dcr.zenith_parallactic_angles(m101, HA=HA, latitude=latitude)
# And then finally, create the chromatic PSF
PSF = galsim.ChromaticAtmosphere(PSF_500, 500.0, zenith_angle=za, parallactic_angle=pa)
# We could have also just passed `m101`, `latitude` and `HA` to ChromaticAtmosphere directly:
PSF = galsim.ChromaticAtmosphere(PSF_500, 500.0, obj_coord=m101, latitude=latitude, HA=HA)
# and proceed like normal.
# convolve with galaxy to create final profile
final = galsim.Convolve([gal, PSF])
logger.debug('Created chromatic PSF final profile')
# Draw profile through LSST filters
for i, filter_name in enumerate(filter_names):
filter_ = filters[filter_name]
img = galsim.ImageF(64, 64, scale=pixel_scale)
final.drawImage(filter_, image=img)
rng = galsim.BaseDeviate(random_seed+1+i)
gaussian_noise = galsim.GaussianNoise(rng, sigma=0.02)
img.addNoise(gaussian_noise)
logger.debug('Created {0}-band image'.format(filter_name))
out_filename = os.path.join(outpath, 'demo12c_{0}.fits'.format(filter_name))
galsim.fits.write(img, out_filename)
logger.debug('Wrote {0}-band image to disk'.format(filter_name))
logger.info('Added flux for {0}-band image: {1}'.format(filter_name, img.added_flux))
logger.info('You can display the output in ds9 with a command line that looks something like:')
logger.info('ds9 output/demo12c_*.fits -match scale -zoom 2 -match frame image -blink &')
if __name__ == "__main__":
main(sys.argv)
|
GalSim-developersREPO_NAMEGalSimPATH_START.@GalSim_extracted@GalSim-main@[email protected]@.PATH_END.py
|
{
"filename": "_autocolorscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/splom/marker/_autocolorscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AutocolorscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="autocolorscale", parent_name="splom.marker", **kwargs
):
super(AutocolorscaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@splom@marker@[email protected]_END.py
|
{
"filename": "_tickvalssrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/indicator/gauge/axis/_tickvalssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="tickvalssrc", parent_name="indicator.gauge.axis", **kwargs
):
super(TickvalssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@indicator@gauge@axis@[email protected]_END.py
|
{
"filename": "flow.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/tests/test-projects/import-project/my_module/flow.py",
"type": "Python"
}
|
import prefect
from .utils import get_output
@prefect.flow(name="test")
def test_flow():
return get_output()
@prefect.flow(name="test")
def prod_flow():
return get_output()
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@tests@test-projects@import-project@[email protected]@.PATH_END.py
|
{
"filename": "randDemo.py",
"repo_name": "AstroVPK/kali",
"repo_path": "kali_extracted/kali-master/examples/randDemo.py",
"type": "Python"
}
|
#!/usr/bin/env python
""" Module to draw hardware random numbers.
For a demonstration of the module, please run the module as a command line program, e.g.
bash-prompt$ python randDemo.py --help
and
bash-prompt$ python randDemo.py -n 100
"""
import numpy as np
import rand
if __name__ == '__main__':
import argparse as argparse
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--numRand', type=int, default=10,
help=r'Number of hardware random numbers to generate...')
args = parser.parse_args()
A = np.zeros(args.numRand, dtype='uint32')
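    # Based on its use here, rand.rdrand is assumed to fill the pre-allocated uint32 array
    # in place with hardware random draws and to return a success flag.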
success = rand.rdrand(A)
    for i in range(args.numRand):
        print('%s' % (str(A[i])))
|
AstroVPKREPO_NAMEkaliPATH_START.@kali_extracted@kali-master@[email protected]@.PATH_END.py
|
{
"filename": "ROADMAP.md",
"repo_name": "ledatelescope/bifrost",
"repo_path": "bifrost_extracted/bifrost-master/ROADMAP.md",
"type": "Markdown"
}
|
# Bifrost Roadmap
This is a high-level outline of Bifrost development plans. Unless otherwise
stated, the items on this page have not yet been developed.
## Algorithms and blocks
* Single-pulse search algorithms
* Baseline removal, peak finding
* Pulsar search algorithms
* Harmonic summing, folding
* Calibration and imaging algorithms
* Gridding/degridding, compressive sensing, CLEAN
* I/O (source/sink) blocks for additional astronomy/audio/generic file formats
## Pipeline features
* Method of sending data between different servers
* Remote control mechanisms
* Pipeline status and performance monitoring
* Streaming data visualisation
## Backend features
* Improved packet capture/transmission framework
* Support for InfiniBand verbs
* CPU backends for existing CUDA-only algorithms
* Support for inter-process shared memory rings
* Optimisations for low-latency applications
## Platform and dependency updates
* Python 2.x will no longer be supported after the end of 2022.
|
ledatelescopeREPO_NAMEbifrostPATH_START.@bifrost_extracted@[email protected]@.PATH_END.py
|
{
"filename": "F2b.py",
"repo_name": "mlares/hearsay",
"repo_path": "hearsay_extracted/hearsay-master/paper/F2b.py",
"type": "Python"
}
|
from hearsay import hearsay
import numpy as np
import pandas as pd
from itertools import product as pp
from matplotlib import pyplot as plt
# Figure 2b
# (b) Variation of tau_survive for fixed tau_awakening
# -----------------------------------------------------
# 1) Generate points in the parameter space to sample :::::::::::::::::::
ta = [1000]
ts = [5000, 10000, 20000, 50000]
td = [32615]
z = pp(ta, ts, td)
tau_a = []
tau_s = []
d_max = []
fname = []
for k, i in enumerate(z):
tau_a.append(i[0])
tau_s.append(i[1])
d_max.append(i[2])
fname.append(f"../out/F2b/{str(k).zfill(5)}_001.pk")
df = pd.DataFrame(list(zip(tau_a, tau_s, d_max, fname)),
columns=['tau_awakening', 'tau_survive', 'D_max',
'filename'])
df.to_csv('F2b.csv')
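# The grid above is the Cartesian product of the parameter lists: 1 x 4 x 1 = 4
# (tau_awakening, tau_survive, D_max) combinations, each paired with its own output file.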
# 2) Run the simulations :::::::::::::::::::
# ....(a)
df = pd.read_csv('F2b.csv')
config = hearsay.Parser('F2b.ini')
config.load_config()
G = hearsay.C3Net(config)
G.set_parameters(df)
G.run()
# 3) Read the simulations :::::::::::::::::::::
dfa = pd.read_csv('F2b.csv')
config = hearsay.Parser('F2b.ini')
config.load_config()
G = hearsay.C3Net(config)
G.set_parameters(dfa)
R = hearsay.Results(G)
R.load()
res = R.redux()
ib = res['lI']
fig = plt.figure()
ax = fig.add_subplot()
for k, inbox in enumerate(ib):
imax = max(inbox)
breaks = np.array(range(imax+1)) + 0.5
hy, hx = np.histogram(inbox, breaks, density=True)
xx = breaks[:-1] + 0.5
yy = np.cumsum(hy)
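    # hy holds the per-bin densities of the inbox counts, so the running sum yy approximates
    # the empirical cumulative distribution plotted below for each parameter combination.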
lbl = (f"A={R.params.iloc[k]['tau_awakening']},"
f"S={R.params.iloc[k]['tau_survive']}")
ax.step(xx, yy, label=lbl)
ax.set_xscale('log')
ax.legend()
fig.savefig('F2b.png')
fig.savefig('F2b.pdf')
plt.close()
|
mlaresREPO_NAMEhearsayPATH_START.@hearsay_extracted@hearsay-master@[email protected]@.PATH_END.py
|
{
"filename": "combspec_main.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/scripts/main/combspec_main.py",
"type": "Python"
}
|
#standard python
import sys
import os
import shutil
import unittest
from datetime import datetime
import json
import numpy as np
import fitsio
import glob
import argparse
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
from desitarget.io import read_targets_in_tiles
from desitarget.mtl import inflate_ledger
from desimodel.footprint import is_point_in_desi
#sys.path.append('../py') #this requires running from LSS/bin, *something* must allow linking without this but is not present in code yet
#from this package
#try:
import LSS.main.cattools as ct
parser = argparse.ArgumentParser()
parser.add_argument("--basedir", help="base directory for output, default is CSCRATCH",default=os.environ['CSCRATCH'])
parser.add_argument("--version", help="catalog version; use 'test' unless you know what you are doing!",default='test')
parser.add_argument("--prog", help="dark or bright is supported",default='dark')
parser.add_argument("--zmtl",help="if yes, only concatenate zmtl file",default='y')
args = parser.parse_args()
print(args)
basedir = args.basedir
version = args.version
prog = args.prog
progu = prog.upper()
mt = Table.read('/global/cfs/cdirs/desi/survey/ops/surveyops/trunk/ops/tiles-specstatus.ecsv')
wd = mt['SURVEY'] == 'main'
#wd &= mt['EFFTIME_SPEC']/mt['GOALTIME'] > 0.85
wd &= mt['ZDONE'] == 'true'
wd &= mt['FAPRGRM'] == prog
mtd = mt[wd]
#print('found '+str(len(mtd))+' '+prog+' time main survey tiles that are greater than 85% of goaltime')
print('found '+str(len(mtd))+' '+prog+' time main survey tiles with zdone true')
tiles4comb = Table()
tiles4comb['TILEID'] = mtd['TILEID']
tiles4comb['ZDATE'] = mtd['LASTNIGHT']
#share basedir location '/global/cfs/cdirs/desi/survey/catalogs'
maindir = basedir +'/main/LSS/'
if not os.path.exists(maindir+'/logs'):
os.mkdir(maindir+'/logs')
print('made '+maindir+'/logs')
if not os.path.exists(maindir+'/LSScats'):
os.mkdir(maindir+'/LSScats')
print('made '+maindir+'/LSScats')
dirout = maindir+'LSScats/'+version+'/'
if not os.path.exists(dirout):
os.mkdir(dirout)
print('made '+dirout)
#outf = maindir+'datcomb_'+prog+'_spec_premtlup.fits'
if args.zmtl == 'n':
outf = maindir+'datcomb_'+prog+'_spec_zdone.fits'
md = ''
if args.zmtl == 'y':
outf = maindir+'datcomb_'+prog+'_zmtl_zdone.fits'
md = 'zmtl'
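# combtile_spec (from LSS.main.cattools) is expected to loop over the tiles listed in
# tiles4comb and concatenate their per-tile spec (or zmtl, depending on md) information
# into the single output file outf.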
ct.combtile_spec(tiles4comb,outf,md=md)
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@scripts@main@[email protected]_END.py
|
{
"filename": "test_uncertainties.py",
"repo_name": "hover2pi/sedkit",
"repo_path": "sedkit_extracted/sedkit-main/tests/test_uncertainties.py",
"type": "Python"
}
|
import unittest
import astropy.units as q
import numpy as np
from sedkit import uncertainties as un
class TestUnum(unittest.TestCase):
"""Tests for the Unum class"""
def setUp(self):
"""Setup the tests"""
# Symmetry
self.sym = un.Unum(10.1, 0.2)
self.asym = un.Unum(9.3, 0.08, 0.11)
self.sym_u = un.Unum(12 * q.um, 0.1 * q.um)
# Data structures
self.u1 = un.Unum(10 * q.um, 1 * q.um, n_samples=200)
self.u2 = un.Unum(10, 1, n_samples=20)
self.a1 = un.UArray(np.ones(3) * q.um, np.abs(np.random.normal(size=3)) * q.um, n_samples=1000)
self.a2 = un.UArray(np.ones(3), np.abs(np.random.normal(size=3)), n_samples=1000)
self.i1 = 5 * q.um
self.i2 = 5
self.s1 = np.array([1, 2, 3]) * q.um
self.s2 = np.array([1, 2, 3])
def test_attrs(self):
"""Test attributes"""
x = self.sym
x.value
x.quantiles
def test_add(self):
"""Test add method"""
# Equivalent units
x = self.sym + self.asym
# Not equivalent
try:
x = self.sym + self.sym_u
except TypeError:
pass
x = self.u1 + self.u1
x = self.u1 + self.i1
x = self.u2 + self.u2
x = self.u2 + self.i2
x = self.a1 + self.u1
x = self.a1 + self.i1
x = self.a1 + self.s1
x = self.a2 + self.u2
x = self.a2 + self.i2
x = self.a2 + self.s2
def test_mul(self):
"""Test mul method"""
x = self.sym * self.asym
x = self.u1 * self.u1
x = self.u1 * self.i1
x = self.u1 * self.i2
x = self.u2 * self.u2
x = self.u2 * self.i1
x = self.u2 * self.i2
x = self.a1 * self.u1
x = self.a1 * self.u2
x = self.a1 * self.i1
x = self.a1 * self.i2
x = self.a1 * self.s1
x = self.a1 * self.s2
x = self.a1 * self.a1
x = self.a1 * self.a2
x = self.a2 * self.u1
x = self.a2 * self.u2
x = self.a2 * self.i1
x = self.a2 * self.i2
x = self.a2 * self.s1
x = self.a2 * self.s2
x = self.a2 * self.a1
x = self.a2 * self.a2
def test_sub(self):
"""Test sub method"""
# Equivalent units
x = self.sym - self.asym
# Not equivalent
try:
x = self.sym - self.sym_u
except TypeError:
pass
x = self.u1 - self.u1
x = self.u1 - self.i1
x = self.u2 - self.u2
x = self.u2 - self.i2
x = self.a1 - self.u1
x = self.a1 - self.i1
x = self.a1 - self.s1
x = self.a2 - self.u2
x = self.a2 - self.i2
x = self.a2 - self.s2
def test_pow(self):
"""Test pow method"""
x = self.sym ** 2
x = self.u1 ** 2
x = self.u2 ** 2
x = self.a1 ** 2
x = self.a2 ** 2
def test_truediv(self):
"""Test truediv method"""
x = self.sym / self.asym
x = self.u1 / self.u1
x = self.u1 / self.i1
x = self.u1 / self.i2
x = self.u2 / self.u2
x = self.u2 / self.i1
x = self.u2 / self.i2
x = self.a1 / self.u1
x = self.a1 / self.u2
x = self.a1 / self.i1
x = self.a1 / self.i2
x = self.a1 / self.s1
x = self.a1 / self.s2
x = self.a1 / self.a1
x = self.a1 / self.a2
x = self.a2 / self.u1
x = self.a2 / self.u2
x = self.a2 / self.i1
x = self.a2 / self.i2
x = self.a2 / self.s1
x = self.a2 / self.s2
x = self.a2 / self.a1
x = self.a2 / self.a2
def test_floordiv(self):
"""Test floordiv method"""
# Equivalent units
x = self.sym // self.asym
# Not equivalent
try:
x = self.sym // self.sym_u
except TypeError:
pass
x = self.u1 // self.u1
x = self.u1 // self.i1
x = self.u2 // self.u2
x = self.u2 // self.i2
x = self.a1 // self.u1
x = self.a1 // self.i1
x = self.a1 // self.s1
x = self.a1 // self.a1
x = self.a2 // self.u2
x = self.a2 // self.i2
x = self.a2 // self.s2
x = self.a2 // self.a2
def test_log10(self):
"""Test log10 method"""
x = self.u2.log10()
x = self.a2.log10()
def test_polyval(self):
"""Test polyval method"""
coeffs = [1, 2, 3]
x = self.sym.polyval(coeffs)
x = self.u1.polyval([1, 2, 3])
x = self.u2.polyval([1, 2, 3])
x = self.a1.polyval([1, 2, 3])
x = self.a2.polyval([1, 2, 3])
def test_plot(self):
"""Test plot method"""
x = self.sym
x.plot()
def test_sample_from_errors(self):
"""Test the sample_from_errors method"""
# Test symmetric error case
x = self.sym
x.sample_from_errors()
x.sample_from_errors(low_lim=0, up_lim=100)
# Test asymmetric error case
y = self.asym
y.sample_from_errors()
y.sample_from_errors(low_lim=0, up_lim=100)
|
hover2piREPO_NAMEsedkitPATH_START.@sedkit_extracted@sedkit-main@tests@[email protected]_END.py
|
{
"filename": "generalized_linear_model.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/genmod/generalized_linear_model.py",
"type": "Python"
}
|
"""
Generalized linear models currently support estimation using the one-parameter
exponential families
References
----------
Gill, Jeff. 2000. Generalized Linear Models: A Unified Approach.
SAGE QASS Series.
Green, PJ. 1984. "Iteratively reweighted least squares for maximum
likelihood estimation, and some robust and resistant alternatives."
Journal of the Royal Statistical Society, Series B, 46, 149-192.
Hardin, J.W. and Hilbe, J.M. 2007. "Generalized Linear Models and
Extensions." 2nd ed. Stata Press, College Station, TX.
McCullagh, P. and Nelder, J.A. 1989. "Generalized Linear Models." 2nd ed.
Chapman & Hall, Boca Raton.
"""
from statsmodels.compat.pandas import Appender
import warnings
import numpy as np
from numpy.linalg import LinAlgError
from statsmodels.base import _prediction_inference as pred
import statsmodels.base._parameter_inference as pinfer
from statsmodels.base._prediction_inference import PredictionResultsMean
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
from statsmodels.graphics._regressionplots_doc import (
_plot_added_variable_doc,
_plot_ceres_residuals_doc,
_plot_partial_residuals_doc,
)
import statsmodels.regression._tools as reg_tools
import statsmodels.regression.linear_model as lm
from statsmodels.tools.decorators import (
cache_readonly,
cached_data,
cached_value,
)
from statsmodels.tools.data import _as_array_with_name
from statsmodels.tools.docstring import Docstring
from statsmodels.tools.sm_exceptions import (
DomainWarning,
HessianInversionWarning,
PerfectSeparationWarning,
)
from statsmodels.tools.validation import float_like
# need import in module instead of lazily to copy `__doc__`
from . import families
__all__ = ['GLM', 'PredictionResultsMean']
def _check_convergence(criterion, iteration, atol, rtol):
return np.allclose(criterion[iteration], criterion[iteration + 1],
atol=atol, rtol=rtol)
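# _check_convergence declares convergence once successive values of the chosen criterion
# (deviance or params, selected via tol_criterion in _fit_irls) agree to within the given
# absolute and relative tolerances.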
# Remove after 0.13 when bic changes to bic llf
class _ModuleVariable:
_value = None
@property
def use_bic_llf(self):
return self._value
def set_use_bic_llf(self, val):
if val not in (True, False, None):
raise ValueError("Must be True, False or None")
self._value = bool(val) if val is not None else val
_use_bic_helper = _ModuleVariable()
SET_USE_BIC_LLF = _use_bic_helper.set_use_bic_llf
class GLM(base.LikelihoodModel):
__doc__ = """
Generalized Linear Models
GLM inherits from statsmodels.base.model.LikelihoodModel
Parameters
----------
endog : array_like
1d array of endogenous response variable. This array can be 1d or 2d.
Binomial family models accept a 2d array with two columns. If
supplied, each observation is expected to be [success, failure].
exog : array_like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user (models specified using a formula
include an intercept by default). See `statsmodels.tools.add_constant`.
family : family class instance
The default is Gaussian. To specify the binomial distribution
        family = sm.families.Binomial()
Each family can take a link instance as an argument. See
        statsmodels.genmod.families.family for more information.
offset : array_like or None
An offset to be included in the model. If provided, must be
an array whose length is the number of rows in exog.
exposure : array_like or None
Log(exposure) will be added to the linear prediction in the model.
Exposure is only valid if the log link is used. If provided, it must be
an array with the same length as endog.
freq_weights : array_like
1d array of frequency weights. The default is None. If None is selected
or a blank value, then the algorithm will replace with an array of 1's
with length equal to the endog.
WARNING: Using weights is not verified yet for all possible options
and results, see Notes.
var_weights : array_like
1d array of variance (analytic) weights. The default is None. If None
is selected or a blank value, then the algorithm will replace with an
array of 1's with length equal to the endog.
WARNING: Using weights is not verified yet for all possible options
and results, see Notes.
{extra_params}
Attributes
----------
df_model : float
Model degrees of freedom is equal to p - 1, where p is the number
of regressors. Note that the intercept is not reported as a
degree of freedom.
df_resid : float
Residual degrees of freedom is equal to the number of observation n
minus the number of regressors p.
endog : ndarray
See Notes. Note that `endog` is a reference to the data so that if
data is already an array and it is changed, then `endog` changes
as well.
exposure : array_like
Include ln(exposure) in model with coefficient constrained to 1. Can
only be used if the link is the logarithm function.
exog : ndarray
See Notes. Note that `exog` is a reference to the data so that if
data is already an array and it is changed, then `exog` changes
as well.
freq_weights : ndarray
See Notes. Note that `freq_weights` is a reference to the data so that
if data is already an array and it is changed, then `freq_weights`
changes as well.
var_weights : ndarray
See Notes. Note that `var_weights` is a reference to the data so that
if data is already an array and it is changed, then `var_weights`
changes as well.
iteration : int
The number of iterations that fit has run. Initialized at 0.
family : family class instance
The distribution family of the model. Can be any family in
statsmodels.families. Default is Gaussian.
mu : ndarray
The mean response of the transformed variable. `mu` is the value of
the inverse of the link function at lin_pred, where lin_pred is the
linear predicted value of the WLS fit of the transformed variable.
`mu` is only available after fit is called. See
statsmodels.families.family.fitted of the distribution family for more
information.
n_trials : ndarray
See Notes. Note that `n_trials` is a reference to the data so that if
data is already an array and it is changed, then `n_trials` changes
as well. `n_trials` is the number of binomial trials and only available
with that distribution. See statsmodels.families.Binomial for more
information.
normalized_cov_params : ndarray
The p x p normalized covariance of the design / exogenous data.
This is approximately equal to (X.T X)^(-1)
offset : array_like
Include offset in model with coefficient constrained to 1.
scale : float
The estimate of the scale / dispersion of the model fit. Only
available after fit is called. See GLM.fit and GLM.estimate_scale
for more information.
scaletype : str
The scaling used for fitting the model. This is only available after
fit is called. The default is None. See GLM.fit for more information.
weights : ndarray
The value of the weights after the last iteration of fit. Only
available after fit is called. See statsmodels.families.family for
the specific distribution weighting functions.
Examples
--------
>>> import statsmodels.api as sm
>>> data = sm.datasets.scotland.load()
>>> data.exog = sm.add_constant(data.exog)
Instantiate a gamma family model with the default link function.
>>> gamma_model = sm.GLM(data.endog, data.exog,
... family=sm.families.Gamma())
>>> gamma_results = gamma_model.fit()
>>> gamma_results.params
array([-0.01776527, 0.00004962, 0.00203442, -0.00007181, 0.00011185,
-0.00000015, -0.00051868, -0.00000243])
>>> gamma_results.scale
0.0035842831734919055
>>> gamma_results.deviance
0.087388516416999198
>>> gamma_results.pearson_chi2
0.086022796163805704
>>> gamma_results.llf
-83.017202161073527
See Also
--------
statsmodels.genmod.families.family.Family
:ref:`families`
:ref:`links`
Notes
-----
Note: PerfectSeparationError exception has been converted to a
PerfectSeparationWarning and perfect separation or perfect prediction will
not raise an exception by default. (changed in version 0.14)
Only the following combinations make sense for family and link:
============= ===== === ===== ====== ======= === ==== ====== ====== ====
Family ident log logit probit cloglog pow opow nbinom loglog logc
============= ===== === ===== ====== ======= === ==== ====== ====== ====
Gaussian x x x x x x x x x
inv Gaussian x x x
binomial x x x x x x x x x
Poisson x x x
neg binomial x x x x
gamma x x x
Tweedie x x x
============= ===== === ===== ====== ======= === ==== ====== ====== ====
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer to are already
arrays and these arrays are changed, endog and exog will change.
statsmodels supports two separate definitions of weights: frequency weights
and variance weights.
Frequency weights produce the same results as repeating observations by the
frequencies (if those are integers). Frequency weights will keep the number
of observations consistent, but the degrees of freedom will change to
reflect the new weights.
Variance weights (referred to in other packages as analytic weights) are
    used when ``endog`` represents an average or mean. This relies on the
    assumption that the inverse variance scales proportionally to the
weight--an observation that is deemed more credible should have less
variance and therefore have more weight. For the ``Poisson`` family--which
assumes that occurrences scale proportionally with time--a natural practice
would be to use the amount of time as the variance weight and set ``endog``
to be a rate (occurrences per period of time). Similarly, using a
compound Poisson family, namely ``Tweedie``, makes a similar assumption
about the rate (or frequency) of occurrences having variance proportional to
time.
Both frequency and variance weights are verified for all basic results with
nonrobust or heteroscedasticity robust ``cov_type``. Other robust
covariance types have not yet been verified, and at least the small sample
correction is currently not based on the correct total frequency count.
Currently, all residuals are not weighted by frequency, although they may
incorporate ``n_trials`` for ``Binomial`` and ``var_weights``
+---------------+----------------------------------+
| Residual Type | Applicable weights |
+===============+==================================+
| Anscombe | ``var_weights`` |
+---------------+----------------------------------+
| Deviance | ``var_weights`` |
+---------------+----------------------------------+
| Pearson | ``var_weights`` and ``n_trials`` |
+---------------+----------------------------------+
    | Response      | ``n_trials``                     |
+---------------+----------------------------------+
| Working | ``n_trials`` |
+---------------+----------------------------------+
WARNING: Loglikelihood and deviance are not valid in models where
scale is equal to 1 (i.e., ``Binomial``, ``NegativeBinomial``, and
``Poisson``). If variance weights are specified, then results such as
``loglike`` and ``deviance`` are based on a quasi-likelihood
interpretation. The loglikelihood is not correctly specified in this case,
    and statistics based on it, such as AIC or likelihood ratio tests, are not
appropriate.
""".format(extra_params=base._missing_param_doc)
# Maximum number of endogenous variables when using a formula
_formula_max_endog = 2
def __init__(self, endog, exog, family=None, offset=None,
exposure=None, freq_weights=None, var_weights=None,
missing='none', **kwargs):
if type(self) is GLM:
self._check_kwargs(kwargs, ['n_trials'])
if (family is not None) and not isinstance(family.link,
tuple(family.safe_links)):
warnings.warn((f"The {type(family.link).__name__} link function "
"does not respect the domain of the "
f"{type(family).__name__} family."),
DomainWarning)
self._exposure_name = None
self._offset_name = None
self._freq_weights_name = None
self._var_weights_name = None
if exposure is not None:
exposure_array, self._exposure_name = _as_array_with_name(exposure, "exposure")
exposure = np.log(exposure_array)
if offset is not None: # this should probably be done upstream
offset, self._offset_name = _as_array_with_name(offset, "offset")
if freq_weights is not None:
freq_weights, self._freq_weights_name = _as_array_with_name(freq_weights, "freq_weights")
if var_weights is not None:
var_weights, self._var_weights_name = _as_array_with_name(var_weights, "var_weights")
self.freq_weights = freq_weights
self.var_weights = var_weights
super().__init__(endog, exog, missing=missing,
offset=offset, exposure=exposure,
freq_weights=freq_weights,
var_weights=var_weights, **kwargs)
self._check_inputs(family, self.offset, self.exposure, self.endog,
self.freq_weights, self.var_weights)
if offset is None:
delattr(self, 'offset')
if exposure is None:
delattr(self, 'exposure')
self.nobs = self.endog.shape[0]
# things to remove_data
self._data_attr.extend(['weights', 'mu', 'freq_weights',
'var_weights', 'iweights', '_offset_exposure',
'n_trials'])
# register kwds for __init__, offset and exposure are added by super
self._init_keys.append('family')
self._setup_binomial()
# internal usage for recreating a model
if 'n_trials' in kwargs:
self.n_trials = kwargs['n_trials']
# Construct a combined offset/exposure term. Note that
# exposure has already been logged if present.
offset_exposure = 0.
if hasattr(self, 'offset'):
offset_exposure = self.offset
if hasattr(self, 'exposure'):
offset_exposure = offset_exposure + self.exposure
self._offset_exposure = offset_exposure
self.scaletype = None
def initialize(self):
"""
Initialize a generalized linear model.
"""
self.df_model = np.linalg.matrix_rank(self.exog) - 1
if (self.freq_weights is not None) and \
(self.freq_weights.shape[0] == self.endog.shape[0]):
self.wnobs = self.freq_weights.sum()
self.df_resid = self.wnobs - self.df_model - 1
else:
self.wnobs = self.exog.shape[0]
self.df_resid = self.exog.shape[0] - self.df_model - 1
def _check_inputs(self, family, offset, exposure, endog, freq_weights,
var_weights):
# Default family is Gaussian
if family is None:
family = families.Gaussian()
self.family = family
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError("exposure can only be used with the log "
"link function")
elif exposure.shape[0] != endog.shape[0]:
raise ValueError("exposure is not the same length as endog")
if offset is not None:
if offset.shape[0] != endog.shape[0]:
raise ValueError("offset is not the same length as endog")
if freq_weights is not None:
if freq_weights.shape[0] != endog.shape[0]:
raise ValueError("freq weights not the same length as endog")
if len(freq_weights.shape) > 1:
raise ValueError("freq weights has too many dimensions")
# internal flag to store whether freq_weights were not None
self._has_freq_weights = (self.freq_weights is not None)
if self.freq_weights is None:
self.freq_weights = np.ones(endog.shape[0])
# TODO: check do we want to keep None as sentinel for freq_weights
if np.shape(self.freq_weights) == () and self.freq_weights > 1:
self.freq_weights = (self.freq_weights *
np.ones(endog.shape[0]))
if var_weights is not None:
if var_weights.shape[0] != endog.shape[0]:
raise ValueError("var weights not the same length as endog")
if len(var_weights.shape) > 1:
raise ValueError("var weights has too many dimensions")
# internal flag to store whether var_weights were not None
self._has_var_weights = (var_weights is not None)
if var_weights is None:
self.var_weights = np.ones(endog.shape[0])
# TODO: check do we want to keep None as sentinel for var_weights
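        # iweights combines the frequency and variance weights into a single per-observation
        # weight that is reused in the score, Hessian, scale, and IRLS computations below.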
self.iweights = np.asarray(self.freq_weights * self.var_weights)
def _get_init_kwds(self):
# this is a temporary fixup because exposure has been transformed
# see #1609, copied from discrete_model.CountModel
kwds = super()._get_init_kwds()
if 'exposure' in kwds and kwds['exposure'] is not None:
kwds['exposure'] = np.exp(kwds['exposure'])
return kwds
def loglike_mu(self, mu, scale=1.):
"""
Evaluate the log-likelihood for a generalized linear model.
"""
scale = float_like(scale, "scale")
return self.family.loglike(self.endog, mu, self.var_weights,
self.freq_weights, scale)
def loglike(self, params, scale=None):
"""
Evaluate the log-likelihood for a generalized linear model.
"""
scale = float_like(scale, "scale", optional=True)
lin_pred = np.dot(self.exog, params) + self._offset_exposure
expval = self.family.link.inverse(lin_pred)
if scale is None:
scale = self.estimate_scale(expval)
llf = self.family.loglike(self.endog, expval, self.var_weights,
self.freq_weights, scale)
return llf
def score_obs(self, params, scale=None):
"""score first derivative of the loglikelihood for each observation.
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
Returns
-------
score_obs : ndarray, 2d
The first derivative of the loglikelihood function evaluated at
params for each observation.
"""
scale = float_like(scale, "scale", optional=True)
score_factor = self.score_factor(params, scale=scale)
return score_factor[:, None] * self.exog
def score(self, params, scale=None):
"""score, first derivative of the loglikelihood function
Parameters
----------
params : ndarray
Parameter at which score is evaluated.
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
Returns
-------
score : ndarray_1d
The first derivative of the loglikelihood function calculated as
the sum of `score_obs`
"""
scale = float_like(scale, "scale", optional=True)
score_factor = self.score_factor(params, scale=scale)
return np.dot(score_factor, self.exog)
def score_factor(self, params, scale=None):
"""weights for score for each observation
This can be considered as score residuals.
Parameters
----------
params : ndarray
parameter at which score is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
Returns
-------
score_factor : ndarray_1d
A 1d weight vector used in the calculation of the score_obs.
The score_obs are obtained by `score_factor[:, None] * exog`
"""
scale = float_like(scale, "scale", optional=True)
mu = self.predict(params)
if scale is None:
scale = self.estimate_scale(mu)
score_factor = (self.endog - mu) / self.family.link.deriv(mu)
score_factor /= self.family.variance(mu)
score_factor *= self.iweights * self.n_trials
if not scale == 1:
score_factor /= scale
return score_factor
def hessian_factor(self, params, scale=None, observed=True):
"""Weights for calculating Hessian
Parameters
----------
params : ndarray
parameter at which Hessian is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned. If false then the
expected information matrix is returned.
Returns
-------
hessian_factor : ndarray, 1d
A 1d weight vector used in the calculation of the Hessian.
The hessian is obtained by `(exog.T * hessian_factor).dot(exog)`
"""
# calculating eim_factor
mu = self.predict(params)
if scale is None:
scale = self.estimate_scale(mu)
eim_factor = 1 / (self.family.link.deriv(mu)**2 *
self.family.variance(mu))
eim_factor *= self.iweights * self.n_trials
if not observed:
if not scale == 1:
eim_factor /= scale
return eim_factor
# calculating oim_factor, eim_factor is with scale=1
score_factor = self.score_factor(params, scale=1.)
if eim_factor.ndim > 1 or score_factor.ndim > 1:
raise RuntimeError('something wrong')
tmp = self.family.variance(mu) * self.family.link.deriv2(mu)
tmp += self.family.variance.deriv(mu) * self.family.link.deriv(mu)
tmp = score_factor * tmp
        # correct for duplicate iweights in oim_factor and score_factor
tmp /= self.iweights * self.n_trials
oim_factor = eim_factor * (1 + tmp)
if tmp.ndim > 1:
raise RuntimeError('something wrong')
if not scale == 1:
oim_factor /= scale
return oim_factor
def hessian(self, params, scale=None, observed=None):
"""Hessian, second derivative of loglikelihood function
Parameters
----------
params : ndarray
parameter at which Hessian is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
observed : bool
If True, then the observed Hessian is returned (default).
If False, then the expected information matrix is returned.
Returns
-------
hessian : ndarray
Hessian, i.e. observed information, or expected information matrix.
"""
if observed is None:
if getattr(self, '_optim_hessian', None) == 'eim':
observed = False
else:
observed = True
scale = float_like(scale, "scale", optional=True)
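        # Reuse the preallocated exog-sized buffer (_tmp_like_exog, set up during gradient-based
        # fits) so that repeated Hessian evaluations avoid allocating a fresh array each call.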
tmp = getattr(self, '_tmp_like_exog', np.empty_like(self.exog, dtype=float))
factor = self.hessian_factor(params, scale=scale, observed=observed)
np.multiply(self.exog.T, factor, out=tmp.T)
return -tmp.T.dot(self.exog)
def information(self, params, scale=None):
"""
Fisher information matrix.
"""
scale = float_like(scale, "scale", optional=True)
return self.hessian(params, scale=scale, observed=False)
def _derivative_exog(self, params, exog=None, transform="dydx",
dummy_idx=None, count_idx=None,
offset=None, exposure=None):
"""
Derivative of mean, expected endog with respect to the parameters
"""
if exog is None:
exog = self.exog
if (offset is not None) or (exposure is not None):
raise NotImplementedError("offset and exposure not supported")
lin_pred = self.predict(params, exog, which="linear",
offset=offset, exposure=exposure)
k_extra = getattr(self, 'k_extra', 0)
params_exog = params if k_extra == 0 else params[:-k_extra]
margeff = (self.family.link.inverse_deriv(lin_pred)[:, None] *
params_exog)
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
mean = self.family.link.inverse(lin_pred)
margeff /= mean[:,None]
return self._derivative_exog_helper(margeff, params, exog,
dummy_idx, count_idx, transform)
def _derivative_exog_helper(self, margeff, params, exog, dummy_idx,
count_idx, transform):
"""
Helper for _derivative_exog to wrap results appropriately
"""
from statsmodels.discrete.discrete_margins import (
_get_count_effects,
_get_dummy_effects,
)
if count_idx is not None:
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
def _derivative_predict(self, params, exog=None, transform='dydx',
offset=None, exposure=None):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
exog : ndarray or None
Explanatory variables at which derivative are computed.
If None, then the estimation exog is used.
offset, exposure : None
Not yet implemented.
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
# core part is same as derivative_mean_params
# additionally handles exog and transform
if exog is None:
exog = self.exog
if (offset is not None) or (exposure is not None) or (
getattr(self, 'offset', None) is not None):
raise NotImplementedError("offset and exposure not supported")
lin_pred = self.predict(params, exog=exog, which="linear")
idl = self.family.link.inverse_deriv(lin_pred)
dmat = exog * idl[:, None]
if 'ey' in transform:
mean = self.family.link.inverse(lin_pred)
dmat /= mean[:, None]
return dmat
def _deriv_mean_dparams(self, params):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
params : ndarray
parameter at which score is evaluated
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
"""
lin_pred = self.predict(params, which="linear")
idl = self.family.link.inverse_deriv(lin_pred)
dmat = self.exog * idl[:, None]
return dmat
def _deriv_score_obs_dendog(self, params, scale=None):
"""derivative of score_obs w.r.t. endog
Parameters
----------
params : ndarray
parameter at which score is evaluated
scale : None or float
If scale is None, then the default scale will be calculated.
Default scale is defined by `self.scaletype` and set in fit.
If scale is not None, then it is used as a fixed scale.
Returns
-------
derivative : ndarray_2d
The derivative of the score_obs with respect to endog. This
can is given by `score_factor0[:, None] * exog` where
`score_factor0` is the score_factor without the residual.
"""
scale = float_like(scale, "scale", optional=True)
mu = self.predict(params)
if scale is None:
scale = self.estimate_scale(mu)
score_factor = 1 / self.family.link.deriv(mu)
score_factor /= self.family.variance(mu)
score_factor *= self.iweights * self.n_trials
if not scale == 1:
score_factor /= scale
return score_factor[:, None] * self.exog
def score_test(self, params_constrained, k_constraints=None,
exog_extra=None, observed=True):
"""score test for restrictions or for omitted variables
The covariance matrix for the score is based on the Hessian, i.e.
observed information matrix or optionally on the expected information
        matrix.
Parameters
----------
params_constrained : array_like
estimated parameter of the restricted model. This can be the
            parameter estimate for the current model when testing for omitted
variables.
k_constraints : int or None
Number of constraints that were used in the estimation of params
restricted relative to the number of exog in the model.
This must be provided if no exog_extra are given. If exog_extra is
not None, then k_constraints is assumed to be zero if it is None.
exog_extra : None or array_like
Explanatory variables that are jointly tested for inclusion in the
model, i.e. omitted variables.
observed : bool
If True, then the observed Hessian is used in calculating the
covariance matrix of the score. If false then the expected
information matrix is used.
Returns
-------
chi2_stat : float
chisquare statistic for the score test
p-value : float
P-value of the score test based on the chisquare distribution.
df : int
Degrees of freedom used in the p-value calculation. This is equal
to the number of constraints.
Notes
-----
not yet verified for case with scale not equal to 1.
"""
if exog_extra is None:
if k_constraints is None:
                raise ValueError('if exog_extra is None, then k_constraints '
                                 'needs to be given')
score = self.score(params_constrained)
hessian = self.hessian(params_constrained, observed=observed)
else:
# exog_extra = np.asarray(exog_extra)
if k_constraints is None:
k_constraints = 0
ex = np.column_stack((self.exog, exog_extra))
k_constraints += ex.shape[1] - self.exog.shape[1]
score_factor = self.score_factor(params_constrained)
score = (score_factor[:, None] * ex).sum(0)
hessian_factor = self.hessian_factor(params_constrained,
observed=observed)
hessian = -np.dot(ex.T * hessian_factor, ex)
from scipy import stats
# TODO check sign, why minus?
chi2stat = -score.dot(np.linalg.solve(hessian, score[:, None]))
pval = stats.chi2.sf(chi2stat, k_constraints)
# return a stats results instance instead? Contrast?
return chi2stat, pval, k_constraints
def _update_history(self, tmp_result, mu, history):
"""
Helper method to update history during iterative fit.
"""
history['params'].append(tmp_result.params)
history['deviance'].append(self.family.deviance(self.endog, mu,
self.var_weights,
self.freq_weights,
self.scale))
return history
def estimate_scale(self, mu):
"""
Estimate the dispersion/scale.
        Type of scale can be chosen in the fit method.
Parameters
----------
mu : ndarray
mu is the mean response estimate
Returns
-------
Estimate of scale
Notes
-----
The default scale for Binomial, Poisson and Negative Binomial
families is 1. The default for the other families is Pearson's
Chi-Square estimate.
See Also
--------
statsmodels.genmod.generalized_linear_model.GLM.fit
"""
if not self.scaletype:
if isinstance(self.family, (families.Binomial, families.Poisson,
families.NegativeBinomial)):
return 1.
else:
return self._estimate_x2_scale(mu)
if isinstance(self.scaletype, float):
return np.array(self.scaletype)
if isinstance(self.scaletype, str):
if self.scaletype.lower() == 'x2':
return self._estimate_x2_scale(mu)
elif self.scaletype.lower() == 'dev':
return (self.family.deviance(self.endog, mu, self.var_weights,
self.freq_weights, 1.) /
(self.df_resid))
else:
raise ValueError("Scale %s with type %s not understood" %
(self.scaletype, type(self.scaletype)))
else:
raise ValueError("Scale %s with type %s not understood" %
(self.scaletype, type(self.scaletype)))
def _estimate_x2_scale(self, mu):
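        # Pearson chi-square estimate of the scale: weighted squared residuals divided by the
        # variance function, summed and normalized by the residual degrees of freedom.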
resid = np.power(self.endog - mu, 2) * self.iweights
return np.sum(resid / self.family.variance(mu)) / self.df_resid
def estimate_tweedie_power(self, mu, method='brentq', low=1.01, high=5.):
"""
Tweedie specific function to estimate scale and the variance parameter.
The variance parameter is also referred to as p, xi, or shape.
Parameters
----------
mu : array_like
Fitted mean response variable
method : str, defaults to 'brentq'
Scipy optimizer used to solve the Pearson equation. Only brentq
currently supported.
low : float, optional
Low end of the bracketing interval [a,b] to be used in the search
for the power. Defaults to 1.01.
high : float, optional
High end of the bracketing interval [a,b] to be used in the search
for the power. Defaults to 5.
Returns
-------
power : float
The estimated shape or power.
"""
if method == 'brentq':
from scipy.optimize import brentq
def psi_p(power, mu):
scale = ((self.iweights * (self.endog - mu) ** 2 /
(mu ** power)).sum() / self.df_resid)
return (np.sum(self.iweights * ((self.endog - mu) ** 2 /
(scale * (mu ** power)) - 1) *
np.log(mu)) / self.freq_weights.sum())
power = brentq(psi_p, low, high, args=(mu))
else:
raise NotImplementedError('Only brentq can currently be used')
return power
def predict(self, params, exog=None, exposure=None, offset=None,
which="mean", linear=None):
"""
Return predicted values for a design matrix
Parameters
----------
params : array_like
Parameters / coefficients of a GLM.
exog : array_like, optional
            Design / exogenous data. If exog is None, model exog is used.
exposure : array_like, optional
Exposure time values, only can be used with the log link
function. See notes for details.
offset : array_like, optional
Offset values. See notes for details.
        which : {'mean', 'linear', 'var_unscaled'}, optional
            Statistic to predict. Default is 'mean'.
- 'mean' returns the conditional expectation of endog E(y | x),
i.e. inverse of the model's link function of linear predictor.
- 'linear' returns the linear predictor of the mean function.
- 'var_unscaled' variance of endog implied by the likelihood model.
This does not include scale or var_weights.
linear : bool
            The ``linear`` keyword is deprecated and will be removed,
use ``which`` keyword instead.
If True, returns the linear predicted values. If False or None,
then the statistic specified by ``which`` will be returned.
Returns
-------
An array of fitted values
Notes
-----
Any `exposure` and `offset` provided here take precedence over
the `exposure` and `offset` used in the model fit. If `exog`
is passed as an argument here, then any `exposure` and
`offset` values in the fit will be ignored.
Exposure values must be strictly positive.
"""
if linear is not None:
msg = 'linear keyword is deprecated, use which="linear"'
warnings.warn(msg, FutureWarning)
if linear is True:
which = "linear"
# Use fit offset if appropriate
if offset is None and exog is None and hasattr(self, 'offset'):
offset = self.offset
elif offset is None:
offset = 0.
if exposure is not None and not isinstance(self.family.link,
families.links.Log):
raise ValueError("exposure can only be used with the log link "
"function")
# Use fit exposure if appropriate
if exposure is None and exog is None and hasattr(self, 'exposure'):
# Already logged
exposure = self.exposure
elif exposure is None:
exposure = 0.
else:
exposure = np.log(np.asarray(exposure))
if exog is None:
exog = self.exog
linpred = np.dot(exog, params) + offset + exposure
if which == "mean":
return self.family.fitted(linpred)
elif which == "linear":
return linpred
elif which == "var_unscaled":
mean = self.family.fitted(linpred)
var_ = self.family.variance(mean)
return var_
else:
raise ValueError(f'The which value "{which}" is not recognized')
def get_distribution(self, params, scale=None, exog=None, exposure=None,
offset=None, var_weights=1., n_trials=1.):
"""
        Return an instance of the predictive distribution.
Parameters
----------
params : array_like
The model parameters.
scale : scalar
The scale parameter.
exog : array_like
The predictor variable matrix.
offset : array_like or None
Offset variable for predicted mean.
exposure : array_like or None
Log(exposure) will be added to the linear prediction.
var_weights : array_like
1d array of variance (analytic) weights. The default is None.
n_trials : int
Number of trials for the binomial distribution. The default is 1
which corresponds to a Bernoulli random variable.
Returns
-------
gen
Instance of a scipy frozen distribution based on estimated
parameters.
Use the ``rvs`` method to generate random values.
Notes
-----
Due to the behavior of ``scipy.stats.distributions objects``, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced.
"""
scale = float_like(scale, "scale", optional=True)
# use scale=1, independent of QMLE scale for discrete
if isinstance(self.family, (families.Binomial, families.Poisson,
families.NegativeBinomial)):
scale = 1.
mu = self.predict(params, exog, exposure, offset, which="mean")
kwds = {}
if (np.any(n_trials != 1) and
isinstance(self.family, families.Binomial)):
kwds["n_trials"] = n_trials
distr = self.family.get_distribution(mu, scale,
var_weights=var_weights, **kwds)
return distr
def _setup_binomial(self):
# this checks what kind of data is given for Binomial.
# family will need a reference to endog if this is to be removed from
# preprocessing
self.n_trials = np.ones(self.endog.shape[0]) # For binomial
if isinstance(self.family, families.Binomial):
tmp = self.family.initialize(self.endog, self.freq_weights)
self.endog = tmp[0]
self.n_trials = tmp[1]
self._init_keys.append('n_trials')
def fit(self, start_params=None, maxiter=100, method='IRLS', tol=1e-8,
scale=None, cov_type='nonrobust', cov_kwds=None, use_t=None,
full_output=True, disp=False, max_start_irls=3, **kwargs):
"""
Fits a generalized linear model for a given family.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
The default is family-specific and is given by the
``family.starting_mu(endog)``. If start_params is given then the
initial mean will be calculated as ``np.dot(exog, start_params)``.
maxiter : int, optional
Default is 100.
method : str
Default is 'IRLS' for iteratively reweighted least squares.
Otherwise gradient optimization is used.
tol : float
Convergence tolerance. Default is 1e-8.
scale : str or float, optional
`scale` can be 'X2', 'dev', or a float
The default value is None, which uses `X2` for Gamma, Gaussian,
and Inverse Gaussian.
`X2` is Pearson's chi-square divided by `df_resid`.
The default is 1 for the Binomial and Poisson families.
`dev` is the deviance divided by df_resid
cov_type : str
The type of parameter estimate covariance matrix to compute.
cov_kwds : dict-like
Extra arguments for calculating the covariance of the parameter
estimates.
use_t : bool
If True, the Student t-distribution is used for inference.
full_output : bool, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
            Not used if method is IRLS.
disp : bool, optional
Set to True to print convergence messages. Not used if method is
IRLS.
max_start_irls : int
The number of IRLS iterations used to obtain starting
values for gradient optimization. Only relevant if
`method` is set to something other than 'IRLS'.
atol : float, optional
(available with IRLS fits) The absolute tolerance criterion that
must be satisfied. Defaults to ``tol``. Convergence is attained
when: :math:`rtol * prior + atol > abs(current - prior)`
rtol : float, optional
(available with IRLS fits) The relative tolerance criterion that
must be satisfied. Defaults to 0 which means ``rtol`` is not used.
Convergence is attained when:
:math:`rtol * prior + atol > abs(current - prior)`
tol_criterion : str, optional
(available with IRLS fits) Defaults to ``'deviance'``. Can
optionally be ``'params'``.
wls_method : str, optional
(available with IRLS fits) options are 'lstsq', 'pinv' and 'qr'
specifies which linear algebra function to use for the irls
optimization. Default is `lstsq` which uses the same underlying
svd based approach as 'pinv', but is faster during iterations.
'lstsq' and 'pinv' regularize the estimate in singular and
near-singular cases by truncating small singular values based
on `rcond` of the respective numpy.linalg function. 'qr' is
only valid for cases that are not singular nor near-singular.
optim_hessian : {'eim', 'oim'}, optional
(available with scipy optimizer fits) When 'oim'--the default--the
observed Hessian is used in fitting. 'eim' is the expected Hessian.
            This may provide more stable fits, but adds the assumption that the
Hessian is correctly specified.
Notes
-----
If method is 'IRLS', then an additional keyword 'attach_wls' is
available. This is currently for internal use only and might change
        in future versions. If `attach_wls` is true, then the final WLS
instance of the IRLS iteration is attached to the results instance
as `results_wls` attribute.
"""
if isinstance(scale, str):
scale = scale.lower()
if scale not in ("x2", "dev"):
raise ValueError(
"scale must be either X2 or dev when a string."
)
elif scale is not None:
# GH-6627
try:
scale = float(scale)
except Exception as exc:
raise type(exc)(
"scale must be a float if given and no a string."
)
self.scaletype = scale
if method.lower() == "irls":
if cov_type.lower() == 'eim':
cov_type = 'nonrobust'
return self._fit_irls(start_params=start_params, maxiter=maxiter,
tol=tol, scale=scale, cov_type=cov_type,
cov_kwds=cov_kwds, use_t=use_t, **kwargs)
else:
self._optim_hessian = kwargs.get('optim_hessian')
if self._optim_hessian is not None:
del kwargs['optim_hessian']
self._tmp_like_exog = np.empty_like(self.exog, dtype=float)
fit_ = self._fit_gradient(start_params=start_params,
method=method,
maxiter=maxiter,
tol=tol, scale=scale,
full_output=full_output,
disp=disp, cov_type=cov_type,
cov_kwds=cov_kwds, use_t=use_t,
max_start_irls=max_start_irls,
**kwargs)
del self._optim_hessian
del self._tmp_like_exog
return fit_
def _fit_gradient(self, start_params=None, method="newton",
maxiter=100, tol=1e-8, full_output=True,
disp=True, scale=None, cov_type='nonrobust',
cov_kwds=None, use_t=None, max_start_irls=3,
**kwargs):
"""
Fits a generalized linear model for a given family iteratively
using the scipy gradient optimizers.
"""
# fix scale during optimization, see #4616
scaletype = self.scaletype
self.scaletype = 1.
if (max_start_irls > 0) and (start_params is None):
irls_rslt = self._fit_irls(start_params=start_params,
maxiter=max_start_irls,
tol=tol, scale=1., cov_type='nonrobust',
cov_kwds=None, use_t=None,
**kwargs)
start_params = irls_rslt.params
del irls_rslt
rslt = super().fit(start_params=start_params,
maxiter=maxiter, full_output=full_output,
method=method, disp=disp, **kwargs)
# reset scaletype to original
self.scaletype = scaletype
mu = self.predict(rslt.params)
scale = self.estimate_scale(mu)
if rslt.normalized_cov_params is None:
cov_p = None
else:
cov_p = rslt.normalized_cov_params / scale
if cov_type.lower() == 'eim':
oim = False
cov_type = 'nonrobust'
else:
oim = True
try:
cov_p = np.linalg.inv(-self.hessian(rslt.params, observed=oim)) / scale
except LinAlgError:
warnings.warn('Inverting hessian failed, no bse or cov_params '
'available', HessianInversionWarning)
cov_p = None
results_class = getattr(self, '_results_class', GLMResults)
results_class_wrapper = getattr(self, '_results_class_wrapper', GLMResultsWrapper)
glm_results = results_class(self, rslt.params,
cov_p,
scale,
cov_type=cov_type, cov_kwds=cov_kwds,
use_t=use_t)
# TODO: iteration count is not always available
history = {'iteration': 0}
if full_output:
glm_results.mle_retvals = rslt.mle_retvals
if 'iterations' in rslt.mle_retvals:
history['iteration'] = rslt.mle_retvals['iterations']
glm_results.method = method
glm_results.fit_history = history
return results_class_wrapper(glm_results)
def _fit_irls(self, start_params=None, maxiter=100, tol=1e-8,
scale=None, cov_type='nonrobust', cov_kwds=None,
use_t=None, **kwargs):
"""
Fits a generalized linear model for a given family using
iteratively reweighted least squares (IRLS).
"""
attach_wls = kwargs.pop('attach_wls', False)
atol = kwargs.get('atol')
rtol = kwargs.get('rtol', 0.)
tol_criterion = kwargs.get('tol_criterion', 'deviance')
wls_method = kwargs.get('wls_method', 'lstsq')
atol = tol if atol is None else atol
endog = self.endog
wlsexog = self.exog
if start_params is None:
start_params = np.zeros(self.exog.shape[1])
mu = self.family.starting_mu(self.endog)
lin_pred = self.family.predict(mu)
else:
lin_pred = np.dot(wlsexog, start_params) + self._offset_exposure
mu = self.family.fitted(lin_pred)
self.scale = self.estimate_scale(mu)
dev = self.family.deviance(self.endog, mu, self.var_weights,
self.freq_weights, self.scale)
if np.isnan(dev):
raise ValueError("The first guess on the deviance function "
"returned a nan. This could be a boundary "
" problem and should be reported.")
# first guess on the deviance is assumed to be scaled by 1.
# params are none to start, so they line up with the deviance
history = dict(params=[np.inf, start_params], deviance=[np.inf, dev])
converged = False
criterion = history[tol_criterion]
# This special case is used to get the likelihood for a specific
# params vector.
if maxiter == 0:
mu = self.family.fitted(lin_pred)
self.scale = self.estimate_scale(mu)
wls_results = lm.RegressionResults(self, start_params, None)
iteration = 0
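        # Each IRLS iteration below forms the working response
        #     z = lin_pred + g'(mu) * (endog - mu) - offset_exposure
        # (g being the link function), builds the IRLS weights
        # iweights * n_trials * family.weights(mu), and solves a weighted least squares
        # problem in (z, exog) to update the parameters.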
for iteration in range(maxiter):
self.weights = (self.iweights * self.n_trials *
self.family.weights(mu))
wlsendog = (lin_pred + self.family.link.deriv(mu) * (self.endog-mu)
- self._offset_exposure)
wls_mod = reg_tools._MinimalWLS(wlsendog, wlsexog,
self.weights, check_endog=True,
check_weights=True)
wls_results = wls_mod.fit(method=wls_method)
lin_pred = np.dot(self.exog, wls_results.params)
lin_pred += self._offset_exposure
mu = self.family.fitted(lin_pred)
history = self._update_history(wls_results, mu, history)
self.scale = self.estimate_scale(mu)
if endog.squeeze().ndim == 1 and np.allclose(mu - endog, 0):
msg = ("Perfect separation or prediction detected, "
"parameter may not be identified")
warnings.warn(msg, category=PerfectSeparationWarning)
converged = _check_convergence(criterion, iteration + 1, atol,
rtol)
if converged:
break
self.mu = mu
if maxiter > 0: # Only if iterative used
wls_method2 = 'pinv' if wls_method == 'lstsq' else wls_method
wls_model = lm.WLS(wlsendog, wlsexog, self.weights)
wls_results = wls_model.fit(method=wls_method2)
glm_results = GLMResults(self, wls_results.params,
wls_results.normalized_cov_params,
self.scale,
cov_type=cov_type, cov_kwds=cov_kwds,
use_t=use_t)
glm_results.method = "IRLS"
glm_results.mle_settings = {}
glm_results.mle_settings['wls_method'] = wls_method
glm_results.mle_settings['optimizer'] = glm_results.method
if (maxiter > 0) and (attach_wls is True):
glm_results.results_wls = wls_results
history['iteration'] = iteration + 1
glm_results.fit_history = history
glm_results.converged = converged
return GLMResultsWrapper(glm_results)
def fit_regularized(self, method="elastic_net", alpha=0.,
start_params=None, refit=False,
opt_method="bfgs", **kwargs):
r"""
Return a regularized fit to a linear regression model.
Parameters
----------
method : {'elastic_net'}
Only the `elastic_net` approach is currently implemented.
alpha : scalar or array_like
The penalty weight. If a scalar, the same penalty weight
applies to all variables in the model. If a vector, it
must have the same length as `params`, and contains a
penalty weight for each coefficient.
start_params : array_like
Starting values for `params`.
refit : bool
If True, the model is refit using only the variables that
have non-zero coefficients in the regularized fit. The
refitted model is not regularized.
opt_method : string
The method used for numerical optimization.
**kwargs
Additional keyword arguments used when fitting the model.
Returns
-------
GLMResults
An array or a GLMResults object, same type returned by `fit`.
Notes
-----
The penalty is the ``elastic net`` penalty, which is a
combination of L1 and L2 penalties.
The function that is minimized is:
.. math::
-loglike/n + alpha*((1-L1\_wt)*|params|_2^2/2 + L1\_wt*|params|_1)
where :math:`|*|_1` and :math:`|*|_2` are the L1 and L2 norms.
Post-estimation results are based on the same data used to
select variables, hence may be subject to overfitting biases.
The elastic_net method uses the following keyword arguments:
maxiter : int
Maximum number of iterations
L1_wt : float
Must be in [0, 1]. The L1 penalty has weight L1_wt and the
L2 penalty has weight 1 - L1_wt.
cnvrg_tol : float
Convergence threshold for maximum parameter change after
one sweep through all coefficients.
zero_tol : float
Coefficients below this threshold are treated as zero.
"""
if kwargs.get("L1_wt", 1) == 0:
return self._fit_ridge(alpha, start_params, opt_method)
from statsmodels.base.elastic_net import fit_elasticnet
if method != "elastic_net":
raise ValueError("method for fit_regularized must be elastic_net")
defaults = {"maxiter": 50, "L1_wt": 1, "cnvrg_tol": 1e-10,
"zero_tol": 1e-10}
defaults.update(kwargs)
llkw = kwargs.get("loglike_kwds", {})
sckw = kwargs.get("score_kwds", {})
hekw = kwargs.get("hess_kwds", {})
llkw["scale"] = 1
sckw["scale"] = 1
hekw["scale"] = 1
defaults["loglike_kwds"] = llkw
defaults["score_kwds"] = sckw
defaults["hess_kwds"] = hekw
result = fit_elasticnet(self, method=method,
alpha=alpha,
start_params=start_params,
refit=refit,
**defaults)
self.mu = self.predict(result.params)
self.scale = self.estimate_scale(self.mu)
if not result.converged:
warnings.warn("Elastic net fitting did not converge")
return result
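# Hedged usage sketch for ``fit_regularized`` (illustrative only, not part of the
# original module): ``y`` and ``X`` are assumed, user-supplied arrays; the calls
# use only keywords documented in the docstring above.
#
#     import statsmodels.api as sm
#     model = sm.GLM(y, X, family=sm.families.Poisson())
#     # pure lasso: L1_wt=1 with a common penalty weight on every coefficient
#     res_l1 = model.fit_regularized(method="elastic_net", alpha=0.1, L1_wt=1.0)
#     # L1_wt=0 is the ridge special case and dispatches to ``_fit_ridge`` below
#     res_l2 = model.fit_regularized(method="elastic_net", alpha=0.1, L1_wt=0.0)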
def _fit_ridge(self, alpha, start_params, method):
if start_params is None:
start_params = np.zeros(self.exog.shape[1])
def fun(x):
return -(self.loglike(x) / self.nobs - np.sum(alpha * x**2) / 2)
def grad(x):
return -(self.score(x) / self.nobs - alpha * x)
from scipy.optimize import minimize
from statsmodels.base.elastic_net import (
RegularizedResults,
RegularizedResultsWrapper,
)
mr = minimize(fun, start_params, jac=grad, method=method)
params = mr.x
if not mr.success:
ngrad = np.sqrt(np.sum(mr.jac**2))
msg = "GLM ridge optimization may have failed, |grad|=%f" % ngrad
warnings.warn(msg)
results = RegularizedResults(self, params)
results = RegularizedResultsWrapper(results)
return results
def fit_constrained(self, constraints, start_params=None, **fit_kwds):
"""fit the model subject to linear equality constraints
The constraints are of the form `R params = q`
where R is the constraint_matrix and q is the vector of
constraint_values.
The estimation creates a new model with transformed design matrix,
exog, and converts the results back to the original parameterization.
Parameters
----------
constraints : formula expression or tuple
If it is a tuple, then the constraint needs to be given by two
arrays (constraint_matrix, constraint_value), i.e. (R, q).
Otherwise, the constraints can be given as strings or list of
strings.
see t_test for details
start_params : None or array_like
starting values for the optimization. `start_params` needs to be
given in the original parameter space and are internally
transformed.
**fit_kwds : keyword arguments
fit_kwds are used in the optimization of the transformed model.
Returns
-------
results : Results instance
"""
from patsy import DesignInfo
from statsmodels.base._constraints import (
LinearConstraints,
fit_constrained,
)
# same pattern as in base.LikelihoodModel.t_test
lc = DesignInfo(self.exog_names).linear_constraint(constraints)
R, q = lc.coefs, lc.constants
# TODO: add start_params option, need access to transformation
# fit_constrained needs to do the transformation
params, cov, res_constr = fit_constrained(self, R, q,
start_params=start_params,
fit_kwds=fit_kwds)
# create dummy results Instance, TODO: wire up properly
res = self.fit(start_params=params, maxiter=0) # we get a wrapper back
res._results.params = params
res._results.cov_params_default = cov
cov_type = fit_kwds.get('cov_type', 'nonrobust')
if cov_type != 'nonrobust':
res._results.normalized_cov_params = cov / res_constr.scale
else:
res._results.normalized_cov_params = None
res._results.scale = res_constr.scale
k_constr = len(q)
res._results.df_resid += k_constr
res._results.df_model -= k_constr
res._results.constraints = LinearConstraints.from_patsy(lc)
res._results.k_constr = k_constr
res._results.results_constrained = res_constr
return res
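# Hedged usage sketch for ``fit_constrained`` (illustrative only; assumes a
# formula-based model whose exogenous columns are named ``x1`` and ``x2``):
#
#     res_eq = model.fit_constrained("x1 = x2")                # string constraint
#     res_zero = model.fit_constrained(["x1 = 0", "x2 = 0"])   # list of strings
#     # equivalently, pass (R, q) arrays so that R @ params == q
#     res_rq = model.fit_constrained((np.eye(2, model.exog.shape[1]), np.zeros(2)))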
@property
def offset_name(self):
"""
Name of the offset variable if available. If offset is not a pd.Series,
defaults to 'offset'.
"""
return self._offset_name
@property
def exposure_name(self):
"""
Name of the exposure variable if available. If exposure is not a pd.Series,
defaults to 'exposure'.
"""
return self._exposure_name
@property
def freq_weights_name(self):
"""
Name of the freq weights variable if available. If freq_weights is not a
pd.Series, defaults to 'freq_weights'.
"""
return self._freq_weights_name
@property
def var_weights_name(self):
"""
Name of var weights variable if available. If var_weights is not a pd.Series,
defaults to 'var_weights'.
"""
return self._var_weights_name
get_prediction_doc = Docstring(pred.get_prediction_glm.__doc__)
get_prediction_doc.remove_parameters("pred_kwds")
class GLMResults(base.LikelihoodModelResults):
"""
Class to contain GLM results.
GLMResults inherits from statsmodels.LikelihoodModelResults
Attributes
----------
df_model : float
See GLM.df_model
df_resid : float
See GLM.df_resid
fit_history : dict
Contains information about the iterations. Its keys are `iteration`,
`deviance` and `params`.
model : class instance
Pointer to GLM model instance that called fit.
nobs : float
The number of observations n.
normalized_cov_params : ndarray
See GLM docstring
params : ndarray
The coefficients of the fitted model. Note that interpretation
of the coefficients often depends on the distribution family and the
data.
pvalues : ndarray
The two-tailed p-values for the parameters.
scale : float
The estimate of the scale / dispersion for the model fit.
See GLM.fit and GLM.estimate_scale for more information.
stand_errors : ndarray
The standard errors of the fitted GLM. #TODO still named bse
See Also
--------
statsmodels.base.model.LikelihoodModelResults
"""
def __init__(self, model, params, normalized_cov_params, scale,
cov_type='nonrobust', cov_kwds=None, use_t=None):
super().__init__(
model,
params,
normalized_cov_params=normalized_cov_params,
scale=scale)
self.family = model.family
self._endog = model.endog
self.nobs = model.endog.shape[0]
self._freq_weights = model.freq_weights
self._var_weights = model.var_weights
self._iweights = model.iweights
if isinstance(self.family, families.Binomial):
self._n_trials = self.model.n_trials
else:
self._n_trials = 1
self.df_resid = model.df_resid
self.df_model = model.df_model
self._cache = {}
# are these intermediate results needed or can we just
# call the model's attributes?
# for remove data and pickle without large arrays
self._data_attr.extend(['results_constrained', '_freq_weights',
'_var_weights', '_iweights'])
self._data_in_cache.extend(['null', 'mu'])
self._data_attr_model = getattr(self, '_data_attr_model', [])
self._data_attr_model.append('mu')
# robust covariance
from statsmodels.base.covtype import get_robustcov_results
if use_t is None:
self.use_t = False # TODO: class default
else:
self.use_t = use_t
# temporary warning
ct = (cov_type == 'nonrobust') or (cov_type.upper().startswith('HC'))
if self.model._has_freq_weights and not ct:
from statsmodels.tools.sm_exceptions import SpecificationWarning
warnings.warn('cov_type not fully supported with freq_weights',
SpecificationWarning)
if self.model._has_var_weights and not ct:
from statsmodels.tools.sm_exceptions import SpecificationWarning
warnings.warn('cov_type not fully supported with var_weights',
SpecificationWarning)
if cov_type == 'nonrobust':
self.cov_type = 'nonrobust'
self.cov_kwds = {'description': 'Standard Errors assume that the' +
' covariance matrix of the errors is correctly ' +
'specified.'}
else:
if cov_kwds is None:
cov_kwds = {}
get_robustcov_results(self, cov_type=cov_type, use_self=True,
use_t=use_t, **cov_kwds)
@cached_data
def resid_response(self):
"""
Response residuals. The response residuals are defined as
`endog` - `fittedvalues`
"""
return self._n_trials * (self._endog-self.mu)
@cached_data
def resid_pearson(self):
"""
Pearson residuals. The Pearson residuals are defined as
(`endog` - `mu`)/sqrt(VAR(`mu`)) where VAR is the distribution
specific variance function. See statsmodels.families.family and
statsmodels.families.varfuncs for more information.
"""
return (np.sqrt(self._n_trials) * (self._endog-self.mu) *
np.sqrt(self._var_weights) /
np.sqrt(self.family.variance(self.mu)))
@cached_data
def resid_working(self):
"""
Working residuals. The working residuals are defined as
`resid_response`/link'(`mu`). See statsmodels.family.links for the
derivatives of the link functions. They are defined analytically.
"""
# Isn't self.resid_response already adjusted by _n_trials?
val = (self.resid_response * self.family.link.deriv(self.mu))
val *= self._n_trials
return val
@cached_data
def resid_anscombe(self):
"""
Anscombe residuals. See statsmodels.families.family for distribution-
specific Anscombe residuals. Currently, the unscaled residuals are
provided. In a future version, the scaled residuals will be provided.
"""
return self.resid_anscombe_scaled
@cached_data
def resid_anscombe_scaled(self):
"""
Scaled Anscombe residuals. See statsmodels.families.family for
distribution-specific Anscombe residuals.
"""
return self.family.resid_anscombe(self._endog, self.fittedvalues,
var_weights=self._var_weights,
scale=self.scale)
@cached_data
def resid_anscombe_unscaled(self):
"""
Unscaled Anscombe residuals. See statsmodels.families.family for
distribution-specific Anscombe residuals.
"""
return self.family.resid_anscombe(self._endog, self.fittedvalues,
var_weights=self._var_weights,
scale=1.)
@cached_data
def resid_deviance(self):
"""
Deviance residuals. See statsmodels.families.family for distribution-
specific deviance residuals.
"""
dev = self.family.resid_dev(self._endog, self.fittedvalues,
var_weights=self._var_weights,
scale=1.)
return dev
@cached_value
def pearson_chi2(self):
"""
Pearson's Chi-Squared statistic is defined as the sum of the squares
of the Pearson residuals.
"""
chisq = (self._endog - self.mu)**2 / self.family.variance(self.mu)
chisq *= self._iweights * self._n_trials
chisqsum = np.sum(chisq)
return chisqsum
@cached_data
def fittedvalues(self):
"""
The estimated mean response.
This is the value of the inverse of the link function at
lin_pred, where lin_pred is the linear predicted value
obtained by multiplying the design matrix by the coefficient
vector.
"""
return self.mu
@cached_data
def mu(self):
"""
See GLM docstring.
"""
return self.model.predict(self.params)
@cache_readonly
def null(self):
"""
Fitted values of the null model
"""
endog = self._endog
model = self.model
exog = np.ones((len(endog), 1))
kwargs = model._get_init_kwds().copy()
kwargs.pop('family')
for key in getattr(model, '_null_drop_keys', []):
del kwargs[key]
start_params = np.atleast_1d(self.family.link(endog.mean()))
oe = self.model._offset_exposure
if not (np.size(oe) == 1 and oe == 0):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DomainWarning)
mod = GLM(endog, exog, family=self.family, **kwargs)
fitted = mod.fit(start_params=start_params).fittedvalues
else:
# correct if fitted is identical across observations
wls_model = lm.WLS(endog, exog,
weights=self._iweights * self._n_trials)
fitted = wls_model.fit().fittedvalues
return fitted
@cache_readonly
def deviance(self):
"""
See statsmodels.families.family for the distribution-specific deviance
functions.
"""
return self.family.deviance(self._endog, self.mu, self._var_weights,
self._freq_weights)
@cache_readonly
def null_deviance(self):
"""The value of the deviance function for the model fit with a constant
as the only regressor."""
return self.family.deviance(self._endog, self.null, self._var_weights,
self._freq_weights)
@cache_readonly
def llnull(self):
"""
Log-likelihood of the model fit with a constant as the only regressor
"""
return self.family.loglike(self._endog, self.null,
var_weights=self._var_weights,
freq_weights=self._freq_weights,
scale=self.scale)
def llf_scaled(self, scale=None):
"""
Return the log-likelihood at the given scale, using the
estimated scale if the provided scale is None. In the Gaussian
case with linear link, the concentrated log-likelihood is
returned.
"""
_modelfamily = self.family
if scale is None:
if (isinstance(self.family, families.Gaussian) and
isinstance(self.family.link, families.links.Power) and
(self.family.link.power == 1.)):
# Scale for the concentrated Gaussian log likelihood
# (profile log likelihood with the scale parameter
# profiled out).
scale = (np.power(self._endog - self.mu, 2) * self._iweights).sum()
scale /= self.model.wnobs
else:
scale = self.scale
val = _modelfamily.loglike(self._endog, self.mu,
var_weights=self._var_weights,
freq_weights=self._freq_weights,
scale=scale)
return val
@cached_value
def llf(self):
"""
Value of the loglikelihood function evaluated at params.
See statsmodels.families.family for distribution-specific
loglikelihoods. The result uses the concentrated
log-likelihood if the family is Gaussian and the link is linear,
otherwise it uses the non-concentrated log-likelihood evaluated
at the estimated scale.
"""
return self.llf_scaled()
def pseudo_rsquared(self, kind="cs"):
"""
Pseudo R-squared
Cox-Snell likelihood ratio pseudo R-squared is valid for both discrete
and continuous data. McFadden's pseudo R-squared is only valid for
discrete data.
Cox & Snell's pseudo-R-squared: 1 - exp((llnull - llf)*(2/nobs))
McFadden's pseudo-R-squared: 1 - (llf / llnull)
Parameters
----------
kind : P"cs", "mcf"}
Type of pseudo R-square to return
Returns
-------
float
Pseudo R-squared
"""
kind = kind.lower()
if kind.startswith("mcf"):
prsq = 1 - self.llf / self.llnull
elif kind.startswith("cox") or kind in ["cs", "lr"]:
prsq = 1 - np.exp((self.llnull - self.llf) * (2 / self.nobs))
else:
raise ValueError("only McFadden and Cox-Snell are available")
return prsq
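# Worked illustration of the two formulas above (hypothetical numbers, for
# orientation only): with llf = -100.0, llnull = -120.0 and nobs = 50,
#     McFadden:   1 - llf/llnull              = 1 - (-100 / -120)     ~ 0.167
#     Cox-Snell:  1 - exp((llnull - llf)*2/n) = 1 - exp(-20 * 2 / 50) ~ 0.551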
@cached_value
def aic(self):
"""
Akaike Information Criterion
-2 * `llf` + 2 * (`df_model` + 1)
"""
return self.info_criteria("aic")
@property
def bic(self):
"""
Bayes Information Criterion
`deviance` - `df_resid` * log(`nobs`)
.. warning::
The current definition is based on the deviance rather than the
log-likelihood. This is not consistent with the AIC definition,
and after 0.13 both will make use of the log-likelihood definition.
Notes
-----
The log-likelihood version is defined
-2 * `llf` + (`df_model` + 1)*log(n)
"""
if _use_bic_helper.use_bic_llf not in (True, False):
warnings.warn(
"The bic value is computed using the deviance formula. After "
"0.13 this will change to the log-likelihood based formula. "
"This change has no impact on the relative rank of models "
"compared using BIC. You can directly access the "
"log-likelihood version using the `bic_llf` attribute. You "
"can suppress this message by calling "
"statsmodels.genmod.generalized_linear_model.SET_USE_BIC_LLF "
"with True to get the LLF-based version now or False to retain"
"the deviance version.",
FutureWarning
)
if bool(_use_bic_helper.use_bic_llf):
return self.bic_llf
return self.bic_deviance
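# Hedged usage note (illustrative): to opt in to the log-likelihood based BIC and
# silence the FutureWarning above, the module-level switch referenced in the
# warning message can be used:
#
#     import statsmodels.genmod.generalized_linear_model as glm_module
#     glm_module.SET_USE_BIC_LLF(True)   # or False to keep the deviance version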
@cached_value
def bic_deviance(self):
"""
Bayes Information Criterion
Based on the deviance,
`deviance` - `df_resid` * log(`nobs`)
"""
return (self.deviance -
(self.model.wnobs - self.df_model - 1) *
np.log(self.model.wnobs))
@cached_value
def bic_llf(self):
"""
Bayes Information Criterion
Based on the log-likelihood,
-2 * `llf` + log(n) * (`df_model` + 1)
"""
return self.info_criteria("bic")
def info_criteria(self, crit, scale=None, dk_params=0):
"""Return an information criterion for the model.
Parameters
----------
crit : string
One of 'aic', 'bic', or 'qaic'.
scale : float
The scale parameter estimated using the parent model,
used only for qaic.
dk_params : int or float
Correction to the number of parameters used in the information
criterion. By default, only mean parameters are included, the
scale parameter is not included in the parameter count.
Use ``dk_params=1`` to include scale in the parameter count.
Returns
-------
Value of information criterion.
Notes
-----
The quasi-Akaike Information criterion (qaic) is -2 *
`llf`/`scale` + 2 * (`df_model` + 1). It may not give
meaningful results except for Poisson and related models.
The QAIC (ic_type='qaic') must be evaluated with a provided
scale parameter. Two QAIC values are only comparable if they
are calculated using the same scale parameter. The scale
parameter should be estimated using the largest model among
all models being compared.
References
----------
Burnham KP, Anderson DR (2002). Model Selection and Multimodel
Inference; Springer New York.
"""
crit = crit.lower()
k_params = self.df_model + 1 + dk_params
if crit == "aic":
return -2 * self.llf + 2 * k_params
elif crit == "bic":
nobs = self.df_model + self.df_resid + 1
bic = -2*self.llf + k_params*np.log(nobs)
return bic
elif crit == "qaic":
f = self.model.family
fl = (families.Poisson, families.NegativeBinomial,
families.Binomial)
if not isinstance(f, fl):
msg = "QAIC is only valid for Binomial, Poisson and "
msg += "Negative Binomial families."
warnings.warn(msg)
llf = self.llf_scaled(scale=1)
return -2 * llf/scale + 2 * k_params
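# Hedged usage sketch for QAIC (illustrative only; ``results_big`` and
# ``results_small`` are assumed fitted results, with the scale taken from the
# largest model as the docstring above recommends):
#
#     scale_big = results_big.scale
#     qaic_big = results_big.info_criteria("qaic", scale=scale_big)
#     qaic_small = results_small.info_criteria("qaic", scale=scale_big)
#     # only QAIC values computed with the same scale are comparable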
# now explicit docs, old and new behavior, copied from generic classes
# @Appender(str(get_prediction_doc))
def get_prediction(self, exog=None, exposure=None, offset=None,
transform=True, which=None, linear=None,
average=False, agg_weights=None,
row_labels=None):
"""
Compute prediction results for GLM compatible models.
Options and return class depend on whether "which" is None or not.
Parameters
----------
exog : array_like, optional
The values for which you want to predict.
exposure : array_like, optional
Exposure time values, only can be used with the log link
function.
offset : array_like, optional
Offset values.
transform : bool, optional
If the model was fit via a formula, do you want to pass
exog through the formula. Default is True. E.g., if you fit
a model y ~ log(x1) + log(x2), and transform is True, then
you can pass a data structure that contains x1 and x2 in
their original form. Otherwise, you'd need to log the data
first.
which : 'mean', 'linear', 'var_unscaled' (optional)
Statistic to predict. Default is 'mean'.
If which is None, then the deprecated keyword "linear" applies.
If which is not None, then a generic Prediction results class will
be returned. Some options are only available if which is not None.
See notes.
- 'mean' returns the conditional expectation of endog E(y | x),
i.e. inverse of the model's link function of linear predictor.
- 'linear' returns the linear predictor of the mean function.
- 'var_unscaled' variance of endog implied by the likelihood model.
This does not include scale or var_weights.
linear : bool
The ``linear`` keyword is deprecated and will be removed;
use the ``which`` keyword instead.
If which is None, then the linear keyword is used, otherwise it will
be ignored.
If True and which is None, the linear predicted values are returned.
If False or None, then the statistic specified by ``which`` will be
returned.
average : bool
Keyword is only used if ``which`` is not None.
If average is True, then the mean prediction is computed, that is,
predictions are computed for individual exog and then the average
over observation is used.
If average is False, then the results are the predictions for all
observations, i.e. same length as ``exog``.
agg_weights : ndarray, optional
Keyword is only used if ``which`` is not None.
Aggregation weights, only used if average is True.
row_labels : list of str or None
If row_labels are provided, then they will replace the generated
labels.
Returns
-------
prediction_results : instance of a PredictionResults class.
The prediction results instance contains prediction and prediction
variance and can on demand calculate confidence intervals and summary
tables for the prediction of the mean and of new observations.
The Results class of the return depends on the value of ``which``.
See Also
--------
GLM.predict
GLMResults.predict
Notes
-----
Changes in statsmodels 0.14: The ``which`` keyword has been added.
If ``which`` is None, then the behavior is the same as in previous
versions, and returns the mean and linear prediction results.
If the ``which`` keyword is not None, then a generic prediction results
class is returned and is not backwards compatible with the old prediction
results class, e.g. column names of summary_frame differ.
There are more choices for the returned predicted statistic using
``which``. More choices will be added in the next release.
Two additional keywords, ``average`` and ``agg_weights``, are now also
available if ``which`` is not None.
In a future version ``which`` will no longer default to None and the backwards
compatible prediction results class will be removed.
"""
import statsmodels.regression._prediction as linpred
pred_kwds = {'exposure': exposure, 'offset': offset, 'which': 'linear'}
if which is None:
# two calls to a get_prediction duplicates exog generation if patsy
res_linpred = linpred.get_prediction(self, exog=exog,
transform=transform,
row_labels=row_labels,
pred_kwds=pred_kwds)
pred_kwds['which'] = 'mean'
res = pred.get_prediction_glm(self, exog=exog, transform=transform,
row_labels=row_labels,
linpred=res_linpred,
link=self.model.family.link,
pred_kwds=pred_kwds)
else:
# new generic version, if 'which' is specified
pred_kwds = {'exposure': exposure, 'offset': offset}
# not yet, only applies to count families
# y_values is explicit so we can add it to the docstring
# if y_values is not None:
# pred_kwds["y_values"] = y_values
res = pred.get_prediction(
self,
exog=exog,
which=which,
transform=transform,
row_labels=row_labels,
average=average,
agg_weights=agg_weights,
pred_kwds=pred_kwds
)
return res
@Appender(pinfer.score_test.__doc__)
def score_test(self, exog_extra=None, params_constrained=None,
hypothesis='joint', cov_type=None, cov_kwds=None,
k_constraints=None, observed=True):
if self.model._has_freq_weights is True:
warnings.warn("score test has not been verified with freq_weights",
UserWarning)
if self.model._has_var_weights is True:
warnings.warn("score test has not been verified with var_weights",
UserWarning)
# We need to temporarily change model.df_resid for scale computation
# TODO: find a nicer way. gh #7840
mod_df_resid = self.model.df_resid
self.model.df_resid = self.df_resid
if k_constraints is not None:
self.model.df_resid += k_constraints
res = pinfer.score_test(self, exog_extra=exog_extra,
params_constrained=params_constrained,
hypothesis=hypothesis,
cov_type=cov_type, cov_kwds=cov_kwds,
k_constraints=k_constraints,
scale=None,
observed=observed)
self.model.df_resid = mod_df_resid
return res
def get_hat_matrix_diag(self, observed=True):
"""
Compute the diagonal of the hat matrix
Parameters
----------
observed : bool
If true, then observed hessian is used in the hat matrix
computation. If false, then the expected hessian is used.
In the case of a canonical link function both are the same.
Returns
-------
hat_matrix_diag : ndarray
The diagonal of the hat matrix computed from the observed
or expected hessian.
"""
weights = self.model.hessian_factor(self.params, observed=observed)
wexog = np.sqrt(weights)[:, None] * self.model.exog
hd = (wexog * np.linalg.pinv(wexog).T).sum(1)
return hd
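# Hedged note and sketch (illustrative only): the hat diagonal measures
# per-observation leverage, and for a full-rank design its sum equals the number
# of mean parameters, which gives a quick sanity check:
#
#     hd = results.get_hat_matrix_diag()
#     assert np.isclose(hd.sum(), len(results.params))
#     high_leverage = np.flatnonzero(hd > 2 * hd.mean())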
def get_influence(self, observed=True):
"""
Get an instance of GLMInfluence with influence and outlier measures
Parameters
----------
observed : bool
If true, then observed hessian is used in the hat matrix
computation. If false, then the expected hessian is used.
In the case of a canonical link function both are the same.
Returns
-------
infl : GLMInfluence instance
The instance has methods to calculate the main influence and
outlier measures as attributes.
See Also
--------
statsmodels.stats.outliers_influence.GLMInfluence
"""
from statsmodels.stats.outliers_influence import GLMInfluence
weights = self.model.hessian_factor(self.params, observed=observed)
weights_sqrt = np.sqrt(weights)
wexog = weights_sqrt[:, None] * self.model.exog
wendog = weights_sqrt * self.model.endog
# using get_hat_matrix_diag has duplicated computation
hat_matrix_diag = self.get_hat_matrix_diag(observed=observed)
infl = GLMInfluence(self, endog=wendog, exog=wexog,
resid=self.resid_pearson / np.sqrt(self.scale),
hat_matrix_diag=hat_matrix_diag)
return infl
def get_distribution(self, exog=None, exposure=None,
offset=None, var_weights=1., n_trials=1.):
"""
Return an instance of the predictive distribution.
Parameters
----------
scale : scalar
The scale parameter.
exog : array_like
The predictor variable matrix.
offset : array_like or None
Offset variable for predicted mean.
exposure : array_like or None
Log(exposure) will be added to the linear prediction.
var_weights : array_like
1d array of variance (analytic) weights. The default is 1.
n_trials : int
Number of trials for the binomial distribution. The default is 1
which corresponds to a Bernoulli random variable.
Returns
-------
gen
Instance of a scipy frozen distribution based on estimated
parameters.
Use the ``rvs`` method to generate random values.
Notes
-----
Due to the behavior of ``scipy.stats.distributions`` objects, the
returned random number generator must be called with ``gen.rvs(n)``
where ``n`` is the number of observations in the data set used
to fit the model. If any other value is used for ``n``, misleading
results will be produced.
"""
# Note this is mostly a copy of GLM.get_prediction
# calling results.predict here avoids the exog check and transform
if isinstance(self.model.family, (families.Binomial, families.Poisson,
families.NegativeBinomial)):
# use scale=1, independent of QMLE scale for discrete
scale = 1.
if self.scale != 1.:
msg = "using scale=1, no exess dispersion in distribution"
warnings.warn(msg, UserWarning)
else:
scale = self.scale
mu = self.predict(exog, exposure, offset, which="mean")
kwds = {}
if (np.any(n_trials != 1) and
isinstance(self.model.family, families.Binomial)):
kwds["n_trials"] = n_trials
distr = self.model.family.get_distribution(
mu, scale, var_weights=var_weights, **kwds)
return distr
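# Hedged usage sketch (illustrative only; ``results`` is an assumed fitted
# GLMResults instance): simulate responses from the estimated distribution.
#
#     distr = results.get_distribution()
#     simulated_endog = distr.rvs()   # one draw per observation, see Notes above
#     fitted_means = distr.mean()     # for most families this matches fittedvalues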
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Warning: offset, exposure and weights (var_weights and freq_weights)
are not supported by margeff.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is all
only margeff will be available from the returned object.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semi-elasticity -- dy/d(lnx)
- 'eydx' - estimate semi-elasticity -- d(lny)/dx
Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables. For interpretations of these methods
see notes below.
atexog : array_like, optional
Optionally, you can provide the exogenous variables over which to
get the marginal effects. This should be a dictionary with the
zero-indexed column number as the key and the value at which to hold
that variable as the dictionary value.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
DiscreteMargins : marginal effects instance
Returns an object that holds the marginal effects, standard
errors, confidence intervals, etc. See
`statsmodels.discrete.discrete_margins.DiscreteMargins` for more
information.
Notes
-----
Interpretations of methods:
- 'dydx' - change in `endog` for a change in `exog`.
- 'eyex' - proportional change in `endog` for a proportional change
in `exog`.
- 'dyex' - change in `endog` for a proportional change in `exog`.
- 'eydx' - proportional change in `endog` for a change in `exog`.
When using after Poisson, returns the expected number of events per
period, assuming that the model is loglinear.
Status : offset, exposure and weights are currently unsupported features.
Default handling of freq_weights for the average effect "overall" might change.
"""
if getattr(self.model, "offset", None) is not None:
raise NotImplementedError("Margins with offset are not available.")
if (np.any(self.model.var_weights != 1) or
np.any(self.model.freq_weights != 1)):
warnings.warn("weights are not taken into account by margeff")
from statsmodels.discrete.discrete_margins import DiscreteMargins
return DiscreteMargins(self, (at, method, atexog, dummy, count))
@Appender(base.LikelihoodModelResults.remove_data.__doc__)
def remove_data(self):
# GLM has alias/reference in result instance
self._data_attr.extend([i for i in self.model._data_attr
if '_data.' not in i])
super(self.__class__, self).remove_data()
# TODO: what are these in results?
self._endog = None
self._freq_weights = None
self._var_weights = None
self._iweights = None
self._n_trials = None
@Appender(_plot_added_variable_doc % {'extra_params_doc': ''})
def plot_added_variable(self, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None,
ax=None):
from statsmodels.graphics.regressionplots import plot_added_variable
fig = plot_added_variable(self, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs, ax=ax)
return fig
@Appender(_plot_partial_residuals_doc % {'extra_params_doc': ''})
def plot_partial_residuals(self, focus_exog, ax=None):
from statsmodels.graphics.regressionplots import plot_partial_residuals
return plot_partial_residuals(self, focus_exog, ax=ax)
@Appender(_plot_ceres_residuals_doc % {'extra_params_doc': ''})
def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,
ax=None):
from statsmodels.graphics.regressionplots import plot_ceres_residuals
return plot_ceres_residuals(self, focus_exog, frac,
cond_means=cond_means, ax=ax)
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the Regression Results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Names for the exogenous variables, default is `var_##` for ## in
the number of regressors. Must match the number of parameters in
the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Model Family:', [self.family.__class__.__name__]),
('Link Function:', [self.family.link.__class__.__name__]),
('Method:', [self.method]),
('Date:', None),
('Time:', None),
('No. Iterations:',
["%d" % self.fit_history['iteration']]),
]
try:
prsquared = self.pseudo_rsquared(kind="cs")
except ValueError:
prsquared = np.nan
top_right = [('No. Observations:', None),
('Df Residuals:', None),
('Df Model:', None),
('Scale:', ["%#8.5g" % self.scale]),
('Log-Likelihood:', None),
('Deviance:', ["%#8.5g" % self.deviance]),
('Pearson chi2:', ["%#6.3g" % self.pearson_chi2]),
('Pseudo R-squ. (CS):', ["%#6.4g" % prsquared])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
if title is None:
title = "Generalized Linear Model Regression Results"
# create summary tables
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname, title=title)
smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha,
use_t=self.use_t)
if hasattr(self, 'constraints'):
smry.add_extra_txt(['Model has been estimated subject to linear '
'equality constraints.'])
return smry
def summary2(self, yname=None, xname=None, title=None, alpha=.05,
float_format="%.4f"):
"""Experimental summary for regression Results
Parameters
----------
yname : str
Name of the dependent variable (optional)
xname : list[str], optional
Names for the exogenous variables, default is `var_##` for ## in
the number of regressors. Must match the number of parameters in
the model
title : str, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format : str
print format for floats in parameters summary
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary results
"""
from statsmodels.iolib import summary2
smry = summary2.Summary()
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
smry.add_base(results=self, alpha=alpha, float_format=float_format,
xname=xname, yname=yname, title=title)
if hasattr(self, 'constraints'):
smry.add_text('Model has been estimated subject to linear '
'equality constraints.')
return smry
class GLMResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {
'resid_anscombe': 'rows',
'resid_deviance': 'rows',
'resid_pearson': 'rows',
'resid_response': 'rows',
'resid_working': 'rows'
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(GLMResultsWrapper, GLMResults)
if __name__ == "__main__":
from statsmodels.datasets import longley
data = longley.load()
# data.exog = add_constant(data.exog)
GLMmod = GLM(data.endog, data.exog).fit()
GLMT = GLMmod.summary()  # note: summary() defined above takes no 'returns' keyword
# GLMT[0].extend_right(GLMT[1])
# print(GLMT[0])
# print(GLMT[2])
GLMTp = GLMmod.summary(title='Test GLM')
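# Hedged add-on to the demo above (kept as a comment; illustrative only): a few
# results-API calls that could be exercised on the Gaussian fit ``GLMmod``, e.g.
#     print(GLMmod.pseudo_rsquared(kind="cs"))
#     print(GLMmod.aic, GLMmod.bic_llf)
#     print(GLMmod.get_hat_matrix_diag().sum())   # ~ number of parameters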
"""
From Stata
. webuse beetle
. glm r i.beetle ldose, family(binomial n) link(cloglog)
Iteration 0: log likelihood = -79.012269
Iteration 1: log likelihood = -76.94951
Iteration 2: log likelihood = -76.945645
Iteration 3: log likelihood = -76.945645
Generalized linear models No. of obs = 24
Optimization : ML Residual df = 20
Scale parameter = 1
Deviance = 73.76505595 (1/df) Deviance = 3.688253
Pearson = 71.8901173 (1/df) Pearson = 3.594506
Variance function: V(u) = u*(1-u/n) [Binomial]
Link function : g(u) = ln(-ln(1-u/n)) [Complementary log-log]
AIC = 6.74547
Log likelihood = -76.94564525 BIC = 10.20398
------------------------------------------------------------------------------
| OIM
r | Coef. Std. Err. z P>|z| [95% Conf. Interval]
-------------+----------------------------------------------------------------
beetle |
2 | -.0910396 .1076132 -0.85 0.398 -.3019576 .1198783
3 | -1.836058 .1307125 -14.05 0.000 -2.09225 -1.579867
|
ldose | 19.41558 .9954265 19.50 0.000 17.46458 21.36658
_cons | -34.84602 1.79333 -19.43 0.000 -38.36089 -31.33116
------------------------------------------------------------------------------
"""
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@genmod@[email protected]_END.py
|
{
"filename": "tree_sitter_segmenter.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/document_loaders/parsers/language/tree_sitter_segmenter.py",
"type": "Python"
}
|
from abc import abstractmethod
from typing import TYPE_CHECKING, List
from langchain_community.document_loaders.parsers.language.code_segmenter import (
CodeSegmenter,
)
if TYPE_CHECKING:
from tree_sitter import Language, Parser
class TreeSitterSegmenter(CodeSegmenter):
"""Abstract class for `CodeSegmenter`s that use the tree-sitter library."""
def __init__(self, code: str):
super().__init__(code)
self.source_lines = self.code.splitlines()
try:
import tree_sitter # noqa: F401
import tree_sitter_languages # noqa: F401
except ImportError:
raise ImportError(
"Could not import tree_sitter/tree_sitter_languages Python packages. "
"Please install them with "
"`pip install tree-sitter tree-sitter-languages`."
)
def is_valid(self) -> bool:
language = self.get_language()
error_query = language.query("(ERROR) @error")
parser = self.get_parser()
tree = parser.parse(bytes(self.code, encoding="UTF-8"))
return len(error_query.captures(tree.root_node)) == 0
def extract_functions_classes(self) -> List[str]:
language = self.get_language()
query = language.query(self.get_chunk_query())
parser = self.get_parser()
tree = parser.parse(bytes(self.code, encoding="UTF-8"))
captures = query.captures(tree.root_node)
processed_lines = set()
chunks = []
for node, name in captures:
start_line = node.start_point[0]
end_line = node.end_point[0]
lines = list(range(start_line, end_line + 1))
if any(line in processed_lines for line in lines):
continue
processed_lines.update(lines)
chunk_text = node.text.decode("UTF-8")
chunks.append(chunk_text)
return chunks
def simplify_code(self) -> str:
language = self.get_language()
query = language.query(self.get_chunk_query())
parser = self.get_parser()
tree = parser.parse(bytes(self.code, encoding="UTF-8"))
processed_lines = set()
simplified_lines = self.source_lines[:]
for node, name in query.captures(tree.root_node):
start_line = node.start_point[0]
end_line = node.end_point[0]
lines = list(range(start_line, end_line + 1))
if any(line in processed_lines for line in lines):
continue
simplified_lines[start_line] = self.make_line_comment(
f"Code for: {self.source_lines[start_line]}"
)
for line_num in range(start_line + 1, end_line + 1):
simplified_lines[line_num] = None # type: ignore
processed_lines.update(lines)
return "\n".join(line for line in simplified_lines if line is not None)
def get_parser(self) -> "Parser":
from tree_sitter import Parser
parser = Parser()
parser.set_language(self.get_language())
return parser
@abstractmethod
def get_language(self) -> "Language":
raise NotImplementedError() # pragma: no cover
@abstractmethod
def get_chunk_query(self) -> str:
raise NotImplementedError() # pragma: no cover
@abstractmethod
def make_line_comment(self, text: str) -> str:
raise NotImplementedError() # pragma: no cover
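# Hedged sketch of a concrete segmenter (illustrative only; it assumes the
# ``get_language`` helper from ``tree_sitter_languages``, the package checked for
# in ``__init__`` above, and a simple Python chunk query):
#
#     from tree_sitter_languages import get_language
#
#     class PythonSegmenter(TreeSitterSegmenter):
#         def get_language(self) -> "Language":
#             return get_language("python")
#
#         def get_chunk_query(self) -> str:
#             return "(function_definition) @function (class_definition) @class"
#
#         def make_line_comment(self, text: str) -> str:
#             return f"# {text}"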
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@document_loaders@parsers@language@[email protected]_END.py
|
{
"filename": "_constrain.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/xaxis/_constrain.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ConstrainValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="constrain", parent_name="layout.xaxis", **kwargs):
super(ConstrainValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
values=kwargs.pop("values", ["range", "domain"]),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@layout@xaxis@[email protected]_END.py
|
{
"filename": "GiRaFFEfood_NRPy_Three_Waves.py",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/in_progress-GiRaFFE_NRPy/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Three_Waves.py",
"type": "Python"
}
|
# Step 0: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import os,sys
nrpy_dir_path = os.path.join("..")
if nrpy_dir_path not in sys.path:
sys.path.append(nrpy_dir_path)
nrpy_dir_path = os.path.join("../..")
if nrpy_dir_path not in sys.path:
sys.path.append(nrpy_dir_path)
# Step 0.a: Import the NRPy+ core modules and set the reference metric to Cartesian
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Common_Functions as gfcf # Some useful functions for GiRaFFE initial data.
import reference_metric as rfm # NRPy+: Reference metric support
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
# Step 1a: Set commonly used parameters.
thismodule = __name__
import Min_Max_and_Piecewise_Expressions as noif
def Ax_TW(x,y,z, **params):
return sp.sympify(0)
def Ay_TW(x,y,z, **params):
return sp.Rational(7,2)*x*noif.coord_greater_bound(-x,0) + sp.sympify(3)*x*noif.coord_greater_bound(x,0)
def Az_TW(x,y,z, **params):
return y-sp.Rational(3,2)*x*noif.coord_greater_bound(-x,0) - sp.sympify(3)*x*noif.coord_greater_bound(x,0)
#Step 3: Compute v^i from B^i and E_i
def ValenciavU_func_TW(**params):
x = rfm.xx_to_Cart[0]
B_aU = ixp.zerorank1(DIM=3)
E_aU = ixp.zerorank1(DIM=3)
B_pU = ixp.zerorank1(DIM=3)
E_pU = ixp.zerorank1(DIM=3)
B_mU = ixp.zerorank1(DIM=3)
E_mU = ixp.zerorank1(DIM=3)
B_aU[0] = sp.sympify(1)
B_aU[1] = noif.coord_leq_bound(x,0) * sp.sympify(1) + noif.coord_greater_bound(x,0) * sp.Rational(3,2)
B_aU[2] = sp.sympify(2)
E_aU[0] = noif.coord_leq_bound(x,0) * sp.sympify(-1) + noif.coord_greater_bound(x,0) * sp.Rational(-3,2)
E_aU[1] = sp.sympify(1)
E_aU[2] = sp.sympify(0)
B_pU[0] = sp.sympify(0)
B_pU[1] = noif.coord_leq_bound(x,0) * sp.sympify(0) + noif.coord_greater_bound(x,0) * sp.Rational(3,2)
B_pU[2] = noif.coord_leq_bound(x,0) * sp.sympify(0) + noif.coord_greater_bound(x,0) * sp.sympify(1)
E_pU[0] = sp.sympify(0)
E_pU[1] = noif.coord_leq_bound(x,0) * sp.sympify(0) + noif.coord_greater_bound(x,0) * sp.sympify(1)
E_pU[2] = noif.coord_leq_bound(x,0) * sp.sympify(0) + noif.coord_greater_bound(x,0) * sp.Rational(-3,2)
B_mU[0] = sp.sympify(0)
B_mU[1] = noif.coord_leq_bound(x,0) * sp.Rational(1,2) + noif.coord_greater_bound(x,0) * sp.sympify(0)
B_mU[2] = noif.coord_leq_bound(x,0) * sp.Rational(3,2) + noif.coord_greater_bound(x,0) * sp.sympify(0)
E_mU[0] = sp.sympify(0)
E_mU[1] = noif.coord_leq_bound(x,0) * sp.Rational(-3,2) + noif.coord_greater_bound(x,0) * sp.sympify(0)
E_mU[2] = noif.coord_leq_bound(x,0) * sp.Rational(1,2) + noif.coord_greater_bound(x,0) * sp.sympify(0)
BU = ixp.zerorank1(DIM=3)
EU = ixp.zerorank1(DIM=3)
for i in range(3):
BU[i] = B_aU[i] + B_pU[i] + B_mU[i]
EU[i] = E_aU[i] + E_pU[i] + E_mU[i]
# In flat space, ED and EU are identical, so we can still use this function.
return gfcf.compute_ValenciavU_from_ED_and_BU(EU, BU)
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@in_progress-GiRaFFE_NRPy@GiRaFFEfood_NRPy@[email protected]_END.py
|
{
"filename": "cleanup_downloads.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/utils/cleanup_downloads.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utility to cleanup files created during doctesting.
"""
import glob
import os
import shutil
__all__ = ['cleanup_saved_downloads']
def cleanup_saved_downloads(names):
""" Function to clean up save files.
Parameters
----------
names : str or list of str
Files or directories to clean up. Wildcards are accepted.
"""
if isinstance(names, str):
names = [names]
for path in names:
files = glob.glob(path)
for saved_download in files:
try:
shutil.rmtree(saved_download)
except NotADirectoryError:
os.remove(saved_download)
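# Hedged usage sketch (illustrative only; the file patterns are hypothetical):
#
#     from astroquery.utils.cleanup_downloads import cleanup_saved_downloads
#     cleanup_saved_downloads('*.fits')                      # single pattern
#     cleanup_saved_downloads(['downloaded_dir', '*.tar'])   # list with wildcards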
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@utils@[email protected]_END.py
|
{
"filename": "_valueformat.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/indicator/number/_valueformat.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ValueformatValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="valueformat", parent_name="indicator.number", **kwargs
):
super(ValueformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@indicator@number@[email protected]_END.py
|
{
"filename": "_minzoom.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/mapbox/layer/_minzoom.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MinzoomValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="minzoom", parent_name="layout.mapbox.layer", **kwargs
):
super(MinzoomValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
max=kwargs.pop("max", 24),
min=kwargs.pop("min", 0),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@mapbox@layer@[email protected]_END.py
|
{
"filename": "_familysrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/sunburst/textfont/_familysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="familysrc", parent_name="sunburst.textfont", **kwargs
):
super(FamilysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@sunburst@textfont@[email protected]_END.py
|
{
"filename": "ClassSpectralFunctions.py",
"repo_name": "saopicc/DDFacet",
"repo_path": "DDFacet_extracted/DDFacet-master/DDFacet/ToolsDir/ClassSpectralFunctions.py",
"type": "Python"
}
|
'''
DDFacet, a facet-based radio imaging package
Copyright (C) 2013-2016 Cyril Tasse, l'Observatoire de Paris,
SKA South Africa, Rhodes University
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DDFacet.compatibility import range
import numpy as np
class ClassSpectralFunctions():
def __init__(self,DicoMappingDesc,BeamEnable=True,RefFreq=None):
self.DicoMappingDesc=DicoMappingDesc
self.NFreqBand=len(self.DicoMappingDesc["freqs"])
self.BeamEnable=BeamEnable
self.RefFreq=RefFreq
#self.setFreqs()
self.DicoBeamFactors={}
# def setFreqs(self):
# AllFreqs=[]
# AllFreqsMean=np.zeros((self.NFreqBand,),np.float32)
# for iChannel in range(self.NFreqBand):
# AllFreqs+=self.DicoMappingDesc["freqs"][iChannel]
# AllFreqsMean[iChannel]=np.mean(self.DicoMappingDesc["freqs"][iChannel])
# RefFreq=np.sum(AllFreqsMean.ravel()*self.DicoMappingDesc["WeightChansImages"].ravel())
# self.AllFreqs=AllFreqs
# self.RefFreq=RefFreq
def GiveBeamFactorsFacet(self,iFacet):
if iFacet in self.DicoBeamFactors:
return self.DicoBeamFactors[iFacet]
SumJonesChan=self.DicoMappingDesc["SumJonesChan"]
ChanMappingGrid=self.DicoMappingDesc["ChanMappingGrid"]
# ListBeamFactor=[]
# ListBeamFactorWeightSq=[]
# for iChannel in range(self.NFreqBand):
# ThisSumJonesChan=[]
# ThisSumJonesChanWeightSq=[]
# for iMS in range(len(SumJonesChan)):
# ind=np.where(ChanMappingGrid[iMS]==iChannel)[0]
# ThisSumJonesChan+=SumJonesChan[iMS][ind].tolist()
# ThisSumJonesChanWeightSq+=SumJonesChanWeightSq[iMS][ind].tolist()
# ListBeamFactor.append(np.array(ThisSumJonesChan))
# ListBeamFactorWeightSq.append(np.array(ThisSumJonesChanWeightSq))
ChanMappingGridChan=self.DicoMappingDesc["ChanMappingGridChan"]
ListBeamFactor=[]
ListBeamFactorWeightSq=[]
for iChannel in range(self.NFreqBand):
nfreq = len(self.DicoMappingDesc["freqs"][iChannel])
ThisSumJonesChan = np.zeros(nfreq,np.float64)
ThisSumJonesChanWeightSq = np.zeros(nfreq,np.float64)
for iMS in SumJonesChan.keys():
ind = np.where(ChanMappingGrid[iMS]==iChannel)[0]
channels = ChanMappingGridChan[iMS][ind]
ThisSumJonesChan[channels] += SumJonesChan[iMS][iFacet,0,ind]
ThisSumJonesChanWeightSq[channels] += SumJonesChan[iMS][iFacet,1,ind]
ListBeamFactor.append(ThisSumJonesChan)
ListBeamFactorWeightSq.append(ThisSumJonesChanWeightSq)
self.DicoBeamFactors[iFacet] = ListBeamFactor, ListBeamFactorWeightSq
return ListBeamFactor, ListBeamFactorWeightSq
def GiveFreqBandsFluxRatio(self,iFacet,Alpha):
NFreqBand=self.NFreqBand
NAlpha=Alpha.size
FreqBandsFluxRatio=np.zeros((NAlpha,NFreqBand),np.float32)
for iChannel in range(NFreqBand):
for iAlpha in range(NAlpha):
ThisAlpha=Alpha[iAlpha]
FreqBandsFluxRatio[iAlpha,iChannel]=self.IntExpFunc(Alpha=ThisAlpha,iChannel=iChannel,iFacet=iFacet)
return FreqBandsFluxRatio
def CalcFluxBands(self,NAlpha=21):
Alphas=np.linspace(-1,1,NAlpha)
NFacets=len(self.DicoMappingDesc["MeanJonesBand"])
self.FluxBands=np.zeros((NFacets,NAlpha,self.NFreqBand),np.float32)
for iFacet in range(NFacets):
self.FluxBands[iFacet]=self.GiveFreqBandsFluxRatio(iFacet,Alphas)
def IntExpFunc(self,S0=1.,Alpha=0.,iChannel=0,iFacet=0):
RefFreq=self.RefFreq
ThisAlpha=Alpha
ThisFreqs=np.array(self.DicoMappingDesc["freqs"][iChannel])
S0=np.array(S0)
Npix=S0.size
if self.BeamEnable:
ListBeamFactor,ListBeamFactorWeightSq = self.GiveBeamFactorsFacet(iFacet)
BeamFactor=ListBeamFactor[iChannel].reshape((1,ThisFreqs.size))
BeamFactorWeightSq=ListBeamFactorWeightSq[iChannel].reshape((1,ThisFreqs.size))
MeanJonesBand=self.DicoMappingDesc["MeanJonesBand"][iFacet][iChannel]
else:
BeamFactor=1.
BeamFactorWeightSq=1.
MeanJonesBand=1.
ThisFreqs=ThisFreqs.reshape((1,ThisFreqs.size))
ThisAlpha=ThisAlpha.reshape((Npix,1))
FreqBandsFlux=np.sqrt(np.sum(BeamFactor*((ThisFreqs/RefFreq)**ThisAlpha)**2,axis=1))/np.sqrt(np.sum(BeamFactorWeightSq))
FreqBandsFlux/=np.sqrt(MeanJonesBand)
S0=S0.reshape((Npix,))
FreqBandsFlux*=S0
return FreqBandsFlux.ravel()
def IntExpFuncPoly(self,PolyArray,iChannel=0,iFacet=0,FluxScale="Exp"):#, S0=1.,Alpha=0.):
RefFreq=self.RefFreq
ThisFreqs=np.array(self.DicoMappingDesc["freqs"][iChannel])
S0=np.array(PolyArray[:,0])
Npix=S0.size
if self.BeamEnable:
ListBeamFactor,ListBeamFactorWeightSq = self.GiveBeamFactorsFacet(iFacet)
BeamFactor=ListBeamFactor[iChannel].reshape((1,ThisFreqs.size))
BeamFactorWeightSq=ListBeamFactorWeightSq[iChannel].reshape((1,ThisFreqs.size))
MeanJonesBand=self.DicoMappingDesc["MeanJonesBand"][iFacet][iChannel]
else:
BeamFactor=1.
BeamFactorWeightSq=1.
MeanJonesBand=1.
ThisFreqs=ThisFreqs.reshape((1,ThisFreqs.size))
# ThisAlpha=ThisAlpha.reshape((Npix,1))
# SUnityFreq=(ThisFreqs/RefFreq)**ThisAlpha
# SUnityFreq=np.zeros((Npix,ThisFreqs.size),np.float32)
# for iPix in range(Npix):
# p=PolyArray[iPix,:].copy()
# p[0]=0.
# logS=np.poly1d(p[::-1])(np.log(ThisFreqs.ravel()/RefFreq))
# SUnityFreq[iPix,:]=np.exp(logS)
Npix,NOrder=PolyArray.shape
n=np.arange(NOrder)
n=n.reshape((1,1,NOrder))
f=ThisFreqs.reshape((1,-1,1))
a=(PolyArray.copy()).reshape((Npix,1,NOrder))
if FluxScale=="Exp":
a[:,:,0]=0.
SUnityFreq0=a*(np.log(f/RefFreq))**n
SUnityFreq0=np.exp(np.sum(SUnityFreq0,axis=-1))
elif FluxScale=="Linear":
SUnityFreq0=a*((f-RefFreq)/RefFreq)**n
SUnityFreq0=np.sum(SUnityFreq0,axis=-1)
SUnityFreq=SUnityFreq0
FreqBandsFlux=np.sqrt(np.sum(BeamFactor*( SUnityFreq )**2,axis=1))/np.sqrt(np.sum(BeamFactorWeightSq))
FreqBandsFlux/=np.sqrt(MeanJonesBand)
S0=S0.reshape((Npix,))
if FluxScale=="Exp":
FreqBandsFlux*=S0
return FreqBandsFlux.ravel()
|
saopiccREPO_NAMEDDFacetPATH_START.@DDFacet_extracted@DDFacet-master@DDFacet@[email protected]@.PATH_END.py
|
{
"filename": "_selected.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choropleth/_selected.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SelectedValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="selected", parent_name="choropleth", **kwargs):
super(SelectedValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Selected"),
data_docs=kwargs.pop(
"data_docs",
"""
marker
:class:`plotly.graph_objects.choropleth.selecte
d.Marker` instance or dict with compatible
properties
""",
),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@choropleth@[email protected]_END.py
|
{
"filename": "ceres_optimizer.py",
"repo_name": "dstndstn/tractor",
"repo_path": "tractor_extracted/tractor-main/tractor/ceres_optimizer.py",
"type": "Python"
}
|
from __future__ import print_function
import numpy as np
from astrometry.util.ttime import Time
from tractor.engine import logverb
from tractor.optimize import Optimizer
class CeresOptimizer(Optimizer):
def __init__(self, BW=10, BH=10, threads=None):
super(CeresOptimizer, self).__init__()
self.BW = BW  # use the constructor arguments rather than hard-coded block sizes
self.BH = BH
self.ceresType = np.float32
self.threads = threads
def getDynamicScales(self, tractor):
'''
Returns parameter step sizes that will result in changes in
chi^2 of about 1.0
'''
scales = np.zeros(tractor.numberOfParams())
for i in range(tractor.getNImages()):
derivs = self._getOneImageDerivs(tractor, i)
for j, x0, y0, der in derivs:
scales[j] += np.sum(der**2)
scales = np.sqrt(scales)
I = (scales != 0)
if any(I):
scales[I] = 1. / scales[I]
I = (scales == 0)
if any(I):
scales[I] = np.array(tractor.getStepSizes())[I]
return scales
def _optimize_forcedphot_core(self, tractor, result, *args, **kwargs):
x = self._ceres_forced_photom(tractor, result, *args, **kwargs)
result.ceres_status = x
def optimize(self, tractor, variance=False, **kwargs):
X = self._ceres_opt(tractor, variance=variance, **kwargs)
#print('optimize:', X)
chisq0 = X['initial_cost']
chisq1 = X['final_cost']
if variance:
# dlnp, dparams, alpha, variance
return chisq0 - chisq1, None, 1, X['variance']
# dlnp, dparams, alpha
return chisq0 - chisq1, None, 1
def optimize_loop(self, tractor, **kwargs):
X = self._ceres_opt(tractor, **kwargs)
return X
def _ceres_opt(self, tractor, variance=False, scale_columns=True,
numeric=False, scaled=True, numeric_stepsize=0.1,
dynamic_scale=True,
dlnp=1e-3, max_iterations=0, print_progress=True,
priors=False, bounds=False, **nil):
from tractor.ceres import ceres_opt
pp = tractor.getParams()
if len(pp) == 0:
return None
if scaled:
p0 = np.array(pp)
if dynamic_scale:
scales = self.getDynamicScales(tractor)
else:
scales = np.array(tractor.getStepSizes())
# print('parameter scales:', scales)
# Offset all the parameters so that Ceres sees them all
# with value 1.0 (they'll later get scaled by 1/scales)
p0 -= scales
params = np.ones_like(p0)
else:
params = np.array(pp)
p0 = 0
scales = np.ones(len(pp), float)
trwrapper = CeresTractorAdapter(tractor, self, p0, scales)
variance_out = None
if variance:
variance_out = np.zeros_like(params)
gpriors = None
if priors:
gpriors = tractor.getGaussianPriors()
#print('Gaussian priors:', gpriors)
if scaled:
scaled_gpriors = []
for i,mu,sig in gpriors:
# Apply scaling!
mu = (mu - p0[i]) / scales[i]
sig = sig / scales[i]
scaled_gpriors.append((i, mu, sig))
gpriors = scaled_gpriors
lubounds = None
if bounds:
lowers = tractor.getLowerBounds()
uppers = tractor.getUpperBounds()
# print('Lower bounds:', lowers)
# print('Upper bounds:', uppers)
assert(len(lowers) == len(pp))
assert(len(uppers) == len(pp))
if scaled:
lowers = [(b - p0[i]) / scales[i] if b is not None else None
for i,b in enumerate(lowers)]
uppers = [(b - p0[i]) / scales[i] if b is not None else None
for i,b in enumerate(uppers)]
# print('Scaled lower bounds:', lowers)
# print('Scaled upper bounds:', uppers)
lubounds = ([(i, float(b), True) for i, b in enumerate(lowers)
if b is not None] +
[(i, float(b), False) for i, b in enumerate(uppers)
if b is not None])
#print('lubounds:', lubounds)
R = ceres_opt(trwrapper, tractor.getNImages(), params, variance_out,
(1 if scale_columns else 0),
(1 if numeric else 0), numeric_stepsize,
dlnp, max_iterations, gpriors, lubounds,
print_progress)
if variance:
R['variance'] = variance_out
if scaled:
#print('Opt. in scaled space:', params)
tractor.setParams(p0 + params * scales)
if variance:
variance_out *= scales**2
R['params0'] = p0
R['scales'] = scales
else:
tractor.setParams(params)
return R
# This function is called-back by _ceres_opt; it is called from
# ceres-tractor.cc via ceres.i .
def _getOneImageDerivs(self, tractor, imgi):
from tractor.patch import Patch
# Returns:
# [ (param-index, deriv_x0, deriv_y0, deriv), ... ]
# not necessarily in order of param-index
# Where deriv_x0, deriv_y0 are integer pixel offsets of the "deriv" image.
#
# NOTE, this scales the derivatives by inverse-error and -1 to
# yield derivatives of CHI with respect to PARAMs; NOT the
# model image wrt params.
#
allderivs = []
# First, derivs for Image parameters (because 'images' comes
# first in the tractor's parameters)
parami = 0
img = tractor.images[imgi]
cat = tractor.catalog
if not tractor.isParamFrozen('images'):
for i in tractor.images.getThawedParamIndices():
if i == imgi:
# Give the image a chance to compute its own derivs
derivs = img.getParamDerivatives(tractor, cat,
**tractor.model_kwargs)
needj = []
for j, deriv in enumerate(derivs):
if deriv is None:
continue
if deriv is False:
needj.append(j)
continue
allderivs.append((parami + j, deriv))
if len(needj):
mod0 = tractor.getModelImage(i)
p0 = img.getParams()
ss = img.getStepSizes()
for j in needj:
step = ss[j]
img.setParam(j, p0[j] + step)
modj = tractor.getModelImage(i)
img.setParam(j, p0[j])
deriv = Patch(0, 0, (modj - mod0) / step)
allderivs.append((parami + j, deriv))
parami += tractor.images[i].numberOfParams()
assert(parami == tractor.images.numberOfParams())
srcs = list(tractor.catalog.getThawedSources())
for src in srcs:
derivs = tractor._getSourceDerivatives(src, img)
for j, deriv in enumerate(derivs):
if deriv is None:
continue
allderivs.append((parami + j, deriv))
parami += src.numberOfParams()
assert(parami == tractor.numberOfParams())
# Clip and unpack the (x0,y0,patch) elements for ease of use from C (ceres)
# Also scale by -1 * inverse-error to get units of dChi here.
ie = img.getInvError()
H, W = img.shape
chiderivs = []
for ind, d in allderivs:
d.clipTo(W, H)
if d.patch is None:
continue
deriv = -1. * d.patch.astype(np.float64) * ie[d.getSlice()]
chiderivs.append((ind, d.x0, d.y0, deriv))
# print('_getOneImageDerivs: image', tractor.images[imgi],
# ':', len(chiderivs))
# for ind,x0,y0,deriv in chiderivs:
# print(' ', deriv.shape)
return chiderivs
def _ceres_forced_photom(self, tractor, result, umodels,
imlist, mods0, scales,
skyderivs, minFlux,
nonneg=False,
wantims0=True,
wantims1=True,
negfluxval=None,
verbose=False,
**kwargs
):
'''
negfluxval: when 'nonneg' is set, the flux value to give sources that went
negative in an unconstrained fit.
'''
from tractor.ceres import ceres_forced_phot
t0 = Time()
blocks = []
blockstart = {}
usedParamMap = {}
nextparam = 0
# umodels[ imagei, srci ] = Patch
Nsky = 0
Z = []
if skyderivs is not None:
# skyderivs = [ (param0:)[ (deriv,img), ], (param1:)[ (deriv,img), ], ...]
# Reorg them to be in img-major order
skymods = [[] for im in imlist]
for skyd in skyderivs:
for (deriv, img) in skyd:
imi = imlist.index(img)
skymods[imi].append(deriv)
for mods, im, mod0 in zip(skymods, imlist, mods0):
Z.append((mods, im, 1., mod0, Nsky))
Nsky += len(mods)
Z.extend(zip(umodels, imlist, scales, mods0,
np.zeros(len(imlist), int) + Nsky))
sky = (skyderivs is not None)
for zi, (umods, img, scale, mod0, paramoffset) in enumerate(Z):
H, W = img.shape
if img in blockstart:
(b0, nbw, nbh) = blockstart[img]
else:
# Dice up the image
nbw = int(np.ceil(W / float(self.BW)))
nbh = int(np.ceil(H / float(self.BH)))
b0 = len(blocks)
blockstart[img] = (b0, nbw, nbh)
for iy in range(nbh):
for ix in range(nbw):
x0 = ix * self.BW
y0 = iy * self.BH
slc = (slice(y0, min(y0 + self.BH, H)),
slice(x0, min(x0 + self.BW, W)))
data = (x0, y0,
img.getImage()[slc].astype(self.ceresType),
mod0[slc].astype(self.ceresType),
img.getInvError()[slc].astype(self.ceresType))
blocks.append((data, []))
for modi, umod in enumerate(umods):
if umod is None:
continue
# DEBUG
if len(umod.shape) != 2:
print('zi', zi)
print('modi', modi)
print('umod', umod)
umod.clipTo(W, H)
umod.trimToNonZero()
if umod.patch is None:
continue
# Dice up the model
ph, pw = umod.shape
bx0 = np.clip(int(np.floor(umod.x0 / float(self.BW))),
0, nbw - 1)
bx1 = np.clip(int(np.ceil((umod.x0 + pw) / float(self.BW))),
0, nbw - 1)
by0 = np.clip(int(np.floor(umod.y0 / float(self.BH))),
0, nbh - 1)
by1 = np.clip(int(np.ceil((umod.y0 + ph) / float(self.BH))),
0, nbh - 1)
parami = paramoffset + modi
if parami in usedParamMap:
ceresparam = usedParamMap[parami]
else:
usedParamMap[parami] = nextparam
ceresparam = nextparam
nextparam += 1
cmod = (umod.patch * scale).astype(self.ceresType)
for by in range(by0, by1 + 1):
for bx in range(bx0, bx1 + 1):
bi = by * nbw + bx
# if type(umod.x0) != int or type(umod.y0) != int:
# print('umod:', umod.x0, umod.y0, type(umod.x0), type(umod.y0))
# print('umod:', umod)
dd = (ceresparam, int(umod.x0), int(umod.y0), cmod)
blocks[b0 + bi][1].append(dd)
logverb('forced phot: dicing up', Time() - t0)
if wantims0:
t0 = Time()
params = tractor.getParams()
result.ims0 = self._getims(params, imlist, umodels, mods0, scales,
sky, minFlux, None)
logverb('forced phot: ims0', Time() - t0)
t0 = Time()
fluxes = np.zeros(len(usedParamMap))
logverb('Ceres forced phot:')
logverb(len(blocks), ('image blocks (%ix%i), %i params' %
(self.BW, self.BH, len(fluxes))))
if len(blocks) == 0 or len(fluxes) == 0:
logverb('Nothing to do!')
return
# init fluxes passed to ceres
p0 = tractor.getParams()
for i, k in usedParamMap.items():
fluxes[k] = p0[i]
iverbose = 1 if verbose else 0
nonneg = int(nonneg)
ithreads = 0
if self.threads is not None:
ithreads = int(self.threads)
if nonneg:
# Initial run with nonneg=False, to get in the ballpark
x = ceres_forced_phot(blocks, fluxes, 0, iverbose, ithreads)
assert(x == 0)
logverb('forced phot: ceres initial run', Time() - t0)
t0 = Time()
if negfluxval is not None:
fluxes = np.maximum(fluxes, negfluxval)
x = ceres_forced_phot(blocks, fluxes, nonneg, iverbose, ithreads)
#print('Ceres forced phot:', x)
logverb('forced phot: ceres', Time() - t0)
t0 = Time()
params = np.zeros(len(p0))
for i, k in usedParamMap.items():
params[i] = fluxes[k]
tractor.setParams(params)
logverb('forced phot: unmapping params:', Time() - t0)
if wantims1:
t0 = Time()
result.ims1 = self._getims(params, imlist, umodels, mods0, scales,
sky, minFlux, None)
logverb('forced phot: ims1:', Time() - t0)
return x
class CeresTractorAdapter(object):
def __init__(self, tractor, ceresopt, p0, scales):
self.tractor = tractor
self.ceresopt = ceresopt
self.offset = p0
self.scale = scales
def getImage(self, i):
#print('CeresTractorAdapter: getImage(%i)' % i)
return self.tractor.getImage(i)
def getChiImage(self, i):
#print('CeresTractorAdapter: getChiImage(%i)' % i)
return self.tractor.getChiImage(i)
def _getOneImageDerivs(self, i):
derivs = self.ceresopt._getOneImageDerivs(self.tractor, i)
for (ind, x0, y0, der) in derivs:
der *= self.scale[ind]
return derivs
def setParams(self, p):
#print('CeresTractorAdapter: setParams:', self.offset + self.scale * p)
return self.tractor.setParams(self.offset + self.scale * p)
|
dstndstnREPO_NAMEtractorPATH_START.@tractor_extracted@tractor-main@tractor@[email protected]_END.py
|
{
"filename": "tutorial_mast.md",
"repo_name": "rbuehler/vasca",
"repo_path": "vasca_extracted/vasca-main/docs/tutorials/tutorial_mast.md",
"type": "Markdown"
}
|
---
jupytext:
hide_notebook_metadata: true
text_representation:
extension: .md
format_name: myst
format_version: 0.13
jupytext_version: 1.16.4
kernelspec:
display_name: vasca-github
language: python
name: vasca-github
---
```{code-cell}
:tags: [remove-input]
import pandas as pd
from IPython.display import HTML, display
from itables import init_notebook_mode, show
init_notebook_mode(all_interactive=True)
# Modify the table CSS with colors that work in both light and dark themes
class_specific_css = """
.dataTable th {
font-weight: normal;
background-color: #075;
color: #fff;
}
.dataTable td {
border-color: #f0f;
background-color: #333;
color: #fff;
}
.dt-container {
font-size: small;
}
"""
display(HTML(f"<style>{class_specific_css}</style>"))
```
# MAST Download
In this short tutorial the MAST query functions of [](#GALEXField) are used to
download fresh data.
```{code-cell}
from loguru import logger
from astropy.table import Table
from vasca.field import GALEXField
from vasca.resource_manager import ResourceManager
```
```{code-cell}
# Activate logging
logger.enable("vasca")
```
```{code-cell}
# Initialize ResourceManager
rm = ResourceManager()
docs_resources = rm.get_path("docs_resources", "vasca")
gal_visits = rm.get_path("gal_visits_list", "vasca")
```
```{code-cell}
# Let's look at a single field with two visits
field_name = "AIS_309_1_28" # 2 visits, contains Crab pulsar
field_id = 6381787756527353856
```
```{code-cell}
# Show visits info about this field
# tt_gal_visits is a table containing info about all GALEX visits
tt_gal_visits = Table.read(gal_visits)
sel_fd = tt_gal_visits["ParentImgRunID"] == field_id
# tt_gal_visits[sel_fd]
```
```{code-cell}
:tags: [remove-input]
show(
tt_gal_visits[sel_fd].to_pandas(),
classes="display nowrap compact",
scrollY="300px",
scrollCollapse=True,
paging=False,
columnDefs=[{"className": "dt-body-left", "targets": "_all"}],
)
```
```{code-cell}
:tags: [hide-output]
# Initialize a new field and
# download data from MAST
fd = GALEXField.load(
gfield_id=field_id,
obs_filter="NUV",
method="MAST_REMOTE",
load_products="ALL",
data_path=docs_resources,
visits_data_path=gal_visits,
)
```
|
rbuehlerREPO_NAMEvascaPATH_START.@vasca_extracted@vasca-main@docs@tutorials@[email protected]_END.py
|
{
"filename": "predict.py",
"repo_name": "jtdinsmore/leakagelib",
"repo_path": "leakagelib_extracted/leakagelib-main/examples/predict.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import sys, os
sys.path.append("../..")
import leakagelib
import time
SOURCE_SIZE = 53 # pixels
PIXEL_SIZE = 2.8 # arcsec
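# Symmetric color-scale limit used for the normalized q/u maps plotted below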
VMAX = 0.5
SPECTRUM = leakagelib.Spectrum.from_power_law_index(2)
leakagelib.funcs.override_matplotlib_defaults() # Set the matplotlib defaults to Jack's settings
if __name__ == "__main__":
# Load the nebula files
source = leakagelib.Source.load_file(
"data/pwn-i.fits",
False, # Predict leakage assuming Moments data
SOURCE_SIZE, # Number of spatial bins to put in a single row of your image. The image is assumed to be square
PIXEL_SIZE # The size of each pixel in arcsec. Together this and the previous argument multiply to give the width of the image in arcsec
)
source.polarize_file("data/pwn-qu.fits")
# Predict leakage for each source
fig, axs = plt.subplots(ncols=3,nrows=4, figsize=(12, 17), sharex=True, sharey=True, gridspec_kw=dict(height_ratios=(1,1,1,1.4)))
axs[0,0].pcolormesh(source.pixel_centers, source.pixel_centers, np.log10(source.source))
axs[0,1].pcolormesh(source.pixel_centers, source.pixel_centers, source.q_map, vmin=-VMAX,vmax=VMAX, cmap="RdBu")
axs[0,2].pcolormesh(source.pixel_centers, source.pixel_centers, source.u_map, vmin=-VMAX,vmax=VMAX, cmap="RdBu")
for det in range(3):
# Load the PSF
psf = leakagelib.PSF.sky_cal(
det, # Use the given detector index
source, # Use the Source object just created
det * np.pi / 3 * 2 # Rotate the source by this amount
)
start = time.time()
# Get the predicted detection maps for q and u
i, q_norm, u_norm = source.compute_leakage(
psf, # Use the PSF that was just loaded
SPECTRUM, # Use an example power-law spectrum
normalize=True # Normalize the output q and u. Off by default
)
# print(f"Took {(time.time() - start) / SOURCE_SIZE**2 * 1000} s per 1000 pixels")
# OPTIONAL: Divide these maps by the detector modulation factor. After this division, the
# PD = sqrt(q_norm**2 + u_norm**2) is equal to the point source polarization for an
# aperture large enough that leakage effects can be neglected. Before the division, the maps
# predict the actual detected polarizations and are therefore lowered by the modulation
# factor mu.
#
# This division step is likely necessary if comparing with other tools. For comparison with
# unweighted IXPE data, it should not be done.
q_norm, u_norm = source.divide_by_mu(q_norm, u_norm, SPECTRUM)
ci = axs[det+1,0].pcolormesh(source.pixel_centers, source.pixel_centers, np.log10(i))
axs[det+1,1].pcolormesh(source.pixel_centers, source.pixel_centers, q_norm, vmax=VMAX, vmin=-VMAX, cmap="RdBu")
cqu = axs[det+1,2].pcolormesh(source.pixel_centers, source.pixel_centers, u_norm, vmax=VMAX, vmin=-VMAX, cmap="RdBu")
for ax in axs.reshape(-1):
ax.set_aspect("equal")
ax.set_xlim(source.pixel_centers[-1], source.pixel_centers[0])
ax.set_ylim(source.pixel_centers[0], source.pixel_centers[-1])
axs[0,0].set_ylabel("Truth")
axs[1,0].set_ylabel("Detector 1")
axs[2,0].set_ylabel("Detector 2")
axs[3,0].set_ylabel("Detector 3")
axs[0,0].set_title("Log I")
axs[0,1].set_title("q (normalized)")
axs[0,2].set_title("u (normalized)")
axs[-1,0].set_xlabel("[arcsec]")
fig.colorbar(cqu, ax=axs[-1,(1,2)], orientation="horizontal", aspect=40)
cbari = fig.colorbar(ci, ax=axs[-1,0], orientation="horizontal")
cbari.set_ticks([])
if not os.path.exists("figs"):
os.mkdir("figs")
fig.savefig("figs/predict.png")
# fig.savefig("figs/predict.pdf")
|
jtdinsmoreREPO_NAMEleakagelibPATH_START.@leakagelib_extracted@leakagelib-main@[email protected]@.PATH_END.py
|
{
"filename": "generative_agents_interactive_simulacra_of_human_behavior.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb",
"type": "Jupyter Notebook"
}
|
# Generative Agents in LangChain
This notebook implements a generative agent based on the paper [Generative Agents: Interactive Simulacra of Human Behavior](https://arxiv.org/abs/2304.03442) by Park et al.
In it, we leverage a time-weighted Memory object backed by a LangChain Retriever.
```python
# Use termcolor to make it easy to colorize the outputs.
!pip install termcolor > /dev/null
```
```python
import logging
logging.basicConfig(level=logging.ERROR)
```
```python
from datetime import datetime, timedelta
from typing import List
from langchain.docstore import InMemoryDocstore
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from termcolor import colored
```
```python
USER_NAME = "Person A" # The name you want to use when interviewing the agent.
LLM = ChatOpenAI(max_tokens=1500) # Can be any LLM you want.
```
### Generative Agent Memory Components
This tutorial highlights the memory of generative agents and its impact on their behavior. The memory varies from standard LangChain Chat memory in two aspects:
1. **Memory Formation**
Generative Agents have extended memories, stored in a single stream:
1. Observations - from dialogues or interactions with the virtual world, about self or others
2. Reflections - resurfaced and summarized core memories
2. **Memory Recall**
Memories are retrieved using a weighted sum of salience, recency, and importance.
You can review the definitions of the `GenerativeAgent` and `GenerativeAgentMemory` in the [reference documentation](https://api.python.langchain.com/en/latest/modules/experimental.html) for the following imports, focusing on the `add_memory` and `summarize_related_memories` methods.
```python
from langchain_experimental.generative_agents import (
GenerativeAgent,
GenerativeAgentMemory,
)
```
## Memory Lifecycle
Summarizing the key methods in the above: `add_memory` and `summarize_related_memories`.
When an agent makes an observation, it stores the memory:
1. Language model scores the memory's importance (1 for mundane, 10 for poignant)
2. Observation and importance are stored within a document by TimeWeightedVectorStoreRetriever, with a `last_accessed_time`.
When an agent responds to an observation:
1. Generates query(s) for retriever, which fetches documents based on salience, recency, and importance.
2. Summarizes the retrieved information
3. Updates the `last_accessed_time` for the used documents.
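The sketch below is illustrative only: it shows the two lifecycle calls using the `tommie` agent that is constructed in the next section (both method names are the `langchain_experimental` API used throughout this notebook).
```python
# Illustrative sketch -- assumes `tommie` has been built as in the next section.
tommie.memory.add_memory("Tommie sees the new home")  # scored for importance, then stored
stay_in_dialogue, reply = tommie.generate_dialogue_response(
    "Person A says hello"
)  # retrieves related memories, summarizes them, and responds
```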
## Create a Generative Character
Now that we've walked through the definition, we will create two characters named "Tommie" and "Eve".
```python
import math
import faiss
def relevance_score_fn(score: float) -> float:
"""Return a similarity score on a scale [0, 1]."""
# This will differ depending on a few things:
# - the distance / similarity metric used by the VectorStore
# - the scale of your embeddings (OpenAI's are unit norm. Many others are not!)
# This function converts the euclidean norm of normalized embeddings
# (0 is most similar, sqrt(2) most dissimilar)
# to a similarity function (0 to 1)
return 1.0 - score / math.sqrt(2)
def create_new_memory_retriever():
"""Create a new vector store retriever unique to the agent."""
# Define your embedding model
embeddings_model = OpenAIEmbeddings()
# Initialize the vectorstore as empty
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(
embeddings_model.embed_query,
index,
InMemoryDocstore({}),
{},
relevance_score_fn=relevance_score_fn,
)
return TimeWeightedVectorStoreRetriever(
vectorstore=vectorstore, other_score_keys=["importance"], k=15
)
```
```python
tommies_memory = GenerativeAgentMemory(
llm=LLM,
memory_retriever=create_new_memory_retriever(),
verbose=False,
reflection_threshold=8, # we will give this a relatively low number to show how reflection works
)
tommie = GenerativeAgent(
name="Tommie",
age=25,
traits="anxious, likes design, talkative", # You can add more persistent traits here
status="looking for a job", # When connected to a virtual world, we can have the characters update their status
memory_retriever=create_new_memory_retriever(),
llm=LLM,
memory=tommies_memory,
)
```
```python
# The current "Summary" of a character can't be made because the agent hasn't made
# any observations yet.
print(tommie.get_summary())
```
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
No information about Tommie's core characteristics is provided in the given statements.
```python
# We can add memories directly to the memory object
tommie_observations = [
"Tommie remembers his dog, Bruno, from when he was a kid",
"Tommie feels tired from driving so far",
"Tommie sees the new home",
"The new neighbors have a cat",
"The road is noisy at night",
"Tommie is hungry",
"Tommie tries to get some rest.",
]
for observation in tommie_observations:
tommie.memory.add_memory(observation)
```
```python
# Now that Tommie has 'memories', their self-summary is more descriptive, though still rudimentary.
# We will see how this summary updates after more observations to create a more rich description.
print(tommie.get_summary(force_refresh=True))
```
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie is a person who is observant of his surroundings, has a sentimental side, and experiences basic human needs such as hunger and the need for rest. He also tends to get tired easily and is affected by external factors such as noise from the road or a neighbor's pet.
## Pre-Interview with Character
Before sending our character on their way, let's ask them a few questions.
```python
def interview_agent(agent: GenerativeAgent, message: str) -> str:
"""Help the notebook user interact with the agent."""
new_message = f"{USER_NAME} says {message}"
return agent.generate_dialogue_response(new_message)[1]
```
```python
interview_agent(tommie, "What do you like to do?")
```
'Tommie said "I really enjoy design and being creative. I\'ve been working on some personal projects lately. What about you, Person A? What do you like to do?"'
```python
interview_agent(tommie, "What are you looking forward to doing today?")
```
'Tommie said "Well, I\'m actually looking for a job right now, so hopefully I can find some job postings online and start applying. How about you, Person A? What\'s on your schedule for today?"'
```python
interview_agent(tommie, "What are you most worried about today?")
```
'Tommie said "Honestly, I\'m feeling pretty anxious about finding a job. It\'s been a bit of a struggle lately, but I\'m trying to stay positive and keep searching. How about you, Person A? What worries you?"'
## Step through the day's observations.
```python
# Let's have Tommie start going through a day in the life.
observations = [
"Tommie wakes up to the sound of a noisy construction site outside his window.",
"Tommie gets out of bed and heads to the kitchen to make himself some coffee.",
"Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some.",
"Tommie finally finds the filters and makes himself a cup of coffee.",
"The coffee tastes bitter, and Tommie regrets not buying a better brand.",
"Tommie checks his email and sees that he has no job offers yet.",
"Tommie spends some time updating his resume and cover letter.",
"Tommie heads out to explore the city and look for job openings.",
"Tommie sees a sign for a job fair and decides to attend.",
"The line to get in is long, and Tommie has to wait for an hour.",
"Tommie meets several potential employers at the job fair but doesn't receive any offers.",
"Tommie leaves the job fair feeling disappointed.",
"Tommie stops by a local diner to grab some lunch.",
"The service is slow, and Tommie has to wait for 30 minutes to get his food.",
"Tommie overhears a conversation at the next table about a job opening.",
"Tommie asks the diners about the job opening and gets some information about the company.",
"Tommie decides to apply for the job and sends his resume and cover letter.",
"Tommie continues his search for job openings and drops off his resume at several local businesses.",
"Tommie takes a break from his job search to go for a walk in a nearby park.",
"A dog approaches and licks Tommie's feet, and he pets it for a few minutes.",
"Tommie sees a group of people playing frisbee and decides to join in.",
"Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose.",
"Tommie goes back to his apartment to rest for a bit.",
"A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor.",
"Tommie starts to feel frustrated with his job search.",
"Tommie calls his best friend to vent about his struggles.",
"Tommie's friend offers some words of encouragement and tells him to keep trying.",
"Tommie feels slightly better after talking to his friend.",
]
```
```python
# Let's send Tommie on their way. We'll check in on their summary every few observations to watch it evolve
for i, observation in enumerate(observations):
_, reaction = tommie.generate_reaction(observation)
print(colored(observation, "green"), reaction)
if ((i + 1) % 20) == 0:
print("*" * 40)
print(
colored(
f"After {i+1} observations, Tommie's summary is:\n{tommie.get_summary(force_refresh=True)}",
"blue",
)
)
print("*" * 40)
```
    Tommie wakes up to the sound of a noisy construction site outside his window. Tommie groans and covers his head with a pillow, trying to block out the noise.
    Tommie gets out of bed and heads to the kitchen to make himself some coffee. Tommie stretches his arms and yawns before starting to make the coffee.
    Tommie realizes he forgot to buy coffee filters and starts rummaging through his moving boxes to find some. Tommie sighs in frustration and continues searching through the boxes.
    Tommie finally finds the filters and makes himself a cup of coffee. Tommie takes a deep breath and enjoys the aroma of the fresh coffee.
    The coffee tastes bitter, and Tommie regrets not buying a better brand. Tommie grimaces and sets the coffee mug aside.
    Tommie checks his email and sees that he has no job offers yet. Tommie sighs and closes his laptop, feeling discouraged.
    Tommie spends some time updating his resume and cover letter. Tommie nods, feeling satisfied with his progress.
    Tommie heads out to explore the city and look for job openings. Tommie feels a surge of excitement and anticipation as he steps out into the city.
    Tommie sees a sign for a job fair and decides to attend. Tommie feels hopeful and excited about the possibility of finding job opportunities at the job fair.
    The line to get in is long, and Tommie has to wait for an hour. Tommie taps his foot impatiently and checks his phone for the time.
    Tommie meets several potential employers at the job fair but doesn't receive any offers. Tommie feels disappointed and discouraged, but he remains determined to keep searching for job opportunities.
    Tommie leaves the job fair feeling disappointed. Tommie feels disappointed and discouraged, but he remains determined to keep searching for job opportunities.
    Tommie stops by a local diner to grab some lunch. Tommie feels relieved to take a break and satisfy his hunger.
    The service is slow, and Tommie has to wait for 30 minutes to get his food. Tommie feels frustrated and impatient due to the slow service.
    Tommie overhears a conversation at the next table about a job opening. Tommie feels a surge of hope and excitement at the possibility of a job opportunity but decides not to interfere with the conversation at the next table.
    Tommie asks the diners about the job opening and gets some information about the company. Tommie said "Excuse me, I couldn't help but overhear your conversation about the job opening. Could you give me some more information about the company?"
    Tommie decides to apply for the job and sends his resume and cover letter. Tommie feels hopeful and proud of himself for taking action towards finding a job.
    Tommie continues his search for job openings and drops off his resume at several local businesses. Tommie feels hopeful and determined to keep searching for job opportunities.
    Tommie takes a break from his job search to go for a walk in a nearby park. Tommie feels refreshed and rejuvenated after taking a break in the park.
    A dog approaches and licks Tommie's feet, and he pets it for a few minutes. Tommie feels happy and enjoys the brief interaction with the dog.
    ****************************************
    After 20 observations, Tommie's summary is:
    Name: Tommie (age: 25)
    Innate traits: anxious, likes design, talkative
    Tommie is determined and hopeful in his search for job opportunities, despite encountering setbacks and disappointments. He is also able to take breaks and care for his physical needs, such as getting rest and satisfying his hunger. Tommie is nostalgic towards his past, as shown by his memory of his childhood dog. Overall, Tommie is a hardworking and resilient individual who remains focused on his goals.
    ****************************************
    Tommie sees a group of people playing frisbee and decides to join in. Do nothing.
    Tommie has fun playing frisbee but gets hit in the face with the frisbee and hurts his nose. Tommie feels pain and puts a hand to his nose to check for any injury.
    Tommie goes back to his apartment to rest for a bit. Tommie feels relieved to take a break and rest for a bit.
    A raccoon tore open the trash bag outside his apartment, and the garbage is all over the floor. Tommie feels annoyed and frustrated at the mess caused by the raccoon.
    Tommie starts to feel frustrated with his job search. Tommie feels discouraged but remains determined to keep searching for job opportunities.
    Tommie calls his best friend to vent about his struggles. Tommie said "Hey, can I talk to you for a bit? I'm feeling really frustrated with my job search."
    Tommie's friend offers some words of encouragement and tells him to keep trying. Tommie said "Thank you, I really appreciate your support and encouragement."
    Tommie feels slightly better after talking to his friend. Tommie feels grateful for his friend's support.
## Interview after the day
```python
interview_agent(tommie, "Tell me about how your day has been going")
```
'Tommie said "It\'s been a bit of a rollercoaster, to be honest. I\'ve had some setbacks in my job search, but I also had some good moments today, like sending out a few resumes and meeting some potential employers at a job fair. How about you?"'
```python
interview_agent(tommie, "How do you feel about coffee?")
```
'Tommie said "I really enjoy coffee, but sometimes I regret not buying a better brand. How about you?"'
```python
interview_agent(tommie, "Tell me about your childhood dog!")
```
'Tommie said "Oh, I had a dog named Bruno when I was a kid. He was a golden retriever and my best friend. I have so many fond memories of him."'
## Adding Multiple Characters
Let's add a second character to have a conversation with Tommie. Feel free to configure different traits.
```python
eves_memory = GenerativeAgentMemory(
llm=LLM,
memory_retriever=create_new_memory_retriever(),
verbose=False,
reflection_threshold=5,
)
eve = GenerativeAgent(
name="Eve",
age=34,
traits="curious, helpful", # You can add more persistent traits here
status="N/A", # When connected to a virtual world, we can have the characters update their status
llm=LLM,
daily_summaries=[
(
"Eve started her new job as a career counselor last week and received her first assignment, a client named Tommie."
)
],
memory=eves_memory,
verbose=False,
)
```
```python
yesterday = (datetime.now() - timedelta(days=1)).strftime("%A %B %d")
eve_observations = [
"Eve wakes up and hear's the alarm",
"Eve eats a boal of porridge",
"Eve helps a coworker on a task",
"Eve plays tennis with her friend Xu before going to work",
"Eve overhears her colleague say something about Tommie being hard to work with",
]
for observation in eve_observations:
eve.memory.add_memory(observation)
```
```python
print(eve.get_summary())
```
Name: Eve (age: 34)
Innate traits: curious, helpful
Eve is a helpful and active person who enjoys sports and takes care of her physical health. She is attentive to her surroundings, including her colleagues, and has good time management skills.
## Pre-conversation interviews
Let's "Interview" Eve before she speaks with Tommie.
```python
interview_agent(eve, "How are you feeling about today?")
```
'Eve said "I\'m feeling pretty good, thanks for asking! Just trying to stay productive and make the most of the day. How about you?"'
```python
interview_agent(eve, "What do you know about Tommie?")
```
'Eve said "I don\'t know much about Tommie, but I heard someone mention that they find them difficult to work with. Have you had any experiences working with Tommie?"'
```python
interview_agent(
eve,
"Tommie is looking to find a job. What are are some things you'd like to ask him?",
)
```
'Eve said "That\'s interesting. I don\'t know much about Tommie\'s work experience, but I would probably ask about his strengths and areas for improvement. What about you?"'
```python
interview_agent(
eve,
"You'll have to ask him. He may be a bit anxious, so I'd appreciate it if you keep the conversation going and ask as many questions as possible.",
)
```
'Eve said "Sure, I can keep the conversation going and ask plenty of questions. I want to make sure Tommie feels comfortable and supported. Thanks for letting me know."'
## Dialogue between Generative Agents
Generative agents are much more complex when they interact with a virtual environment or with each other. Below, we run a simple conversation between Tommie and Eve.
```python
def run_conversation(agents: List[GenerativeAgent], initial_observation: str) -> None:
"""Runs a conversation between agents."""
_, observation = agents[1].generate_reaction(initial_observation)
print(observation)
turns = 0
while True:
break_dialogue = False
for agent in agents:
stay_in_dialogue, observation = agent.generate_dialogue_response(
observation
)
print(observation)
# observation = f"{agent.name} said {reaction}"
if not stay_in_dialogue:
break_dialogue = True
if break_dialogue:
break
turns += 1
```
```python
agents = [tommie, eve]
run_conversation(
agents,
"Tommie said: Hi, Eve. Thanks for agreeing to meet with me today. I have a bunch of questions and am not sure where to start. Maybe you could first share about your experience?",
)
```
Eve said "Sure, Tommie. I'd be happy to share about my experience. Where would you like me to start?"
Tommie said "That's great, thank you! How about you start by telling me about your previous work experience?"
Eve said "Sure, I'd be happy to share my previous work experience with you. I've worked in a few different industries, including marketing and event planning. What specific questions do you have for me?"
Tommie said "That's great to hear. Can you tell me more about your experience in event planning? I've always been interested in that field."
Eve said "Sure, I'd be happy to share about my experience in event planning. I've worked on a variety of events, from corporate conferences to weddings. One of the biggest challenges I faced was managing multiple vendors and ensuring everything ran smoothly on the day of the event. What specific questions do you have?"
Tommie said "That sounds like a lot of responsibility! Can you tell me more about how you handled the challenges that came up during those events?"
Eve said "Sure, Tommie. I'd be happy to share with you how I handled those challenges. One approach that worked well for me was to stay organized and create a detailed timeline for the event. This helped me keep track of all the different tasks that needed to be done and when they needed to be completed. I also made sure to communicate clearly with all the vendors and team members involved in the event to ensure everyone was on the same page. Would you like me to go into more detail?"
Tommie said "Thank you for sharing that with me, Eve. That sounds like a great approach to managing events. Can you tell me more about how you handled any unexpected issues that came up during the events?"
Eve said "Of course, Tommie. One example of an unexpected issue I faced was when one of the vendors didn't show up on time. To handle this, I quickly contacted a backup vendor and was able to get everything back on track. It's always important to have a backup plan in case things don't go as planned. Do you have any other questions about event planning?"
Tommie said "Thank you for sharing that with me, Eve. It's really helpful to hear how you handled unexpected issues like that. Can you give me an example of how you communicated with your team to ensure everyone was on the same page during an event?"
Eve said "Sure, Tommie. One thing I did to ensure everyone was on the same page was to have regular check-ins and meetings with the team leading up to the event. This helped us address any issues or concerns early on and make sure everyone was clear on their roles and responsibilities. Have you ever had to manage a team for an event before?"
Tommie said "That's a great idea, Eve. I haven't had the opportunity to manage a team for an event yet, but I'll definitely keep that in mind for the future. Thank you for sharing your experience with me."
Eve said "Thanks for the opportunity to share my experience, Tommie. It was great meeting with you today."
## Let's interview our agents after their conversation
Since the generative agents retain their memories from the day, we can ask them about their plans, conversations, and other memories.
```python
# We can see a current "Summary" of a character based on their own perception of self
# has changed
print(tommie.get_summary(force_refresh=True))
```
Name: Tommie (age: 25)
Innate traits: anxious, likes design, talkative
Tommie is determined and hopeful in his job search, but can also feel discouraged and frustrated at times. He has a strong connection to his childhood dog, Bruno. Tommie seeks support from his friends when feeling overwhelmed and is grateful for their help. He also enjoys exploring his new city.
```python
print(eve.get_summary(force_refresh=True))
```
Name: Eve (age: 34)
Innate traits: curious, helpful
Eve is a helpful and friendly person who enjoys playing sports and staying productive. She is attentive and responsive to others' needs, actively listening and asking questions to understand their perspectives. Eve has experience in event planning and communication, and is willing to share her knowledge and expertise with others. She values teamwork and collaboration, and strives to create a comfortable and supportive environment for everyone.
```python
interview_agent(tommie, "How was your conversation with Eve?")
```
'Tommie said "It was really helpful actually. Eve shared some great tips on managing events and handling unexpected issues. I feel like I learned a lot from her experience."'
```python
interview_agent(eve, "How was your conversation with Tommie?")
```
'Eve said "It was great, thanks for asking. Tommie was very receptive and had some great questions about event planning. How about you, have you had any interactions with Tommie?"'
```python
interview_agent(eve, "What do you wish you would have said to Tommie?")
```
'Eve said "It was great meeting with you, Tommie. If you have any more questions or need any help in the future, don\'t hesitate to reach out to me. Have a great day!"'
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@cookbook@generative_agents_interactive_simulacra_of_human_behavior.ipynb@.PATH_END.py
|
{
"filename": "_visible.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/error_y/_visible.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="visible", parent_name="scatter3d.error_y", **kwargs
):
super(VisibleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@scatter3d@error_y@[email protected]_END.py
|
{
"filename": "_textcase.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/sankey/hoverlabel/font/_textcase.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcaseValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="textcase", parent_name="sankey.hoverlabel.font", **kwargs
):
super(TextcaseValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", ["normal", "word caps", "upper", "lower"]),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@sankey@hoverlabel@font@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "rapidsai/cuml",
"repo_path": "cuml_extracted/cuml-main/python/cuml/cuml/neighbors/__init__.py",
"type": "Python"
}
|
#
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.internals.import_utils import has_dask
from cuml.neighbors.nearest_neighbors import NearestNeighbors
from cuml.neighbors.nearest_neighbors import kneighbors_graph
from cuml.neighbors.kneighbors_classifier import KNeighborsClassifier
from cuml.neighbors.kneighbors_regressor import KNeighborsRegressor
from cuml.neighbors.kernel_density import (
KernelDensity,
VALID_KERNELS,
logsumexp_kernel,
)
VALID_METRICS = {
"brute": set(
[
"l2",
"euclidean",
"l1",
"cityblock",
"manhattan",
"taxicab",
# TODO: add "braycurtis" after https://github.com/rapidsai/raft/issues/1285
"canberra",
"minkowski",
"lp",
"chebyshev",
"linf",
"jensenshannon",
"cosine",
"correlation",
"inner_product",
"sqeuclidean",
"haversine",
]
),
"rbc": set(["euclidean", "haversine", "l2"]),
"ivfflat": set(
[
"l2",
"euclidean",
"sqeuclidean",
"inner_product",
"cosine",
"correlation",
]
),
"ivfpq": set(
[
"l2",
"euclidean",
"sqeuclidean",
"inner_product",
"cosine",
"correlation",
]
),
}
VALID_METRICS_SPARSE = {
"brute": set(
[
"euclidean",
"l2",
"inner_product",
"l1",
"cityblock",
"manhattan",
"taxicab",
"canberra",
"linf",
"chebyshev",
"jaccard",
"minkowski",
"lp",
"cosine",
"hellinger",
]
)
}
|
rapidsaiREPO_NAMEcumlPATH_START.@cuml_extracted@cuml-main@python@cuml@cuml@neighbors@[email protected]_END.py
|
{
"filename": "test_psfs.py",
"repo_name": "LouisDesdoigts/dLux",
"repo_path": "dLux_extracted/dLux-main/tests/test_psfs.py",
"type": "Python"
}
|
from jax import numpy as np, config
config.update("jax_debug_nans", True)
import pytest
from dLux import PSF
@pytest.fixture
def psf():
return PSF(np.ones((16, 16)), 1 / 16)
class TestPSF:
def test_constructor(self, psf):
assert psf.npixels == 16
assert psf.pixel_scale == 1 / 16
def test_properties(self, psf):
assert psf.ndim == 0
def test_methods(self, psf):
assert psf.downsample(2).npixels == 8
assert isinstance(psf.convolve(np.ones((2, 2))), PSF)
assert isinstance(psf.convolve(np.ones((2, 2)), method="fft"), PSF)
assert isinstance(psf.rotate(np.pi), PSF)
assert isinstance(psf.resize(8), PSF)
assert isinstance(psf.flip(0), PSF)
def test_magic(self, psf):
psf *= np.ones(1)
assert isinstance(psf, PSF)
psf += np.ones(1)
assert isinstance(psf, PSF)
psf -= np.ones(1)
assert isinstance(psf, PSF)
psf /= np.ones(1)
assert isinstance(psf, PSF)
|
LouisDesdoigtsREPO_NAMEdLuxPATH_START.@dLux_extracted@dLux-main@tests@[email protected]_END.py
|
{
"filename": "_labelalias.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/densitymap/colorbar/_labelalias.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LabelaliasValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(
self, plotly_name="labelalias", parent_name="densitymap.colorbar", **kwargs
):
super(LabelaliasValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@densitymap@colorbar@[email protected]_END.py
|
{
"filename": "adamax_test.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/optimizers/adamax_test.py",
"type": "Python"
}
|
# flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.optimizers.adamax import Adamax
class AdamaxTest(testing.TestCase):
def test_config(self):
optimizer = Adamax(
learning_rate=0.5,
beta_1=0.8,
beta_2=0.95,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Adamax(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(vars, [0.5, 1.5, 2.5, 3.5], rtol=1e-4, atol=1e-4)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adamax(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adamax(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adamax(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Adamax(
learning_rate=0.2, beta_1=0.85, beta_2=0.95, epsilon=1e-6
)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8],
[0.6827, 0.6873, 0.6888, 0.6896, 0.6901, 0.6904, 0.6906, 0.6908, 0.6909, 0.691],
[0.5333, 0.5407, 0.5431, 0.5444, 0.5451, 0.5456, 0.546, 0.5462, 0.5464, 0.5466],
[0.368, 0.3773, 0.3804, 0.382, 0.3829, 0.3835, 0.384, 0.3843, 0.3846, 0.3848],
[0.1933, 0.204, 0.2076, 0.2094, 0.2105, 0.2112, 0.2117, 0.2121, 0.2124, 0.2126]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adamax(clipnorm=1)
grad = [np.array([100.0, 100.0])]
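        # clipnorm=1 rescales the gradient to unit L2 norm:
        # [100, 100] / (100 * sqrt(2)) == [sqrt(2)/2, sqrt(2)/2]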
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adamax(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@optimizers@[email protected]_END.py
|
{
"filename": "dump.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/core/langchain_core/load/dump.py",
"type": "Python"
}
|
import json
from typing import Any
from langchain_core.load.serializable import Serializable, to_json_not_implemented
def default(obj: Any) -> Any:
"""Return a default value for a Serializable object or
a SerializedNotImplemented object.
Args:
obj: The object to serialize to json if it is a Serializable object.
Returns:
A json serializable object or a SerializedNotImplemented object.
"""
if isinstance(obj, Serializable):
return obj.to_json()
else:
return to_json_not_implemented(obj)
def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:
"""Return a json string representation of an object.
Args:
obj: The object to dump.
pretty: Whether to pretty print the json. If true, the json will be
indented with 2 spaces (if no indent is provided as part of kwargs).
Default is False.
kwargs: Additional arguments to pass to json.dumps
Returns:
A json string representation of the object.
Raises:
ValueError: If `default` is passed as a kwarg.
"""
if "default" in kwargs:
msg = "`default` should not be passed to dumps"
raise ValueError(msg)
try:
if pretty:
indent = kwargs.pop("indent", 2)
return json.dumps(obj, default=default, indent=indent, **kwargs)
else:
return json.dumps(obj, default=default, **kwargs)
except TypeError:
if pretty:
indent = kwargs.pop("indent", 2)
return json.dumps(to_json_not_implemented(obj), indent=indent, **kwargs)
else:
return json.dumps(to_json_not_implemented(obj), **kwargs)
def dumpd(obj: Any) -> Any:
"""Return a dict representation of an object.
Note:
Unfortunately this function is not as efficient as it could be
because it first dumps the object to a json string and then loads it
back into a dictionary.
Args:
obj: The object to dump.
Returns:
dictionary that can be serialized to json using json.dumps
"""
return json.loads(dumps(obj))
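# Illustrative usage sketch (not part of the module): any Serializable object,
# for example a prompt template, can be round-tripped with dumps()/dumpd().
#   from langchain_core.prompts import PromptTemplate
#   prompt = PromptTemplate.from_template("Tell me about {topic}")
#   as_str = dumps(prompt, pretty=True)   # JSON string with 2-space indent
#   as_dict = dumpd(prompt)               # plain dict via json.loads(dumps(obj))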
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@core@langchain_core@[email protected]@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "Starfish-develop/Starfish",
"repo_path": "Starfish_extracted/Starfish-master/Starfish/__init__.py",
"type": "Python"
}
|
__version__ = "0.4.2"
from .spectrum import Spectrum
__all__ = [
"constants",
"emulator",
"grid_tools",
"models",
"samplers",
"spectrum",
"Spectrum",
"transforms",
"utils",
]
|
Starfish-developREPO_NAMEStarfishPATH_START.@Starfish_extracted@Starfish-master@Starfish@[email protected]_END.py
|
{
"filename": "centroid.py",
"repo_name": "SAMI-Galaxy-Survey/sami",
"repo_path": "sami_extracted/sami-master/observing/centroid.py",
"type": "Python"
}
|
"""
This file contains some functions used during SAMI observing. These revolve around fitting stars in the RSS data.
1) centroid(infile, ifus='all', savefile=True, plot=True)
-infile should be a reduced RSS file, already passed through 2dfdr.
-ifus should be a list of the probe numbers you want to run on, e.g. [11,12].
-savefile should be True if you want the measured offsets written to a text file (named after the input file).
-plot should be set to True if you want to see and save the images with overlaid fits.
This function is primarily for use on commissioning star field observations. The main purpose of this function is to
calculate the offsets between the fitted positions of the stars and the centres of the hexabundles (i.e. where they
should be). Example usage:
centroid('12mar20044red.fits', ifus=[11,12], savefile=True, plot=True)
This will print to the screen the calculated offsets like so:
-------------------------------------------------
Offsets RA Dec:
Probe 11 -1.12940162731 -0.138415127654
Probe 12 0.0365293069473 1.75226680276
and will save two files: 12mar20044red.txt and 12mar20044red.pdf, the former for feeding Tony Farrell's code to calculate plate scale
errors (including global plate rotation) and the latter a pdf of the images with overlaid fits.
The widths of the fits are also printed to the screen like so:
-------------------------------------------------
FWHM of fits (in \").
[ 1.28656199 0.566648 ]
this gives you a handy measure of seeing. (Please note in this case we did not have 0.6\" seeing, there is no star in
probe 12, it's junk, designed only to illustrate use!)
2) focus(inlist, ifu)
-inlist should be a list of files to run the focus script on, one file name per line.
-ifu should be the ifu containing the star (ONE only) e.g. [11]
This function is for use during the daily telescope focus check. Example usage:
focus('focus.list', ifu=[11])
The output to the screen includes the telescope focus values and the FWHM values like so:
Focus values are (in mm): [ 38.83754565 38.1 38.3 38.5 38.7 38.9 ]
FWHM values are (in \"): [ 1.58563038 2.49753397 1.58024517 1.28656199 1.3452223 1.50470957]
Two figures will also be produced, the first showing the images and fits for the probe in question for all files. The
second plots the focus values vs fwhm values with a fitted parabola. The minimum of the fit can be picked by eye or the
official value is also printed to screen.
3) seeing(infile, ifu):
-infile should be a reduced RSS file, already passed through 2dfdr.
-ifu should be an integer (1-13).
Calculates the seeing from the star observation in a particular field.
Prints values to screen and makes a plot showing the fit.
Takes one ifu at a time so if you have many stars (i.e. a star field) then use the centroid function above.
4) centroid_fit(x,y,data,microns=True)
You shouldn't need to touch this one, it is called by the functions above (as well as by the align_micron module).
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import pylab as py
import numpy as np
import scipy as sp
import os
import photutils
# astropy fits file io (replacement for pyfits)
import astropy.io.fits as pf
import string
import itertools
from scipy.ndimage.filters import median_filter
# Circular patch.
from matplotlib.patches import Circle
from .. import utils
from .. import samifitting as fitting
# importing everything defined in the config file
from ..config import *
def centroid(infile, ifus='all', savefile=True, plot=True):
"""Fits to positions of the stars in ifus for infile. Primary purpose is to produce the files needed as imput for
Tony's code."""
# Define IFUs.
if ifus=='all':
ifus=[1,2,3,4,5,6,7,8,9,10,11,12,13]
else:
ifus=ifus
# Number of IFUs to display
n=len(ifus)
print()
print("--------------------------------------------------------------------------")
print("I am running the centroid script on", n, "IFU(s) in file", infile)
print("--------------------------------------------------------------------------")
print()
# Number of rows and columns needed in the final display box
# This is a bit of a fudge...
if n==1:
r=1
c=1
elif n==2:
r=1
c=2
elif n==3:
r=1
c=3
elif n==4:
r=2
c=2
elif n>3 and n<=6:
r=2
c=3
elif n>6 and n<=9:
r=3
c=3
elif n>=9 and n<=12:
r=3
c=4
elif n>=13 and n<=16:
r=4
c=4
if plot==True:
# Create the figure
f0=py.figure()
#f1=py.figure() # Add a figure for the sky coords plots.
# Open the output file for writing
if savefile:
# Find the name of the input file
outfile=str.split(os.path.basename(infile), '.')[0]
        out_txt=outfile + ".txt"
print("Output text file is:", out_txt)
# Open the text file for writing. Note this will overwrite existing files.
f=open(out_txt, 'w')
else:
outfile = None
# List for the size of the Gaussian
fwhm_arr=[]
fwhm_conv_arr=[] # For the converted numbers
# List for the x and y offsets in arcseconds.
x_off_arr=[]
y_off_arr=[]
# Print the heading for the offsets
print("-------------------------------------------------")
print("Offsets RA Dec:")
for i, ifu in enumerate(ifus):
# Use the utils module to extract data from a single IFU.
ifu_data=utils.IFU(infile, ifu, flag_name=False)
# Remove the position of fibre 1, the central fibre, from all other positions to get a grid of relative positions
idx0=np.where(ifu_data.n==1)
x_degrees=ifu_data.xpos-ifu_data.xpos[idx0]
y_degrees=ifu_data.ypos-ifu_data.ypos[idx0]
x_microns=ifu_data.x_microns-ifu_data.x_microns[idx0]
y_microns=ifu_data.y_microns-ifu_data.y_microns[idx0]
# Feed the wrapped fitter both the micron and sky values
p_sky, data_sky, xlin_sky, ylin_sky, model_sky=centroid_fit(x_degrees, y_degrees, ifu_data.data, microns=False,
circular=True)
p_mic, data_mic, xlin_mic, ylin_mic, model_mic=centroid_fit(x_microns, y_microns, ifu_data.data, circular=True)
# Expand out the returned fitted values.
amplitude_sky, xout_sky, yout_sky, sig_sky, bias_sky=p_sky
amplitude_mic, xout_mic, yout_mic, sig_mic, bias_mic=p_mic
# Find offsets in arcseconds using both methods
x_off=3600*xout_sky # Note - no need to subract the central fibre as that was done before the fit.
y_off=3600*yout_sky
# Use the micron values to calculate the offsets...
centroid_microns_converted=utils.plate2sky(xout_mic, yout_mic)
# Subtract the star postion from the hexabundle centre position
x_off_conv=-1*(centroid_microns_converted[0]) # plate2sky keeps micron sign convention
y_off_conv=centroid_microns_converted[1]
# Find the widths
x_w=sig_sky*3600.0
xm_w=sig_mic*15.22/1000.0
# FWHM (a measure of seeing)
fwhm=x_w*2.35
fwhm_arr.append(fwhm)
fwhm_conv=xm_w*2.35
fwhm_conv_arr.append(fwhm_conv)
#print "FWHM from four techniques:", fwhm, fwhm_corr, fwhm_conv, fwhm_conv_corr
print("Probe", ifu_data.ifu, x_off, y_off) #, x_off_conv, y_off_conv #, xm_off, ym_off, x_off, y_off
# Add the offsets to the lists
x_off_arr.append(x_off)
y_off_arr.append(y_off)
# Make an image of the bundle with the fit overlaid in contours. NOTE - plotting is done with the fit using
# the micron values. This is more aesthetic and simple.
if plot==True:
# The limits for the axes (plotting in microns).
xm_lower=np.min(x_microns)-100
xm_upper=np.max(x_microns)+100
ym_lower=np.min(y_microns)-100
ym_upper=np.max(y_microns)+100
# Debugging.
# The limits for the axes (plotting in sky coords).
#xs_lower=np.min(x_degrees)-0.001
#xs_upper=np.max(x_degrees)+0.001
#ys_lower=np.min(y_degrees)-0.001
#ys_upper=np.max(y_degrees)+0.001
#print np.min(ifu_data.xpos), np.max(ifu_data.xpos)
#print np.min(ifu_data.ypos), np.max(ifu_data.ypos)
# Add axes to the figure
ax0=f0.add_subplot(r,c,i+1, xlim=(xm_lower, xm_upper), ylim=(ym_lower, ym_upper), aspect='equal')
# For sky co-ords.
#ax1=f1.add_subplot(r,c,i+1, xlim=(xs_lower, xs_upper), ylim=(ys_lower, ys_upper), aspect='equal')
data_norm=data_mic/np.nanmax(data_mic)
mycolormap=py.get_cmap('YlGnBu_r')
# Iterate over the x, y positions (and data value) making a circle patch for each fibre, with the
# appropriate color.
for xmval, ymval, dataval in zip(x_microns, y_microns, data_norm):
# Make and add the fibre patch to the axes.
fibre_microns=Circle(xy=(xmval,ymval), radius=52.5) # 52.5
ax0.add_artist(fibre_microns)
fibre_microns.set_facecolor(mycolormap(dataval))
# Add the model fit as contours.
con0=ax0.contour(xlin_mic, ylin_mic, np.transpose(model_mic), origin='lower')
#con1=ax1.contour(xlin_sky, ylin_sky, np.transpose(model_sky), origin='lower')
# Title and get rid of ticks.
title_string='Probe ' + str(ifu_data.ifu)
py.title(title_string)
py.setp(ax0.get_xticklabels(), visible=False)
py.setp(ax0.get_yticklabels(), visible=False)
# Needed in future for debugging...
#for xval, yval, dataval in zip(x_degrees, y_degrees, data_norm):
# Make and add the fibre patch to the axes.
#fibre_sky=Circle(xy=(xval,yval), radius=2.22e-4) # 52.5
#ax1.add_artist(fibre_sky)
#fibre_sky.set_facecolor(mycolormap(dataval))
#py.setp(ax1.get_xticklabels(), visible=False)
#py.setp(ax1.get_yticklabels(), visible=False)
# -------------------------------------------------------
# Write the results to file
if outfile is not None:
# Probe number, offset in RA ("), offset in Dec (")
s=str(ifu_data.ifu)+' '+str(x_off)+' '+str(y_off)+'\n' # the data to write to file
f.write(s)
print()
print("-------------------------------------------------")
if plot:
py.suptitle(infile)
py.show()
# Save the figure
if savefile:
# Save the figure
out_fig=outfile + ".pdf" # outfile has been defined above
print("Output pdf file is:", out_fig)
py.savefig(out_fig, format='pdf')
if savefile:
f.close() # close the output file
# Print out the measured width values from the sky coords calculation
fwhm_arr=np.asanyarray(fwhm_arr)
fwhm_conv_arr=np.asanyarray(fwhm_conv_arr)
print("-------------------------------------------------")
print()
print("FWHM of fits (in \"):")
print(fwhm_arr)
print()
# Now print the average offsets
x_off_arr=np.asarray(x_off_arr)
y_off_arr=np.asarray(y_off_arr)
RA_med=np.median(x_off_arr)
Dec_med=np.median(y_off_arr)
#print "Median offsets RA/Dec (in \"):"
#print "RA:", np.median(x_off_arr)
#print "Dec:", np.median(y_off_arr)
if RA_med < 0.0:
RA_flag='W'
else:
RA_flag='E'
if Dec_med < 0.0:
Dec_flag='S'
else:
Dec_flag='N'
print("To centre the objects in the bundles you should offset the telescope:")
print("RA", np.abs(RA_med), RA_flag)
print("Dec", np.abs(Dec_med), Dec_flag)
#print fwhm_conv_arr
def focus(inlist, ifu):
# Read in the files from the list of files.
files=[]
for line in open(inlist):
cols=line.split()
cols[0]=str.strip(cols[0])
files.append(str(cols[0])) # np.str was removed from NumPy; use the built-in str
# Number of files
n=len(files)
# Check the ifu makes some sense
all=[1,2,3,4,5,6,7,8,9,10,11,12,13]
if ifu in all:
print()
print("--------------------------------------------------------------------------")
print("I am running the focus script on probe", ifu, "for", n, "files.")
print("--------------------------------------------------------------------------")
print()
else:
print()
print("-----------------------------------------------------------------------")
print("You have not provided a vaild probe number. Must be between 1 and 13.")
print("-----------------------------------------------------------------------")
# Exit the function
return
# Number of rows and columns needed in the final display box
# This is a bit of a fudge...
if n==1:
r=1
c=1
elif n==2:
r=1
c=2
elif n==3:
r=1
c=3
elif n==4:
r=2
c=2
elif n>3 and n<=6:
r=2
c=3
elif n>6 and n<=9:
r=3
c=3
elif n>=9 and n<=12:
r=3
c=4
elif n>=13 and n<=16:
r=4
c=4
# It is possible you will have to put the values in a list here, due to header values being wrong!
# For example, replace the line below with this: focus_vals=np.array([38.8,39.0,39.2,39.4,39.6,38.6])
# and then comment out the line below that reads TELFOC from the header (focus=pf.getval(infile,'TELFOC')).
focus_values=np.empty((len(files))) # empty array for focus values from header
fwhm_values=np.empty((len(files))) # as above for calculated fwhm values
f0=py.figure() # figure to contain the fits
for i, infile in enumerate(files):
# Pull the focus value out of the header - NB this isn't always right!!
focus=pf.getval(infile,'TELFOC')
focus_values[i]=focus
# Use the utils module to extract data from a single IFU.
ifu_data=utils.IFU(infile, ifu, flag_name=False)
# Remove the position of fibre 1, the central fibre, from all other positions to get a grid of relative positions
idx0=np.where(ifu_data.n==1)
x_degrees=ifu_data.xpos-ifu_data.xpos[idx0]
y_degrees=ifu_data.ypos-ifu_data.ypos[idx0]
x_microns=ifu_data.x_microns-ifu_data.x_microns[idx0]
y_microns=ifu_data.y_microns-ifu_data.y_microns[idx0]
# Feed the wrapped fitter both the micron and sky values
p_sky, data_sky, xlin_sky, ylin_sky, model_sky=centroid_fit(x_degrees, y_degrees, ifu_data.data,
microns=False, circular=True)
p_mic, data_mic, xlin_mic, ylin_mic, model_mic=centroid_fit(x_microns, y_microns,
ifu_data.data, circular=True)
# Expand out the returned fitted values.
amplitude_sky, xout_sky, yout_sky, sig_sky, bias_sky=p_sky
amplitude_mic, xout_mic, yout_mic, sig_mic, bias_mic=p_mic
# Find the widths
x_w=sig_sky*3600.0 # from sky coords fit
xm_w=sig_mic*15.22/1000.0 # from plate coords (microns) fit.
# FWHM (a measure of seeing)
fwhm=x_w*2.35
fwhm_values[i]=fwhm
# The limits for the axes (plotting in microns).
xm_lower=np.min(x_microns)-100
xm_upper=np.max(x_microns)+100
ym_lower=np.min(y_microns)-100
ym_upper=np.max(y_microns)+100
# Add axes to the figure
ax0=f0.add_subplot(r,c,i+1, xlim=(xm_lower, xm_upper), ylim=(ym_lower, ym_upper), aspect='equal')
data_norm=data_mic/np.nanmax(data_mic)
mycolormap=py.get_cmap('YlGnBu_r')
# Iterate over the x, y positions (and data value) making a circle patch for each fibre, with the
# appropriate color.
for xmval, ymval, dataval in zip(x_microns, y_microns, data_norm):
# Make and add the fibre patch to the axes.
fibre=Circle(xy=(xmval,ymval), radius=52.5) # 52.5
ax0.add_artist(fibre)
fibre.set_facecolor(mycolormap(dataval))
# Add the model fit as contours.
con0=ax0.contour(xlin_mic, ylin_mic, np.transpose(model_mic), origin='lower')
subtitle_string='Focus ' + str(focus) + '\n' + str(infile)
py.title(subtitle_string, fontsize=11)
py.setp(ax0.get_xticklabels(), visible=False)
py.setp(ax0.get_yticklabels(), visible=False)
# Title and get rid of ticks.
title_string='Focus Run: Probe ' + str(ifu)
py.suptitle(title_string)
# Now make a plot of the focus values vs FWHM of the Gaussian fit.
f1=py.figure()
ax1=f1.add_subplot(1,1,1)
ax1.plot(focus_values, fwhm_values, 'bo') #, label=IFUlist[j])
print()
print("Focus values are (in mm):", focus_values)
print("FWHM values are (in \"):", fwhm_values)
print()
p=np.polyfit(focus_values, fwhm_values, 2)
focus_lin=np.arange(np.min(focus_values), np.max(focus_values)+0.1, 0.1)
fit=np.polyval(p, focus_lin)
ax1.plot(focus_lin, fit, 'r')
ax1.set_xlabel('Telescope focus (mm)')
ax1.set_ylabel('Star FWHM (\")')
py.show()
print("Focus value at minimum of fitted parabola: ", focus_lin[np.where(fit==np.min(fit))][0])
def seeing(infile, ifu):
"""
Calculate the seeing from the star observation in a particular field.
Takes one ifu at a time so if you have many stars (i.e. a star field) then use the centroid function above.
"""
# Check the ifu makes some sense
all=[1,2,3,4,5,6,7,8,9,10,11,12,13]
if ifu in all:
print()
print("------------------------------------------------------")
print("You have told me that the PSF star is in probe", ifu)
print("------------------------------------------------------")
else:
print()
print("-----------------------------------------------------------------------")
print("You have not provided a vaild probe number. Must be between 1 and 13.")
print("-----------------------------------------------------------------------")
# Exit the function
return
# Use the utils module to extract data from a single IFU.
ifu_data=utils.IFU(infile, ifu, flag_name=False)
# Remove the position of fibre 1, the central fibre, from all other positions to get a grid of relative positions
idx0=np.where(ifu_data.n==1)
x_degrees=ifu_data.xpos-ifu_data.xpos[idx0]
y_degrees=ifu_data.ypos-ifu_data.ypos[idx0]
x_microns=ifu_data.x_microns-ifu_data.x_microns[idx0]
y_microns=ifu_data.y_microns-ifu_data.y_microns[idx0]
# Feed the data to the fitter, using both types of coordinates.
p_sky, data_sky, xlin_sky, ylin_sky, model_sky=centroid_fit(x_degrees, y_degrees, ifu_data.data,
microns=False, circular=False)
p_mic, data_mic, xlin_mic, ylin_mic, model_mic=centroid_fit(x_microns, y_microns, ifu_data.data, circular=False)
# Expand out the returned fitted values.
amplitude_sky, xout_sky, yout_sky, sigx_sky, sigy_sky, rot_sky, bias_sky=p_sky
amplitude_mic, xout_mic, yout_mic, sigx_mic, sigy_mic, rot_mic, bias_mic=p_mic
# Find the widths
x_w=sigx_sky*3600 # from sky coords fit
y_w=sigy_sky*3600
xm_w=sigx_mic*15.22/1000 # from plate coords (microns) fit. Rough conversion
ym_w=sigy_mic*15.22/1000
# FWHM (a measure of seeing)
fwhmx_sky=x_w*2.35
fwhmy_sky=y_w*2.35
fwhmx_mic=xm_w*2.35
fwhmy_mic=ym_w*2.35
print()
print("FWHM X:", np.around(fwhmx_sky, 4))
print("FWHM Y:", np.around(fwhmy_sky, 4))
print()
print("Seeing (average):", np.mean([fwhmx_sky, fwhmy_sky]))
print()
print("FWHM X/FWHM Y:", np.around(fwhmx_sky, 4)/np.around(fwhmy_sky, 4))
print()
# The limits for the axes (plotting in microns).
xm_lower=np.min(x_microns)-100
xm_upper=np.max(x_microns)+100
ym_lower=np.min(y_microns)-100
ym_upper=np.max(y_microns)+100
# Create the figure
f0=py.figure()
# Add axes to the figure
ax0=f0.add_subplot(1,1,1, xlim=(xm_lower, xm_upper), ylim=(ym_lower, ym_upper), aspect='equal')
data_norm=data_mic/np.nanmax(data_mic)
mycolormap=py.get_cmap('YlGnBu_r')
# Iterate over the x, y positions (and data value) making a circle patch for each fibre, with the
# appropriate color.
for xmval, ymval, dataval in zip(x_microns, y_microns, data_norm):
# Make and add the fibre patch to the axes.
fibre=Circle(xy=(xmval,ymval), radius=52.5) # 52.5
ax0.add_artist(fibre)
fibre.set_facecolor(mycolormap(dataval))
ax0.contour(xlin_mic, ylin_mic, np.transpose(model_mic), origin='lower')
# A title for the axes
title_string='Probe ' + str(ifu_data.ifu)
ax0.set_title(title_string, fontsize=14)
def centroid_fit(x,y,data,reference=None,rssframe=None,galaxyid=None,microns=True, circular=True): #** reference,rssframe,galaxyid added
"""Fit the x,y,data values, regardless of what they are and return some useful stuff. Data is an array of spectra"""
# rssframe may be None when centroid_fit is called without the reference-frame options, so guard against that.
working_dir = rssframe.strip('sci.fits') if rssframe is not None else ''
# Smooth the data spectrally to get rid of cosmics
data_smooth=np.zeros_like(data)
for q in range(np.shape(data)[0]):
# data_smooth[q,:]=utils.smooth(data[q,:], 11) #default hanning smooth
data_smooth[q,:]=median_filter(data[q,:], 15)
# Now sum the data over a large range to get broad band "image"
data_sum=np.nansum(data_smooth[:,200:1800],axis=1)
data_med=np.nanmedian(data_smooth[:,200:1800], axis=1)
#** New masking method starts ————————————————————————————————————————————————
from scipy.ndimage.filters import gaussian_filter
from astropy.stats import sigma_clipped_stats
from photutils import find_peaks
# Parameter initializations
x0, y0 = x-np.min(x), y-np.min(y) # image x,y
xc, yc = (np.max(x)-np.min(x))/2.+np.min(x),(np.max(y)-np.min(y))/2.+np.min(y) # central pixel
width = 85. # default gaussian filtering size
checkind = 'None' # for check list
img = np.zeros((np.max(x0)+1,np.max(y0)+1)) # rss image
x_good, y_good, data_sum_good = x, y, data_sum # good fibres to use
tx,ty,trad = xc,yc,1000 #target x,y centre and masking radius (1000 means no masking)
if not os.path.exists(working_dir+'_centroid_fit_reference/'): # path to save centre of reference frame & checklist
os.makedirs(working_dir+'_centroid_fit_reference')
# Load fibre flux to image
for i in range(len(x0)):
img[x0[i],y0[i]] = data_sum[i]
# Gaussian filtering
img1 = gaussian_filter(img, sigma=(width, width), order=0, mode='constant') # width = diameter of a core in degrees/microns
# Find peaks
mean, median, std = sigma_clipped_stats(img1, sigma=3.0)
threshold = median + std
tbl = find_peaks(img1, threshold, box_size=105)
# Case1: If no peaks are found, masking is not applied (in practice this case has not been seen).
if tbl is None:
checkind = 'nopeak'
elif(len(tbl) < 1):
checkind = 'nopeak'
# Case2: A single peak is found
elif(len(tbl) == 1):
checkind = 'single'
dist = (tbl['y_peak']+np.min(x)-xc)**2+(tbl['x_peak']+np.min(y)-yc)**2 # separation between a peak and centre
if(dist < (310)**2): # Single peak near the centre
tx,ty,trad = tbl['y_peak']+np.min(x), tbl['x_peak']+np.min(y),105*2 # find_peaks returns array-index order, so y_peak maps onto x here
else: # When a peak is near the edge. High possibility that our target is not detected due to low brightness
for k in range(1,100): # repeat until it finds multiple peaks with reduced filtering box
width = width*0.98
img3 = gaussian_filter(img, sigma=(width, width), order=0, mode='constant',cval=np.min(img)) # width = diameter of a core in degrees/microns
mean, median, std = sigma_clipped_stats(img3, sigma=3.0)
threshold = median + std*0.1
tbl = find_peaks(img3, threshold, box_size=width) #find peaks
if tbl is None:
continue
if(len(tbl)==1): # only a single peak is found until maximum iteration (=100)
tx,ty,trad=tbl['y_peak']+np.min(x), tbl['x_peak']+np.min(y),1000 # fibre masking is not applied (trad = 1000)
checkind = 'single_edge'
if(len(tbl)>1): # multiple peaks are found, go to Case3: multiple peaks
checkind = 'multi_faint'
break
# Case3: When there are multiple peaks
elif(len(tbl) > 1):
if checkind != 'multi_faint':
checkind = 'multi'
xx,yy = tbl['y_peak']+np.min(x), tbl['x_peak']+np.min(y) # find_peaks returns array-index order, so y_peak maps onto x here
# The assumption is that dithering is relatively small, and our target is near the target centre from the (1st) reference frame
if reference is not None and rssframe != reference and os.path.exists(working_dir+'_centroid_fit_reference/centre_'+galaxyid+'_ref.txt') != False:
fileref = open(working_dir+'_centroid_fit_reference/centre_'+galaxyid+'_ref.txt','r')
rx,ry=np.loadtxt(fileref, usecols=(0,1))
coff = (xx-rx)**2+(yy-ry)**2 # If not reference frame, the closest object from the reference
else:
coff = (xx-xc)**2+(yy-yc)**2 # If reference frame, the closest object from the centre
tx, ty = xx[np.where(coff == np.min(coff))[0][0]], yy[np.where(coff == np.min(coff))[0][0]] # target centre
xx, yy = xx[np.where(xx*yy != tx*ty)], yy[np.where(xx*yy != tx*ty)]
osub = np.where(((xx-tx)**2+(yy-ty)**2 - np.min((xx-tx)**2+(yy-ty)**2)) < 0.1) # the 2nd closest object
trad = np.sqrt((xx[osub]-tx)**2+(yy[osub]-ty)**2)/2. # masking radius = (a separation btw the target and 2nd closest object)/2.
if(trad > 105*2): # when masking radius is too big
trad = 105*2
if(trad < 105*1.5): # when masking radius is too small
trad = 105*1.5
# Use fibres only within masking radius
gsub = np.where(np.sqrt((x-tx)**2+(y-ty)**2) < trad)
if len(gsub[0]) < 5: # np.where returns a tuple, so count the selected fibres
tdist = np.sqrt((x-tx)**2+(y-ty)**2)
inds = np.argsort(tdist)
gsub = inds[:5]
x_good, y_good, data_sum_good = x[gsub], y[gsub], data_sum[gsub]
# Save the target centre of reference frame
if reference is not None and rssframe == reference:
ref=open(working_dir+'_centroid_fit_reference/centre_'+galaxyid+'_ref.txt','w')
try:
ref.write(str(tx.data[0])+' '+str(ty.data[0]))
except Exception: # tx/ty may be plain floats rather than table columns
ref.write(str(tx)+' '+str(ty))
ref.close()
#** New masking method ends ————————————————————————————————————————————————
# Use the crude distributed centre-of-mass to get the rough centre of mass
com=utils.comxyz(x_good,y_good,data_sum_good) #**use good data within masking
# Peak height guess could be closest fibre to com position.
dist=(x-com[0])**2+(y-com[1])**2 # distance between com and all fibres.
# First guess at width of Gaussian - diameter of a core in degrees/microns.
if microns==True:
sigx=105.0
core_diam=105.0
else:
sigx=4.44e-4
core_diam=4.44e-4
# First guess Gaussian parameters.
if circular==True:
p0=[data_sum[np.sum(np.where(dist==np.min(dist)))], com[0], com[1], sigx, 0.0]
#print "Guess Parameters:", p0 #here
elif circular==False:
p0=[data_sum[np.sum(np.where(dist==np.min(dist)))], com[0], com[1], sigx, sigx, 45.0, 0.0]
#print "Guess Parameters:", p0
# Fit the 2D Gaussian (circular or elliptical, depending on the circular keyword).
gf=fitting.TwoDGaussFitter(p0,x_good,y_good,data_sum_good) #** use good data within masking
fitting.fibre_integrator(gf, core_diam) # integrate the model over each fibre footprint
gf.fit() # do the Gaussian fit
# Make a linear grid to reconstruct the fitted Gaussian over.
x_0=np.min(x)
y_0=np.min(y)
# dx should be 1/10th the fibre diameter (in whatever units)
dx=sigx/10.0
xlin=x_0+np.arange(100)*dx # x axis
ylin=y_0+np.arange(100)*dx # y axis
# Reconstruct the model
model=np.zeros((len(xlin), len(ylin)))
# Reconstructing the Gaussian over the proper grid.
for ii in range(len(xlin)):
xval=xlin[ii]
for jj in range(len(ylin)):
yval=ylin[jj]
model[ii,jj]=gf.fitfunc(gf.p, xval, yval)
# gf.p holds (amplitude, x, y, sigma, bias) for the circular fit and seven parameters for the elliptical fit.
# print('gx,gy final', gf.p[1], gf.p[2]) #test
return gf.p, data_sum, xlin, ylin, model
def guider_focus(values):
"""
#
# "guider_focus"
#
# This function finds the best focus position for the telescope using the
# FWHM pix values from the guide camera as obtained via the Night Assistant
# using the View > Pick Object -> FWHM function in the GAIA Guide Camera
# software (Telescope Control Software on main Control Desk).
#
# Function Example:
#
# quicklook.guider_focus([[36.7,26],[36.9,19],[37.1,19],[37.3,23],[37.5,28]])
#
# Input Parameters:
#
# values.......Array with each cell containing the Telescope focus
# positions in mm and the Guide Camera FWHM in pixels.
#
# quicklook.guider_focus([[mm,pix],[mm,pix],[mm,pix],etc...])
#
"""
focus_positions=[]
FWHMs=[]
# Get focus values from function input
for value in values:
focus_position = value[0]
FWHM = value[1]
focus_positions.append(focus_position)
FWHMs.append(FWHM)
# Fit 2nd order polynomial to data
p=np.polyfit(focus_positions, FWHMs, 2)
focus_lin=np.arange(np.min(focus_positions)-0.1, np.max(focus_positions)+0.1, 0.01)
fit=np.polyval(p, focus_lin)
# Equate minimum
min_x = -p[1]/(p[0]*2)
min_y = p[0]*(min_x**2) + p[1]*min_x + p[2]
min_FWHM = min_y*0.0787 #0.0787"/pix is the image scale on the SAMI guide camera
# Plot
fig = py.figure()
py.scatter(focus_positions, FWHMs)
py.scatter(min_x, min_y,marker="o",color="r")
py.plot(focus_lin, fit, "r")
py.title("Telescope focus from Guider"+"\n"+"Best focus position: {0:.2f}".format(min_x)+"mm FWHM = {0:.2f}".format(min_FWHM)+'"')
py.xlabel("Telescope Focus Position (mm)")
py.ylabel("FWHM (Guider Pixels)")
print("---> START")
print("--->")
print("---> The best focus position is: {0:.2f}".format(min_x))
print("--->")
print("---> END")
|
SAMI-Galaxy-SurveyREPO_NAMEsamiPATH_START.@sami_extracted@sami-master@[email protected]@.PATH_END.py
|
{
"filename": "python-features-data__desc.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/concepts/python-features-data__desc.md",
"type": "Markdown"
}
|
# FeaturesData
```python
class FeaturesData(num_feature_data=None,
cat_feature_data=None,
num_feature_names=None,
cat_feature_names=None)
```
## {{ dl--purpose }} {#purpose}
Allows optimal storage of feature data for further passing to the [Pool](python-reference_pool.md) constructor. Creating pools from this representation is much faster than from a generic {{ python-type__np_ndarray }}, {{ python-type--pandasDataFrame }} or {{ python-type--pandasSeries }} when the dataset contains both numerical and categorical features, most of which are numerical. Pass a {{ python-type__np_ndarray }} with numpy.float32 dtype to get similar performance for datasets that contain only numerical features.
{% note warning %}
FeaturesData performs no checks on the input data. Use it only if you are confident that the data is correct and prefer to avoid spending additional time on checks. Otherwise, pass the input dataset and target variables directly to the [Pool](python-reference_pool.md) class.
{% endnote %}
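A minimal sketch of the intended use (the feature values here are made up; the [usage examples](#usage-examples) below show the complete workflow):
```python
import numpy as np
from catboost import FeaturesData, Pool

# Toy dataset: two objects, two numerical features and one categorical feature
features = FeaturesData(
    num_feature_data=np.array([[1.0, 4.0], [2.0, 5.0]], dtype=np.float32),
    cat_feature_data=np.array([["a"], ["b"]], dtype=object)
)
pool = Pool(data=features, label=[0, 1])
```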
## {{ dl--parameters }} {#parameters}
### num_feature_data
#### Description
Numerical features for all objects from the dataset in the form of {{ python-type__np_ndarray }} of shape `(object_count x num_feature_count)` with dtype <q>numpy.float32</q>.
**Possible types**
{{ python-type__np_ndarray }}
**Default value**
None (the dataset does not contain numerical features)
### cat_feature_data
#### Description
Categorical features for all objects from the dataset in the form of {{ python-type__np_ndarray }} of shape `(object_count x cat_feature_count)` with dtype <q>object</q>.
The elements must be of {{ python-type__bytes }} type and should contain UTF-8 encoded strings.
{% note warning %}
Categorical features must be passed as strings, for example:
```
data=FeaturesData(cat_feature_data=np.array([['a','c'], ['b', 'c']], dtype=object))
```
Using other data types (for example, int32) raises an error.
{% endnote %}
**Possible types**
{{ python-type__np_ndarray }}
**Default value**
None (the dataset does not contain categorical features)
### num_feature_names
#### Description
The names of numerical features in the form of a sequence of strings or bytes.
If the string is represented by the {{ python-type__bytes }} type, it must be UTF-8 encoded.
**Possible types**
- {{ python-type--list-of-strings }}
- {{ python-type__list-of-bytes }}
**Default value**
None (the `num_feature_names` data attribute is set to a list of empty strings)
### cat_feature_names
#### Description
The names of categorical features in the form of a sequence of strings or bytes.
If the string is represented by the {{ python-type__bytes }} type, it must be UTF-8 encoded.
**Possible types**
- {{ python-type--list-of-strings }}
- {{ python-type__list-of-bytes }}
**Default value**
None (the `cat_feature_names` data attribute is set to a list of empty strings)
## {{ input_data__title__peculiarities }} {#specifics}
- The order of features in the created Pool is the following (see the sketch after this list):
```
[num_features (if any present)][cat_features (if any present)]
```
- The feature data must be passed in the same order when applying the trained model.
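For instance, the ordering can be checked with a short sketch (the feature names below are hypothetical):
```python
import numpy as np
from catboost import FeaturesData

features = FeaturesData(
    num_feature_data=np.array([[1.0, 2.0]], dtype=np.float32),
    cat_feature_data=np.array([["a"]], dtype=object),
    num_feature_names=["num1", "num2"],
    cat_feature_names=["cat1"]
)
# Numerical feature names are reported first, then the categorical ones
print(features.get_feature_names())  # expected: ['num1', 'num2', 'cat1']
```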
## {{ dl--methods }} {#methods}
Method | Description
----- | -----
[get_cat_feature_count](python-features-data_get-cat-feature-count.md) | Return the number of categorical features contained in the dataset.
[get_feature_count](python-features-data_get-feature-count.md) | Return the total number of features (both numerical and categorical) contained in the dataset.
[get_feature_names](python-features-data_get-feature-names.md) | Return the names of features from the dataset.
[get_num_feature_count](python-features-data_get-num-feature-count.md) | Return the number of numerical features contained in the dataset.
[get_object_count](python-features-data_get-object-count.md) | Return the number of objects contained in the dataset.
## {{ dl__usage-examples }} {#usage-examples}
#### [CatBoostClassifier](../concepts/python-reference_catboostclassifier.md) with [FeaturesData](../concepts/python-features-data__desc.md)
```python
import numpy as np
from catboost import CatBoostClassifier, FeaturesData
# Initialize data
cat_features = [0,1,2]
train_data = FeaturesData(
num_feature_data=np.array([[1, 4, 5, 6], [4, 5, 6, 7], [30, 40, 50, 60]], dtype=np.float32),
cat_feature_data=np.array([["a", "b"], ["a", "b"], ["c", "d"]], dtype=object)
)
train_labels = [1,1,-1]
test_data = FeaturesData(
num_feature_data=np.array([[2, 4, 6, 8], [1, 4, 50, 60]], dtype=np.float32),
cat_feature_data=np.array([["a", "b"], ["a", "d"]], dtype=object)
)
# Initialize CatBoostClassifier
model = CatBoostClassifier(iterations=2, learning_rate=1, depth=2, loss_function='Logloss')
# Fit model
model.fit(train_data, train_labels)
# Get predicted classes
preds_class = model.predict(test_data)
# Get predicted probabilities for each class
preds_proba = model.predict_proba(test_data)
# Get predicted RawFormulaVal
preds_raw = model.predict(test_data, prediction_type='RawFormulaVal')
```
#### [CatBoostClassifier](../concepts/python-reference_catboostclassifier.md) with [Pool](../concepts/python-reference_pool.md) and [FeaturesData](../concepts/python-features-data__desc.md)
```python
import numpy as np
from catboost import CatBoostClassifier, FeaturesData, Pool
# Initialize data
train_data = Pool(
data=FeaturesData(
num_feature_data=np.array([[1, 4, 5, 6],
[4, 5, 6, 7],
[30, 40, 50, 60]],
dtype=np.float32),
cat_feature_data=np.array([["a", "b"],
["a", "b"],
["c", "d"]],
dtype=object)
),
label=[1, 1, -1]
)
test_data = Pool(
data=FeaturesData(
num_feature_data=np.array([[2, 4, 6, 8],
[1, 4, 50, 60]],
dtype=np.float32),
cat_feature_data=np.array([["a", "b"],
["a", "d"]],
dtype=object)
)
)
# Initialize CatBoostClassifier
model = CatBoostClassifier(iterations = 2,
learning_rate = 1,
depth = 2,
loss_function = 'Logloss')
# Fit model
model.fit(train_data)
# Get predicted classes
preds_class = model.predict(test_data)
# Get predicted probabilities for each class
preds_proba = model.predict_proba(test_data)
# Get predicted RawFormulaVal
preds_raw = model.predict(test_data, prediction_type='RawFormulaVal')
```
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@concepts@[email protected]_END.py
|
{
"filename": "main.py",
"repo_name": "cdslaborg/paramonte",
"repo_path": "paramonte_extracted/paramonte-main/benchmark/fortran/pm_sampleCov/setCov_dim1_vs_dim2/main.py",
"type": "Python"
}
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
dirname = os.path.basename(os.getcwd())
fontsize = 14
df = pd.read_csv("main.out", delimiter = ",")
colnames = list(df.columns.values)
####################################################################################################################################
#### Plot the runtimes.
####################################################################################################################################
ax = plt.figure(figsize = 1.25 * np.array([6.4,4.6]), dpi = 200)
ax = plt.subplot()
for colname in colnames[1:]:
plt.plot( df[colnames[0]].values
, df[colname].values
, linewidth = 2
)
plt.xticks(fontsize = fontsize)
plt.yticks(fontsize = fontsize)
ax.set_xlabel(colnames[0], fontsize = fontsize)
ax.set_ylabel("Runtime [ seconds ]", fontsize = fontsize)
ax.set_title(" vs. ".join(colnames[1:])+"\nLower is better.", fontsize = fontsize)
ax.set_xscale("log")
ax.set_yscale("log")
plt.minorticks_on()
plt.grid(visible = True, which = "both", axis = "both", color = "0.85", linestyle = "-")
ax.tick_params(axis = "y", which = "minor")
ax.tick_params(axis = "x", which = "minor")
ax.legend ( colnames[1:]
#, loc='center left'
#, bbox_to_anchor=(1, 0.5)
, fontsize = fontsize
)
plt.tight_layout()
plt.savefig("benchmark." + dirname + ".runtime.png")
####################################################################################################################################
#### Plot the runtime ratios.
####################################################################################################################################
ax = plt.figure(figsize = 1.25 * np.array([6.4,4.6]), dpi = 200)
ax = plt.subplot()
plt.plot( df[colnames[0]].values
, np.ones(len(df[colnames[0]].values))
, linestyle = "--"
#, color = "black"
, linewidth = 2
)
for colname in colnames[2:]:
plt.plot( df[colnames[0]].values
, df[colname].values / df[colnames[1]].values
, linewidth = 2
)
plt.xticks(fontsize = fontsize)
plt.yticks(fontsize = fontsize)
ax.set_xlabel(colnames[0], fontsize = fontsize)
ax.set_ylabel("Runtime compared to {}".format(colnames[1]), fontsize = fontsize)
ax.set_title("Runtime Ratio Comparison. Lower means faster.\nLower than 1 means faster than {}().".format(colnames[1]), fontsize = fontsize)
ax.set_xscale("log")
ax.set_yscale("log")
plt.minorticks_on()
plt.grid(visible = True, which = "both", axis = "both", color = "0.85", linestyle = "-")
ax.tick_params(axis = "y", which = "minor")
ax.tick_params(axis = "x", which = "minor")
ax.legend ( colnames[1:]
#, bbox_to_anchor = (1, 0.5)
#, loc = "center left"
, fontsize = fontsize
)
plt.tight_layout()
plt.savefig("benchmark." + dirname + ".runtime.ratio.png")
|
cdslaborgREPO_NAMEparamontePATH_START.@paramonte_extracted@paramonte-main@benchmark@fortran@pm_sampleCov@[email protected]@.PATH_END.py
|
{
"filename": "test_hidden_layer.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/tests/contrib/bnn/test_hidden_layer.py",
"type": "Python"
}
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
import torch.nn.functional as F
from torch.distributions import Normal
from pyro.contrib.bnn import HiddenLayer
from tests.common import assert_equal
@pytest.mark.parametrize("non_linearity", [F.relu])
@pytest.mark.parametrize("include_hidden_bias", [False, True])
def test_hidden_layer_rsample(
non_linearity, include_hidden_bias, B=2, D=3, H=4, N=900000
):
X = torch.randn(B, D)
A_mean = torch.rand(D, H)
A_scale = 0.3 * torch.exp(0.3 * torch.rand(D, H))
# test naive weight space sampling against sampling in pre-activation space
dist1 = HiddenLayer(
X=X,
A_mean=A_mean,
A_scale=A_scale,
non_linearity=non_linearity,
include_hidden_bias=include_hidden_bias,
weight_space_sampling=True,
)
dist2 = HiddenLayer(
X=X,
A_mean=A_mean,
A_scale=A_scale,
non_linearity=non_linearity,
include_hidden_bias=include_hidden_bias,
weight_space_sampling=False,
)
out1 = dist1.rsample(sample_shape=(N,))
out1_mean, out1_var = out1.mean(0), out1.var(0)
out2 = dist2.rsample(sample_shape=(N,))
out2_mean, out2_var = out2.mean(0), out2.var(0)
assert_equal(out1_mean, out2_mean, prec=0.003)
assert_equal(out1_var, out2_var, prec=0.003)
return
@pytest.mark.parametrize("non_linearity", [F.relu])
@pytest.mark.parametrize("include_hidden_bias", [True, False])
def test_hidden_layer_log_prob(non_linearity, include_hidden_bias, B=2, D=3, H=2):
X = torch.randn(B, D)
A_mean = torch.rand(D, H)
A_scale = 0.3 * torch.exp(0.3 * torch.rand(D, H))
dist = HiddenLayer(
X=X,
A_mean=A_mean,
A_scale=A_scale,
non_linearity=non_linearity,
include_hidden_bias=include_hidden_bias,
)
A_dist = Normal(A_mean, A_scale)
A_prior = Normal(torch.zeros(D, H), torch.ones(D, H))
kl = torch.distributions.kl.kl_divergence(A_dist, A_prior).sum()
assert_equal(kl, dist.KL, prec=0.01)
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@tests@contrib@bnn@[email protected]_END.py
|
{
"filename": "aft_survival_demo.py",
"repo_name": "dmlc/xgboost",
"repo_path": "xgboost_extracted/xgboost-master/demo/aft_survival/aft_survival_demo.py",
"type": "Python"
}
|
"""
Demo for survival analysis (regression).
========================================
Demo for survival analysis (regression) using the Accelerated Failure Time (AFT) model.
"""
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import ShuffleSplit
import xgboost as xgb
# The Veterans' Administration Lung Cancer Trial
# The Statistical Analysis of Failure Time Data by Kalbfleisch J. and Prentice R (1980)
CURRENT_DIR = os.path.dirname(__file__)
df = pd.read_csv(os.path.join(CURRENT_DIR, '../data/veterans_lung_cancer.csv'))
print('Training data:')
print(df)
# Split features and labels
y_lower_bound = df['Survival_label_lower_bound']
y_upper_bound = df['Survival_label_upper_bound']
X = df.drop(['Survival_label_lower_bound', 'Survival_label_upper_bound'], axis=1)
# Split data into training and validation sets
rs = ShuffleSplit(n_splits=2, test_size=.7, random_state=0)
train_index, valid_index = next(rs.split(X))
dtrain = xgb.DMatrix(X.values[train_index, :])
dtrain.set_float_info('label_lower_bound', y_lower_bound[train_index])
dtrain.set_float_info('label_upper_bound', y_upper_bound[train_index])
dvalid = xgb.DMatrix(X.values[valid_index, :])
dvalid.set_float_info('label_lower_bound', y_lower_bound[valid_index])
dvalid.set_float_info('label_upper_bound', y_upper_bound[valid_index])
# Train gradient boosted trees using AFT loss and metric
params = {'verbosity': 0,
'objective': 'survival:aft',
'eval_metric': 'aft-nloglik',
'tree_method': 'hist',
'learning_rate': 0.05,
'aft_loss_distribution': 'normal',
'aft_loss_distribution_scale': 1.20,
'max_depth': 6,
'lambda': 0.01,
'alpha': 0.02}
bst = xgb.train(params, dtrain, num_boost_round=10000,
evals=[(dtrain, 'train'), (dvalid, 'valid')],
early_stopping_rounds=50)
# Run prediction on the validation set
df = pd.DataFrame({'Label (lower bound)': y_lower_bound[valid_index],
'Label (upper bound)': y_upper_bound[valid_index],
'Predicted label': bst.predict(dvalid)})
print(df)
# Show only data points with right-censored labels
print(df[np.isinf(df['Label (upper bound)'])])
# Save trained model
bst.save_model('aft_model.json')
|
dmlcREPO_NAMExgboostPATH_START.@xgboost_extracted@xgboost-master@demo@aft_survival@[email protected]_END.py
|
{
"filename": "physical_constants.py",
"repo_name": "xraypy/xraylarch",
"repo_path": "xraylarch_extracted/xraylarch-master/larch/utils/physical_constants.py",
"type": "Python"
}
|
# Useful physical constants
# most of these are put into common X-ray units (Angstroms, ev)
import scipy.constants as consts
from numpy import pi
I = 0.0 + 1.0j
RAD2DEG = 180.0/pi
DEG2RAD = pi/180.0
PI = pi
TAU = 2*pi
# cross-section unit
BARN = 1.e-24 # cm^2
# atoms/mol = 6.0221413e23 atoms/mol
AVOGADRO = consts.Avogadro
# ATOMIC MASS in grams
AMU = consts.atomic_mass * 1000.0
# electron rest mass in eV
E_MASS = consts.electron_mass * consts.c**2 / consts.e
# Planck's Constant
# h*c ~= 12398.42 eV*Ang
# hbar*c ~= 1973.27 eV*Ang
PLANCK_HC = 1.e10 * consts.Planck * consts.c / consts.e
PLANCK_HBARC = PLANCK_HC / TAU
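# For example (a quick sanity check): wavelength [Angstroms] = PLANCK_HC / energy [eV],
# so a 10 keV X-ray photon has a wavelength of roughly 12398.42/10000 ~= 1.2398 Ang.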
# Rydberg constant in eV (~13.6 eV)
RYDBERG = consts.Rydberg * consts.Planck * consts.c/ consts.e
# classical electron radius in cm and Ang
R_ELECTRON_CM = 100.0 * consts.physical_constants['classical electron radius'][0]
R_ELECTRON_ANG = 1.e8 * R_ELECTRON_CM
# a few standard lattice constants
STD_LATTICE_CONSTANTS = {'Si': 5.4310205, 'C': 3.567095, 'Ge': 5.64613}
# will be able to import these from xraydb when v 4.5.1 is required
ATOM_SYMS = ['H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg',
'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr',
'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br',
'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd',
'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La',
'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er',
'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au',
'Hg', 'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac', 'Th',
'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm', 'Md',
'No', 'Lr', 'Rf', 'Db', 'Sg', 'Bh', 'Hs', 'Mt', 'Ds', 'Rg', 'Cn',
'Nh', 'Fl', 'Mc', 'Lv', 'Ts', 'Og']
ATOM_NAMES = ['hydrogen', 'helium', 'lithium', 'beryllium', 'boron', 'carbon',
'nitrogen', 'oxygen', 'fluorine', 'neon', 'sodium', 'magnesium',
'aluminum', 'silicon', 'phosphorus', 'sulfur', 'chlorine', 'argon',
'potassium', 'calcium', 'scandium', 'titanium', 'vanadium',
'chromium', 'manganese', 'iron', 'cobalt', 'nickel', 'copper',
'zinc', 'gallium', 'germanium', 'arsenic', 'selenium', 'bromine',
'krypton', 'rubidium', 'strontium', 'yttrium', 'zirconium',
'niobium', 'molybdenum', 'technetium', 'ruthenium', 'rhodium',
'palladium', 'silver', 'cadmium', 'indium', 'tin', 'antimony',
'tellurium', 'iodine', 'xenon', 'cesium', 'barium', 'lanthanum',
'cerium', 'praseodymium', 'neodymium', 'promethium', 'samarium',
'europium', 'gadolinium', 'terbium', 'dysprosium', 'holmium',
'erbium', 'thulium', 'ytterbium', 'lutetium', 'hafnium',
'tantalum', 'tungsten', 'rhenium', 'osmium', 'iridium', 'platinum',
'gold', 'mercury', 'thallium', 'lead', 'bismuth', 'polonium',
'astatine', 'radon', 'francium', 'radium', 'actinium', 'thorium',
'protactinium', 'uranium', 'neptunium', 'plutonium', 'americium',
'curium', 'berkelium', 'californium', 'einsteinium', 'fermium',
'mendelevium', 'nobelium', 'lawrencium', 'rutherfordium',
'dubnium', 'seaborgium', 'bohrium', 'hassium', 'meitnerium',
'darmstadtium', 'roentgenium', 'copernicium', 'nihonium',
'flerovium', 'moscovium', 'livermorium', 'tennessine', 'oganesson']
|
xraypyREPO_NAMExraylarchPATH_START.@xraylarch_extracted@xraylarch-master@larch@utils@[email protected]_END.py
|
{
"filename": "activations_test.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/activations/activations_test.py",
"type": "Python"
}
|
import numpy as np
from keras.src import activations
from keras.src import backend
from keras.src import testing
def _ref_softmax(values):
m = np.max(values)
e = np.exp(values - m)
return e / np.sum(e)
def _ref_softplus(x):
return np.log(np.ones_like(x) + np.exp(x))
def _ref_log_softmax(values):
max_val = np.max(values) # for numerical stability
stabilized_values = values - max_val
log_sum_exp = np.log(np.sum(np.exp(stabilized_values)))
return stabilized_values - log_sum_exp
def _ref_leaky_relu(x, alpha=0.2):
return x if x > 0 else alpha * x
def _ref_relu6(x):
return min(max(0, x), 6)
def _ref_silu(x):
return x / (1 + np.exp(-x))
def _ref_hard_sigmoid(x):
x = (x / 6.0) + 0.5
z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
return z
def _ref_log_sigmoid(x):
return -1 * _ref_softplus(-x)
def _ref_hard_silu(x):
return x * np.minimum(np.maximum(0.0, x + 3.0), 6.0) * (1.0 / 6.0)
def _ref_sigmoid(x):
if x >= 0:
return 1 / (1 + np.exp(-x))
else:
z = np.exp(x)
return z / (1 + z)
def _ref_softsign(x):
return np.divide(x, np.ones_like(x) + np.absolute(x))
class ActivationsTest(testing.TestCase):
def test_softmax(self):
x = np.random.random((2, 5))
result = activations.softmax(x[np.newaxis, :])[0]
expected = _ref_softmax(x[0])
self.assertAllClose(result[0], expected, rtol=1e-05)
def test_softmax_2d_axis_0(self):
x = np.random.random((2, 5))
result = activations.softmax(x[np.newaxis, :], axis=1)[0]
expected = np.zeros((2, 5))
for i in range(5):
expected[:, i] = _ref_softmax(x[:, i])
self.assertAllClose(result, expected, rtol=1e-05)
def test_softmax_3d_axis_tuple(self):
x = np.random.random((2, 3, 5))
result = activations.softmax(x, axis=(1, 2))
expected = np.zeros((2, 3, 5))
for i in range(2):
expected[i, :, :] = _ref_softmax(x[i, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_softmax_1d(self):
x = np.random.random(5)
result = activations.softmax(x)
expected = _ref_softmax(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_softmax_higher_dim(self):
x = np.random.random((2, 3, 4, 5))
result = activations.softmax(x, axis=(2, 3))
expected = np.zeros((2, 3, 4, 5))
for i in range(2):
for j in range(3):
expected[i, j, :, :] = _ref_softmax(x[i, j, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_softmax_higher_dim_multiple_axes(self):
x = np.random.random((2, 3, 4, 5, 6))
result = activations.softmax(x, axis=(2, 3, 4))
expected = np.zeros((2, 3, 4, 5, 6))
for i in range(2):
for j in range(3):
expected[i, j, :, :, :] = _ref_softmax(x[i, j, :, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_softmax_negative_axis(self):
x = np.random.random((2, 5))
result = activations.softmax(x, axis=-1)
expected = np.zeros((2, 5))
for i in range(2):
expected[i, :] = _ref_softmax(x[i, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_temporal_softmax(self):
x = np.random.random((2, 2, 3)) * 10
result = activations.softmax(x[np.newaxis, :])[0]
expected = _ref_softmax(x[0, 0])
self.assertAllClose(result[0, 0], expected, rtol=1e-05)
def test_log_softmax_2d_axis_0(self):
x = np.random.random((2, 5))
result = activations.log_softmax(x[np.newaxis, :], axis=1)[0]
expected = np.zeros((2, 5))
for i in range(5):
expected[:, i] = _ref_log_softmax(x[:, i])
self.assertAllClose(result, expected, rtol=1e-05)
def test_log_softmax_3d_axis_tuple(self):
x = np.random.random((2, 3, 5))
result = activations.log_softmax(x, axis=(1, 2))
expected = np.zeros((2, 3, 5))
for i in range(2):
expected[i, :, :] = _ref_log_softmax(x[i, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_log_softmax_1d(self):
x = np.random.random(5)
result = activations.log_softmax(x)
expected = _ref_log_softmax(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_log_softmax_higher_dim(self):
x = np.random.random((2, 3, 4, 5))
result = activations.log_softmax(x, axis=(2, 3))
expected = np.zeros((2, 3, 4, 5))
for i in range(2):
for j in range(3):
expected[i, j, :, :] = _ref_log_softmax(x[i, j, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_log_softmax_higher_dim_multiple_axes(self):
x = np.random.random((2, 3, 4, 5, 6))
result = activations.log_softmax(x, axis=(2, 3, 4))
expected = np.zeros((2, 3, 4, 5, 6))
for i in range(2):
for j in range(3):
expected[i, j, :, :, :] = _ref_log_softmax(x[i, j, :, :, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_log_softmax_negative_axis(self):
x = np.random.random((2, 5))
result = activations.log_softmax(x, axis=-1)
expected = np.zeros((2, 5))
for i in range(2):
expected[i, :] = _ref_log_softmax(x[i, :])
self.assertAllClose(result, expected, rtol=1e-05)
def test_temporal_log_softmax(self):
x = np.random.random((2, 2, 3)) * 10
result = activations.log_softmax(x[np.newaxis, :])[0]
expected = _ref_log_softmax(x[0, 0])
self.assertAllClose(result[0, 0], expected, rtol=1e-05)
def test_selu(self):
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
positive_values = np.array([[1, 2]], dtype=backend.floatx())
result = activations.selu(positive_values[np.newaxis, :])[0]
self.assertAllClose(result, positive_values * scale, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=backend.floatx())
result = activations.selu(negative_values[np.newaxis, :])[0]
true_result = (np.exp(negative_values) - 1) * scale * alpha
self.assertAllClose(result, true_result)
def test_softplus(self):
# Basic test for random values between 0 and 1
x = np.random.uniform(0, 1, (2, 5))
result = activations.softplus(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_softplus)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.softplus(x_1d)
expected_1d = np.vectorize(_ref_softplus)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.softplus(x_3d)
expected_3d = np.vectorize(_ref_softplus)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test near zero values
x_zero = np.random.uniform(-1e-7, 1e-7, (2, 5))
result_zero = activations.softplus(x_zero)
expected_zero = np.vectorize(_ref_softplus)(x_zero)
self.assertAllClose(result_zero, expected_zero, rtol=1e-05)
# Test large positive values
x_large_positive = np.random.uniform(10, 100, (2, 5))
result_large_positive = activations.softplus(x_large_positive)
expected_large_positive = np.vectorize(_ref_softplus)(x_large_positive)
self.assertAllClose(
result_large_positive, expected_large_positive, rtol=1e-05
)
# Test large negative values
x_large_negative = np.random.uniform(-100, -10, (2, 5))
result_large_negative = activations.softplus(x_large_negative)
expected_large_negative = np.vectorize(_ref_softplus)(x_large_negative)
self.assertAllClose(
result_large_negative, expected_large_negative, rtol=1e-05
)
def test_softsign(self):
# Basic test for random values between 0 and 1
x = np.random.uniform(0, 1, (2, 5))
result = activations.softsign(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_softsign)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.softsign(x_1d)
expected_1d = np.vectorize(_ref_softsign)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.softsign(x_3d)
expected_3d = np.vectorize(_ref_softsign)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test near zero values
x_zero = np.random.uniform(-1e-7, 1e-7, (2, 5))
result_zero = activations.softsign(x_zero)
expected_zero = np.vectorize(_ref_softsign)(x_zero)
self.assertAllClose(result_zero, expected_zero, rtol=1e-05)
# Test large positive values
x_large_positive = np.random.uniform(10, 100, (2, 5))
result_large_positive = activations.softsign(x_large_positive)
expected_large_positive = np.vectorize(_ref_softsign)(x_large_positive)
self.assertAllClose(
result_large_positive, expected_large_positive, rtol=1e-05
)
# Test large negative values
x_large_negative = np.random.uniform(-100, -10, (2, 5))
result_large_negative = activations.softsign(x_large_negative)
expected_large_negative = np.vectorize(_ref_softsign)(x_large_negative)
self.assertAllClose(
result_large_negative, expected_large_negative, rtol=1e-05
)
def test_sigmoid(self):
# Basic test for random values between 0 and 1
x = np.random.uniform(0, 1, (2, 5))
result = activations.sigmoid(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_sigmoid)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.sigmoid(x_1d)
expected_1d = np.vectorize(_ref_sigmoid)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.sigmoid(x_3d)
expected_3d = np.vectorize(_ref_sigmoid)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test near zero values
x_zero = np.random.uniform(-1e-7, 1e-7, (2, 5))
result_zero = activations.sigmoid(x_zero)
expected_zero = np.vectorize(_ref_sigmoid)(x_zero)
self.assertAllClose(result_zero, expected_zero, rtol=1e-05)
# Test large positive values
x_large_positive = np.random.uniform(10, 100, (2, 5))
result_large_positive = activations.sigmoid(x_large_positive)
expected_large_positive = np.vectorize(_ref_sigmoid)(x_large_positive)
self.assertAllClose(
result_large_positive, expected_large_positive, rtol=1e-05
)
# Test large negative values
x_large_negative = np.random.uniform(-100, -10, (2, 5))
result_large_negative = activations.sigmoid(x_large_negative)
expected_large_negative = np.vectorize(_ref_sigmoid)(x_large_negative)
self.assertAllClose(
result_large_negative, expected_large_negative, rtol=1e-05
)
def test_hard_sigmoid(self):
# Basic test for random values between 0 and 1
x = np.random.uniform(0, 1, (2, 5))
result = activations.hard_sigmoid(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_hard_sigmoid)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.hard_sigmoid(x_1d)
expected_1d = np.vectorize(_ref_hard_sigmoid)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.hard_sigmoid(x_3d)
expected_3d = np.vectorize(_ref_hard_sigmoid)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test with strictly positive values much larger than 1
x_positive_above_1 = np.random.uniform(
5, 10, (2, 5)
) # Adjusted this range
result_positive_above_1 = activations.hard_sigmoid(x_positive_above_1)
expected_positive_above_1 = np.ones((2, 5))
self.assertAllClose(
result_positive_above_1, expected_positive_above_1, rtol=1e-05
)
def test_log_sigmoid(self):
# Basic test for random values between 0 and 1
x = np.random.uniform(0, 1, (2, 5))
result = activations.log_sigmoid(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_log_sigmoid)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.log_sigmoid(x_1d)
expected_1d = np.vectorize(_ref_log_sigmoid)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.log_sigmoid(x_3d)
expected_3d = np.vectorize(_ref_log_sigmoid)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test large positive values
x_large_positive = np.random.uniform(10, 100, (2, 5))
result_large_positive = activations.log_sigmoid(x_large_positive)
expected_large_positive = np.vectorize(_ref_log_sigmoid)(
x_large_positive
)
self.assertAllClose(
result_large_positive, expected_large_positive, rtol=1e-05
)
# Test large negative values
x_large_negative = np.random.uniform(-100, -10, (2, 5))
result_large_negative = activations.log_sigmoid(x_large_negative)
expected_large_negative = np.vectorize(_ref_log_sigmoid)(
x_large_negative
)
self.assertAllClose(
result_large_negative, expected_large_negative, rtol=1e-05
)
def test_hard_silu(self):
# Basic test for random values between -3 and 3
x = np.random.uniform(-3, 3, (2, 5)).astype("float32")
result = activations.hard_silu(x[np.newaxis, :])[0]
expected = np.vectorize(_ref_hard_silu)(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5).astype("float32")
result_1d = activations.hard_silu(x_1d)
expected_1d = np.vectorize(_ref_hard_silu)(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3)).astype("float32")
result_3d = activations.hard_silu(x_3d)
expected_3d = np.vectorize(_ref_hard_silu)(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test with strictly positive values much larger than 3
x_positive_above_3 = np.random.uniform(5, 10, (2, 5)).astype("float32")
result_positive_above_3 = activations.hard_silu(x_positive_above_3)
expected_positive_above_3 = x_positive_above_3
self.assertAllClose(
result_positive_above_3, expected_positive_above_3, rtol=1e-05
)
# Test with strictly negative values much smaller than -3
x_negatives = np.random.uniform(-10, -5, (2, 5)).astype("float32")
result = activations.hard_silu(x_negatives)
expected_zeros = np.zeros_like(x_negatives)
self.assertAllClose(result, expected_zeros, rtol=1e-05)
def test_relu_negative_slope(self):
# Define the input tensor
x = np.array([-10, -5, 0.0, 5, 10])
# Test with only negative_slope
result_negative_slope = activations.relu(x, negative_slope=0.5)
expected_negative_slope = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
self.assertAllClose(
result_negative_slope, expected_negative_slope, rtol=1e-05
)
def test_relu_max_value(self):
# Define the input tensor
x = np.array([-10, -5, 0.0, 5, 10])
# Test with only max_value
result_max_value = activations.relu(x, max_value=5.0)
expected_max_value = np.array([0.0, 0.0, 0.0, 5.0, 5.0])
self.assertAllClose(result_max_value, expected_max_value, rtol=1e-05)
def test_relu_threshold(self):
# Define the input tensor
x = np.array([-10, -5, 0.0, 5, 10])
# Test with only threshold
result_threshold = activations.relu(x, threshold=5.0)
expected_threshold = np.array([-0.0, -0.0, 0.0, 0.0, 10.0])
self.assertAllClose(result_threshold, expected_threshold, rtol=1e-05)
def test_relu_combined_threshold_and_max_value(self):
# Define the input tensor
x = np.array([-10, -5, 0.0, 5, 10])
# Test with threshold and max_value
result_combined = activations.relu(x, threshold=5.0, max_value=5.0)
expected_combined = np.array([0.0, 0.0, 0.0, 0.0, 5.0])
self.assertAllClose(result_combined, expected_combined, rtol=1e-05)
def test_relu_combined_all_parameters(self):
# Define the input tensor
x = np.array([-10, -5, 0.0, 5, 10])
# Test with negative_slope, max_value, and threshold
result_combined = activations.relu(
x, negative_slope=0.5, max_value=5.0, threshold=5.0
)
expected_combined = np.array([-7.5, -5.0, -2.5, 0.0, 5.0])
self.assertAllClose(result_combined, expected_combined, rtol=1e-05)
def test_relu_to_trigger_relu6(self):
x = np.array([-10, -5, 0.0, 5, 10, 12])
result_relu6 = activations.relu(x, max_value=6.0)
expected_relu6 = np.array([0.0, 0.0, 0.0, 5.0, 6.0, 6.0])
self.assertAllClose(result_relu6, expected_relu6, rtol=1e-05)
def test_relu_to_trigger_leaky(self):
x = np.array([-10, -5, 0.0, 5, 10])
result_leaky = activations.relu(x, negative_slope=0.5)
expected_leaky = np.array([-5.0, -2.5, 0.0, 5.0, 10.0])
self.assertAllClose(result_leaky, expected_leaky, rtol=1e-05)
def test_relu(self):
# Basic test for positive values
positive_values = np.random.uniform(0.1, 10, (2, 5))
result = activations.relu(positive_values[np.newaxis, :])[0]
self.assertAllClose(result, positive_values, rtol=1e-05)
# Basic test for negative values
negative_values = np.random.uniform(-10, -0.1, (2, 5))
result = activations.relu(negative_values[np.newaxis, :])[0]
expected = np.zeros((2, 5))
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.relu(x_1d)
expected_1d = np.maximum(0, x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.relu(x_3d)
expected_3d = np.maximum(0, x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test near zero values
x_zero = np.random.uniform(-1e-7, 1e-7, (2, 5))
result_zero = activations.relu(x_zero)
expected_zero = np.maximum(0, x_zero)
self.assertAllClose(result_zero, expected_zero, rtol=1e-05)
# Test large positive values
x_large_positive = np.random.uniform(1e4, 1e5, (2, 5))
result_large_positive = activations.relu(x_large_positive)
self.assertAllClose(result_large_positive, x_large_positive, rtol=1e-05)
# Test large negative values
x_large_negative = np.random.uniform(-1e5, -1e4, (2, 5))
result_large_negative = activations.relu(x_large_negative)
expected_large_negative = np.zeros((2, 5))
self.assertAllClose(
result_large_negative, expected_large_negative, rtol=1e-05
)
def test_leaky_relu(self):
leaky_relu_vectorized = np.vectorize(_ref_leaky_relu)
# Test for negative_slope = 0.01
# Test positive values
positive_values = np.random.random((2, 5))
result = activations.leaky_relu(
positive_values[np.newaxis, :], negative_slope=0.01
)[0]
expected = leaky_relu_vectorized(positive_values, alpha=0.01)
self.assertAllClose(result, expected, rtol=1e-05)
# Test negative values
negative_values = np.random.uniform(-1, 0, (2, 5))
result = activations.leaky_relu(
negative_values[np.newaxis, :], negative_slope=0.01
)[0]
expected = leaky_relu_vectorized(negative_values, alpha=0.01)
self.assertAllClose(result, expected, rtol=1e-05)
# Test for negative_slope = 0.3
# Test positive values
positive_values = np.random.random((2, 5))
result = activations.leaky_relu(
positive_values[np.newaxis, :], negative_slope=0.3
)[0]
expected = leaky_relu_vectorized(positive_values, alpha=0.3)
self.assertAllClose(result, expected, rtol=1e-05)
# Test negative values
negative_values = np.random.uniform(-1, 0, (2, 5))
result = activations.leaky_relu(
negative_values[np.newaxis, :], negative_slope=0.3
)[0]
expected = leaky_relu_vectorized(negative_values, alpha=0.3)
self.assertAllClose(result, expected, rtol=1e-05)
def test_relu6(self):
relu6_vectorized = np.vectorize(_ref_relu6)
# Test positive values less than 6
positive_values = np.random.uniform(0, 5.9, (2, 5))
result = activations.relu6(positive_values[np.newaxis, :])[0]
expected = relu6_vectorized(positive_values)
self.assertAllClose(result, expected, rtol=1e-05)
# Test positive values greater than 6
positive_values_above_6 = np.random.uniform(6.1, 10, (2, 5))
result = activations.relu6(positive_values_above_6[np.newaxis, :])[0]
expected = relu6_vectorized(positive_values_above_6)
self.assertAllClose(result, expected, rtol=1e-05)
# Test negative values
negative_values = np.random.uniform(-1, 0, (2, 5))
result = activations.relu6(negative_values[np.newaxis, :])[0]
expected = relu6_vectorized(negative_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_silu(self):
silu_vectorized = np.vectorize(_ref_silu)
# Test positive values
positive_values = np.random.uniform(0, 5.9, (2, 5))
result = activations.silu(positive_values[np.newaxis, :])[0]
expected = silu_vectorized(positive_values)
self.assertAllClose(result, expected, rtol=1e-05)
# Test values around zero (to ensure sigmoid behaves correctly)
around_zero_values = np.random.uniform(-1, 1, (2, 5))
result = activations.silu(around_zero_values[np.newaxis, :])[0]
expected = silu_vectorized(around_zero_values)
self.assertAllClose(result, expected, rtol=1e-05)
# Test negative values
negative_values = np.random.uniform(-5.9, 0, (2, 5))
result = activations.silu(negative_values[np.newaxis, :])[0]
expected = silu_vectorized(negative_values)
self.assertAllClose(result, expected, rtol=1e-05)
def test_gelu(self):
def gelu(x, approximate=False):
if approximate:
return (
0.5
* x
* (
1.0
+ np.tanh(
np.sqrt(2.0 / np.pi)
* (x + 0.044715 * np.power(x, 3))
)
)
)
else:
from scipy.stats import norm
return x * norm.cdf(x)
x = np.random.random((2, 5))
result = activations.gelu(x[np.newaxis, :])[0]
expected = gelu(x)
self.assertAllClose(result, expected, rtol=1e-05)
x = np.random.random((2, 5))
result = activations.gelu(x[np.newaxis, :], approximate=True)[0]
expected = gelu(x, True)
self.assertAllClose(result, expected, rtol=1e-05)
def test_celu(self):
def celu(x, alpha=1.0):
return np.maximum(x, 0.0) + alpha * np.expm1(
np.minimum(x, 0.0) / alpha
)
x = np.random.random((2, 5))
result = activations.celu(x[np.newaxis, :])[0]
expected = celu(x)
self.assertAllClose(result, expected, rtol=1e-05)
x = np.random.random((2, 5))
result = activations.celu(x[np.newaxis, :], alpha=0.5)[0]
expected = celu(x, alpha=0.5)
self.assertAllClose(result, expected, rtol=1e-05)
def test_glu(self):
def glu(x, axis=-1):
x1, x2 = np.split(x, 2, axis)
return x1 * (1 / (1 + np.exp(-x2)))
x = np.random.random((2, 4))
result = activations.glu(x[np.newaxis, :])[0]
expected = glu(x)
self.assertAllClose(result, expected, rtol=1e-05)
x = np.random.random((2, 4))
result = activations.glu(x[np.newaxis, :], axis=-2)[0]
expected = glu(x, axis=-2)
self.assertAllClose(result, expected, rtol=1e-05)
def test_tanh_shrink(self):
def tanh_shrink(x):
return x - np.tanh(x)
x = np.random.random((2, 5))
result = activations.tanh_shrink(x[np.newaxis, :])[0]
expected = tanh_shrink(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_hard_tanh(self):
def hard_tanh(x):
return np.clip(x, -1.0, 1.0)
x = np.random.random((2, 5))
result = activations.hard_tanh(x[np.newaxis, :])[0]
expected = hard_tanh(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_hard_shrink(self):
def hard_shrink(x):
return np.where(np.abs(x) > 0.5, x, 0.0)
x = np.random.random((2, 5))
result = activations.hard_shrink(x[np.newaxis, :])[0]
expected = hard_shrink(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_threshold(self):
def threshold(x, threshold_value, value):
return np.where(
x > threshold_value, x, np.array(value, dtype=x.dtype)
)
x = np.random.random((2, 5))
result = activations.threshold(x[np.newaxis, :], 0, 0)[0]
expected = threshold(x, 0, 0)
self.assertAllClose(result, expected, rtol=1e-05)
def test_squareplus(self):
def squareplus(x, b=4):
y = x + np.sqrt(x**2 + b)
return y / 2
x = np.random.random((2, 5))
result = activations.squareplus(x[np.newaxis, :])[0]
expected = squareplus(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_soft_shrink(self):
def soft_shrink(x, threshold=0.5):
return np.where(
x > threshold,
x - threshold,
np.where(x < -threshold, x + threshold, 0.0),
)
x = np.random.random((2, 5))
result = activations.soft_shrink(x[np.newaxis, :])[0]
expected = soft_shrink(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_sparse_plus(self):
def sparse_plus(x):
return np.where(
x <= -1,
np.zeros_like(x),
np.where(x < 1, (1 / 4) * (x + 1) ** 2, x),
)
x = np.random.random((2, 5))
result = activations.sparse_plus(x[np.newaxis, :])[0]
expected = sparse_plus(x)
self.assertAllClose(result, expected, rtol=1e-05)
def test_elu(self):
x = np.random.random((2, 5))
result = activations.elu(x[np.newaxis, :])[0]
self.assertAllClose(result, x, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=backend.floatx())
result = activations.elu(negative_values[np.newaxis, :])[0]
true_result = np.exp(negative_values) - 1
self.assertAllClose(result, true_result)
def test_tanh(self):
# Basic test for the tanh activation function
x = np.random.random((2, 5))
result = activations.tanh(x[np.newaxis, :])[0]
expected = np.tanh(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Basic test for the tanh activation function
x = np.random.uniform(-10, 10, (2, 5))
result = activations.tanh(x[np.newaxis, :])[0]
expected = np.tanh(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.tanh(x_1d)
expected_1d = np.tanh(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.tanh(x_3d)
expected_3d = np.tanh(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test with strictly positive values
x_positive = np.random.uniform(0, 10, (2, 5))
result_positive = activations.tanh(x_positive)
expected_positive = np.tanh(x_positive)
self.assertAllClose(result_positive, expected_positive, rtol=1e-05)
# Test with strictly negative values
x_negative = np.random.uniform(-10, 0, (2, 5))
result_negative = activations.tanh(x_negative)
expected_negative = np.tanh(x_negative)
self.assertAllClose(result_negative, expected_negative, rtol=1e-05)
# Test near zero values
x_zero = np.random.uniform(-1e-7, 1e-7, (2, 5))
result_zero = activations.tanh(x_zero)
expected_zero = np.tanh(x_zero)
self.assertAllClose(result_zero, expected_zero, rtol=1e-05)
# Test large values to check stability
x_large = np.random.uniform(1e4, 1e5, (2, 5))
result_large = activations.tanh(x_large)
expected_large = np.tanh(x_large)
self.assertAllClose(result_large, expected_large, rtol=1e-05)
def test_exponential(self):
# Basic test for the exponential activation function
x = np.random.random((2, 5))
result = activations.exponential(x[np.newaxis, :])[0]
expected = np.exp(x)
self.assertAllClose(result, expected, rtol=1e-05)
x = np.random.uniform(-10, 10, (2, 5))
result = activations.exponential(x[np.newaxis, :])[0]
expected = np.exp(x)
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.exponential(x_1d)
expected_1d = np.exp(x_1d)
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.exponential(x_3d)
expected_3d = np.exp(x_3d)
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test with strictly positive values
x_positive = np.random.uniform(0, 10, (2, 5))
result_positive = activations.exponential(x_positive)
expected_positive = np.exp(x_positive)
self.assertAllClose(result_positive, expected_positive, rtol=1e-05)
# Test with strictly negative values
x_negative = np.random.uniform(-10, 0, (2, 5))
result_negative = activations.exponential(x_negative)
expected_negative = np.exp(x_negative)
self.assertAllClose(result_negative, expected_negative, rtol=1e-05)
# Test near zero values
x_zero = np.random.uniform(-1e-7, 1e-7, (2, 5))
result_zero = activations.exponential(x_zero)
expected_zero = np.exp(x_zero)
self.assertAllClose(result_zero, expected_zero, rtol=1e-05)
# Test large values to check stability
x_large = np.random.uniform(1e4, 1e5, (2, 5))
result_large = activations.exponential(x_large)
expected_large = np.exp(x_large)
self.assertAllClose(result_large, expected_large, rtol=1e-05)
def test_mish(self):
# Basic test for the mish activation function
x = np.random.random((2, 5))
result = activations.mish(x[np.newaxis, :])[0]
expected = x * np.tanh(_ref_softplus(x))
self.assertAllClose(result, expected, rtol=1e-05)
x = np.random.uniform(-10, 10, (2, 5))
result = activations.mish(x[np.newaxis, :])[0]
expected = x * np.tanh(_ref_softplus(x))
self.assertAllClose(result, expected, rtol=1e-05)
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
result_1d = activations.mish(x_1d)
expected_1d = x_1d * np.tanh(_ref_softplus(x_1d))
self.assertAllClose(result_1d, expected_1d, rtol=1e-05)
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (3, 3, 3))
result_3d = activations.mish(x_3d)
expected_3d = x_3d * np.tanh(_ref_softplus(x_3d))
self.assertAllClose(result_3d, expected_3d, rtol=1e-05)
# Test with strictly positive values
x_positive = np.random.uniform(0, 10, (2, 5))
result_positive = activations.mish(x_positive)
expected_positive = x_positive * np.tanh(_ref_softplus(x_positive))
self.assertAllClose(result_positive, expected_positive, rtol=1e-05)
# Test with strictly negative values
x_negative = np.random.uniform(-10, 0, (2, 5))
result_negative = activations.mish(x_negative)
expected_negative = x_negative * np.tanh(_ref_softplus(x_negative))
self.assertAllClose(result_negative, expected_negative, rtol=1e-05)
# Test near zero values
x_zero = np.random.uniform(-1e-7, 1e-7, (2, 5))
result_zero = activations.mish(x_zero)
expected_zero = x_zero * np.tanh(_ref_softplus(x_zero))
self.assertAllClose(result_zero, expected_zero, rtol=1e-05)
# Test large values to check stability
x_large = np.random.uniform(1e4, 1e5, (2, 5))
result_large = activations.mish(x_large)
expected_large = x_large * np.tanh(_ref_softplus(x_large))
self.assertAllClose(result_large, expected_large, rtol=1e-05)
def test_linear(self):
x = np.random.random((10, 5))
self.assertAllClose(x, activations.linear(x))
# Test with 1D array
x_1d = np.random.uniform(-10, 10, 5)
self.assertAllClose(x_1d, activations.linear(x_1d))
# Test with 2D array
x = np.random.uniform(-10, 10, (10, 5))
self.assertAllClose(x, activations.linear(x))
# Test with 3D array
x_3d = np.random.uniform(-10, 10, (5, 5, 5))
self.assertAllClose(x_3d, activations.linear(x_3d))
# Test with float32 data type
x_float32 = np.random.uniform(-10, 10, (10, 5)).astype(np.float32)
self.assertAllClose(x_float32, activations.linear(x_float32))
# Test with int32 data type
x_int32 = np.random.randint(-10, 10, (10, 5)).astype(np.int32)
self.assertAllClose(x_int32, activations.linear(x_int32))
def test_sparsemax(self):
# result check with 1d
x_1d = np.linspace(1, 12, num=12)
expected_result = np.zeros_like(x_1d)
expected_result[-1] = 1.0
self.assertAllClose(expected_result, activations.sparsemax(x_1d))
# result check with 2d
x_2d = np.linspace(1, 12, num=12).reshape(-1, 2)
expected_result = np.zeros_like(x_2d)
expected_result[:, -1] = 1.0
self.assertAllClose(expected_result, activations.sparsemax(x_2d))
# result check with 3d
x_3d = np.linspace(1, 12, num=12).reshape(-1, 1, 3)
expected_result = np.zeros_like(x_3d)
expected_result[:, :, -1] = 1.0
self.assertAllClose(expected_result, activations.sparsemax(x_3d))
# result check with axis=-2 with 2d input
x_2d = np.linspace(1, 12, num=12).reshape(-1, 2)
expected_result = np.zeros_like(x_2d)
expected_result[-1, :] = 1.0
self.assertAllClose(
expected_result, activations.sparsemax(x_2d, axis=-2)
)
# result check with axis=-2 with 3d input
x_3d = np.linspace(1, 12, num=12).reshape(-1, 1, 3)
expected_result = np.ones_like(x_3d)
self.assertAllClose(
expected_result, activations.sparsemax(x_3d, axis=-2)
)
# result check with axis=-3 with 3d input
x_3d = np.linspace(1, 12, num=12).reshape(-1, 1, 3)
expected_result = np.zeros_like(x_3d)
expected_result[-1, :, :] = 1.0
self.assertAllClose(
expected_result, activations.sparsemax(x_3d, axis=-3)
)
# result check with axis=-3 with 4d input
x_4d = np.linspace(1, 12, num=12).reshape(-1, 1, 1, 2)
expected_result = np.ones_like(x_4d)
self.assertAllClose(
expected_result, activations.sparsemax(x_4d, axis=-3)
)
def test_get_method(self):
obj = activations.get("relu")
self.assertEqual(obj, activations.relu)
obj = activations.get(None)
self.assertEqual(obj, activations.linear)
with self.assertRaises(ValueError):
activations.get("typo")
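# Editor's sketch (assumption; not part of the original Keras test file): a minimal
# 1-D NumPy reference for sparsemax, added to clarify why `test_sparsemax` above
# expects one-hot outputs for strictly increasing inputs. The helper name is
# hypothetical and only `numpy as np`, already used throughout these tests, is assumed.
def _ref_sparsemax_1d(z):
    # Sort logits in descending order and locate the support of the simplex projection.
    z_sorted = np.sort(z)[::-1]
    cumsum = np.cumsum(z_sorted)
    k = np.arange(1, z.size + 1)
    support = 1 + k * z_sorted > cumsum
    k_z = k[support][-1]
    # tau shifts the supported logits so they sum to one; everything else clips to zero.
    tau = (cumsum[support][-1] - 1.0) / k_z
    return np.maximum(z - tau, 0.0)
# For np.linspace(1, 12, num=12) the top logit beats the runner-up by 1.0, so the
# projection collapses to a one-hot vector at the last index, matching the
# expectations in `test_sparsemax`.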
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@activations@[email protected]_END.py
|
{
"filename": "PsBsB.py",
"repo_name": "nickhand/pyRSD",
"repo_path": "pyRSD_extracted/pyRSD-master/pyRSD/rsd/power/gal/Pss/PsBsB.py",
"type": "Python"
}
|
from .. import TwoHaloTerm, OneHaloTerm, DampedGalaxyPowerTerm
class PsBsB_2h(TwoHaloTerm):
"""
The 2-halo term for `PsBsB`
"""
name = 'PsBsB_2h'
def __init__(self, model):
super(PsBsB_2h, self).__init__(model, 'b1_sB')
class PsBsB_1h(OneHaloTerm):
"""
The 1-halo term for `PsBsB`
"""
name = 'NsBsB'
class PsBsB(DampedGalaxyPowerTerm):
"""
The auto power spectrum of satellites with other
satellites in the same halo ('satB')
"""
name = "PsBsB"
def __init__(self, model):
super(PsBsB, self).__init__(model, PsBsB_2h, PsBsB_1h, sigma1='sigma_sB')
@property
def coefficient(self):
return self.model.fsB**2
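# Editor's note: `coefficient` weights this term by the squared fractional abundance
# of the satB sample (fsB), consistent with PsBsB being the satB-satB auto spectrum.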
|
nickhandREPO_NAMEpyRSDPATH_START.@pyRSD_extracted@pyRSD-master@pyRSD@rsd@power@gal@[email protected]@.PATH_END.py
|
{
"filename": "bimod_censat_params.py",
"repo_name": "ArgonneCPAC/diffmah",
"repo_path": "diffmah_extracted/diffmah-main/diffmah/diffmahpop_kernels/bimod_censat_params.py",
"type": "Python"
}
|
"""
"""
from collections import OrderedDict, namedtuple
from jax import jit as jjit
from . import (
covariance_kernels,
early_index_bimod,
frac_early_cens,
late_index_bimod,
logtc_bimod,
)
from .bimod_logm0_kernels import logm0_pop_bimod
from .bimod_logm0_sats import logm0_pop_bimod_sats
from .t_peak_kernels import tp_pdf_cens_flex, tp_pdf_sats
DEFAULT_DIFFMAHPOP_PDICT = OrderedDict()
COMPONENT_PDICTS = (
tp_pdf_cens_flex.DEFAULT_TPCENS_PDICT,
tp_pdf_sats.DEFAULT_TP_SATS_PDICT,
logm0_pop_bimod.DEFAULT_LOGM0_PDICT,
logm0_pop_bimod_sats.DEFAULT_LOGM0_PDICT,
logtc_bimod.LOGTC_PDICT,
early_index_bimod.EARLY_INDEX_PDICT,
late_index_bimod.LATE_INDEX_PDICT,
frac_early_cens.DEFAULT_FEC_PDICT,
covariance_kernels.DEFAULT_COV_PDICT,
)
for pdict in COMPONENT_PDICTS:
DEFAULT_DIFFMAHPOP_PDICT.update(pdict)
DiffmahPop_Params = namedtuple("DiffmahPop_Params", DEFAULT_DIFFMAHPOP_PDICT.keys())
DEFAULT_DIFFMAHPOP_PARAMS = DiffmahPop_Params(**DEFAULT_DIFFMAHPOP_PDICT)
COMPONENT_U_PDICTS = (
tp_pdf_cens_flex.DEFAULT_TPCENS_U_PARAMS._asdict(),
tp_pdf_sats.DEFAULT_TP_SATS_U_PARAMS._asdict(),
logm0_pop_bimod.DEFAULT_LOGM0POP_U_PARAMS._asdict(),
logm0_pop_bimod_sats.DEFAULT_LOGM0POP_U_PARAMS._asdict(),
logtc_bimod.DEFAULT_LOGTC_U_PARAMS._asdict(),
early_index_bimod.DEFAULT_EARLY_INDEX_U_PARAMS._asdict(),
late_index_bimod.DEFAULT_LATE_INDEX_U_PARAMS._asdict(),
frac_early_cens.DEFAULT_FEC_U_PARAMS._asdict(),
covariance_kernels.DEFAULT_COV_U_PARAMS._asdict(),
)
DEFAULT_DIFFMAHPOP_U_PDICT = OrderedDict()
for updict in COMPONENT_U_PDICTS:
DEFAULT_DIFFMAHPOP_U_PDICT.update(updict)
DiffmahPop_UParams = namedtuple("DiffmahPop_UParams", DEFAULT_DIFFMAHPOP_U_PDICT.keys())
DEFAULT_DIFFMAHPOP_U_PARAMS = DiffmahPop_UParams(**DEFAULT_DIFFMAHPOP_U_PDICT)
@jjit
def get_component_model_params(diffmahpop_params):
tp_pdf_cens_flex_params = tp_pdf_cens_flex.TPCens_Params(
*[
getattr(diffmahpop_params, key)
for key in tp_pdf_cens_flex.TPCens_Params._fields
]
)
tp_pdf_sats_params = tp_pdf_sats.TP_Sats_Params(
*[getattr(diffmahpop_params, key) for key in tp_pdf_sats.TP_Sats_Params._fields]
)
logm0_params = logm0_pop_bimod.LGM0Pop_Params(
*[
getattr(diffmahpop_params, key)
for key in logm0_pop_bimod.LGM0Pop_Params._fields
]
)
logm0_params_sats = logm0_pop_bimod_sats.LGM0Pop_Params(
*[
getattr(diffmahpop_params, key)
for key in logm0_pop_bimod_sats.LGM0Pop_Params._fields
]
)
logtc_params = logtc_bimod.Logtc_Params(
*[getattr(diffmahpop_params, key) for key in logtc_bimod.Logtc_Params._fields]
)
early_index_params = early_index_bimod.EarlyIndex_Params(
*[
getattr(diffmahpop_params, key)
for key in early_index_bimod.EarlyIndex_Params._fields
]
)
late_index_params = late_index_bimod.LateIndex_Params(
*[
getattr(diffmahpop_params, key)
for key in late_index_bimod.LateIndex_Params._fields
]
)
fec_params = frac_early_cens.FEC_Params(
*[getattr(diffmahpop_params, key) for key in frac_early_cens.FEC_Params._fields]
)
cov_params = covariance_kernels.CovParams(
*[
getattr(diffmahpop_params, key)
for key in covariance_kernels.CovParams._fields
]
)
return (
tp_pdf_cens_flex_params,
tp_pdf_sats_params,
logm0_params,
logm0_params_sats,
logtc_params,
early_index_params,
late_index_params,
fec_params,
cov_params,
)
@jjit
def get_component_model_u_params(diffmahpop_u_params):
tp_pdf_cens_flex_u_params = tp_pdf_cens_flex.TPCens_UParams(
*[
getattr(diffmahpop_u_params, key)
for key in tp_pdf_cens_flex.TPCens_UParams._fields
]
)
tp_pdf_sats_u_params = tp_pdf_sats.TP_Sats_UParams(
*[
getattr(diffmahpop_u_params, key)
for key in tp_pdf_sats.TP_Sats_UParams._fields
]
)
logm0_u_params = logm0_pop_bimod.LGM0Pop_UParams(
*[
getattr(diffmahpop_u_params, key)
for key in logm0_pop_bimod.LGM0Pop_UParams._fields
]
)
logm0_sats_u_params = logm0_pop_bimod_sats.LGM0Pop_UParams(
*[
getattr(diffmahpop_u_params, key)
for key in logm0_pop_bimod_sats.LGM0Pop_UParams._fields
]
)
logtc_u_params = logtc_bimod.Logtc_UParams(
*[
getattr(diffmahpop_u_params, key)
for key in logtc_bimod.Logtc_UParams._fields
]
)
early_index_u_params = early_index_bimod.EarlyIndex_UParams(
*[
getattr(diffmahpop_u_params, key)
for key in early_index_bimod.EarlyIndex_UParams._fields
]
)
late_index_u_params = late_index_bimod.LateIndex_UParams(
*[
getattr(diffmahpop_u_params, key)
for key in late_index_bimod.LateIndex_UParams._fields
]
)
fec_u_params = frac_early_cens.FEC_UParams(
*[
getattr(diffmahpop_u_params, key)
for key in frac_early_cens.FEC_UParams._fields
]
)
cov_u_params = covariance_kernels.CovUParams(
*[
getattr(diffmahpop_u_params, key)
for key in covariance_kernels.CovUParams._fields
]
)
return (
tp_pdf_cens_flex_u_params,
tp_pdf_sats_u_params,
logm0_u_params,
logm0_sats_u_params,
logtc_u_params,
early_index_u_params,
late_index_u_params,
fec_u_params,
cov_u_params,
)
@jjit
def get_diffmahpop_params_from_u_params(diffmahpop_u_params):
component_model_u_params = get_component_model_u_params(diffmahpop_u_params)
tpc_u_params, tps_u_params, logm0_u_params, logm0_sats_u_params = (
component_model_u_params[:4]
)
logtc_u_params = component_model_u_params[4]
early_index_u_params, late_index_u_params = component_model_u_params[5:7]
fec_u_params, cov_u_params = component_model_u_params[7:]
tpc_params = tp_pdf_cens_flex.get_bounded_tp_cens_params(tpc_u_params)
tps_params = tp_pdf_sats.get_bounded_tp_sat_params(tps_u_params)
logm0_params = logm0_pop_bimod.get_bounded_m0pop_params(logm0_u_params)
logm0_sats_params = logm0_pop_bimod_sats.get_bounded_m0pop_params(
logm0_sats_u_params
)
logtc_params = logtc_bimod.get_bounded_logtc_params(logtc_u_params)
early_index_params = early_index_bimod.get_bounded_early_index_params(
early_index_u_params
)
late_index_params = late_index_bimod.get_bounded_late_index_params(
late_index_u_params
)
fec_params = frac_early_cens.get_bounded_fec_params(fec_u_params)
cov_params = covariance_kernels.get_bounded_cov_params(cov_u_params)
component_model_params = (
tpc_params,
tps_params,
logm0_params,
logm0_sats_params,
logtc_params,
early_index_params,
late_index_params,
fec_params,
cov_params,
)
diffmahpop_params = DEFAULT_DIFFMAHPOP_PARAMS._make(DEFAULT_DIFFMAHPOP_PARAMS)
for params in component_model_params:
diffmahpop_params = diffmahpop_params._replace(**params._asdict())
return diffmahpop_params
@jjit
def get_diffmahpop_u_params_from_params(diffmahpop_params):
component_model_params = get_component_model_params(diffmahpop_params)
tpc_params, tps_params, logm0_params, logm0_sats_params = component_model_params[:4]
logtc_params = component_model_params[4]
early_index_params, late_index_params = component_model_params[5:7]
fec_params, cov_params = component_model_params[7:]
tpc_u_params = tp_pdf_cens_flex.get_unbounded_tp_cens_params(tpc_params)
tps_u_params = tp_pdf_sats.get_unbounded_tp_sat_params(tps_params)
logm0_u_params = logm0_pop_bimod.get_unbounded_m0pop_params(logm0_params)
logm0_sats_u_params = logm0_pop_bimod_sats.get_unbounded_m0pop_params(
logm0_sats_params
)
logtc_u_params = logtc_bimod.get_unbounded_logtc_params(logtc_params)
early_index_u_params = early_index_bimod.get_unbounded_early_index_params(
early_index_params
)
late_index_u_params = late_index_bimod.get_unbounded_late_index_params(
late_index_params
)
fec_u_params = frac_early_cens.get_unbounded_fec_params(fec_params)
cov_u_params = covariance_kernels.get_unbounded_cov_params(cov_params)
component_model_u_params = (
tpc_u_params,
tps_u_params,
logm0_u_params,
logm0_sats_u_params,
logtc_u_params,
early_index_u_params,
late_index_u_params,
fec_u_params,
cov_u_params,
)
diffmahpop_u_params = DEFAULT_DIFFMAHPOP_U_PARAMS._make(DEFAULT_DIFFMAHPOP_U_PARAMS)
for u_params in component_model_u_params:
diffmahpop_u_params = diffmahpop_u_params._replace(**u_params._asdict())
return diffmahpop_u_params
@jjit
def _get_all_diffmahpop_params_from_varied(
varied_params, default_params=DEFAULT_DIFFMAHPOP_PARAMS
):
diffmahpop_params = default_params._replace(**varied_params._asdict())
return diffmahpop_params
def get_varied_params_by_exclusion(all_params, excluded_pnames):
gen = zip(all_params._fields, all_params)
varied_pdict = OrderedDict(
[(name, float(x)) for (name, x) in gen if name not in excluded_pnames]
)
VariedParams = namedtuple("VariedParams", varied_pdict.keys())
varied_params = VariedParams(**varied_pdict)
return varied_params
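# Editor's sketch (illustrative only; not part of diffmah): how the two helpers above
# are meant to compose: hold a subset of fields fixed, build the smaller VariedParams
# tuple, then re-expand to a full DiffmahPop_Params with defaults for the rest. The
# particular choice of which fields to hold fixed here is hypothetical.
if __name__ == "__main__":
    held_fixed = DEFAULT_DIFFMAHPOP_PARAMS._fields[1:]
    varied = get_varied_params_by_exclusion(DEFAULT_DIFFMAHPOP_PARAMS, held_fixed)
    full_params = _get_all_diffmahpop_params_from_varied(varied, DEFAULT_DIFFMAHPOP_PARAMS)
    print(type(varied).__name__, "->", type(full_params).__name__)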
|
ArgonneCPACREPO_NAMEdiffmahPATH_START.@diffmah_extracted@diffmah-main@diffmah@diffmahpop_kernels@[email protected]_END.py
|
{
"filename": "test_na_scalar.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/scalar/test_na_scalar.py",
"type": "Python"
}
|
from datetime import (
date,
time,
timedelta,
)
import pickle
import numpy as np
import pytest
from pandas._libs.missing import NA
from pandas.core.dtypes.common import is_scalar
import pandas as pd
import pandas._testing as tm
def test_singleton():
assert NA is NA
new_NA = type(NA)()
assert new_NA is NA
def test_repr():
assert repr(NA) == "<NA>"
assert str(NA) == "<NA>"
def test_format():
# GH-34740
assert format(NA) == "<NA>"
assert format(NA, ">10") == " <NA>"
assert format(NA, "xxx") == "<NA>" # NA is flexible, accept any format spec
assert f"{NA}" == "<NA>"
assert f"{NA:>10}" == " <NA>"
assert f"{NA:xxx}" == "<NA>"
def test_truthiness():
msg = "boolean value of NA is ambiguous"
with pytest.raises(TypeError, match=msg):
bool(NA)
with pytest.raises(TypeError, match=msg):
not NA
def test_hashable():
assert hash(NA) == hash(NA)
d = {NA: "test"}
assert d[NA] == "test"
@pytest.mark.parametrize(
"other", [NA, 1, 1.0, "a", b"a", np.int64(1), np.nan], ids=repr
)
def test_arithmetic_ops(all_arithmetic_functions, other):
op = all_arithmetic_functions
if op.__name__ in ("pow", "rpow", "rmod") and isinstance(other, (str, bytes)):
pytest.skip(reason=f"{op.__name__} with NA and {other} not defined.")
if op.__name__ in ("divmod", "rdivmod"):
        result = op(NA, other)
        assert result[0] is NA and result[1] is NA
else:
if op.__name__ == "rpow":
# avoid special case
other += 1
assert op(NA, other) is NA
@pytest.mark.parametrize(
"other",
[
NA,
1,
1.0,
"a",
b"a",
np.int64(1),
np.nan,
np.bool_(True),
time(0),
date(1, 2, 3),
timedelta(1),
pd.NaT,
],
)
def test_comparison_ops(comparison_op, other):
assert comparison_op(NA, other) is NA
assert comparison_op(other, NA) is NA
@pytest.mark.parametrize(
"value",
[
0,
0.0,
-0,
-0.0,
False,
np.bool_(False),
np.int_(0),
np.float64(0),
np.int_(-0),
np.float64(-0),
],
)
@pytest.mark.parametrize("asarray", [True, False])
def test_pow_special(value, asarray):
if asarray:
value = np.array([value])
result = NA**value
if asarray:
result = result[0]
else:
# this assertion isn't possible for ndarray.
assert isinstance(result, type(value))
assert result == 1
@pytest.mark.parametrize(
"value", [1, 1.0, True, np.bool_(True), np.int_(1), np.float64(1)]
)
@pytest.mark.parametrize("asarray", [True, False])
def test_rpow_special(value, asarray):
if asarray:
value = np.array([value])
result = value**NA
if asarray:
result = result[0]
elif not isinstance(value, (np.float64, np.bool_, np.int_)):
# this assertion isn't possible with asarray=True
assert isinstance(result, type(value))
assert result == value
@pytest.mark.parametrize("value", [-1, -1.0, np.int_(-1), np.float64(-1)])
@pytest.mark.parametrize("asarray", [True, False])
def test_rpow_minus_one(value, asarray):
if asarray:
value = np.array([value])
result = value**NA
if asarray:
result = result[0]
assert pd.isna(result)
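# Editor's note: unlike the NA ** 0 and 1 ** NA special cases above, (-1) ** NA cannot
# be resolved without knowing the missing exponent's parity, so the result stays NA.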
def test_unary_ops():
assert +NA is NA
assert -NA is NA
assert abs(NA) is NA
assert ~NA is NA
def test_logical_and():
assert NA & True is NA
assert True & NA is NA
assert NA & False is False
assert False & NA is False
assert NA & NA is NA
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
NA & 5
def test_logical_or():
assert NA | True is True
assert True | NA is True
assert NA | False is NA
assert False | NA is NA
assert NA | NA is NA
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
NA | 5
def test_logical_xor():
assert NA ^ True is NA
assert True ^ NA is NA
assert NA ^ False is NA
assert False ^ NA is NA
assert NA ^ NA is NA
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
NA ^ 5
def test_logical_not():
assert ~NA is NA
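# Editor's note: the logical tests above follow Kleene three-valued logic; NA only
# propagates when the known operand cannot decide the result on its own
# (False & NA is False and True | NA is True, but any XOR with NA remains NA).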
@pytest.mark.parametrize("shape", [(3,), (3, 3), (1, 2, 3)])
def test_arithmetic_ndarray(shape, all_arithmetic_functions):
op = all_arithmetic_functions
a = np.zeros(shape)
if op.__name__ == "pow":
a += 5
result = op(NA, a)
expected = np.full(a.shape, NA, dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_is_scalar():
assert is_scalar(NA) is True
def test_isna():
assert pd.isna(NA) is True
assert pd.notna(NA) is False
def test_series_isna():
s = pd.Series([1, NA], dtype=object)
expected = pd.Series([False, True])
tm.assert_series_equal(s.isna(), expected)
def test_ufunc():
assert np.log(NA) is NA
assert np.add(NA, 1) is NA
result = np.divmod(NA, 1)
assert result[0] is NA and result[1] is NA
result = np.frexp(NA)
assert result[0] is NA and result[1] is NA
def test_ufunc_raises():
msg = "ufunc method 'at'"
with pytest.raises(ValueError, match=msg):
np.log.at(NA, 0)
def test_binary_input_not_dunder():
a = np.array([1, 2, 3])
expected = np.array([NA, NA, NA], dtype=object)
result = np.logaddexp(a, NA)
tm.assert_numpy_array_equal(result, expected)
result = np.logaddexp(NA, a)
tm.assert_numpy_array_equal(result, expected)
# all NA, multiple inputs
assert np.logaddexp(NA, NA) is NA
result = np.modf(NA, NA)
assert len(result) == 2
assert all(x is NA for x in result)
def test_divmod_ufunc():
# binary in, binary out.
a = np.array([1, 2, 3])
expected = np.array([NA, NA, NA], dtype=object)
result = np.divmod(a, NA)
assert isinstance(result, tuple)
for arr in result:
tm.assert_numpy_array_equal(arr, expected)
tm.assert_numpy_array_equal(arr, expected)
result = np.divmod(NA, a)
for arr in result:
tm.assert_numpy_array_equal(arr, expected)
tm.assert_numpy_array_equal(arr, expected)
def test_integer_hash_collision_dict():
# GH 30013
result = {NA: "foo", hash(NA): "bar"}
assert result[NA] == "foo"
assert result[hash(NA)] == "bar"
def test_integer_hash_collision_set():
# GH 30013
result = {NA, hash(NA)}
assert len(result) == 2
assert NA in result
assert hash(NA) in result
def test_pickle_roundtrip():
# https://github.com/pandas-dev/pandas/issues/31847
result = pickle.loads(pickle.dumps(NA))
assert result is NA
def test_pickle_roundtrip_pandas():
result = tm.round_trip_pickle(NA)
assert result is NA
@pytest.mark.parametrize(
"values, dtype", [([1, 2, NA], "Int64"), (["A", "B", NA], "string")]
)
@pytest.mark.parametrize("as_frame", [True, False])
def test_pickle_roundtrip_containers(as_frame, values, dtype):
s = pd.Series(pd.array(values, dtype=dtype))
if as_frame:
s = s.to_frame(name="A")
result = tm.round_trip_pickle(s)
tm.assert_equal(result, s)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@scalar@[email protected]_END.py
|
{
"filename": "_itertools.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/importlib/resources/_itertools.py",
"type": "Python"
}
|
# from more_itertools 9.0
def only(iterable, default=None, too_long=None):
"""If *iterable* has only one item, return it.
If it has zero items, return *default*.
If it has more than one item, raise the exception given by *too_long*,
which is ``ValueError`` by default.
>>> only([], default='missing')
'missing'
>>> only([1])
1
>>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 1, 2,
and perhaps more.'
>>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError
Note that :func:`only` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check
iterable contents less destructively.
"""
it = iter(iterable)
first_value = next(it, default)
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@importlib@resources@[email protected]_END.py
|