Dataset Viewer

| metadata (dict) | text (string, lengths 0–40.6M) | id (string, lengths 14–255) |
|---|---|---|
{
"filename": "test_waste_free_smc.py",
"repo_name": "blackjax-devs/blackjax",
"repo_path": "blackjax_extracted/blackjax-main/tests/smc/test_waste_free_smc.py",
"type": "Python"
}
|
"""Test the tempered SMC steps and routine"""
import functools
import chex
import jax
import jax.numpy as jnp
import numpy as np
import pytest
from absl.testing import absltest
import blackjax
import blackjax.smc.resampling as resampling
from blackjax import adaptive_tempered_smc, tempered_smc
from blackjax.smc import extend_params
from blackjax.smc.waste_free import update_waste_free, waste_free_smc
from tests.smc import SMCLinearRegressionTestCase
from tests.smc.test_tempered_smc import inference_loop
class WasteFreeSMCTest(SMCLinearRegressionTestCase):
"""Test posterior mean estimate."""
def setUp(self):
super().setUp()
self.key = jax.random.key(42)
@chex.variants(with_jit=True)
def test_fixed_schedule_tempered_smc(self):
(
init_particles,
logprior_fn,
loglikelihood_fn,
) = self.particles_prior_loglikelihood()
num_tempering_steps = 10
lambda_schedule = np.logspace(-5, 0, num_tempering_steps)
hmc_init = blackjax.hmc.init
hmc_kernel = blackjax.hmc.build_kernel()
hmc_parameters = extend_params(
{
"step_size": 10e-2,
"inverse_mass_matrix": jnp.eye(2),
"num_integration_steps": 50,
},
)
tempering = tempered_smc(
logprior_fn,
loglikelihood_fn,
hmc_kernel,
hmc_init,
hmc_parameters,
resampling.systematic,
None,
waste_free_smc(100, 4),
)
init_state = tempering.init(init_particles)
smc_kernel = self.variant(tempering.step)
def body_fn(carry, lmbda):
i, state = carry
subkey = jax.random.fold_in(self.key, i)
new_state, info = smc_kernel(subkey, state, lmbda)
return (i + 1, new_state), (new_state, info)
(_, result), _ = jax.lax.scan(body_fn, (0, init_state), lambda_schedule)
self.assert_linear_regression_test_case(result)
@chex.variants(with_jit=True)
def test_adaptive_tempered_smc(self):
(
init_particles,
logprior_fn,
loglikelihood_fn,
) = self.particles_prior_loglikelihood()
hmc_init = blackjax.hmc.init
hmc_kernel = blackjax.hmc.build_kernel()
hmc_parameters = extend_params(
{
"step_size": 10e-2,
"inverse_mass_matrix": jnp.eye(2),
"num_integration_steps": 50,
},
)
tempering = adaptive_tempered_smc(
logprior_fn,
loglikelihood_fn,
hmc_kernel,
hmc_init,
hmc_parameters,
resampling.systematic,
0.5,
update_strategy=waste_free_smc(100, 4),
num_mcmc_steps=None,
)
init_state = tempering.init(init_particles)
n_iter, result, log_likelihood = self.variant(
functools.partial(inference_loop, tempering.step)
)(self.key, init_state)
self.assert_linear_regression_test_case(result)
class Update_waste_free_multivariate_particles(chex.TestCase):
@chex.variants(with_jit=True)
def test_update_waste_free_multivariate_particles(self):
"""
Given resampled multivariate particles,
when updating with waste free, they are joined
by the result of iterating the MCMC chain to
get a bigger set of particles.
"""
resampled_particles = np.ones((50, 3))
n_particles = 100
def normal_logdensity(x):
return jnp.log(
jax.scipy.stats.multivariate_normal.pdf(
x, mean=np.zeros(3), cov=np.diag(np.ones(3))
)
)
def rmh_proposal_distribution(rng_key, position):
return position + jax.random.normal(rng_key, (3,)) * 25.0
kernel = functools.partial(
blackjax.rmh.build_kernel(), transition_generator=rmh_proposal_distribution
)
init = blackjax.rmh.init
update, _ = waste_free_smc(n_particles, 2)(
init, normal_logdensity, kernel, n_particles
)
updated_particles, infos = self.variant(update)(
jax.random.split(jax.random.PRNGKey(10), 50), resampled_particles, {}
)
assert updated_particles.shape == (n_particles, 3)
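# Added note (hedged interpretation of the waste-free scheme): with
# waste_free_smc(n_particles=100, p=2) and the 50 resampled particles above, each
# resampled particle is extended by p - 1 = 1 MCMC step and the intermediate state is
# kept, so 50 * 2 = 100 particles come out -- matching the shape assertion above.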
def test_waste_free_set_num_mcmc_steps():
with pytest.raises(ValueError) as exc_info:
update_waste_free(
lambda x: x, lambda x: 1, lambda x: 1, 100, 10, 3, num_mcmc_steps=50
)
assert str(exc_info.value).startswith(
"Can't use waste free SMC with a num_mcmc_steps parameter"
)
def test_waste_free_p_non_divider():
with pytest.raises(ValueError) as exc_info:
waste_free_smc(100, 3)
assert str(exc_info.value).startswith("p must be a divider")
if __name__ == "__main__":
absltest.main()
|
blackjax-devsREPO_NAMEblackjaxPATH_START.@blackjax_extracted@blackjax-main@tests@smc@[email protected]_END.py
|
{
"filename": "_y0.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/box/_y0.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class Y0Validator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="y0", parent_name="box", **kwargs):
super(Y0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@box@[email protected]_END.py
|
{
"filename": "crd_trans.py",
"repo_name": "dullemond/radmc3d-2.0",
"repo_path": "radmc3d-2.0_extracted/radmc3d-2.0-master/python/radmc3dPy/radmc3dPy/crd_trans.py",
"type": "Python"
}
|
"""
This module contains functions for coordinate transformations (e.g. rotation).
For help on the syntax or functionality of each function see the help of the individual functions
"""
from __future__ import absolute_import
from __future__ import print_function
import traceback
try:
import numpy as np
except ImportError:
np = None
print(traceback.format_exc())
def ctransSph2Cart(crd=None, reverse=False):
"""Transform coordinates between spherical to cartesian systems
Parameters
----------
crd : ndarray
Three element array containing the input
coordinates [x,y,z] or [r,theta,phi] by default
the coordinates assumed to be in the cartesian system
reverse : bool
If True calculates the inverse transformation
(cartesian -> spherical). In this case crd should be [r,theta,phi]
Returns
-------
Returns a three element array containig the output coordinates [r,theta,phi] or [x,y,z]
"""
if crd is None:
raise ValueError('Unknown crd. Cannot do coordinate transformation without knowing the coordinates.')
if reverse is False:
r = crd[0]
theta = crd[1] + 1e-50
phi = crd[2]
x = np.sin(theta) * np.cos(phi) * r
y = np.sin(theta) * np.sin(phi) * r
z = np.cos(theta) * r
crdout = [x, y, z]
else:
x = crd[0]
y = crd[1]
z = crd[2]
r = np.sqrt(x**2 + y**2 + z**2)
phi = np.arccos(x / (np.sqrt(x**2 + y**2) + 1e-90))
theta = np.arccos(z / r)
if y < 0.0:
phi = 2.0 * np.pi - phi
crdout = [r, theta, phi]
return crdout
def vtransSph2Cart(crd=None, v=None, reverse=False):
"""Transform velocities between spherical to cartesian systems
Parameters
----------
crd : ndarray
Three element array containing the input
coordinates [x,y,z] or [r,theta,phi] by default
the coordinates assumed to be in the cartesian system
v : ndarray
Three element array containing the input
velocities in the same coordinate system as crd
reverse : bool
If True it calculates the inverse trasnformation (cartesian -> spherical)
Returns
-------
Returns a three element array containg the output velocities [vr,vphi,vtheta] or [vx,vy,vz]
"""
# NOTE!!!!! The velocities in the spherical system are not angular velocities!!!!
# v[1] = dtheta/dt * r
# v[2] = dphi/dt * r * sin(theta)
if crd is None:
raise ValueError('Unknown crd. Cannot do coordinate transformation without knowing the coordinates.')
if v is None:
raise ValueError('Unknown v. Cannot transform vectors without knowing the vectors themselves.')
if reverse is False:
# r = crd[0]
theta = crd[1]
phi = crd[2]
vr = v[0]
vtheta = v[1]
vphi = v[2]
vx = vr * np.sin(theta) * np.cos(phi) - vphi * np.sin(phi) + vtheta * np.cos(theta) * np.cos(phi)
vy = vr * np.sin(theta) * np.sin(phi) + vphi * np.cos(phi) + vtheta * np.cos(theta) * np.sin(phi)
vz = vr * np.cos(theta) - vtheta * np.sin(theta)
vout = [vx, vy, vz]
else:
# crd_sph = ctrans_sph2cart(crd, reverse=True)
# r = crd_sph[0]
# theta = crd_sph[1]
# phi = crd_sph[2]
# a = [[np.sin(theta)*np.cos(phi), -np.sin(phi), np.cos(theta)*np.cos(phi)],\
# [np.sin(theta)*np.sin(phi), np.cos(phi), np.cos(theta)*np.sin(phi)],\
# [np.cos(theta), 0., -np.sin(theta)]]
# a = [[np.sin(theta)*np.cos(phi), np.cos(theta)*np.cos(phi), -np.sin(phi)],\
# [np.sin(theta)*np.sin(phi), np.cos(theta)*np.sin(phi), np.cos(phi)],\
# [np.cos(theta), -np.sin(theta),0.]]
# a = np.array(a, dtype=np.float64)
# vout = np.linalg.solve(a,v)
#
# New stuff
#
vout = np.zeros(3, dtype=float)
r = np.sqrt((crd**2).sum())
rc = np.sqrt(crd[0]**2 + crd[1]**2)
# Vr
vout[0] = (crd * v).sum() / r
# Vtheta
vout[1] = (crd[2] * (crd[0] * v[0] + crd[1] * v[1]) - v[2] * rc**2) / (r * rc)
# Vphi
vout[2] = (crd[0] * v[1] - crd[1] * v[0]) / rc
return vout
def csrot(crd=None, ang=None, xang=0.0, yang=0.0, zang=0.0, deg=False):
""" Performs coordinate system rotation.
Parameters
----------
crd : numpy ndarray
Three element vector containing the coordinates of a given point in a cartesian system
ang : list, ndarray
Three element list/ndarray describing the rotation angles around the x, y and z axes, respectively
xang: float
Rotation around the x-axis
yang: float
Rotation around the y-axis
zang: float
Rotation around the z-axis
deg : bool, optional
If True, angles are given in degrees instead of radians (the default is radians)
Returns
-------
list
Returns a three element list with the rotated coordinates
Notes
-----
Rotation matrices
Around the x-axis:
.. math::
\\left(\\begin{matrix}
1 & 0 & 0 \\\\
0 & cos(\\alpha) & -sin(\\alpha)\\\\
0 & sin(\\alpha) & cos(\\alpha)
\\end{matrix}\\right)
Around the y-axis:
.. math::
\\left(\\begin{matrix}
cos(\\beta) & 0 & -sin(\\beta) \\\\
0 & 1 & 0\\\\
sin(\\beta)& 0 & cos(\\beta)
\\end{matrix}\\right)
Around the z-axis
.. math::
\\left(\\begin{matrix}
cos(\\gamma) & -sin(\\gamma) & 0 \\\\
sin(\\gamma) & cos(\\gamma) & 0 \\\\
0 & 0 & 1
\\end{matrix}\\right)
"""
if crd is None:
raise ValueError('Unknown crd. Cannot do coordinate transformation without knowing the coordinates.')
if ang is None:
if (xang == 0.) & (yang == 0.) & (zang == 0.):
return crd
if ang is not None:
xang = ang[0]
yang = ang[1]
zang = ang[2]
#
# Convert degree into radian if the angles are given in degree
#
if deg:
xang = xang / 180.0 * np.pi
yang = yang / 180.0 * np.pi
zang = zang / 180.0 * np.pi
crd_new = np.zeros(len(crd), dtype=np.float64)
#
# Rotation around the x axis
#
if xang != 0.0:
dumx = crd[0]
dumy = np.cos(xang) * crd[1] - np.sin(xang) * crd[2]
dumz = np.sin(xang) * crd[1] + np.cos(xang) * crd[2]
crd_new = [dumx, dumy, dumz]
#
# Rotation around the y axis
#
if yang != 0.0:
dumx = np.cos(yang) * crd[0] + np.sin(yang) * crd[2]
dumy = crd[1]
dumz = -np.sin(yang) * crd[0] + np.cos(yang) * crd[2]
crd_new = [dumx, dumy, dumz]
#
# Rotation around the z axis
#
if zang != 0.0:
dumx = np.cos(zang) * crd[0] - np.sin(zang) * crd[1] + 0.0
dumy = np.sin(zang) * crd[0] + np.cos(zang) * crd[1] + 0.0
dumz = crd[2]
crd_new = [dumx, dumy, dumz]
return crd_new
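# Added example (hedged): csrot(crd=[1.0, 0.0, 0.0], zang=np.pi / 2.0) rotates the x unit
# vector about the z axis by 90 degrees, giving approximately [0.0, 1.0, 0.0].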
def vrot(crd=None, v=None, ang=None):
"""Rotates a vector in spherical coordinate system.
First transforms the vector to cartesian coordinate system, then does the rotation then
makes the inverse transformation
Parameters
----------
crd : ndarray
Three element array containing the coordinates of a
given point in the cartesian system
v : ndarray
Three element array, angles of rotation around the x,y,z axes
ang : ndarray
Three element arrray containing the angles to rotate around the x, y, z, axes, respectively
"""
if crd is None:
raise ValueError('Unknown crd. Cannot do coordinate transformation without knowing the coordinates.')
if v is None:
raise ValueError('Unknown v. Vector rotation cannot be done without knowing the vectors themselves.')
if ang is None:
raise ValueError('Unknown ang. Vector rotation cannot be done without knowing the rotation angles.')
# Convert the position vector to cartesian coordinate system
crd_xyz = ctransSph2Cart(crd=crd)
# Convert the velocity vector to cartesian coordinate system
v_xyz = vtransSph2Cart(crd=crd, v=v)
# Rotate the vector
v_xyz_rot = csrot(crd=v_xyz, ang=ang)
# Transform the rotated vector back to the spherical coordinate system
# (the helpers above return plain lists, so cast to ndarrays for the reverse transform)
v_rot = vtransSph2Cart(crd=np.array(crd_xyz), v=np.array(v_xyz_rot), reverse=True)
return v_rot
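#
# Illustrative usage sketch (added for clarity; not part of the original radmc3dPy API).
# The input values below are arbitrary assumptions; the snippet round-trips a spherical
# position/velocity through the transforms defined above.
#
if __name__ == "__main__":
    crd_sph = np.array([1.0, np.pi / 3.0, np.pi / 4.0])   # [r, theta, phi]
    v_sph = np.array([0.0, 1.0, 0.5])                      # [vr, vtheta, vphi]
    crd_xyz = ctransSph2Cart(crd=crd_sph)                  # spherical -> cartesian position
    v_xyz = vtransSph2Cart(crd=crd_sph, v=v_sph)           # spherical -> cartesian velocity
    v_back = vtransSph2Cart(crd=np.array(crd_xyz), v=np.array(v_xyz), reverse=True)
    print(crd_xyz, v_xyz, v_back)                          # v_back ~ v_sph up to rounding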
|
[email protected][email protected]@python@radmc3dPy@radmc3dPy@[email protected]_END.py
|
{
"filename": "test_st_prompt_list.py",
"repo_name": "simonsobs/nextline-rdb",
"repo_path": "nextline-rdb_extracted/nextline-rdb-main/src/nextline_rdb/models/strategies/tests/test_st_prompt_list.py",
"type": "Python"
}
|
from typing import Optional, TypedDict
from hypothesis import Phase, given, note, settings
from hypothesis import strategies as st
from nextline_test_utils import safe_compare as sc
from nextline_test_utils.strategies import st_none_or, st_ranges
from ... import Model, Run
from .. import (
st_model_prompt_list,
st_model_run,
st_model_trace_call_list,
st_model_trace_list,
)
from .funcs import assert_model_persistence
class StModelPromptListKwargs(TypedDict, total=False):
run: Optional[Run]
min_size: int
max_size: Optional[int]
@st.composite
def st_st_model_prompt_list_kwargs(draw: st.DrawFn) -> StModelPromptListKwargs:
kwargs = StModelPromptListKwargs()
if draw(st.booleans()):
# generate_traces=False because True would generate a trace with prompts
run = draw(st_none_or(st_model_run(generate_traces=False)))
kwargs['run'] = run
if run:
draw(st_none_or(st_model_trace_list(run=run, min_size=0, max_size=3)))
draw(st_none_or(st_model_trace_call_list(run=run, min_size=0, max_size=5)))
if draw(st.booleans()):
min_size, max_size = draw(
st_ranges(
st.integers,
min_start=0,
max_end=4,
allow_start_none=False,
allow_end_none=False,
)
)
assert isinstance(min_size, int)
kwargs['min_size'] = min_size
kwargs['max_size'] = max_size
return kwargs
@given(kwargs=st_st_model_prompt_list_kwargs())
def test_st_model_prompt_list_kwargs(kwargs: StModelPromptListKwargs) -> None:
assert sc(kwargs.get('min_size')) <= sc(kwargs.get('max_size'))
@settings(max_examples=500, phases=(Phase.generate,)) # Avoid shrinking
@given(st.data())
async def test_options(data: st.DataObject) -> None:
# Generate options of the strategy to be tested
kwargs = data.draw(st_st_model_prompt_list_kwargs())
note(kwargs)
# Call the strategy to be tested
prompts = data.draw(st_model_prompt_list(**kwargs))
# Assert the generated values
run = kwargs.get('run')
min_size = kwargs.get('min_size', 0)
max_size = kwargs.get('max_size')
if run and not run.trace_calls:
# `prompts` is not generated if `run` with no `trace_calls` is provided
assert not prompts
else:
assert min_size <= len(prompts) <= sc(max_size)
if prompts:
runs = set(prompt.trace.run for prompt in prompts)
assert len(runs) == 1
assert run is None or run is runs.pop()
@settings(phases=(Phase.generate,)) # Avoid shrinking
@given(instances=st_model_prompt_list(max_size=5))
async def test_db(instances: list[Model]) -> None:
await assert_model_persistence(instances)
|
simonsobsREPO_NAMEnextline-rdbPATH_START.@nextline-rdb_extracted@nextline-rdb-main@src@nextline_rdb@models@strategies@tests@[email protected]_END.py
|
{
"filename": "helpers.py",
"repo_name": "spacetelescope/calcos",
"repo_path": "calcos_extracted/calcos-master/tests/helpers.py",
"type": "Python"
}
|
"""CALCOS regression test helpers."""
import os
import sys
import pytest
from ci_watson.artifactory_helpers import get_bigdata
from ci_watson.hst_helpers import raw_from_asn, ref_from_image, download_crds
from astropy.io import fits
from astropy.io.fits import FITSDiff
__all__ = ['calref_from_image', 'BaseCOS']
def calref_from_image(input_image):
"""
Return a list of reference filenames, as defined in the primary
header of the given input image, necessary for calibration; i.e.,
only those associated with ``*CORR`` set to ``PERFORM`` will be
considered.
"""
# NOTE: Add additional mapping as needed.
# Map mandatory CRDS reference file for instrument/detector combo.
# These are for files not tied to any particular *CORR, or used throughout.
det_lookup = {
('COS', 'FUV'): ['PROFTAB', 'SPWCSTAB'],
('COS', 'NUV'): []}
# NOTE: Add additional mapping as needed.
# Map *CORR to associated CRDS reference file.
corr_lookup = {
'BADTCORR': ['BADTTAB'],
'TEMPCORR': ['BRFTAB'],
'GEOCORR': ['GEOFILE'],
'DGEOCORR': ['DGEOFILE'],
'YWLKCORR': ['YWLKFILE'],
'XWLKCORR': ['XWLKFILE'],
'DEADCORR': ['DEADTAB'],
'PHACORR': ['PHATAB', 'PHAFILE'],
'FLATCORR': ['FLATFILE'],
'WAVECORR': ['LAMPTAB', 'DISPTAB', 'TWOZXTAB', 'XTRACTAB'],
'BRSTCORR': ['BRSTTAB'],
'TRCECORR': ['TRACETAB'],
'ALGNCORR': ['TWOZXTAB'],
'DQICORR': ['SPOTTAB', 'TRACETAB', 'BPIXTAB', 'GSAGTAB'],
'X1DCORR': ['WCPTAB', 'TWOZXTAB', 'XTRACTAB'],
'BACKCORR': ['TWOZXTAB', 'XTRACTAB'],
'FLUXCORR': ['FLUXTAB', 'TDSTAB', 'PHOTTAB'],
'WALKCORR': ['WALKTAB']}
hdr = fits.getheader(input_image, ext=0)
ref_files = ref_from_image(
input_image, det_lookup[(hdr['INSTRUME'], hdr['DETECTOR'])])
for step in corr_lookup:
# Not all images have the CORR step and it is not always on.
if (step not in hdr) or (hdr[step].strip().upper() != 'PERFORM'):
continue
ref_files += ref_from_image(input_image, corr_lookup[step])
# Special case for STATFLAG=T, which requires XTRACTAB, but MissingRefFiles()
# doesn't know
if hdr['STATFLAG']:
ref_files += ref_from_image(input_image, ['XTRACTAB'])
return list(set(ref_files)) # Remove duplicates
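# Added example (hedged): for an exposure whose header has FLATCORR='PERFORM' and
# STATFLAG=True, the list above would include FLATFILE plus XTRACTAB (and the
# detector-specific entries), with duplicates removed by the final set().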
# Base class for actual tests.
# NOTE: Named in a way so pytest will not pick them up here.
# NOTE: bigdata marker requires TEST_BIGDATA environment variable to
# point to a valid big data directory, whether locally or on Artifactory.
# NOTE: envopt would point tests to "dev" or "stable".
# NOTE: _jail fixture ensures each test runs in a clean tmpdir.
@pytest.mark.bigdata
@pytest.mark.usefixtures('_jail', 'envopt')
class BaseCOS:
instrument = 'cos'
ignore_keywords = ['DATE', 'CAL_VER']
# To be defined by test class in actual test modules.
detector = ''
@pytest.fixture(autouse=True)
def setup_class(self, envopt):
"""
Class-level setup that is done at the beginning of the test.
Parameters
----------
envopt : {'dev', 'stable'}
This is a ``pytest`` fixture that defines the test
environment in which input and truth files reside.
"""
# CALCOS itself still runs under PY2, but these tests can only
# run under PY3, so check the interpreter version here.
if sys.version_info < (3, ):
raise SystemError('tests can only run in Python 3')
self.env = envopt
def get_input_files(self, filenames):
"""
Copy input files (ASN, RAW, etc) into the working directory.
If ASN is given, RAW files in the ASN table are also copied.
The associated CRDS reference files are also copied or
downloaded, if necessary.
Data directory layout for CALCOS::
detector/
input/
truth/
Parameters
----------
filenames : list
List of filenames of the ASN/RAW/etc to copy over, along with their
associated files.
"""
all_raws = []
for file in filenames:
if 'rawtag' in file:
all_raws.append(file)
# List of filenames can include _rawtag, _asn and _spt files
dest = get_bigdata('scsb-calcos', self.env, self.detector, 'input',
file)
# If file is an association table, download raw files specified in the table
if file.endswith('_asn.fits'):
if self.detector == 'nuv':
asn_raws = raw_from_asn(file, '_rawtag.fits')
else:
asn_raws = raw_from_asn(file, '_rawtag_a.fits')
asn_raws += raw_from_asn(file, '_rawtag_b.fits')
for raw in asn_raws: # Download RAWs in ASN.
get_bigdata('scsb-calcos', self.env, self.detector, 'input',
raw)
all_raws += asn_raws
first_pass = ('JENKINS_URL' in os.environ and
'ssbjenkins' in os.environ['JENKINS_URL'])
for raw in all_raws:
ref_files = calref_from_image(raw)
for ref_file in ref_files:
print("Getting reference file {}".format(ref_file))
# Special reference files that live with inputs.
if ('$' not in ref_file and
os.path.basename(ref_file) == ref_file):
get_bigdata('scsb-calcos', self.env, self.detector,
'input', ref_file)
print('{} downloaded successfully'.format(ref_file))
continue
# Jenkins cannot see Central Storage on push event,
# and somehow setting, say, jref to "." does not work anymore.
# So, we need this hack.
if '$' in ref_file and first_pass:
first_pass = False
if not os.path.isdir('/grp/hst/cdbs'):
ref_path = os.path.dirname(dest) + os.sep
var = ref_file.split('$')[0]
os.environ[var] = ref_path # hacky hack hack
# Download reference files, if needed only.
download_crds(ref_file, verbose=True)
def compare_outputs(self, outputs, atol=0, rtol=1e-7, raise_error=True,
ignore_keywords_overwrite=None):
"""
Compare CALXXX output with "truth" using ``fitsdiff``.
Parameters
----------
outputs : list of tuple
A list of tuples, each containing filename (without path)
of CALXXX output and truth, in that order. Example::
[('output1.fits', 'truth1.fits'),
('output2.fits', 'truth2.fits'),
...]
atol, rtol : float
Absolute and relative tolerance for data comparison.
raise_error : bool
Raise ``AssertionError`` if difference is found.
ignore_keywords_overwrite : list of str or `None`
If not `None`, these will overwrite
``self.ignore_keywords`` for the calling test.
Returns
-------
report : str
Report from ``fitsdiff``.
This is part of error message if ``raise_error=True``.
"""
all_okay = True
creature_report = ''
if ignore_keywords_overwrite is None:
ignore_keywords = self.ignore_keywords
else:
ignore_keywords = ignore_keywords_overwrite
for actual, desired in outputs:
desired = get_bigdata('scsb-calcos', self.env, self.detector,
'truth', desired)
fdiff = FITSDiff(actual, desired, rtol=rtol, atol=atol,
ignore_keywords=ignore_keywords)
creature_report += fdiff.report()
if not fdiff.identical and all_okay:
all_okay = False
if not all_okay and raise_error:
raise AssertionError(os.linesep + creature_report)
return creature_report
|
spacetelescopeREPO_NAMEcalcosPATH_START.@calcos_extracted@calcos-master@[email protected]@.PATH_END.py
|
{
"filename": "find_stars.py",
"repo_name": "ucl-exoplanets/pylightcurve",
"repo_path": "pylightcurve_extracted/pylightcurve-master/pylightcurve/images/find_stars.py",
"type": "Python"
}
|
__all__ = ['find_single_star']
import numpy as np
import warnings
from pylightcurve.analysis.gaussian import fit_two_d_gaussian
from pylightcurve.analysis.distributions import one_d_distribution
def find_single_star(data_array, predicted_x, predicted_y, mean=None, std=None, burn_limit=65000, star_std=2,
std_limit=5.0):
star = None
if 0 < predicted_x < len(data_array[0]) and 0 < predicted_y < len(data_array):
if mean is None or std is None:
fit_mean, fit_std = one_d_distribution(data_array, gaussian_fit=True, mad_filter=5)[2:4]
if not mean:
mean = fit_mean
if not std:
std = fit_std
centroids = find_centroids(data_array, predicted_x - 5 * star_std, predicted_x + 5 * star_std,
predicted_y - 5 * star_std, predicted_y + 5 * star_std, mean, std, burn_limit, star_std,
std_limit)
centroids = sorted(centroids, key=lambda x: np.sqrt((x[0] - predicted_x) ** 2 + (x[1] - predicted_y) ** 2))
for centroid in centroids:
star = _star_from_centroid(data_array, centroid[0], centroid[1], mean, std, burn_limit, star_std, std_limit)
if star:
star = [star[0][2], star[0][3], star[0][0], star[0][1], star[0][4], star[0][5], centroid[0], centroid[1]]
break
return star
def _star_from_centroid(data_array, centroid_x, centroid_y, mean, std, burn_limit, star_std, std_limit):
star = None
try:
search_window = int(round(10 * star_std))
y_min = int(max(int(centroid_y) - search_window, 0))
y_max = int(min(int(centroid_y) + search_window, len(data_array) - 1))
x_min = int(max(int(centroid_x) - search_window, 0))
x_max = int(min(int(centroid_x) + search_window, len(data_array[0]) - 1))
datax, datay = np.meshgrid(np.arange(x_min, x_max + 1) + 0.5,
np.arange(y_min, y_max + 1) + 0.5)
dataz = data_array[y_min: y_max + 1, x_min: x_max + 1]
popt, pcov = fit_two_d_gaussian(datax, datay, dataz, positive=True, point_xy=(centroid_x, centroid_y),
sigma=star_std, maxfev=1000)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if popt[0] > std_limit * std and popt[0] + popt[1] < burn_limit:
if np.sqrt(pcov[0][0]) != np.inf:
if popt[0] > std_limit * np.sqrt(pcov[0][0]):
star = (popt, pcov)
else:
star = (popt, pcov)
except Exception:
pass
return star
def find_centroids(data_array, x_low, x_upper, y_low, y_upper, mean, std, burn_limit, star_std, std_limit):
x_upper = int(min(x_upper, len(data_array[0])))
y_upper = int(min(y_upper, len(data_array)))
x_low = int(max(0, x_low))
y_low = int(max(0, y_low))
data_array = np.full_like(data_array[y_low:y_upper + 1, x_low:x_upper + 1],
data_array[y_low:y_upper + 1, x_low:x_upper + 1])
test = []
for i in range(-star_std, star_std + 1):
for j in range(-star_std, star_std + 1):
rolled = np.roll(np.roll(data_array, i, 0), j, 1)
test.append(rolled)
median_test = np.median(test, 0)
max_test = np.max(test, 0)
del test
stars = np.where((data_array < burn_limit) & (data_array > mean + std_limit * std) & (max_test == data_array)
& (median_test > mean + 2 * std))
del data_array
stars = [stars[1] + x_low, stars[0] + y_low]
stars = np.swapaxes(stars, 0, 1)
return stars
|
ucl-exoplanetsREPO_NAMEpylightcurvePATH_START.@pylightcurve_extracted@pylightcurve-master@pylightcurve@images@[email protected]_END.py
|
{
"filename": "Useful_Utilities.ipynb",
"repo_name": "LSSTDESC/rail",
"repo_path": "rail_extracted/rail-main/examples/core_examples/Useful_Utilities.ipynb",
"type": "Jupyter Notebook"
}
|
# Useful Utilities
**Authors:** Olivia Lynn
**Last Run Successfully:** September 20, 2023
This is a notebook that contains various utilities that may be used when working with RAIL.
## Setting Things Up
```python
import rail
```
### Listing imported stages (1/2)
Let's list out our currently imported stages. Right now, this will only be what we get by importing `rail` and `rail.stages`.
```python
import rail.stages
for val in rail.core.stage.RailStage.pipeline_stages.values():
print(val[0])
```
### Import and attach all
Using `rail.stages.import_and_attach_all()` lets you import all packages within the RAIL ecosystem at once.
This kind of blanket import is a useful shortcut; however, it will be slower than specific imports, as you will import things you'll never need.
As such, `import_and_attach_all` is recommended for new users and those who wish to do rapid exploration with notebooks; pipelines designed to be run at scale would generally prefer lightweight, specific imports.
```python
import rail
import rail.stages
rail.stages.import_and_attach_all()
```
Now that we've attached all available stages to rail.stages, we can use `from rail.stages import *` to let us omit prefixes.
To see this in action:
```python
# with prefix
print(rail.tools.table_tools.ColumnMapper)
```
```python
# without prefix
try:
print(ColumnMapper)
except Exception as e:
print(e)
```
```python
from rail.stages import *
```
```python
print(ColumnMapper)
```
### Listing imported stages (2/2)
Now, let's try listing imported stages again, and notice how many more we get.
```python
for val in rail.core.stage.RailStage.pipeline_stages.values():
print(val[0])
```
We can use this list of imported stages to browse for specifics, such as looking through our available estimators.
**Note:** this will only filter through what you've imported, so if you haven't imported everything above, this will not be a complete list of all estimators available in RAIL.
```python
for val in rail.core.stage.RailStage.pipeline_stages.values():
if issubclass(val[0], rail.estimation.estimator.CatEstimator):
print(val[0])
```
### Listing keys in the Data Store (1/2)
Let's list out the keys in the Data Store to see what data we have stored.
First, we must set up the Data Store:
```python
DS = rail.core.stage.RailStage.data_store
DS.__class__.allow_overwrite = True
```
And because we've only just created the store, as you may have guessed, it is empty.
We'll come back to this in a bit.
```python
DS.keys()
```
### Finding data files with find_rail_file
We need to define our flow file that we'll use in our pipeline
If we already know its path, we can just point directly to the file (relative to the directory that holds our `rail/` directory):
```python
import os
from rail.utils.path_utils import RAILDIR
flow_file = os.path.join(
RAILDIR, "rail/examples_data/goldenspike_data/data/pretrained_flow.pkl"
)
```
But if we aren't sure where our file is (or we're just feeling lazy) we can use `find_rail_file`.
This is especially helpful in cases where our installation is spread out, and some rail modules are located separately from others.
```python
from rail.utils.path_utils import find_rail_file
flow_file = find_rail_file('examples_data/goldenspike_data/data/pretrained_flow.pkl')
```
We can set our FLOWDIR based on the location of our flow file, too.
```python
os.environ['FLOWDIR'] = os.path.dirname(flow_file)
```
```python
# Now, we have to set up some other variables for our pipeline:
import numpy as np
bands = ["u", "g", "r", "i", "z", "y"]
band_dict = {band: f"mag_{band}_lsst" for band in bands}
rename_dict = {f"mag_{band}_lsst_err": f"mag_err_{band}_lsst" for band in bands}
post_grid = [float(x) for x in np.linspace(0.0, 5, 21)]
```
## Creating the Pipeline
```python
import ceci
```
```python
# Make some stages
flow_engine_test = FlowCreator.make_stage(
name="flow_engine_test", model=flow_file, n_samples=50
)
col_remapper_test = ColumnMapper.make_stage(
name="col_remapper_test", hdf5_groupname="", columns=rename_dict
)
#flow_engine_test.sample(6, seed=0).data
```
```python
# Add the stages to the pipeline
pipe = ceci.Pipeline.interactive()
stages = [flow_engine_test, col_remapper_test]
for stage in stages:
pipe.add_stage(stage)
```
```python
# Connect stages
col_remapper_test.connect_input(flow_engine_test)
```
## Introspecting the Pipeline
### Listing keys in the Data Store (2/2)
Now that we have some data in the Data Store, let's take another look at it.
```python
DS.keys()
```
### Getting names of stages in the pipeline
```python
pipe.stage_names
```
### Getting the configuration of a particular stage
Let's take a look at the config of the first stage we just listed above.
```python
pipe.flow_engine_test.config
```
### Updating a configuration value
We can update config values even after the stage has been created. Let's give it a try.
```python
pipe.flow_engine_test.config.update(seed=42)
pipe.flow_engine_test.config
```
### Listing stage outputs (as both tags and aliased tags)
Let's get the list of outputs as 'tags'.
These are how the stage thinks of the outputs: a list of names associated with DataHandle types.
```python
pipe.flow_engine_test.outputs
```
We can also get the list of outputs as 'aliased tags'.
These are how the pipeline thinks of the outputs: a unique key that points to a particular file.
```python
pipe.flow_engine_test._outputs
```
### Listing all pipeline methods and parameters that can be set
If you'd like to take a closer look at what you can do with a pipeline, use `dir(pipe)` to list out available methods and parameters.
```python
for item in dir(pipe):
if '__' not in item:
print(item)
```
## Initializing the Pipeline
### Toggling resume mode
We can turn 'resume mode' on when initializing a pipeline.
Resume mode lets us skip stages that already have output files, so we don't have to rerun the same stages as we iterate on a pipeline.
Just add a `resume=True` to do so.
```python
pipe.initialize(
dict(model=flow_file), dict(output_dir=".", log_dir=".", resume=True), None
)
```
Running `pipe.stages` shows the ordered list of stage classes, i.e., all the stages this pipeline will run.
```python
pipe.stages
```
## Managing notebooks with git
_(thank you to https://stackoverflow.com/a/58004619)_
You can modify your git settings to run a filter over certain files before they are added to git. This will leave the original file on disk as-is, but commit the "cleaned" version.
First, add the following to your local `.git/config` file (or global `~/.gitconfig`):
[filter "strip-notebook-output"]
clean = "jupyter nbconvert --ClearOutputPreprocessor.enabled=True --to=notebook --stdin --stdout --log-level=ERROR"
Then, create a `.gitattributes` file in your directory with notebooks and add the following line:
```
*.ipynb filter=strip-notebook-output
```
|
LSSTDESCREPO_NAMErailPATH_START.@rail_extracted@rail-main@examples@core_examples@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "ahmedfgad/GeneticAlgorithmPython",
"repo_path": "GeneticAlgorithmPython_extracted/GeneticAlgorithmPython-master/pygad/utils/__init__.py",
"type": "Python"
}
|
from pygad.utils import parent_selection
from pygad.utils import crossover
from pygad.utils import mutation
from pygad.utils import nsga2
__version__ = "1.2.1"
|
ahmedfgadREPO_NAMEGeneticAlgorithmPythonPATH_START.@GeneticAlgorithmPython_extracted@GeneticAlgorithmPython-master@pygad@utils@[email protected]_END.py
|
{
"filename": "test_special.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/tests/ops/test_special.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import pytest
import torch
from scipy.special import iv
from torch import tensor
from torch.autograd import grad
from pyro.ops.special import get_quad_rule, log_beta, log_binomial, log_I1, safe_log
from tests.common import assert_equal
def test_safe_log():
# Test values.
x = torch.randn(1000).exp().requires_grad_()
expected = x.log()
actual = safe_log(x)
assert_equal(actual, expected)
assert_equal(grad(actual.sum(), [x])[0], grad(expected.sum(), [x])[0])
# Test gradients.
x = torch.tensor(0.0, requires_grad=True)
assert not torch.isfinite(grad(x.log(), [x])[0])
assert torch.isfinite(grad(safe_log(x), [x])[0])
@pytest.mark.parametrize(
"tol",
[
1e-8,
1e-6,
1e-4,
1e-2,
0.02,
0.05,
0.1,
0.2,
0.1,
1.0,
],
)
def test_log_beta_stirling(tol):
x = torch.logspace(-5, 5, 200)
y = x.unsqueeze(-1)
expected = log_beta(x, y)
actual = log_beta(x, y, tol=tol)
assert (actual <= expected).all()
assert (expected < actual + tol).all()
@pytest.mark.parametrize(
"tol",
[
1e-8,
1e-6,
1e-4,
1e-2,
0.02,
0.05,
0.1,
0.2,
0.1,
1.0,
],
)
def test_log_binomial_stirling(tol):
k = torch.arange(200.0)
n_minus_k = k.unsqueeze(-1)
n = k + n_minus_k
# Test binomial coefficient choose(n, k).
expected = (n + 1).lgamma() - (k + 1).lgamma() - (n_minus_k + 1).lgamma()
actual = log_binomial(n, k, tol=tol)
assert (actual - expected).abs().max() < tol
@pytest.mark.parametrize("order", [0, 1, 5, 10, 20])
@pytest.mark.parametrize("value", [0.01, 0.1, 1.0, 10.0, 100.0])
def test_log_I1(order, value):
value = tensor([value])
expected = torch.tensor([iv(i, value.numpy()) for i in range(order + 1)]).log()
actual = log_I1(order, value)
assert_equal(actual, expected)
def test_log_I1_shapes():
assert_equal(log_I1(10, tensor(0.6)).shape, torch.Size([11, 1]))
assert_equal(log_I1(10, tensor([0.6])).shape, torch.Size([11, 1]))
assert_equal(log_I1(10, tensor([[0.6]])).shape, torch.Size([11, 1, 1]))
assert_equal(log_I1(10, tensor([0.6, 0.2])).shape, torch.Size([11, 2]))
assert_equal(log_I1(0, tensor(0.6)).shape, torch.Size((1, 1)))
@pytest.mark.parametrize("sigma", [0.5, 1.25])
def test_get_quad_rule(sigma):
quad_points, log_weights = get_quad_rule(32, torch.zeros(1))
quad_points *= sigma # transform to N(0, sigma) gaussian
variance = torch.logsumexp(quad_points.pow(2.0).log() + log_weights, axis=0).exp()
assert_equal(sigma**2, variance.item())
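# Added note (hedged): get_quad_rule returns Gauss-Hermite nodes and log-weights for a
# standard normal, so after scaling the nodes by sigma the weighted sum of x**2 above
# approximates E[X**2] for X ~ N(0, sigma**2), i.e. sigma**2, which the final assertion checks.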
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@tests@ops@[email protected]_END.py
|
{
"filename": "scanner.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/scanner.py",
"type": "Python"
}
|
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
that just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
"""
Raised if the end of the text is reached and the user
tries to call a match function.
"""
class Scanner:
"""
Simple scanner
All method patterns are regular expression strings (not
compiled expressions!)
"""
def __init__(self, text, flags=0):
"""
:param text: The text which should be scanned
:param flags: default regular expression flags
"""
self.data = text
self.data_length = len(text)
self.start_pos = 0
self.pos = 0
self.flags = flags
self.last = None
self.match = None
self._re_cache = {}
def eos(self):
"""`True` if the scanner reached the end of text."""
return self.pos >= self.data_length
eos = property(eos, doc=eos.__doc__)
def check(self, pattern):
"""
Apply `pattern` on the current position and return
the match object. (Doesn't touch pos). Use this for
lookahead.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
return self._re_cache[pattern].match(self.data, self.pos)
def test(self, pattern):
"""Apply a pattern on the current position and check
if it matches. Doesn't touch pos.
"""
return self.check(pattern) is not None
def scan(self, pattern):
"""
Scan the text for the given pattern and update pos/match
and related fields. The return value is a boolean that
indicates if the pattern matched. The matched value is
stored on the instance as ``match``, the last value is
stored as ``last``. ``start_pos`` is the position of the
pointer before the pattern was matched, ``pos`` is the
end position.
"""
if self.eos:
raise EndOfText()
if pattern not in self._re_cache:
self._re_cache[pattern] = re.compile(pattern, self.flags)
self.last = self.match
m = self._re_cache[pattern].match(self.data, self.pos)
if m is None:
return False
self.start_pos = m.start()
self.pos = m.end()
self.match = m.group()
return True
def get_char(self):
"""Scan exactly one char."""
self.scan('.')
def __repr__(self):
return '<%s %d/%d>' % (
self.__class__.__name__,
self.pos,
self.data_length
)
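# Illustrative usage sketch (added; not part of the Pygments module above). It tokenises a
# tiny input by alternating word / non-word scans, printing each match as it is consumed.
if __name__ == '__main__':
    s = Scanner('program demo;')
    while not s.eos:
        if s.scan(r'\w+') or s.scan(r'\W+'):
            print(repr(s.match))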
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@[email protected]@.PATH_END.py
|
{
"filename": "clusters.py",
"repo_name": "ICRAR/shark",
"repo_path": "shark_extracted/shark-master/standard_plots/clusters.py",
"type": "Python"
}
|
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2018
# Copyright by UWA (in the framework of the ICRAR)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import functools
import numpy as np
import h5py
import common
import utilities_statistics as us
##################################
# Constants
mlow = 8.0
mupp = 12.0
dm = 1.0
mbins = np.arange(mlow, mupp, dm)
xmf = mbins + dm/2.0
rlow = 0.0
rupp = 7.0
dr = 0.5
rbins = np.arange(rlow, rupp, dr)
xrf = rbins + dr/2.0
GyrtoYr = 1e9
MpcToKpc = 1e3
G = 4.299e-9 #Gravity constant in units of (km/s)^2 * Mpc/Msun
offMS = 0.2
def add_observations_to_plot(obsdir, fname, ax, marker, label, color='k', err_absolute=False):
fname = '%s/Gas/%s' % (obsdir, fname)
x, y, yerr_down, yerr_up = common.load_observation(obsdir, fname, (0, 1, 2, 3))
common.errorbars(ax, x, y, yerr_down, yerr_up, color, marker, label=label, err_absolute=err_absolute)
def prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit):
common.prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit)
xleg = xmax - 0.2 * (xmax-xmin)
yleg = ymax - 0.1 * (ymax-ymin)
#ax.text(xleg, yleg, 'z=0')
def prepare_data(hdf5_data, fradii, index):
bin_it = functools.partial(us.wmedians, xbins=xmf)
stack = functools.partial(us.stacking, xbins=xmf)
# Unpack data
(h0, _, typeg, mdisk, mbulge, _, _, mHI, mH2, mgas,
mHI_bulge, mH2_bulge, mgas_bulge, mvir, sfrd, sfrb,
x, y, z, vvir) = hdf5_data
XH = 0.72
h0log = np.log10(float(h0))
rvir = G * mvir / pow(vvir,2.0) / h0
mstar_tot = (mdisk + mbulge) / h0
sfr_tot = (sfrd + sfrb) / h0 / GyrtoYr
#define main sequence first
inms = np.where((mstar_tot > 5e8) & (mstar_tot < 7e9) & (typeg == 0) & (sfr_tot > 0))
ms = np.polyfit(np.log10(mstar_tot[inms]), np.log10(sfr_tot[inms]), 2)
gasfracms = np.polyfit(np.log10(mstar_tot[inms]), np.log10(mgas[inms]+mgas_bulge[inms])-h0log, 2)
indcen = np.where((mvir/h0 > 3e14) & (typeg == 0))
x_cen = x[indcen]
y_cen = y[indcen]
z_cen = z[indcen]
rvir_cen = rvir[indcen]
#find the closest central to the centrals in massive clusters
for g in range(0,len(x_cen)):
selec_cens = np.where((typeg == 0) & (mstar_tot > 1e9))
d_all = np.sqrt(pow(x[selec_cens] - x_cen[g], 2.0) + pow(y[selec_cens] - y_cen[g], 2.0) + pow(z[selec_cens] - z_cen[g], 2.0))/h0/rvir_cen[g]
ms_all = mstar_tot[selec_cens]
selec_cens = np.where(d_all > 0)
d_all_in = d_all[selec_cens]
ms_all_in = ms_all[selec_cens]
ids = np.argsort(d_all_in)
print ("minimum distance to a central %s of mass %s" % (str(d_all_in[ids[0]]), str(ms_all_in[ids[0]])))
print ('number of clusters %d'% len(x_cen))
nradii_this_z = np.zeros(shape = (3, len(xmf), len(xrf), len(x_cen)))
#xy projection
for g in range(0,len(x_cen)):
d_all = np.sqrt(pow(x - x_cen[g], 2.0) + pow(y - y_cen[g], 2.0))/h0/rvir_cen[g]
for i in range(0, len(xmf)):
ind = np.where((np.log10(mstar_tot) >= xmf[i] - dm/2.0)
& (np.log10(mstar_tot) < xmf[i] + dm/2.0)
& (d_all < 7.5))
#print 'number of neighbours', len(sfr_tot[ind])
mstars_galsin = np.log10(mstar_tot[ind])
sfr_tot_galsin = sfr_tot[ind]
mgas_tot_galsin = (mgas[ind] + mgas_bulge[ind])/h0
dist_to_ms = sfr_tot_galsin / pow(10.0, (ms[0] * mstars_galsin**2.0 + ms[1] * mstars_galsin + ms[2]))
dist_to_gf = mgas_tot_galsin / pow(10.0, (gasfracms[0] * mstars_galsin**2.0 + gasfracms[1] * mstars_galsin + gasfracms[2]))
dist_proj = d_all[ind]
for j in range(0, len(xrf)):
inr = np.where((dist_proj >= xrf[j] - dr/2.0) & (dist_proj < xrf[j] + dr/2.0))
nradii_this_z[0,i,j,g] = len(dist_proj[inr])
inr = np.where((dist_proj >= xrf[j] - dr/2.0) & (dist_proj < xrf[j] + dr/2.0) & (dist_to_ms > offMS))
nradii_this_z[1,i,j,g] = len(dist_proj[inr])
inr = np.where((dist_proj >= xrf[j] - dr/2.0) & (dist_proj < xrf[j] + dr/2.0) & (dist_to_gf > offMS))
nradii_this_z[2,i,j,g] = len(dist_proj[inr])
for i in range(0, len(xmf)):
for j in range(0, len(xrf)):
selec_cl = np.where(nradii_this_z[0,i,j,:] > 0)
fradii[0,0,index,i,j] = np.median(nradii_this_z[1,i,j,selec_cl] / nradii_this_z[0,i,j,selec_cl])
fradii[0,1,index,i,j] = np.std(nradii_this_z[1,i,j,selec_cl] / nradii_this_z[0,i,j,selec_cl])
fradii[1,0,index,i,j] = np.median(nradii_this_z[2,i,j,selec_cl] / nradii_this_z[0,i,j,selec_cl])
fradii[1,1,index,i,j] = np.std(nradii_this_z[2,i,j,selec_cl] / nradii_this_z[0,i,j,selec_cl])
return nradii_this_z
def plot_fractions_radii(plt, output_dir, fradii):
###################################
# Plots global mass densities
fig = plt.figure(figsize=(6,7))
plt.subplots_adjust(bottom=0.15, left=0.15)
subplots = (321, 322, 323, 324, 325, 326)
zs = (0, 0.3, 0.5)
cols = ('r','yellowgreen','darkblue')
colse = ('Crimson','Green','blue')
xmin, xmax, ymin, ymax = 0, 7, -0.05, 1.05
xleg = xmin + 0.05 * (xmax - xmin)
yleg = ymax - 0.1 * (ymax - ymin)
xtitle = '$\\rm d_{\\rm proj}/cMpc$'
ytitle = '$\\rm fraction$'
labels = ('Main sequence', 'Gas rich')
labelsz = ('z=0', 'z=0.3', 'z=0.5')
#read C-EAGLE data
#ceagledata = h5py.File('../../BuffaloFigure_C-EAGLE_Jul19_longMS.hdf5','r')
ceagledatasf = h5py.File('../../BuffaloFigure_C-EAGLE_30Jul19_longMS_ssfr_Hydrangea.hdf5', 'r')
a_group_key = list(ceagledatasf.keys())[1]
print (a_group_key)
# Get the data
databahesf = list(ceagledatasf[a_group_key])
ceagledatagas = h5py.File('../../BuffaloFigure_C-EAGLE_30Jul19_longMS_hn_Hydrangea.hdf5', 'r')
a_group_key = list(ceagledatagas.keys())[1]
# Get the data
databahegas = list(ceagledatagas[a_group_key])
p = 0
for i in range(0, len(xmf)-1):
for j in range(0,2):
ax = fig.add_subplot(subplots[p])
if(p <= 1):
ax.text(1,1.1,labels[j])
if (p >= 4):
xtit = xtitle
else:
xtit = ''
if (p == 0 or p == 2 or p == 4):
ytit = ytitle
else:
ytit = ''
common.prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit, locators=(1, 1, 1))
ax.text(xleg, yleg, '$M_{\\star}$=%s' % str(xmf[i]))
if(j == 0):
for z in range(0,3):
x = databahesf[i][z][0]
y = databahesf[i][z][1]
yerrdn = databahesf[i][z][2]
yerrup = databahesf[i][z][3]
ind = np.where(y >= 0)
ax.fill_between(x[ind],yerrdn[ind],yerrup[ind], facecolor=colse[z], alpha=0.2,interpolate=True)
ax.plot(x[ind],y[ind],linewidth=2, linestyle='dashed', color=colse[z])
if(j == 1):
for z in range(0,3):
x = databahegas[i][z][0]
y = databahegas[i][z][1]
yerrdn = databahegas[i][z][2]
yerrup = databahegas[i][z][3]
ind = np.where(y >= 0)
ax.fill_between(x[ind],yerrdn[ind],yerrup[ind], facecolor=colse[z], alpha=0.2,interpolate=True)
ax.plot(x[ind],y[ind],linewidth=2, linestyle='dashed', color=colse[z])
#predicted fraction
for z in range (0,3):
ind = np.where(fradii[j,0,z,i,:] > 0)
xplot = xrf[ind]
yplot = fradii[j,0,z,i,ind]
err = fradii[j,1,z,i,ind]
if(p == 2):
ax.plot(xplot, yplot[0], color=cols[z], linestyle='solid', label=labelsz[z], linewidth=2)
else:
ax.plot(xplot, yplot[0], color=cols[z], linestyle='solid', linewidth=2)
ax.fill_between(xplot,yplot[0],yplot[0]-err[0], facecolor=cols[z], alpha=0.2,interpolate=True)
ax.fill_between(xplot,yplot[0],yplot[0]+err[0], facecolor=cols[z], alpha=0.2,interpolate=True)
if(p == 2):
ax.legend(['z=0','z=0.3','z=0.5'],loc='lower right',fontsize='small')
p = p + 1
common.savefig(output_dir, fig, "cluster_fractions.pdf")
def plot_individual_clusters(plt, output_dir, nradii_z0, nradii_z0p3, nradii_z0p5):
###################################
# Plots global mass densities
fig = plt.figure(figsize=(6,7))
plt.subplots_adjust(bottom=0.15, left=0.15)
subplots = (321, 322, 323, 324, 325, 326)
zs = (0, 0.3, 0.5)
cols = ('r','g','b')
lines = ('dotted', 'dashed', 'solid')
xmin, xmax, ymin, ymax = 0, 5, -0.05, 1.05
xleg = xmax - 0.3 * (xmax - xmin)
yleg = ymin + 0.1 * (ymax - ymin)
xtitle = '$\\rm d_{\\rm proj}/cMpc$'
ytitle = '$\\rm fraction$'
labels = ('Main sequence', 'Gas rich')
p = 0
for i in range(0, len(xmf)-1):
for j in range(0,2):
ax = fig.add_subplot(subplots[p])
if(p <= 1):
ax.text(1,1.1,labels[j])
if (p >= 4):
xtit = xtitle
else:
xtit = ''
if (p == 0 or p == 2 or p == 4):
ytit = ytitle
else:
ytit = ''
common.prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit, locators=(1, 1, 1))
ax.text(xleg, yleg, '$M_{\\star}$=%s' % str(xmf[i]))
#predicted fraction
for j in range(0,len(nradii_z0p3[0,0,0,:])):
ind = np.where(nradii_z0p3[0,i,:,j] > 4)
xplot = xrf[ind]
yplot = (nradii_z0p3[1,i,ind,j] + 0.0)/(nradii_z0p3[0,i,ind,j] + 0.0)
ax.plot(xplot, yplot[0], color=cols[1], linestyle = 'solid',linewidth = 0.5)
for j in range(0,len(nradii_z0p5[0,0,0,:])):
ind = np.where(nradii_z0p5[0,i,:,j] > 4)
xplot = xrf[ind]
yplot = (nradii_z0p5[1,i,ind,j] + 0.0)/(nradii_z0p5[0,i,ind,j] + 0.0)
ax.plot(xplot, yplot[0], color=cols[2], linestyle = 'dashed',linewidth = 0.5)
p = p + 1
#common.prepare_legend(ax, ['k','b','r','grey','grey'])
common.savefig(output_dir, fig, "individual_cluster_fractions.pdf")
def main(model_dir, output_dir, redshift_table, subvols, obs_dir):
plt = common.load_matplotlib()
zlist = (0, 0.3, 0.5)
fields = {'galaxies': ('type', 'mstars_disk', 'mstars_bulge',
'rstar_disk', 'm_bh', 'matom_disk', 'mmol_disk', 'mgas_disk',
'matom_bulge', 'mmol_bulge', 'mgas_bulge', 'mvir_hosthalo', 'sfr_disk',
'sfr_burst', 'position_x', 'position_y', 'position_z', 'vvir_hosthalo')}
fradii = np.zeros(shape = (2, 2, len(zlist), len(xmf), len(xrf)))
fradii[:] = -1
for index, snapshot in enumerate(redshift_table[zlist]):
hdf5_data = common.read_data(model_dir, snapshot, fields, subvols)
nradii = prepare_data(hdf5_data, fradii, index)
if(index == 0):
nradii_z0 = nradii
if(index == 1):
nradii_z0p3 = nradii
if(index == 2):
nradii_z0p5 = nradii
plot_fractions_radii(plt, output_dir, fradii)
plot_individual_clusters(plt, output_dir, nradii_z0, nradii_z0p3, nradii_z0p5)
if __name__ == '__main__':
main(*common.parse_args())
|
ICRARREPO_NAMEsharkPATH_START.@shark_extracted@shark-master@[email protected]@.PATH_END.py
|
{
"filename": "filters.py",
"repo_name": "astro-informatics/s2wav",
"repo_path": "s2wav_extracted/s2wav-main/s2wav/filters.py",
"type": "Python"
}
|
from jax import jit
import jax.numpy as jnp
import torch
import numpy as np
from typing import Tuple
from functools import partial
from s2wav import samples
def filters_axisym(
L: int, J_min: int = 0, lam: float = 2.0
) -> Tuple[np.ndarray, np.ndarray]:
r"""Computes wavelet kernels :math:`\Psi^j_{\ell m}` and scaling kernel
:math:`\Phi_{\ell m}` in harmonic space.
Specifically, these kernels are derived in `[1] <https://arxiv.org/pdf/1211.1680.pdf>`_,
where the wavelet kernels are defined (15) for scale :math:`j` to be
.. math::
\Psi^j_{\ell m} \equiv \sqrt{\frac{2\ell+1}{4\pi}} \kappa_{\lambda}(\frac{\ell}{\lambda^j})\delta_{m0},
where :math:`\kappa_{\lambda} = \sqrt{k_{\lambda}(t/\lambda) - k_{\lambda}(t)}` for :math:`k_{\lambda}`
given in :func:`~k_lam`. Similarly, the scaling kernel is defined (16) as
.. math::
\Phi_{\ell m} \equiv \sqrt{\frac{2\ell+1}{4\pi}} \nu_{\lambda} (\frac{\ell}{\lambda^{J_0}})\delta_{m0},
where :math:`\nu_{\lambda} = \sqrt{k_{\lambda}(t)}` for :math:`k_{\lambda}` given in :func:`~k_lam`.
Notice that :math:`\delta_{m0}` enforces that these kernels are axisymmetric, i.e. coefficients
for :math:`m \not = \ell` are zero. In this implementation the normalisation constant has been
omitted as it is nulled in subsequent functions.
Args:
L (int): Harmonic band-limit.
J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
lam (float, optional): Wavelet parameter which determines the scale factor between
consecutive wavelet scales. Note that :math:`\lambda = 2` indicates dyadic
wavelets. Defaults to 2.
Raises:
ValueError: J_min is negative or greater than J.
Returns:
Tuple[np.ndarray, np.ndarray]: Unnormalised wavelet kernels :math:`\Psi^j_{\ell m}`
with shape :math:`[(J+1)L]`, and scaling kernel :math:`\Phi_{\ell m}` with shape
:math:`[L]` in harmonic space.
Note:
[1] B. Leistedt et al., "S2LET: A code to perform fast wavelet analysis on the sphere", A&A, vol. 558, p. A128, 2013.
"""
J = samples.j_max(L, lam)
if J_min >= J or J_min < 0:
raise ValueError(
"J_min must be non-negative and less than J= "
+ str(J)
+ " for given L and lam."
)
previoustemp = 0.0
k = k_lam(L, lam)
psi = np.zeros((J + 1, L), np.float64)
phi = np.zeros(L, np.float64)
for l in range(L):
phi[l] = np.sqrt(k[J_min, l])
for j in range(J_min, J + 1):
for l in range(L):
diff = k[j + 1, l] - k[j, l]
if diff < 0:
psi[j, l] = previoustemp
else:
temp = np.sqrt(diff)
psi[j, l] = temp
previoustemp = temp
return psi, phi
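# Illustrative usage sketch (added; values are arbitrary assumptions):
#   psi, phi = filters_axisym(L=16, J_min=0, lam=2.0)
#   # psi has shape (J + 1, L) with J = samples.j_max(16, 2.0); phi has shape (L,)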
def filters_directional(
L: int,
N: int = 1,
J_min: int = 0,
lam: float = 2.0,
spin: int = 0,
spin0: int = 0,
using_torch: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
r"""Generates the harmonic coefficients for the directional tiling wavelets.
This implementation is based on equation 36 in the wavelet computation paper
`[1] <https://arxiv.org/pdf/1509.06749.pdf>`_.
Args:
L (int): Harmonic band-limit.
N (int, optional): Upper azimuthal band-limit. Defaults to 1.
J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
lam (float, optional): Wavelet parameter which determines the scale factor between
consecutive wavelet scales. Note that :math:`\lambda = 2` indicates dyadic
wavelets. Defaults to 2.
spin (int, optional): Spin (integer) to perform the transform. Defaults to 0.
spin0 (int, optional): Spin number the wavelet was lowered from. Defaults to 0.
using_torch (bool, optional): Desired frontend functionality. Defaults to False.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple of wavelet and scaling kernels
(:math:`\Psi^j_{\ell n}`, :math:`\Phi_{\ell m}`)
Notes:
[1] J. McEwen et al., "Directional spin wavelets on the sphere", arXiv preprint arXiv:1509.06749 (2015).
"""
J = samples.j_max(L, lam)
el_min = max(abs(spin), abs(spin0))
phi = np.zeros(L, dtype=np.float64)
psi = np.zeros((J + 1, L, 2 * L - 1), dtype=np.complex128)
kappa, kappa0 = filters_axisym(L, J_min, lam)
s_elm = tiling_direction(L, N)
for el in range(el_min, L):
if kappa0[el] != 0:
phi[el] = np.sqrt((2 * el + 1) / (4.0 * np.pi)) * kappa0[el]
if spin0 != 0:
phi[el] *= _spin_normalization(el, spin0) * (-1) ** spin0
for j in range(J_min, J + 1):
for el in range(el_min, L):
if kappa[j, el] != 0:
for m in range(-el, el + 1):
if s_elm[el, L - 1 + m] != 0:
psi[j, el, L - 1 + m] = (
np.sqrt((2 * el + 1) / (8.0 * np.pi * np.pi))
* kappa[j, el]
* s_elm[el, L - 1 + m]
)
if spin0 != 0:
psi[j, el, L - 1 + m] *= (
_spin_normalization(el, spin0) * (-1) ** spin0
)
if using_torch:
psi = torch.from_numpy(psi)
phi = torch.from_numpy(phi)
return psi, phi
def filters_axisym_vectorised(
L: int, J_min: int = 0, lam: float = 2.0
) -> Tuple[np.ndarray, np.ndarray]:
r"""Vectorised version of :func:`~filters_axisym`.
Args:
L (int): Harmonic band-limit.
J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
lam (float, optional): Wavelet parameter which determines the scale factor
between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
Raises:
ValueError: J_min is negative or greater than J.
Returns:
Tuple[np.ndarray, np.ndarray]: Unnormalised wavelet kernels :math:`\Psi^j_{\ell m}`
with shape :math:`[(J+1)L]`, and scaling kernel :math:`\Phi_{\ell m}` with shape
:math:`[L]` in harmonic space.
"""
J = samples.j_max(L, lam)
if J_min >= J or J_min < 0:
raise ValueError(
"J_min must be non-negative and less than J= "
+ str(J)
+ " for given L and lam."
)
k = k_lam(L, lam)
diff = (np.roll(k, -1, axis=0) - k)[:-1]
diff[diff < 0] = 0
return np.sqrt(diff), np.sqrt(k[J_min])
def filters_directional_vectorised(
L: int,
N: int = 1,
J_min: int = 0,
lam: float = 2.0,
spin: int = 0,
spin0: int = 0,
using_torch: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
r"""Vectorised version of :func:`~filters_directional`.
Args:
L (int): Harmonic band-limit.
N (int, optional): Upper azimuthal band-limit. Defaults to 1.
J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
lam (float, optional): Wavelet parameter which determines the scale factor between
consecutive wavelet scales. Note that :math:`\lambda = 2` indicates dyadic
wavelets. Defaults to 2.
spin (int, optional): Spin (integer) to perform the transform. Defaults to 0.
spin0 (int, optional): Spin number the wavelet was lowered from. Defaults to 0.
using_torch (bool, optional): Desired frontend functionality. Defaults to False.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple of wavelet and scaling kernels
(:math:`\Psi^j_{\ell n}`, :math:`\Phi_{\ell m}`).
"""
el_min = max(abs(spin), abs(spin0))
spin_norms = (
(-1) ** spin0 * _spin_normalization_vectorised(np.arange(L), spin0)
if spin0 != 0
else 1
)
kappa, kappa0 = filters_axisym_vectorised(L, J_min, lam)
s_elm = tiling_direction(L, N)
kappa0 *= np.sqrt((2 * np.arange(L) + 1) / (4.0 * np.pi))
kappa0 = kappa0 * spin_norms if spin0 != 0 else kappa0
kappa *= np.sqrt((2 * np.arange(L) + 1) / 8.0) / np.pi
kappa = np.einsum("ij,jk->ijk", kappa, s_elm)
kappa = np.einsum("ijk,j->ijk", kappa, spin_norms) if spin0 != 0 else kappa
kappa0[:el_min] = 0
kappa[:, :el_min, :] = 0
if using_torch:
kappa0 = torch.from_numpy(kappa0)
kappa = torch.from_numpy(kappa)
return kappa, kappa0
@partial(jit, static_argnums=(0, 1, 2))
def filters_axisym_jax(
L: int, J_min: int = 0, lam: float = 2.0
) -> Tuple[jnp.ndarray, jnp.ndarray]:
r"""JAX version of :func:`~filters_axisym_vectorised`.
Args:
L (int): Harmonic band-limit.
J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
lam (float, optional): Wavelet parameter which determines the scale factor
between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
Raises:
ValueError: J_min is negative or greater than J.
Returns:
Tuple[np.ndarray, np.ndarray]: Unnormalised wavelet kernels :math:`\Psi^j_{\ell m}`
with shape :math:`[(J+1)L]`, and scaling kernel :math:`\Phi_{\ell m}` with shape
:math:`[L]` in harmonic space.
"""
J = samples.j_max(L, lam)
if J_min >= J or J_min < 0:
raise ValueError(
"J_min must be non-negative and less than J= "
+ str(J)
+ " for given L and lam."
)
k = k_lam_jax(L, lam)
diff = (jnp.roll(k, -1, axis=0) - k)[:-1]
diff = jnp.where(diff < 0, jnp.zeros((J + 1, L)), diff)
return jnp.sqrt(diff), jnp.sqrt(k[J_min])
@partial(jit, static_argnums=(0, 1, 2, 3, 4, 5))
def filters_directional_jax(
L: int,
N: int = 1,
J_min: int = 0,
lam: float = 2.0,
spin: int = 0,
spin0: int = 0,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
r"""JAX version of :func:`~filters_directional`.
Args:
L (int): Harmonic band-limit.
N (int, optional): Upper azimuthal band-limit. Defaults to 1.
J_min (int, optional): Lowest frequency wavelet scale to be used. Defaults to 0.
lam (float, optional): Wavelet parameter which determines the scale factor between
consecutive wavelet scales. Note that :math:`\lambda = 2` indicates dyadic
wavelets. Defaults to 2.
spin (int, optional): Spin (integer) to perform the transform. Defaults to 0.
spin0 (int, optional): Spin number the wavelet was lowered from. Defaults to 0.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple of wavelet and scaling kernels
(:math:`\Psi^j_{\ell n}`, :math:`\Phi_{\ell m}`).
"""
el_min = max(abs(spin), abs(spin0))
spin_norms = (
(-1) ** spin0 * _spin_normalization_jax(np.arange(L), spin0)
if spin0 != 0
else 1
)
kappa, kappa0 = filters_axisym_jax(L, J_min, lam)
s_elm = tiling_direction_jax(L, N)
kappa0 *= jnp.sqrt((2 * jnp.arange(L) + 1) / (4.0 * jnp.pi))
kappa0 = kappa0 * spin_norms if spin0 != 0 else kappa0
kappa *= jnp.sqrt((2 * jnp.arange(L) + 1) / 8.0) / np.pi
kappa = jnp.einsum("ij,jk->ijk", kappa, s_elm, optimize=True)
kappa = (
jnp.einsum("ijk,j->ijk", kappa, spin_norms, optimize=True)
if spin0 != 0
else kappa
)
kappa0 = kappa0.at[:el_min].set(0)
kappa = kappa.at[:, :el_min, :].set(0)
return kappa, kappa0
def tiling_integrand(t: float, lam: float = 2.0) -> float:
r"""Tiling integrand for scale-discretised wavelets `[1] <https://arxiv.org/pdf/1211.1680.pdf>`_.
Intermediate step used to compute the wavelet and scaling function generating
functions. One of the basic mathematical functions needed to carry out the tiling of
the harmonic space.
Args:
t (float): Real argument over which we integrate.
lam (float, optional): Wavelet parameter which determines the scale factor
            between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
Returns:
float: Value of tiling integrand for given :math:`t` and scaling factor.
Note:
        [1] B. Leistedt et al., "S2LET: A code to perform fast wavelet analysis on
the sphere", A&A, vol. 558, p. A128, 2013.
"""
s_arg = (t - (1.0 / lam)) * (2.0 * lam / (lam - 1.0)) - 1.0
integrand = np.exp(-2.0 / (1.0 - s_arg**2.0)) / t
return integrand
def part_scaling_fn(a: float, b: float, n: int, lam: float = 2.0) -> float:
r"""Computes integral used to calculate smoothly decreasing function :math:`k_{\lambda}`.
Intermediate step used to compute the wavelet and scaling function generating
functions. Uses the trapezium method to integrate :func:`~tiling_integrand` in the
limits from :math:`a \rightarrow b` with scaling parameter :math:`\lambda`. One of
the basic mathematical functions needed to carry out the tiling of the harmonic
space.
Args:
a (float): Lower limit of the numerical integration.
b (float): Upper limit of the numerical integration.
n (int): Number of steps to be performed during integration.
lam (float, optional): Wavelet parameter which determines the scale factor
            between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
Returns:
float: Integral of the tiling integrand from :math:`a \rightarrow b`.
"""
sum = 0.0
h = (b - a) / n
if a == b:
return 0
for i in range(n):
if a + i * h not in [1 / lam, 1.0] and a + (i + 1) * h not in [
1 / lam,
1.0,
]:
f1 = tiling_integrand(a + i * h, lam)
f2 = tiling_integrand(a + (i + 1) * h, lam)
sum += ((f1 + f2) * h) / 2
return sum
def k_lam(L: int, lam: float = 2.0, quad_iters: int = 300) -> np.ndarray:
r"""Compute function :math:`k_{\lambda}` used as a wavelet generating function.
Specifically, this function is derived in [1] and is given by
.. math::
k_{\lambda} \equiv \frac{ \int_t^1 \frac{\text{d}t^{\prime}}{t^{\prime}}
s_{\lambda}^2(t^{\prime})}{ \int_{\frac{1}{\lambda}}^1
\frac{\text{d}t^{\prime}}{t^{\prime}} s_{\lambda}^2(t^{\prime})},
where the integrand is defined to be
.. math::
s_{\lambda} \equiv s \Big ( \frac{2\lambda}{\lambda - 1}(t-\frac{1}{\lambda})
- 1 \Big ),
for infinitely differentiable Cauchy-Schwartz function :math:`s(t) \in C^{\infty}`.
Args:
L (int): Harmonic band-limit.
lam (float, optional): Wavelet parameter which determines the scale factor
between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
quad_iters (int, optional): Total number of iterations for quadrature
integration. Defaults to 300.
Returns:
(np.ndarray): Value of :math:`k_{\lambda}` computed for values between
:math:`\frac{1}{\lambda}` and 1, parametrised by :math:`\ell` as required to
compute the axisymmetric filters in :func:`~tiling_axisym`.
Note:
        [1] B. Leistedt et al., "S2LET: A code to perform fast wavelet analysis on the
sphere", A&A, vol. 558, p. A128, 2013.
"""
J = samples.j_max(L, lam)
normalisation = part_scaling_fn(1.0 / lam, 1.0, quad_iters, lam)
k = np.zeros((J + 2, L))
for j in range(J + 2):
for l in range(L):
if l < lam ** (j - 1):
k[j, l] = 1
elif l > lam**j:
k[j, l] = 0
else:
k[j, l] = (
part_scaling_fn(l / lam**j, 1.0, quad_iters, lam) / normalisation
)
return k
@partial(jit, static_argnums=(2, 3))  # n sets array sizes so it must be static; lam is kept static too
def _part_scaling_fn_jax(a: float, b: float, n: int, lam: float = 2.0) -> float:
r"""JAX version of part_scaling_fn. Computes integral used to calculate smoothly
decreasing function :math:`k_{\lambda}`.
Intermediate step used to compute the wavelet and scaling function generating
functions. Uses the trapezium method to integrate :func:`~tiling_integrand` in the
limits from :math:`a \rightarrow b` with scaling parameter :math:`\lambda`. One of
the basic mathematical functions needed to carry out the tiling of the harmonic
space.
Args:
a (float): Lower limit of the numerical integration.
b (float): Upper limit of the numerical integration.
n (int): Number of steps to be performed during integration.
lam (float, optional): Wavelet parameter which determines the scale factor
            between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
Returns:
float: Integral of the tiling integrand from :math:`a \rightarrow b`.
"""
h = (b - a) / n
x = jnp.linspace(a, b, num=n + 1)
s_arg = (x - (1.0 / lam)) * (2.0 * lam / (lam - 1.0)) - 1.0
value = jnp.where(
(x[:-1] == 1.0 / lam) | (x[:-1] == 1.0) | (x[1:] == 1.0 / lam) | (x[1:] == 1.0),
jnp.zeros(n),
(jnp.exp(-2.0 / (1.0 - jnp.square(s_arg))) / x)[:-1]
+ (jnp.exp(-2.0 / (1.0 - jnp.square(s_arg))) / x)[1:],
)
return jnp.sum(value * h / 2)
@partial(jit, static_argnums=(0, 1, 2))
def k_lam_jax(L: int, lam: float = 2.0, quad_iters: int = 300) -> jnp.ndarray:
r"""JAX version of k_lam. Compute function :math:`k_{\lambda}` used as a wavelet
generating function.
Specifically, this function is derived in [1] and is given by
.. math::
k_{\lambda} \equiv \frac{ \int_t^1 \frac{\text{d}t^{\prime}}{t^{\prime}}
s_{\lambda}^2(t^{\prime})}{ \int_{\frac{1}{\lambda}}^1
\frac{\text{d}t^{\prime}}{t^{\prime}} s_{\lambda}^2(t^{\prime})},
where the integrand is defined to be
.. math::
s_{\lambda} \equiv s \Big ( \frac{2\lambda}{\lambda - 1}(t-\frac{1}{\lambda})
- 1 \Big ),
for infinitely differentiable Cauchy-Schwartz function :math:`s(t) \in C^{\infty}`.
Args:
L (int): Harmonic band-limit.
lam (float, optional): Wavelet parameter which determines the scale factor
between consecutive wavelet scales. Note that :math:`\lambda = 2` indicates
dyadic wavelets. Defaults to 2.
quad_iters (int, optional): Total number of iterations for quadrature
integration. Defaults to 300.
Returns:
(np.ndarray): Value of :math:`k_{\lambda}` computed for values between
:math:`\frac{1}{\lambda}` and 1, parametrised by :math:`\ell` as required to
compute the axisymmetric filters in :func:`~tiling_axisym`.
Note:
        [1] B. Leistedt et al., "S2LET: A code to perform fast wavelet analysis on the
sphere", A&A, vol. 558, p. A128, 2013.
"""
J = samples.j_max(L, lam)
normalisation = part_scaling_fn(1.0 / lam, 1.0, quad_iters, lam)
k = jnp.zeros((J + 2, L))
for j in range(J + 2):
for l in range(L):
if l < lam ** (j - 1):
k = k.at[j, l].set(1.0)
elif l > lam**j:
k = k.at[j, l].set(0.0)
else:
k = k.at[j, l].set(
part_scaling_fn(l / lam**j, 1.0, quad_iters, lam) / normalisation
)
return k
def tiling_direction(L: int, N: int = 1) -> np.ndarray:
r"""Generates the harmonic coefficients for the directionality component of the
tiling functions.
    Formally, this function implements the following equation
.. math::
        _{s}\eta_{\ell m} = \nu \vu \sqrt{\frac{1}{2^{\gamma}} \big ( \binom{\gamma}{
(\gamma - m)/2} \big )}
which was first derived in `[1] <https://arxiv.org/pdf/1211.1680.pdf>`_.
Args:
L (int): Harmonic band-limit.
N (int, optional): Upper orientational band-limit. Defaults to 1.
Returns:
np.ndarray: Harmonic coefficients of directionality components
            :math:`_{s}\eta_{\ell m}`.
Notes:
        [1] J. McEwen et al., "Directional spin wavelets on the sphere", arXiv preprint
arXiv:1509.06749 (2015).
"""
if N % 2:
nu = 1
else:
nu = 1j
s_elm = np.zeros((L, 2 * L - 1), dtype=np.complex128)
for el in range(1, L):
if (N + el) % 2:
gamma = min(N - 1, el)
else:
gamma = min(N - 1, el - 1)
for m in range(-el, el + 1):
if abs(m) < N and (N + m) % 2:
s_elm[el, L - 1 + m] = nu * np.sqrt(
(samples.binomial_coefficient(gamma, ((gamma - m) / 2)))
/ (2**gamma)
)
else:
s_elm[el, L - 1 + m] = 0.0
return s_elm
def _spin_normalization(el: int, spin: int = 0) -> float:
r"""Computes the normalization factor for spin-lowered wavelets, which is
:math:`\sqrt{\frac{(\ell+s)!}{(\ell-s)!}}`.
Args:
el (int): Harmonic index :math:`\ell`.
spin (int): Spin of field over which to perform the transform. Defaults to 0.
Returns:
float: Normalization factor for spin-lowered wavelets.
"""
factor = 1.0
for s in range(-abs(spin) + 1, abs(spin) + 1):
factor *= el + s
if spin > 0:
return np.sqrt(factor)
else:
return np.sqrt(1.0 / factor)
def _spin_normalization_vectorised(el: np.ndarray, spin: int = 0) -> float:
r"""Vectorised version of :func:`~_spin_normalization`.
Args:
el (int): Harmonic index :math:`\ell`.
spin (int): Spin of field over which to perform the transform. Defaults to 0.
Returns:
float: Normalization factor for spin-lowered wavelets.
"""
factor = np.arange(-abs(spin) + 1, abs(spin) + 1).reshape(1, 2 * abs(spin) + 1)
factor = el.reshape(len(el), 1).dot(factor)
return np.sqrt(np.prod(factor, axis=1) ** (np.sign(spin)))
@partial(jit, static_argnums=(0, 1))
def tiling_direction_jax(L: int, N: int = 1) -> np.ndarray:
r"""JAX version of tiling_direction. Generates the harmonic coefficients for the
directionality component of the tiling functions.
    Formally, this function implements the following equation
.. math::
_{s}\eta_{\ell m} = \nu \vu \sqrt{\frac{1}{2^{\gamma}} \big ( \binom{\gamma}{
(\gamma - m)/2} \big )}
which was first derived in `[1] <https://arxiv.org/pdf/1211.1680.pdf>`_.
Args:
L (int): Harmonic band-limit.
N (int, optional): Upper orientational band-limit. Defaults to 1.
Returns:
np.ndarray: Harmonic coefficients of directionality components
:math:`_{s}\eta_{\ell m}`.
Notes:
        [1] J. McEwen et al., "Directional spin wavelets on the sphere", arXiv preprint
arXiv:1509.06749 (2015).
"""
nu = (N % 2 - 1) ** 2 * 1j + (N % 2)
s_elm = jnp.zeros((L, 2 * L - 1), dtype=np.complex128)
for el in range(1, L):
gamma = min(N - 1, el - 1 + (N + el) % 2)
ms = jnp.arange(-el, el + 1)
val = nu * jnp.sqrt(
(samples.binomial_coefficient_jax(gamma, ((gamma - ms) / 2))) / (2**gamma)
)
val = jnp.where(
(ms < N) & (ms > -N) & ((N + ms) % 2 == 1),
val,
jnp.zeros(2 * el + 1),
)
s_elm = s_elm.at[el, L - 1 - el : L + el].set(val)
return s_elm
@partial(jit, static_argnums=(1))
def _spin_normalization_jax(el: np.ndarray, spin: int = 0) -> float:
r"""JAX version of :func:`~_spin_normalization`.
Args:
el (int): Harmonic index :math:`\ell`.
spin (int): Spin of field over which to perform the transform. Defaults to 0.
Returns:
float: Normalization factor for spin-lowered wavelets.
"""
factor = jnp.arange(-abs(spin) + 1, abs(spin) + 1).reshape(1, 2 * abs(spin) + 1)
factor = el.reshape(len(el), 1).dot(factor)
return jnp.sqrt(jnp.prod(factor, axis=1) ** (jnp.sign(spin)))
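# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It exercises the vectorised generators defined above with
# placeholder parameters. By construction kappa0**2 + sum_j kappa_j**2
# telescopes to k[J+1] = 1 for every harmonic degree, so the printed
# deviation should sit at machine-precision level.
if __name__ == "__main__":
    L_demo, N_demo = 16, 3
    kappa, kappa0 = filters_axisym_vectorised(L_demo)
    identity = kappa0**2 + np.sum(kappa**2, axis=0)
    print("max |kappa0^2 + sum_j kappa_j^2 - 1| =", np.abs(identity - 1.0).max())
    psi, phi = filters_directional_vectorised(L_demo, N_demo)
    print("Psi shape:", psi.shape, "Phi shape:", phi.shape)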
|
astro-informaticsREPO_NAMEs2wavPATH_START.@s2wav_extracted@s2wav-main@[email protected]@.PATH_END.py
|
{
"filename": "saveable_object.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/training/saving/saveable_object.py",
"type": "Python"
}
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Types for specifying saving and loading behavior."""
class SaveSpec:
"""Class used to describe tensor slices that need to be saved."""
def __init__(self, tensor, slice_spec, name, dtype=None, device=None):
"""Creates a `SaveSpec` object.
Args:
tensor: the tensor to save or callable that produces a tensor to save.
If the value is `None`, the `SaveSpec` is ignored.
slice_spec: the slice to be saved. See `Variable.SaveSliceInfo`.
name: the name to save the tensor under.
dtype: The data type of the Tensor. Required if `tensor` is callable.
Used for error checking in the restore op.
device: The device generating and consuming this tensor. Required if
`tensor` is callable. Used to group objects to save by device.
"""
self._tensor = tensor
self.slice_spec = slice_spec
self.name = name
if callable(self._tensor):
if dtype is None or device is None:
raise AssertionError(
"When passing a callable `tensor` to a SaveSpec, an explicit "
"dtype and device must be provided.")
self.dtype = dtype
self.device = device
else:
self.dtype = tensor.dtype
if device is not None:
self.device = device
else:
self.device = tensor.device
@property
def tensor(self):
return self._tensor() if callable(self._tensor) else self._tensor
class SaveableObject:
"""Base class for saving and restoring saveable objects."""
def __init__(self, op, specs, name):
"""Creates a `SaveableObject` object.
Args:
op: the "producer" object that this class wraps; it produces a list of
tensors to save. E.g., a "Variable" object saving its backing tensor.
specs: a list of SaveSpec, each element of which describes one tensor to
save under this object. All Tensors must be on the same device.
name: the name to save the object under.
"""
self.op = op
self.specs = specs
self.name = name
@property
def device(self):
"""The device for SaveSpec Tensors."""
return self.specs[0].device
def restore(self, restored_tensors, restored_shapes):
"""Restores this object from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint
restored_shapes: the shapes this object should conform to after
restore, or None.
Returns:
An operation that restores the state of the object.
Raises:
ValueError: If the object cannot be restored using the provided
parameters.
"""
# pylint: disable=unused-argument
raise ValueError("Calling an abstract method.")
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@training@saving@[email protected]_END.py
|
{
"filename": "test_bookkeeping.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/core/tests/test_bookkeeping.py",
"type": "Python"
}
|
# pytest suite
"""
Tests for primitives_bookkeeping.
This is a suite of tests to be run with pytest.
To run:
1) Set the environment variable GEMPYTHON_TESTDATA to the path that
contains the directories with the test data.
Eg. /net/chara/data2/pub/gempython_testdata/
2) From the ??? (location): pytest -v --capture=no
"""
# TODO @bquint: clean up these tests
import astrodata
import gemini_instruments
import os
import pytest
# from . import ad_compare
from geminidr.niri.primitives_niri_image import NIRIImage
from geminidr.gmos.primitives_gmos_image import GMOSImage
from gempy.utils import logutils
TESTDATAPATH = os.getenv('GEMPYTHON_TESTDATA', '.')
logfilename = 'test_bookkeeping.log'
# --- Fixtures ---
@pytest.fixture(scope="class")
def log():
if os.path.exists(logfilename):
os.remove(logfilename)
log = logutils.get_logger(__name__)
log.root.handlers = []
logutils.config(mode='standard', file_name=logfilename)
yield log
os.remove(logfilename)
@pytest.fixture(scope="function")
def niri_ads(request, astrofaker):
return [astrofaker.create('NIRI', ['IMAGE'], filename=f"X{i+1}.fits")
for i in range(request.param)]
# --- Tests ---
@pytest.mark.parametrize('niri_ads', [3], indirect=True)
def test_append_stream(niri_ads):
"""Some manipulation of streams using appendStream()"""
def filenames(stream):
return ''.join([ad.filename[1] for ad in stream])
p = NIRIImage(niri_ads[:1])
p.streams['test'] = niri_ads[1:2]
# Add the AD in 'test' to 'main' leaving it in 'test'
p.appendStream(from_stream='test', copy=True)
assert len(p.streams['main']) == 2
assert len(p.streams['test']) == 1
# Change filename of version in 'test' to confirm that the one in 'main'
# is not simply a reference
p.streams['test'][0].filename = 'X4.fits'
assert filenames(p.streams['main']) == '12'
# Add the copy in 'test' to 'main', and delete 'test'
p.appendStream(from_stream='test', copy=False)
assert len(p.streams['main']) == 3
assert filenames(p.streams['main']) == '124'
# Take 'test2', append 'main', and put the result in 'main'
p.streams['test2'] = niri_ads[2:]
p.appendStream(instream='test2', from_stream='main')
assert filenames(p.streams['main']) == '3124'
@pytest.mark.parametrize('niri_ads', [2], indirect=True)
def test_clear_all_streams(niri_ads):
p = NIRIImage(niri_ads[:1])
p.streams['test'] = niri_ads[1:]
p.clearAllStreams()
assert not p.streams['test']
assert len(p.streams['main']) == 1
@pytest.mark.parametrize('niri_ads', [2], indirect=True)
def test_clear_stream(niri_ads):
p = NIRIImage(niri_ads[:1])
p.streams['test'] = niri_ads[1:]
p.clearStream(stream='test')
assert not p.streams['test']
assert len(p.streams['main']) == 1
p.clearStream()
assert not p.streams['main']
def test_slice_into_streams(astrofaker):
def gmos_ads():
ad1 = astrofaker.create("GMOS-N")
ad1.init_default_extensions()
ad2 = astrofaker.create("GMOS-N")
ad2.init_default_extensions()
return [ad1, ad2]
# Slice, clearing "main"
p = GMOSImage(gmos_ads())
p.sliceIntoStreams(copy=False)
p.clearStream()
assert len(p.streams) == 13
for k, v in p.streams.items():
assert len(v) == 0 if k == 'main' else 2
# Slice, not clearing "main"
p = GMOSImage(gmos_ads())
p.sliceIntoStreams(copy=True)
assert len(p.streams) == 13
for k, v in p.streams.items():
assert len(v) == 2
# Slice with different lengths of input
ad1, ad2 = gmos_ads()
ad2.phu['EXTRA_KW'] = 33
del ad1[5]
p = GMOSImage([ad1, ad2])
p.sliceIntoStreams(copy=True)
assert len(p.streams) == 13
for k, v in p.streams.items():
assert len(v) == 1 if k == 'ext12' else 2
# The last stream should only have a slice from ad2
assert 'EXTRA_KW' in p.streams['ext12'][0].phu
class TestBookkeeping:
"""
Suite of tests for the functions in the primitives_standardize module.
"""
@pytest.mark.xfail(reason="Test needs revision", run=False)
def test_addToList(self):
filenames = ['N20070819S{:04d}_flatCorrected.fits'.format(i)
for i in range(104, 109)]
adinputs = [astrodata.open(os.path.join(TESTDATAPATH, 'NIRI', f))
for f in filenames]
# Add one image twice, just for laughs; it should appear only once
adinputs.append(adinputs[0])
p = NIRIImage(adinputs)
p.stacks = {}
p.addToList(purpose='forTest')
for f in filenames:
newfilename = f.replace('flatCorrected', 'forTest')
assert os.path.exists(newfilename)
os.remove(newfilename)
# Check there's one stack of length 5
assert len(p.stacks) == 1
        assert len(p.stacks[list(p.stacks.keys())[0]]) == 5
@pytest.mark.xfail(reason="Test needs revision", run=False)
def test_getList(self):
pass
@pytest.mark.xfail(reason="Test needs revision", run=False)
def test_showInputs(self):
pass
@pytest.mark.xfail(reason="Test needs revision", run=False)
def test_showList(self):
pass
@pytest.mark.xfail(reason="Test needs revision", run=False)
def test_writeOutputs(self):
filenames = ['N20070819S{:04d}_flatCorrected.fits'.format(i)
for i in range(104, 106)]
adinputs = [astrodata.open(os.path.join(TESTDATAPATH, 'NIRI', f))
for f in filenames]
p = NIRIImage(adinputs)
p.writeOutputs(prefix='test', suffix='_blah', strip=True)
# Check renamed files are on disk and the filenames have been
# changed for the adinputs
for f, ad in zip(filenames, p.streams['main']):
newfilename = 'test' + f.replace('flatCorrected', 'blah')
assert os.path.exists(newfilename)
os.remove(newfilename)
assert newfilename == ad.filename
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@core@tests@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "ggmichael/craterstats",
"repo_path": "craterstats_extracted/craterstats-main/src/craterstats/sample/__init__.py",
"type": "Python"
}
|
ggmichaelREPO_NAMEcraterstatsPATH_START.@craterstats_extracted@craterstats-main@src@craterstats@sample@[email protected]_END.py
|
|
{
"filename": "pi_pi_eta.py",
"repo_name": "LoganAMorrison/Hazma",
"repo_path": "Hazma_extracted/Hazma-master/hazma/vector_mediator/form_factors/pi_pi_eta.py",
"type": "Python"
}
|
"""
F_{eta,pi,pi} = (1/Z) * BW(s, 0) [
a0*e^{i*p0}BW(q^2,0) +
a1*e^{i*p1}BW(q^2,1) +
a2*e^{i*p2}BW(q^2,2)
]
Z = a0*e^{i*p0} + a1*e^{i*p1} + a2*e^{i*p2}
"""
from dataclasses import dataclass
import numpy as np
from scipy.integrate import quad
from .cross_sections import width_to_cs
from hazma.utils import kallen_lambda
from hazma.vector_mediator.form_factors.utils import (
FPI_GEV,
META_GEV,
MPI_GEV,
RealArray,
)
META = META_GEV * 1e3
MPI = MPI_GEV * 1e3
@dataclass
class FormFactorPiPiEta:
masses: RealArray = np.array([0.77549, 1.54, 1.76, 2.15])
widths: RealArray = np.array([0.1494, 0.356, 0.113, 0.32])
amps: RealArray = np.array([1.0, 0.326, 0.0115, 0.0])
phases: RealArray = np.array([0, 3.14, 3.14, 0.0])
def __bw0(self, s):
m0 = self.masses[0]
w0 = self.widths[0]
w = (
w0
* m0**2
/ s
* ((s - 4.0 * MPI_GEV**2) / (m0**2 - 4.0 * MPI_GEV**2)) ** 1.5
)
return m0**2 / (m0**2 - s - 1j * np.sqrt(s) * w)
def __bw(self, s):
w = self.widths * s / self.masses**2
bw = self.masses**2 / (self.masses**2 - s - 1j * np.sqrt(s) * w)
bw[0] = self.__bw0(s)
return bw
def form_factor(self, cme, s, gvuu, gvdd):
"""
Compute the form factor for a vector decaying into two charged pions and
an eta.
Parameters
----------
        cme: float
            Center-of-mass energy in GeV.
        s: float
            Squared invariant mass of the pion pair in GeV^2.
        gvuu, gvdd: float
            Couplings of the vector to up and down quarks.
"""
pre = 1.0 / (4.0 * np.sqrt(3.0) * np.pi**2 * FPI_GEV**3)
ci1 = gvuu - gvdd
amps = self.amps * np.exp(1j * self.phases)
amps /= np.sum(amps)
return pre * ci1 * self.__bw0(s) * np.sum(amps * self.__bw(cme**2))
def __integrated_form_factor(
self, *, cme: float, gvuu: float, gvdd: float
) -> float:
"""
Compute the form factor for a vector decaying into two charged pions and
        an eta, integrated over the three-body phase-space.
Parameters
----------
        cme: float
            Center-of-mass energy in GeV.
"""
mpi = MPI_GEV
meta = META_GEV
if cme < 2 * mpi + meta:
return 0.0
jac = 1 / (128.0 * np.pi**3 * cme**2)
def integrand(s):
f2 = np.abs(self.form_factor(cme, s, gvuu, gvdd)) ** 2
k1 = kallen_lambda(s, cme**2, meta**2)
k2 = kallen_lambda(s, mpi**2, mpi**2)
return (k1 * k2) ** 1.5 * f2 / (72 * s**2)
lb = (2 * mpi) ** 2
ub = (cme - meta) ** 2
return jac * quad(integrand, lb, ub)[0]
def integrated_form_factor(self, *, cme: float, gvuu: float, gvdd: float) -> float:
"""
Compute the form factor for a vector decaying into two charged pions and
        an eta, integrated over the three-body phase-space.
Parameters
----------
        cme: float
            Center-of-mass energy in MeV.
"""
cme_gev = cme * 1e-3
integral = self.__integrated_form_factor(cme=cme_gev, gvuu=gvuu, gvdd=gvdd)
return integral * 1e6
def width(self, *, mv: float, gvuu: float, gvdd: float) -> float:
if mv < 2 * MPI + META:
return 0.0
integral = self.integrated_form_factor(cme=mv, gvuu=gvuu, gvdd=gvdd)
return integral / (2 * mv)
def cross_section(
self,
*,
cme,
mx: float,
mv: float,
gvuu: float,
gvdd: float,
gamv: float,
):
rescale = width_to_cs(cme=cme, mx=mx, mv=mv, wv=gamv)
return rescale * self.width(mv=cme, gvuu=gvuu, gvdd=gvdd)
def energy_distributions(self, cme, gvuu, gvdd, nbins):
if cme < 2 * MPI + META:
return [([], []), ([], []), ([], [])]
def edist(e, m1, m2, m3):
s = cme**2 + m1**2 - 2 * cme * e
if s <= (m2 + m3) ** 2 or s >= (cme - m1) ** 2:
return 0.0
k1 = kallen_lambda(s, m1**2, cme**2)
k2 = kallen_lambda(s, m2**2, m3**2)
return (k1 * k2) ** 1.5 / (s**2)
def ebounds(m1, m2, m3):
return m1, (cme**2 + m1**2 - (m2 + m3) ** 2) / (2 * cme)
def make_dist(m1, m2, m3):
elow, ehigh = ebounds(m1, m2, m3)
edges = np.linspace(elow, ehigh, nbins + 1)
es = 0.5 * (edges[1:] + edges[:-1])
norm = quad(lambda e: edist(e, m1, m2, m3), elow, ehigh)[0]
dist = [edist(e, m1, m2, m3) / norm for e in es]
return dist, es
dist_pi, es_pi = make_dist(MPI, MPI, META)
dist_eta, es_eta = make_dist(META, MPI, MPI)
return [(dist_pi, es_pi), (dist_pi, es_pi), (dist_eta, es_eta)]
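# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It evaluates the V -> pi pi eta partial width for an assumed
# vector mass and quark couplings; the numerical values are placeholders used
# only to exercise the interface (energies in MeV, as in `width`).
if __name__ == "__main__":
    ff = FormFactorPiPiEta()
    gamma = ff.width(mv=1500.0, gvuu=1.0, gvdd=-1.0)
    print(f"Gamma(V -> pi pi eta) ~ {gamma:.3e} MeV")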
|
LoganAMorrisonREPO_NAMEHazmaPATH_START.@Hazma_extracted@Hazma-master@hazma@vector_mediator@form_factors@[email protected]_END.py
|
{
"filename": "_legendgrouptitle.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattercarpet/_legendgrouptitle.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendgrouptitleValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="legendgrouptitle", parent_name="scattercarpet", **kwargs
):
super(LegendgrouptitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Legendgrouptitle"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this legend group's title font.
text
Sets the title of the legend group.
""",
),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattercarpet@[email protected]_END.py
|
{
"filename": "make_figure_07.ipynb",
"repo_name": "tcallister/learning-p-det",
"repo_path": "learning-p-det_extracted/learning-p-det-main/figures/make_figure_07.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.ticker
mpl.style.use("plotting.mplstyle")
import pandas as pd
import sys
sys.path.append('./../../pdet/')
from pdet.emulator import *
sys.path.append("./../code/")
from training_routines import *
from draw_new_injections import draw_new_injections
import tqdm
from figure_utilities import *
np.random.seed(230529)
cbbh = '#1f78b4'
cnsbh = '#33a02c'
cbns = '#e31a1c'
cmisc = '#6a3d9a'
```
```python
# Load network
ann = pdet_O3()
jitted_ann = jax.jit(ann)
```
```python
# Draw CDFs to be used in dynamic injection generation
injectionData = draw_vals(100000)
# Prep arrays to hold estimated efficiencies and sample sizes
n = 100
inj_effs = np.zeros(n)
nn_effs = np.zeros(n)
neff_inj = np.zeros(n)
neff_nn = np.zeros(n)
# Choose population hyperparameters, and prepare
# array of log-widths for cos tilt distribution
alphas = -3.
kappas = 3.
mu_m1 = 35.
sig_m1 = 5.
log_f_peaks = -3.
mMaxs = 80.
mMins = 10.
log_dmMaxs = 1.
log_dmMins = 0.
bqs = 2.
mu_chis = 0.
logsig_chis = np.linspace(-4,0.5,n)
f_iso = 0.5
mu_costs = 1.
sig_costs = 0.5
# Loop across parameters
for i in tqdm.tqdm(range(n)):
# Reweight pipeline injections.
# First argument is estimated detection efficiency,
# second is number of effective samples in Monte Carlo average
inj_effs[i], neff_inj[i] = get_inj_efficiency(
alphas,
mu_m1,
sig_m1,
log_f_peaks,
mMaxs,
mMins,
log_dmMaxs,
log_dmMins,
bqs,
mu_chis,
logsig_chis[i],
f_iso,
mu_costs,
sig_costs,
kappas)
# Use neural net to directly average Pdet over proposed population
# (subject to reweighting in redshift, as described in paper text).
# First argument is estimated detection efficiency. Second is number
# of effective samples in Monte Carlo integral over Pdet.
# Third (ignored) is number of effective draws from target distribution,
# after reweighting in redshift
nn_effs[i], neff_nn[i], _ = get_nn_efficiency(
jitted_ann,
injectionData,
alphas,
mu_m1,
sig_m1,
log_f_peaks,
mMaxs,
mMins,
log_dmMaxs,
log_dmMins,
bqs,
mu_chis,
logsig_chis[i],
f_iso,
mu_costs,
sig_costs,
kappas,
hybrid=True)
```
100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [00:26<00:00, 3.72it/s]
```python
# Plot
fig = plt.figure(figsize=(4,5.5))
ax = fig.add_subplot(211)
sorting = np.argsort(logsig_chis)
ax.plot(logsig_chis[sorting],(inj_effs)[sorting],color=cnsbh,lw=2,label='Injection Reweighting')
ax.plot(logsig_chis[sorting],(nn_effs)[sorting],color=cmisc,lw=2,label='Emulator')
ax.set_ylabel(r"Predicted Detection Efficiency")
ax.legend(loc='lower right',fontsize=9)
formatter = matplotlib.ticker.ScalarFormatter(useOffset=False, useMathText=True)
formatter.set_powerlimits((-2, 2))
ax.yaxis.set_major_formatter(formatter)
ax.set_xticklabels([])
ax = fig.add_subplot(212)
ax.plot(logsig_chis[sorting],np.log10(neff_inj)[sorting],color=cnsbh,lw=2,label='Injection Reweighting')
ax.plot(logsig_chis[sorting],np.log10(neff_nn)[sorting],color=cmisc,lw=2,label='Emulator')
ax.set_xlabel(r"$\log_{10}\sigma_\chi$")
ax.set_ylabel(r"$\log_{10} N_\mathrm{eff}$")
ax.legend(loc='lower right',fontsize=9)
ax.axhline(y=np.log10(4*59),color='black',ls=':',zorder=0)
ax.text(0.06,0.55,r'$N_\mathrm{eff} = 4\times N_\mathrm{events}$',transform=ax.transAxes,fontsize=10)
plt.tight_layout()
plt.savefig('figure_07.pdf',bbox_inches='tight')
plt.show()
```

```python
test = draw_vals(100000)
n = 50
inj_effs = np.zeros(n)
nn_effs = np.zeros(n)
neff_inj = np.zeros(n)
neff_nn = np.zeros(n)
alphas = -3.
kappas = np.linspace(-1,4,n)
mu_m1 = 35.
sig_m1 = 5.
log_f_peaks = -3.
mMaxs = 80.
mMins = 10.
log_dmMaxs = 1.
log_dmMins = 0.
bqs = 2.
mu_chis = 0.
logsig_chis = -1
f_iso = 0.5
mu_costs = 1
sig_costs = 0.5
for i in tqdm.tqdm(range(n)):
inj_effs[i],neff_inj[i] = get_inj_efficiency(
alphas,
mu_m1,
sig_m1,
log_f_peaks,
mMaxs,
mMins,
log_dmMaxs,
log_dmMins,
bqs,
mu_chis,
logsig_chis,
f_iso,
mu_costs,
sig_costs,
kappas[i])
nn_effs[i],neff_nn[i],_ = get_nn_efficiency(
jitted_ann,
injectionData,
alphas,
mu_m1,
sig_m1,
log_f_peaks,
mMaxs,
mMins,
log_dmMaxs,
log_dmMins,
bqs,
mu_chis,
logsig_chis,
f_iso,
mu_costs,
sig_costs,
kappas[i],
hybrid=True)
```
100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 50/50 [00:13<00:00, 3.75it/s]
```python
fig = plt.figure(figsize=(4,5.5))
ax = fig.add_subplot(211)
sorting = np.argsort(kappas)
ax.plot(kappas[sorting],(inj_effs)[sorting],color=cnsbh,lw=2,label='Injection Reweighting')
ax.plot(kappas[sorting],(nn_effs)[sorting],color=cmisc,lw=2,label='Emulator')
ax.set_ylabel(r"Predicted Detection Efficiency")
ax.legend(loc='lower right',fontsize=9)
formatter = matplotlib.ticker.ScalarFormatter(useOffset=False, useMathText=True)
formatter.set_powerlimits((-2, 2))
ax.yaxis.set_major_formatter(formatter)
ax.set_xticklabels([])
ax.set_yscale('log')
ax = fig.add_subplot(212)
ax.plot(kappas[sorting],np.log10(neff_inj)[sorting],color=cnsbh,lw=2,label='Injection Reweighting')
ax.plot(kappas[sorting],np.log10(neff_nn)[sorting],color=cmisc,lw=2,label='Emulator')
ax.set_xlabel(r"$\log_{10}\sigma_\chi$")
ax.set_ylabel(r"$\log_{10} N_\mathrm{eff}$")
ax.legend(loc='lower right',fontsize=9)
ax.axhline(y=np.log10(4*59),color='black',ls=':',zorder=0)
ax.text(0.06,0.5,r'$N_\mathrm{eff} = 4\times N_\mathrm{events}$',transform=ax.transAxes,fontsize=10)
plt.tight_layout()
#plt.savefig('figure_07.pdf',bbox_inches='tight')
plt.show()
```

```python
```
|
tcallisterREPO_NAMElearning-p-detPATH_START.@learning-p-det_extracted@learning-p-det-main@figures@[email protected]_END.py
|
{
"filename": "io.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/flax/io.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IO Abstraction Layer.
The sole purpose of this abstraction layer is to avoid requiring tensorflow
as an open-source dependency solely for its tensorflow.io.gfile functions.
"""
import contextlib
import glob as glob_module
import importlib
import os
import shutil
from enum import Enum
from absl import logging
from . import errors
# Global Modes and selective import of tensorflow.io gfile.
class BackendMode(Enum):
DEFAULT = 0
TF = 1
io_mode = None
gfile = None
if importlib.util.find_spec('tensorflow'):
from tensorflow.io import gfile # type: ignore
io_mode = BackendMode.TF
else:
logging.warning(
'Tensorflow library not found, tensorflow.io.gfile '
'operations will use native shim calls. '
"GCS paths (i.e. 'gs://...') cannot be accessed."
)
io_mode = BackendMode.DEFAULT
# Constants and Exceptions
if io_mode == BackendMode.TF:
from tensorflow import errors as tf_errors # type: ignore
NotFoundError = tf_errors.NotFoundError
else:
NotFoundError = FileNotFoundError
# Overrides for testing.
@contextlib.contextmanager
def override_mode(override: BackendMode):
# pylint: disable=g-doc-return-or-yield
"""Returns a context manager that changes backend IO mode.
Args:
override: BackendMode enum value to set IO mode inside context.
"""
# pylint: enable=g-doc-return-or-yield
global io_mode
io_mode_prev = io_mode
io_mode = override
try:
yield
finally:
io_mode = io_mode_prev
def set_mode(override: BackendMode):
"""Sets global io mode.
Args:
override: BackendMode enum value to set for IO mode.
"""
global io_mode
io_mode = override
# tensorflow.io.gfile API shim functions.
def GFile(name, mode): # pylint: disable=invalid-name
if io_mode == BackendMode.DEFAULT:
if 'b' in mode:
return open(name, mode) # pylint: disable=unspecified-encoding
else:
return open(name, mode, encoding='utf-8')
elif io_mode == BackendMode.TF:
return gfile.GFile(name, mode)
else:
raise ValueError('Unknown IO Backend Mode.')
def listdir(path):
if io_mode == BackendMode.DEFAULT:
return os.listdir(path=path)
elif io_mode == BackendMode.TF:
return gfile.listdir(path=path)
else:
raise ValueError('Unknown IO Backend Mode.')
def isdir(path):
if io_mode == BackendMode.DEFAULT:
return os.path.isdir(path)
elif io_mode == BackendMode.TF:
return gfile.isdir(path)
else:
raise ValueError('Unknown IO Backend Mode.')
def copy(src, dst, overwrite=False):
if io_mode == BackendMode.DEFAULT:
if os.path.exists(dst) and not overwrite:
raise errors.AlreadyExistsError(dst)
shutil.copy(src, dst)
return
elif io_mode == BackendMode.TF:
return gfile.copy(src, dst, overwrite=overwrite)
else:
raise ValueError('Unknown IO Backend Mode.')
def rename(src, dst, overwrite=False):
if io_mode == BackendMode.DEFAULT:
if os.path.exists(dst) and not overwrite:
raise errors.AlreadyExistsError(dst)
return os.rename(src, dst)
elif io_mode == BackendMode.TF:
return gfile.rename(src, dst, overwrite=overwrite)
else:
raise ValueError('Unknown IO Backend Mode.')
def exists(path):
if io_mode == BackendMode.DEFAULT:
return os.path.exists(path)
elif io_mode == BackendMode.TF:
return gfile.exists(path)
else:
raise ValueError('Unknown IO Backend Mode.')
def makedirs(path):
if io_mode == BackendMode.DEFAULT:
return os.makedirs(path, exist_ok=True)
elif io_mode == BackendMode.TF:
return gfile.makedirs(path)
else:
raise ValueError('Unknown IO Backend Mode.')
def glob(pattern):
if io_mode == BackendMode.DEFAULT:
return [
path.rstrip('/') for path in glob_module.glob(pattern, recursive=False)
]
elif io_mode == BackendMode.TF:
return gfile.glob(pattern)
else:
raise ValueError('Unknown IO Backend Mode.')
def remove(path):
"""Remove the file at path. Might fail if used on a directory path."""
if io_mode == BackendMode.DEFAULT:
return os.remove(path)
elif io_mode == BackendMode.TF:
return gfile.remove(path)
else:
raise ValueError('Unknown IO Backend Mode.')
def rmtree(path):
"""Remove a directory and recursively all contents inside. Might fail if used on a file path."""
if io_mode == BackendMode.DEFAULT:
return shutil.rmtree(path)
elif io_mode == BackendMode.TF:
return gfile.rmtree(path)
else:
raise ValueError('Unknown IO Backend Mode.')
def getsize(path):
"""Return the size, in bytes, of path."""
if io_mode == BackendMode.DEFAULT:
return os.path.getsize(path)
elif io_mode == BackendMode.TF:
return gfile.stat(path).length
else:
raise ValueError('Unknown IO Backend Mode.')
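# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It forces the pure-Python DEFAULT backend via override_mode and
# round-trips a small file through the shim functions defined above; the
# temporary directory is a throwaway placeholder.
if __name__ == '__main__':
  import tempfile
  with override_mode(BackendMode.DEFAULT):
    tmp_dir = tempfile.mkdtemp()
    tmp_path = os.path.join(tmp_dir, 'example.txt')
    with GFile(tmp_path, 'w') as f:
      f.write('hello')
    assert exists(tmp_path) and getsize(tmp_path) == 5
    print(listdir(tmp_dir))
    rmtree(tmp_dir)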
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@[email protected]@.PATH_END.py
|
{
"filename": "data_structures.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/frontends/exodus_ii/data_structures.py",
"type": "Python"
}
|
import numpy as np
from yt.data_objects.index_subobjects.unstructured_mesh import UnstructuredMesh
from yt.data_objects.static_output import Dataset
from yt.data_objects.unions import MeshUnion
from yt.funcs import setdefaultattr
from yt.geometry.unstructured_mesh_handler import UnstructuredIndex
from yt.utilities.file_handler import NetCDF4FileHandler, valid_netcdf_signature
from yt.utilities.logger import ytLogger as mylog
from .fields import ExodusIIFieldInfo
from .util import get_num_pseudo_dims, load_info_records, sanitize_string
class ExodusIIUnstructuredMesh(UnstructuredMesh):
_index_offset = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class ExodusIIUnstructuredIndex(UnstructuredIndex):
def __init__(self, ds, dataset_type="exodus_ii"):
super().__init__(ds, dataset_type)
def _initialize_mesh(self):
coords = self.ds._read_coordinates()
connectivity = self.ds._read_connectivity()
self.meshes = []
for mesh_id, conn_ind in enumerate(connectivity):
displaced_coords = self.ds._apply_displacement(coords, mesh_id)
mesh = ExodusIIUnstructuredMesh(
mesh_id, self.index_filename, conn_ind, displaced_coords, self
)
self.meshes.append(mesh)
self.mesh_union = MeshUnion("mesh_union", self.meshes)
def _detect_output_fields(self):
elem_names = self.dataset.parameters["elem_names"]
node_names = self.dataset.parameters["nod_names"]
fnames = elem_names + node_names
self.field_list = []
for i in range(1, len(self.meshes) + 1):
self.field_list += [("connect%d" % i, fname) for fname in fnames]
self.field_list += [("all", fname) for fname in fnames]
class ExodusIIDataset(Dataset):
_load_requirements = ["netCDF4"]
_index_class = ExodusIIUnstructuredIndex
_field_info_class = ExodusIIFieldInfo
def __init__(
self,
filename,
step=0,
displacements=None,
dataset_type="exodus_ii",
storage_filename=None,
units_override=None,
):
"""
A class used to represent an on-disk ExodusII dataset. The initializer takes
two extra optional parameters, "step" and "displacements."
Parameters
----------
step : integer
The step tells which time index to slice at. It throws an Error if
the index is larger than the number of time outputs in the ExodusII
file. Passing step=-1 picks out the last dataframe.
Default is 0.
displacements : dictionary of tuples
This is a dictionary that controls whether or not displacement fields
will be used with the meshes in this dataset. The keys of the
            displacements dictionary should be the names of meshes in the file
(e.g., "connect1", "connect2", etc... ), while the values should be
tuples of the form (scale, offset), where "scale" is a floating point
value and "offset" is an array-like with one component for each spatial
dimension in the dataset. When the displacements for a given mesh are
turned on, the coordinates of the vertices in that mesh get transformed
as:
vertex_x = vertex_x + disp_x*scale + offset_x
vertex_y = vertex_y + disp_y*scale + offset_y
vertex_z = vertex_z + disp_z*scale + offset_z
If no displacement
fields (assumed to be named 'disp_x', 'disp_y', etc... ) are detected in
the output file, then this dictionary is ignored.
Examples
--------
This will load the Dataset at time index '0' with displacements turned off.
>>> import yt
>>> ds = yt.load("MOOSE_sample_data/mps_out.e")
This will load the Dataset at the final index with displacements turned off.
>>> import yt
>>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1)
This will load the Dataset at index 10, turning on displacement fields for
the 2nd mesh without applying any scale or offset:
>>> import yt
>>> ds = yt.load(
... "MOOSE_sample_data/mps_out.e",
... step=10,
... displacements={"connect2": (1.0, [0.0, 0.0, 0.0])},
... )
This will load the Dataset at index 10, scaling the displacements
in the 2nd mesh by a factor of 5 while not applying an offset:
>>> import yt
>>> ds = yt.load(
... "MOOSE_sample_data/mps_out.e",
... step=10,
... displacements={"connect2": (5.0, [0.0, 0.0, 0.0])},
... )
This will load the Dataset at index 10, scaling the displacements for
the 2nd mesh by a factor of 5.0 and shifting all the vertices in
the first mesh by 1.0 unit in the z direction.
>>> import yt
>>> ds = yt.load(
... "MOOSE_sample_data/mps_out.e",
... step=10,
... displacements={
... "connect1": (0.0, [0.0, 0.0, 1.0]),
... "connect2": (5.0, [0.0, 0.0, 0.0]),
... },
... )
"""
self.step = step
if displacements is None:
self.displacements = {}
else:
self.displacements = displacements
self.storage_filename = storage_filename
super().__init__(filename, dataset_type, units_override=units_override)
self.fluid_types += self._get_fluid_types()
self.default_field = [f for f in self.field_list if f[0] == "connect1"][-1]
@property
def index_filename(self):
# historic alias
return self.filename
def _set_code_unit_attributes(self):
# This is where quantities are created that represent the various
# on-disk units. These are the currently available quantities which
# should be set, along with examples of how to set them to standard
# values.
#
setdefaultattr(self, "length_unit", self.quan(1.0, "cm"))
setdefaultattr(self, "mass_unit", self.quan(1.0, "g"))
setdefaultattr(self, "time_unit", self.quan(1.0, "s"))
#
# These can also be set:
# self.velocity_unit = self.quan(1.0, "cm/s")
# self.magnetic_unit = self.quan(1.0, "gauss")
def _parse_parameter_file(self):
self._handle = NetCDF4FileHandler(self.parameter_filename)
with self._handle.open_ds() as ds:
self._read_glo_var()
self.dimensionality = ds.variables["coor_names"].shape[0]
self.parameters["info_records"] = self._load_info_records()
self.num_steps = len(ds.variables["time_whole"])
self.current_time = self._get_current_time()
self.parameters["num_meshes"] = ds.variables["eb_status"].shape[0]
self.parameters["elem_names"] = self._get_elem_names()
self.parameters["nod_names"] = self._get_nod_names()
self.domain_left_edge, self.domain_right_edge = self._load_domain_edge()
self._periodicity = (False, False, False)
# These attributes don't really make sense for unstructured
# mesh data, but yt warns if they are not present, so we set
# them to dummy values here.
self.domain_dimensions = np.ones(3, "int32")
self.cosmological_simulation = 0
self.current_redshift = 0
self.omega_lambda = 0
self.omega_matter = 0
self.hubble_constant = 0
self.refine_by = 0
def _get_fluid_types(self):
with NetCDF4FileHandler(self.parameter_filename).open_ds() as ds:
fluid_types = ()
i = 1
while True:
ftype = "connect%d" % i
if ftype in ds.variables:
fluid_types += (ftype,)
i += 1
else:
break
fluid_types += ("all",)
return fluid_types
def _read_glo_var(self):
"""
Adds each global variable to the dict of parameters
"""
names = self._get_glo_names()
if not names:
return
with self._handle.open_ds() as ds:
values = ds.variables["vals_glo_var"][:].transpose()
for name, value in zip(names, values, strict=True):
self.parameters[name] = value
def _load_info_records(self):
"""
Returns parsed version of the info_records.
"""
with self._handle.open_ds() as ds:
try:
return load_info_records(ds.variables["info_records"])
except (KeyError, TypeError):
mylog.warning("No info_records found")
return []
def _get_current_time(self):
with self._handle.open_ds() as ds:
try:
return ds.variables["time_whole"][self.step]
except IndexError as e:
raise RuntimeError(
"Invalid step number, max is %d" % (self.num_steps - 1)
) from e
except (KeyError, TypeError):
return 0.0
def _get_glo_names(self):
"""
Returns the names of the global vars, if available.
"""
with self._handle.open_ds() as ds:
if "name_glo_var" not in ds.variables:
mylog.warning("name_glo_var not found")
return []
else:
return [
sanitize_string(v.tobytes()) for v in ds.variables["name_glo_var"]
]
def _get_elem_names(self):
"""
Returns the names of the element vars, if available.
"""
with self._handle.open_ds() as ds:
if "name_elem_var" not in ds.variables:
mylog.warning("name_elem_var not found")
return []
else:
return [
sanitize_string(v.tobytes()) for v in ds.variables["name_elem_var"]
]
def _get_nod_names(self):
"""
Returns the names of the node vars, if available
"""
with self._handle.open_ds() as ds:
if "name_nod_var" not in ds.variables:
mylog.warning("name_nod_var not found")
return []
else:
return [
sanitize_string(v.tobytes()) for v in ds.variables["name_nod_var"]
]
def _read_coordinates(self):
"""
Loads the coordinates for the mesh
"""
coord_axes = "xyz"[: self.dimensionality]
mylog.info("Loading coordinates")
with self._handle.open_ds() as ds:
if "coord" not in ds.variables:
coords = (
np.array([ds.variables[f"coord{ax}"][:] for ax in coord_axes])
.transpose()
.astype("f8")
)
else:
coords = (
np.array(list(ds.variables["coord"][:])).transpose().astype("f8")
)
return coords
def _apply_displacement(self, coords, mesh_id):
mesh_name = "connect%d" % (mesh_id + 1)
new_coords = coords.copy()
if mesh_name not in self.displacements:
return new_coords
fac = self.displacements[mesh_name][0]
offset = self.displacements[mesh_name][1]
coord_axes = "xyz"[: self.dimensionality]
with self._handle.open_ds() as ds:
for i, ax in enumerate(coord_axes):
if f"disp_{ax}" in self.parameters["nod_names"]:
ind = self.parameters["nod_names"].index(f"disp_{ax}")
disp = ds.variables["vals_nod_var%d" % (ind + 1)][self.step]
new_coords[:, i] = coords[:, i] + fac * disp + offset[i]
return new_coords
def _read_connectivity(self):
"""
Loads the connectivity data for the mesh
"""
mylog.info("Loading connectivity")
connectivity = []
with self._handle.open_ds() as ds:
for i in range(self.parameters["num_meshes"]):
var = ds.variables["connect%d" % (i + 1)][:].astype("i8")
try:
elem_type = var.elem_type.lower()
if elem_type == "nfaced":
raise NotImplementedError(
"3D arbitrary polyhedra are not implemented yet"
)
arbitrary_polyhedron = elem_type == "nsided"
except AttributeError:
arbitrary_polyhedron = False
conn = var[:]
if arbitrary_polyhedron:
nodes_per_element = ds.variables[f"ebepecnt{i + 1}"]
npe = nodes_per_element[0]
if np.any(nodes_per_element != npe):
raise NotImplementedError("only equal-size polyhedra supported")
q, r = np.divmod(len(conn), npe)
assert r == 0
conn.shape = (q, npe)
connectivity.append(conn)
return connectivity
def _load_domain_edge(self):
"""
Loads the boundaries for the domain edge
"""
coords = self._read_coordinates()
connectivity = self._read_connectivity()
mi = 1e300
ma = -1e300
for mesh_id, _ in enumerate(connectivity):
displaced_coords = self._apply_displacement(coords, mesh_id)
mi = np.minimum(displaced_coords.min(axis=0), mi)
ma = np.maximum(displaced_coords.max(axis=0), ma)
# pad domain boundaries
width = ma - mi
mi -= 0.1 * width
ma += 0.1 * width
# set up pseudo-3D for lodim datasets here
for _ in range(self.dimensionality, 3):
mi = np.append(mi, 0.0)
ma = np.append(ma, 1.0)
num_pseudo_dims = get_num_pseudo_dims(coords)
self.dimensionality -= num_pseudo_dims
for i in range(self.dimensionality, 3):
mi[i] = 0.0
ma[i] = 1.0
return mi, ma
@classmethod
def _is_valid(cls, filename: str, *args, **kwargs) -> bool:
if not valid_netcdf_signature(filename):
return False
if cls._missing_load_requirements():
return False
try:
from netCDF4 import Dataset
# We use keepweakref here to avoid holding onto the file handle
# which can interfere with other is_valid calls.
with Dataset(filename, keepweakref=True) as f:
f.variables["connect1"]
return True
except Exception:
return False
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@frontends@exodus_ii@[email protected]_END.py
|
{
"filename": "DQ_definitions.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/gemini/lookups/DQ_definitions.py",
"type": "Python"
}
|
import numpy as np
datatype = np.uint16
max = np.iinfo(datatype).max
good = datatype(0)
bad_pixel = datatype(1)
non_linear = datatype(2)
saturated = datatype(4)
cosmic_ray = datatype(8)
no_data = datatype(16)
overlap = datatype(32)
unilluminated = datatype(64)
fail = bad_pixel | saturated | cosmic_ray | no_data
not_signal = max ^ (non_linear | saturated)
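# --- Illustrative sketch (added for clarity; not part of the original lookup
# table). The values above are bit flags, so a data-quality plane is queried
# with bitwise operations; the 2x2 array below is a hypothetical example.
if __name__ == "__main__":
    dq = np.array([[good, bad_pixel | cosmic_ray],
                   [saturated, non_linear]], dtype=datatype)
    science_fail = (dq & fail) > 0                   # any of the "fail" bits set
    clean_or_recoverable = (dq & not_signal) == 0    # only non_linear/saturated (or no) bits set
    print(science_fail)
    print(clean_or_recoverable)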
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@gemini@lookups@[email protected]_END.py
|
{
"filename": "utils.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/wcs/wcsapi/utils.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import importlib
import numpy as np
__all__ = ["deserialize_class", "wcs_info_str"]
def deserialize_class(tpl, construct=True):
"""
Deserialize classes recursively.
"""
if not isinstance(tpl, tuple) or len(tpl) != 3:
raise ValueError("Expected a tuple of three values")
module, klass = tpl[0].rsplit(".", 1)
module = importlib.import_module(module)
klass = getattr(module, klass)
args = tuple(
deserialize_class(arg) if isinstance(arg, tuple) else arg for arg in tpl[1]
)
kwargs = dict(
(key, deserialize_class(val)) if isinstance(val, tuple) else (key, val)
for (key, val) in tpl[2].items()
)
if construct:
return klass(*args, **kwargs)
else:
return klass, args, kwargs
def wcs_info_str(wcs):
# Overall header
if wcs.array_shape is None:
array_shape = None
else:
array_shape = tuple(int(n) for n in wcs.array_shape)
s = (
f"{type(wcs).__name__} Transformation\n\n"
f"This transformation has {wcs.pixel_n_dim} pixel and {wcs.world_n_dim} "
"world dimensions\n\n"
f"Array shape (Numpy order): {array_shape}\n\n"
)
# Pixel dimensions table
array_shape = array_shape or (0,)
pixel_shape = wcs.pixel_shape or (None,) * wcs.pixel_n_dim
# Find largest between header size and value length
pixel_dim_width = max(9, len(str(wcs.pixel_n_dim)))
pixel_nam_width = max(9, *map(len, wcs.pixel_axis_names))
pixel_siz_width = max(9, len(str(max(array_shape))))
# fmt: off
s += (('{0:' + str(pixel_dim_width) + 's}').format('Pixel Dim') + ' ' +
('{0:' + str(pixel_nam_width) + 's}').format('Axis Name') + ' ' +
('{0:' + str(pixel_siz_width) + 's}').format('Data size') + ' ' +
'Bounds\n')
# fmt: on
if wcs.pixel_bounds is None:
pixel_bounds = [None for _ in range(wcs.pixel_n_dim)]
else:
# converting to scalar arrays and back to Python with np.array(val).item()
# guarantees that we end up with Python scalars (int or float) with
# simple reprs, while not making any unnecessary type promotion
# (e.g. int to float)
pixel_bounds = [
tuple(np.array(b).item() for b in bounds) for bounds in wcs.pixel_bounds
]
for ipix in range(wcs.pixel_n_dim):
# fmt: off
s += (('{0:' + str(pixel_dim_width) + 'g}').format(ipix) + ' ' +
('{0:' + str(pixel_nam_width) + 's}').format(wcs.pixel_axis_names[ipix] or 'None') + ' ' +
(" " * 5 + str(None) if pixel_shape[ipix] is None else
('{0:' + str(pixel_siz_width) + 'g}').format(pixel_shape[ipix])) + ' ' +
f"{pixel_bounds[ipix]}\n"
)
# fmt: on
s += "\n"
# World dimensions table
# Find largest between header size and value length
world_dim_width = max(9, len(str(wcs.world_n_dim)))
world_nam_width = max(9, *(len(x) for x in wcs.world_axis_names if x is not None))
world_typ_width = max(
[13] + [len(x) for x in wcs.world_axis_physical_types if x is not None]
)
# fmt: off
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') + ' ' +
('{0:' + str(world_nam_width) + 's}').format('Axis Name') + ' ' +
('{0:' + str(world_typ_width) + 's}').format('Physical Type') + ' ' +
'Units\n')
# fmt: on
for iwrl in range(wcs.world_n_dim):
name = wcs.world_axis_names[iwrl] or "None"
typ = wcs.world_axis_physical_types[iwrl] or "None"
unit = wcs.world_axis_units[iwrl] or "unknown"
# fmt: off
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) + ' ' +
('{0:' + str(world_nam_width) + 's}').format(name) + ' ' +
('{0:' + str(world_typ_width) + 's}').format(typ) + ' ' +
'{:s}'.format(unit + '\n'))
# fmt: on
s += "\n"
# Axis correlation matrix
pixel_dim_width = max(3, len(str(wcs.world_n_dim)))
s += "Correlation between pixel and world axes:\n\n"
# fmt: off
s += (' ' * world_dim_width + ' ' +
('{0:^' + str(wcs.pixel_n_dim * 5 - 2) + 's}').format('Pixel Dim') +
'\n')
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') +
''.join([' ' + ('{0:' + str(pixel_dim_width) + 'd}').format(ipix)
for ipix in range(wcs.pixel_n_dim)]) +
'\n')
# fmt: on
matrix = wcs.axis_correlation_matrix
matrix_str = np.empty(matrix.shape, dtype="U3")
matrix_str[matrix] = "yes"
matrix_str[~matrix] = "no"
for iwrl in range(wcs.world_n_dim):
# fmt: off
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) +
''.join([' ' + ('{0:>' + str(pixel_dim_width) + 's}').format(matrix_str[iwrl, ipix])
for ipix in range(wcs.pixel_n_dim)]) +
'\n')
# fmt: on
# Make sure we get rid of the extra whitespace at the end of some lines
return "\n".join([l.rstrip() for l in s.splitlines()])
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@wcs@[email protected]@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "tvwenger/kd",
"repo_path": "kd_extracted/kd-master/setup.py",
"type": "Python"
}
|
"""
Copyright(C) 2017-2021 by
Trey V. Wenger; [email protected]
GNU General Public License v3 (GNU GPLv3)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from setuptools import setup
setup(
name="kd",
version="2.1",
description="Kinematic distance utilities",
author="Trey V. Wenger",
author_email="[email protected]",
packages=["kd"],
install_requires=["numpy<2.0.0", "matplotlib", "scipy", "pathos"],
package_data={"kd": ["curve_data_wise_small.sav", "reid19_params.pkl"]},
)
|
tvwengerREPO_NAMEkdPATH_START.@kd_extracted@[email protected]@.PATH_END.py
|
{
"filename": "increasebuffer.py",
"repo_name": "CosmicFish/CosmicFish",
"repo_path": "CosmicFish_extracted/CosmicFish-master/bundled/doxygen/src/increasebuffer.py",
"type": "Python"
}
|
# Since the internal token buffer of a generated flex file is hardcoded
# to 16K, this script is used to increase the buffer size of a flex
# generated scanner to 256K.
import sys
sys.stdout.write(sys.stdin.read().
replace('YY_BUF_SIZE 16384','YY_BUF_SIZE 262144').
replace('YY_READ_BUF_SIZE 8192','YY_READ_BUF_SIZE 262144'))
|
CosmicFishREPO_NAMECosmicFishPATH_START.@CosmicFish_extracted@CosmicFish-master@bundled@doxygen@[email protected]@.PATH_END.py
|
{
"filename": "_name.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/image/_name.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="name", parent_name="layout.image", **kwargs):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@image@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "ESDS-Leipzig/sen2nbar",
"repo_path": "sen2nbar_extracted/sen2nbar-main/sen2nbar/__init__.py",
"type": "Python"
}
|
"""sen2nbar - Nadir BRDF Adjusted Reflectance (NBAR) for Sentinel-2 in Python"""
__version__ = "2024.6.0"
__author__ = "David Montero Loaiza <[email protected]>"
__all__ = []
from . import *
|
ESDS-LeipzigREPO_NAMEsen2nbarPATH_START.@sen2nbar_extracted@sen2nbar-main@sen2nbar@[email protected]_END.py
|
{
"filename": "_h_e_a_d.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/fonttools/fontTools/ttLib/tables/_h_e_a_d.py",
"type": "Python"
}
|
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr, strToFixedToFloat
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from fontTools.misc.timeTools import (
timestampFromString,
timestampToString,
timestampNow,
)
from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
from fontTools.misc.arrayTools import intRect, unionRect
from . import DefaultTable
import logging
log = logging.getLogger(__name__)
headFormat = """
> # big endian
tableVersion: 16.16F
fontRevision: 16.16F
checkSumAdjustment: I
magicNumber: I
flags: H
unitsPerEm: H
created: Q
modified: Q
xMin: h
yMin: h
xMax: h
yMax: h
macStyle: H
lowestRecPPEM: H
fontDirectionHint: h
indexToLocFormat: h
glyphDataFormat: h
"""
class table__h_e_a_d(DefaultTable.DefaultTable):
dependencies = ["maxp", "loca", "CFF ", "CFF2"]
def decompile(self, data, ttFont):
dummy, rest = sstruct.unpack2(headFormat, data, self)
if rest:
# this is quite illegal, but there seem to be fonts out there that do this
log.warning("extra bytes at the end of 'head' table")
assert rest == b"\0\0"
# For timestamp fields, ignore the top four bytes. Some fonts have
# bogus values there. Since till 2038 those bytes only can be zero,
# ignore them.
#
# https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
for stamp in "created", "modified":
value = getattr(self, stamp)
if value > 0xFFFFFFFF:
log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
value &= 0xFFFFFFFF
setattr(self, stamp, value)
if value < 0x7C259DC0: # January 1, 1970 00:00:00
log.warning(
"'%s' timestamp seems very low; regarding as unix timestamp", stamp
)
value += 0x7C259DC0
setattr(self, stamp, value)
def compile(self, ttFont):
if ttFont.recalcBBoxes:
# For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
if "CFF " in ttFont:
topDict = ttFont["CFF "].cff.topDictIndex[0]
self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
elif "CFF2" in ttFont:
topDict = ttFont["CFF2"].cff.topDictIndex[0]
charStrings = topDict.CharStrings
fontBBox = None
for charString in charStrings.values():
bounds = charString.calcBounds(charStrings)
if bounds is not None:
if fontBBox is not None:
fontBBox = unionRect(fontBBox, bounds)
else:
fontBBox = bounds
if fontBBox is not None:
self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox)
if ttFont.recalcTimestamp:
self.modified = timestampNow()
data = sstruct.pack(headFormat, self)
return data
def toXML(self, writer, ttFont):
writer.comment("Most of this table will be recalculated by the compiler")
writer.newline()
_, names, fixes = sstruct.getformat(headFormat)
for name in names:
value = getattr(self, name)
if name in fixes:
value = floatToFixedToStr(value, precisionBits=fixes[name])
elif name in ("created", "modified"):
value = timestampToString(value)
elif name in ("magicNumber", "checkSumAdjustment"):
if value < 0:
value = value + 0x100000000
value = hex(value)
if value[-1:] == "L":
value = value[:-1]
elif name in ("macStyle", "flags"):
value = num2binary(value, 16)
writer.simpletag(name, value=value)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
value = attrs["value"]
fixes = sstruct.getformat(headFormat)[2]
if name in fixes:
value = strToFixedToFloat(value, precisionBits=fixes[name])
elif name in ("created", "modified"):
value = timestampFromString(value)
elif name in ("macStyle", "flags"):
value = binary2num(value)
else:
value = safeEval(value)
setattr(self, name, value)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@fonttools@fontTools@ttLib@tables@[email protected]_END.py
|
{
"filename": "bayesian.ipynb",
"repo_name": "sotzee/CPREX",
"repo_path": "CPREX_extracted/CPREX-main/bayesian.ipynb",
"type": "Jupyter Notebook"
}
|
```python
```
```python
import numpy as np
import scipy
import toolbox
import unitconvert
from matplotlib_rc import *
from load_all_data import *
from cov import cov_ellipse_xy
from pdf_plot import get_kde_1D,get_kde_2D,plot_density_2D
Fw_exp=np.array([0.1304,0.368])
Fw_sig2=np.array([0.0052**2+0.002**2,0.013**2])
Fc_exp=np.array([0.1581,0.409])
likelihood_name=['$R_{ch}, F_{ch}, BE$','+CREX+PREX','+CREX','+PREX']
likelihood_filename=['none','all','crex','prex']
scale_chi2=scipy.stats.chi2.ppf(np.array([0.6827,0.9545,0.9973]),df=2)**0.5
```
```python
```
```python
def plot_density_1D_L(x,density_array,percentile,color,color_list,ax,marginal_axis='x',unit='',legend_loc=0,figsize_norm=1,n=30,label_text_add='',ls='-'):
det_x=x[1]-x[0]
density_array=density_array/(density_array.sum()*det_x)
density_array_max=density_array.max()
x_max=x[density_array==density_array_max]
t = np.linspace(0, density_array_max, n)
integral = ((density_array >= t[:, None]) * density_array).sum(axis=(1))
f = scipy.interpolate.interp1d(integral, t)
t_contours = f(np.array(percentile)/det_x)
x_contours = []
density_countours = []
for index_list_flag in density_array>t_contours[:,None]:
index_list=np.where(index_list_flag)[0]
x_contours.append(x[[index_list.min(),index_list.max()]])
density_countours.append(density_array[[index_list.min(),index_list.max()]])
if(marginal_axis=='x'):
line=ax.plot(x,density_array,linewidth=2*figsize_norm,color=color,label=label_text_add,ls=ls)[0]
ax.tick_params(labelbottom=True, labelleft=False)
#ax.legend(fontsize=20*figsize_norm,frameon=False,loc=legend_loc,ncol=2)
return x_max,x_contours,density_countours,line
def PDF_DP(L,Sv,weights=1): #Dipole polarizability correlation, https://journals.aps.org/prc/pdf/10.1103/PhysRevC.88.024316
return np.exp(-0.5*(L-6.11*Sv+146)**2/(0.1*Sv**2+1**2))/(2*np.pi*(0.1*Sv**2+1**2))**0.5*weights
```
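A minimal sketch (added for illustration, not part of the original notebook) of the dipole-polarizability weight defined above: `PDF_DP` is a Gaussian in the combination L − 6.11·Sv + 146 with variance 0.1·Sv² + 1 (all in MeV), following the Sv–L correlation cited in the comment; the notebook later passes it as importance weights to `get_kde_1D`.
```python
import numpy as np

def alpha_D_weight(L, Sv):
    # Gaussian weight in L - 6.11*Sv + 146 with variance 0.1*Sv**2 + 1 (MeV^2),
    # mirroring PDF_DP above with weights=1.
    var = 0.1 * Sv**2 + 1.0**2
    return np.exp(-0.5 * (L - 6.11 * Sv + 146.0)**2 / var) / np.sqrt(2.0 * np.pi * var)

# Example: a sample with Sv = 32 MeV and L = 50 MeV
print(alpha_D_weight(L=50.0, Sv=32.0))
```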
```python
percentile_list = [0.68269]
color_list = ['y','k','tab:red','tab:blue','g','c']
ls_list=['--','-',(0, (2, 1, 1, 1)),(0, (4, 1, 1, 1))]
#label_list=['$R_{ch}, F_{ch}, BE$','+CREX+PREX','+CREX','+PREX']
label_list=['Basic nuclei','+CREX+PREX','+CREX','+PREX']
likelihood_name=label_list
#name='Skyrme'
#SAT_list=SAT_Skyrme_list
name='RMF'
SAT_list=SAT_RMF_to_Skyrme_list
fig, axes = plt.subplots(1,2,sharex=False,sharey=False,figsize=(8,3))
Sv_density_array_list=[]
Sv_x_contours_list=[]
Sv_x_max_list=[]
Sv_line_list=[]
for i in range(len(SAT_list)):
x=SAT_list[i][3] #Sv
x_grid,pdf_grid=get_kde_1D(x,1000,weights=None)
x_max,x_contours,density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],ls=ls_list[i],label_text_add=likelihood_name[i],unit='MeV',n=200)
Sv_density_array_list.append(density_countours)
Sv_x_contours_list.append(x_contours)
Sv_x_max_list.append(x_max)
Sv_line_list.append(line)
if(i==1):
#Sv_plot=np.linspace(20,50,1000)
x_grid,pdf_grid=get_kde_1D(x,1000,weights=PDF_DP(SAT_list[i][4],SAT_list[i][3],weights=1))
#Sv_x_max,Sv_x_contours,Sv_density_countours,Sv_line=plot_density_1D_L(Sv_plot,PDF_DP(SAT_list[i][4][np.newaxis],Sv_plot[:,np.newaxis],weights=0*SAT_list[i][4]+1).sum(axis=1),percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=100,ls=':')
Sv_x_max,Sv_x_contours,Sv_density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=200,ls=':')
#first_legend=axes[0].legend(handles=line_list,frameon=False,fontsize=10,labelspacing=0.2,loc=2)
#axes[0].add_artist(first_legend)
axes[0].set_xlim(20,50)
axes[0].set_ylim(0,0.12)
axes[0].text(0.05*(50-20)+20,0.85*0.12,name,fontsize=16)
axes[0].set_xlabel('S$_v$ [MeV]',fontsize=15)
lines=[]
for i in range(len(likelihood_name)):
for j in range(len(x_contours)):
axes[0].plot([Sv_x_contours_list[i][j][0],Sv_x_contours_list[i][j][0]],[0,Sv_density_array_list[i][j][0]],ls=ls_list[i],color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[0].plot([Sv_x_contours_list[i][j][1],Sv_x_contours_list[i][j][1]],[0,Sv_density_array_list[i][j][1]],ls=ls_list[i],color=color_list[i],linewidth=2,label=likelihood_name[i])
if(i==1):
axes[0].plot([Sv_x_contours[j][0],Sv_x_contours[j][0]],[0,Sv_density_countours[j][0]],':',color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[0].plot([Sv_x_contours[j][1],Sv_x_contours[j][1]],[0,Sv_density_countours[j][1]],':',color=color_list[i],linewidth=2,label=likelihood_name[1]+'+$\\alpha_D$')
second_legend=axes[0].legend(handles=lines,frameon=False,fontsize=10,labelspacing=0.4,loc=1)
axes[0].add_artist(second_legend)
density_array_list=[]
x_contours_list=[]
x_max_list=[]
line_list=[]
lines=[]
for i in range(len(SAT_list)):
x=SAT_list[i][4] #L
x_grid,pdf_grid=get_kde_1D(x,1000,weights=None)
x_max,x_contours,density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[1],ls=ls_list[i],label_text_add=likelihood_name[i],unit='MeV',n=200)
density_array_list.append(density_countours)
x_contours_list.append(x_contours)
x_max_list.append(x_max)
line_list.append(line)
print(x_contours-x_max,x_max)
for j in range(len(x_contours)):
axes[1].plot([x_contours_list[i][j][0],x_contours_list[i][j][0]],[0,density_array_list[i][j][0]],ls=ls_list[i],color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[1].plot([x_contours_list[i][j][1],x_contours_list[i][j][1]],[0,density_array_list[i][j][1]],ls=ls_list[i],color=color_list[i],linewidth=2,label='$S_v,L=%.1f_{%.1f}^{+%.1f}, %.1f_{%.1f}^{+%.1f}$ '%(Sv_x_max_list[i],Sv_x_contours_list[i][j][0]-Sv_x_max_list[i],Sv_x_contours_list[i][j][1]-Sv_x_max_list[i], x_max_list[i],x_contours_list[i][j][0]-x_max_list[i],x_contours_list[i][j][1]-x_max_list[i]))
if(i==1):
#L_PDF_post=interpolate.UnivariateSpline(x_grid,pdf_grid,k=3,s=0)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
#lines+=axes[1].plot([Sv_x_contours[j][1],Sv_x_contours[j][1]],[0,Sv_density_countours[j][1]],':',color=color_list[i],linewidth=2,label='$S_v=%.1f_{%.1f}^{+%.1f}$ '%(Sv_x_max,Sv_x_contours[j][0]-Sv_x_max,Sv_x_contours[j][1]-Sv_x_max)+'MeV')
#Sv_plot=np.linspace(20,50,1000)
x_grid,pdf_grid=get_kde_1D(x,1000,weights=PDF_DP(SAT_list[i][4],SAT_list[i][3],weights=1))
#Sv_x_max,Sv_x_contours,Sv_density_countours,Sv_line=plot_density_1D_L(Sv_plot,PDF_DP(SAT_list[i][4][np.newaxis],Sv_plot[:,np.newaxis],weights=0*SAT_list[i][4]+1).sum(axis=1),percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=100,ls=':')
L_x_max,L_x_contours,L_density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[1],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=200,ls=':')
axes[1].plot([L_x_contours[j][0],L_x_contours[j][0]],[0,L_density_countours[j][0]],':',color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[1].plot([L_x_contours[j][1],L_x_contours[j][1]],[0,L_density_countours[j][1]],':',color=color_list[i],linewidth=2,label='$S_v,L=%.1f_{%.1f}^{+%.1f}, %.1f_{%.1f}^{+%.1f}$'%(Sv_x_max,Sv_x_contours[j][0]-Sv_x_max,Sv_x_contours[j][1]-Sv_x_max, L_x_max,L_x_contours[j][0]-L_x_max,L_x_contours[j][1]-L_x_max))
axes[1].legend(handles=lines,frameon=False,fontsize=9,labelspacing=0.1,loc=1)
#axes[1].set_xlim(x_grid[0],x_grid[-1])
axes[1].set_xlim(0,160)
axes[1].set_ylim(0,0.021)
axes[1].text(0.72*160,0.32*0.02,' '*(10-2*len(name))+name,fontsize=16)
axes[1].set_xlabel('L [MeV]',fontsize=15)
fig.tight_layout(pad=1.0)
fig.savefig('./figures/Sv_L_posterior_'+name+'.pdf',bbox_inches = 'tight',format='pdf')
#Sv_PDF_post_Skyrme=Sv_PDF_post
#L_PDF_post_Skyrme=L_PDF_post
```
[[-22.22856073 44.45712146]] [54.19852518]
[[-25.03362672 29.08318398]] [40.03770082]
[[-22.32849318 30.16012885]] [26.88414443]
[[-25.87404443 38.91293297]] [66.77608073]

```python
percentile_list = [0.68269]
color_list = ['y','k','tab:red','tab:blue','g','c']
ls_list=['--','-',(0, (2, 1, 1, 1)),(0, (4, 1, 1, 1))]
#label_list=['$R_{ch}, F_{ch}, BE$','+CREX+PREX','+CREX','+PREX']
label_list=['Basic nuclei','+CREX+PREX','+CREX','+PREX']
likelihood_name=label_list
name='Skyrme'
SAT_list=SAT_Skyrme_list
fig, axes = plt.subplots(1,2,sharex=False,sharey=False,figsize=(8,3))
density_array_list=[]
x_contours_list=[]
x_max_list=[]
line_list=[]
lines=[]
for i in range(len(SAT_list)):
x=SAT_list[i][4] #L
x_grid,pdf_grid=get_kde_1D(x,1000,weights=None)
x_max,x_contours,density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[1],ls=ls_list[i],label_text_add=likelihood_name[i],unit='MeV',n=200)
density_array_list.append(density_countours)
x_contours_list.append(x_contours)
x_max_list.append(x_max)
line_list.append(line)
print(x_contours-x_max,x_max)
for j in range(len(x_contours)):
axes[1].plot([x_contours_list[i][j][0],x_contours_list[i][j][0]],[0,density_array_list[i][j][0]],ls=ls_list[i],color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[1].plot([x_contours_list[i][j][1],x_contours_list[i][j][1]],[0,density_array_list[i][j][1]],ls=ls_list[i],color=color_list[i],linewidth=2,label=likelihood_name[i])
if(i==1):
#L_PDF_post=interpolate.UnivariateSpline(x_grid,pdf_grid,k=3,s=0)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
#lines+=axes[1].plot([Sv_x_contours[j][1],Sv_x_contours[j][1]],[0,Sv_density_countours[j][1]],':',color=color_list[i],linewidth=2,label='$S_v=%.1f_{%.1f}^{+%.1f}$ '%(Sv_x_max,Sv_x_contours[j][0]-Sv_x_max,Sv_x_contours[j][1]-Sv_x_max)+'MeV')
#Sv_plot=np.linspace(20,50,1000)
x_grid,pdf_grid=get_kde_1D(x,1000,weights=PDF_DP(SAT_list[i][4],SAT_list[i][3],weights=1))
#Sv_x_max,Sv_x_contours,Sv_density_countours,Sv_line=plot_density_1D_L(Sv_plot,PDF_DP(SAT_list[i][4][np.newaxis],Sv_plot[:,np.newaxis],weights=0*SAT_list[i][4]+1).sum(axis=1),percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=100,ls=':')
L_x_max,L_x_contours,L_density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[1],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=200,ls=':')
axes[1].plot([L_x_contours[j][0],L_x_contours[j][0]],[0,L_density_countours[j][0]],':',color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[1].plot([L_x_contours[j][1],L_x_contours[j][1]],[0,L_density_countours[j][1]],':',color=color_list[i],linewidth=2,label=likelihood_name[1]+'+$\\alpha_D$')
axes[1].legend(handles=lines,frameon=False,fontsize=10,labelspacing=0.2,loc='upper right', bbox_to_anchor=(1.02, 1.01))
#axes[1].set_xlim(x_grid[0],x_grid[-1])
axes[1].set_xlim(0,160)
axes[1].set_ylim(0,0.021)
axes[1].text(0.72*160,0.32*0.02,' '*(10-2*len(name))+name,fontsize=16)
axes[1].set_xlabel('L [MeV]',fontsize=15)
Sv_density_array_list=[]
Sv_x_contours_list=[]
Sv_x_max_list=[]
Sv_line_list=[]
for i in range(len(SAT_list)):
x=SAT_list[i][3] #Sv
x_grid,pdf_grid=get_kde_1D(x,1000,weights=None)
x_max,x_contours,density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],ls=ls_list[i],label_text_add=likelihood_name[i],unit='MeV',n=200)
Sv_density_array_list.append(density_countours)
Sv_x_contours_list.append(x_contours)
Sv_x_max_list.append(x_max)
Sv_line_list.append(line)
if(i==1):
#Sv_plot=np.linspace(20,50,1000)
x_grid,pdf_grid=get_kde_1D(x,1000,weights=PDF_DP(SAT_list[i][4],SAT_list[i][3],weights=1))
#Sv_x_max,Sv_x_contours,Sv_density_countours,Sv_line=plot_density_1D_L(Sv_plot,PDF_DP(SAT_list[i][4][np.newaxis],Sv_plot[:,np.newaxis],weights=0*SAT_list[i][4]+1).sum(axis=1),percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=100,ls=':')
Sv_x_max,Sv_x_contours,Sv_density_countours,line=plot_density_1D_L(x_grid,pdf_grid,percentile_list,color_list[i], [color_list[i],color_list[i]],axes[0],label_text_add=likelihood_name[1]+'+$\\alpha_D$',unit='MeV',n=200,ls=':')
#first_legend=axes[0].legend(handles=line_list,frameon=False,fontsize=10,labelspacing=0.2,loc=2)
#axes[0].add_artist(first_legend)
axes[0].set_xlim(20,50)
axes[0].set_ylim(0,0.12)
axes[0].text(0.05*(50-20)+20,0.85*0.12,name,fontsize=16)
axes[0].set_xlabel('S$_v$ [MeV]',fontsize=15)
lines=[]
for i in range(len(likelihood_name)):
for j in range(len(x_contours)):
axes[0].plot([Sv_x_contours_list[i][j][0],Sv_x_contours_list[i][j][0]],[0,Sv_density_array_list[i][j][0]],ls=ls_list[i],color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[0].plot([Sv_x_contours_list[i][j][1],Sv_x_contours_list[i][j][1]],[0,Sv_density_array_list[i][j][1]],ls=ls_list[i],color=color_list[i],linewidth=2,label='$S_v,L=%.1f_{%.1f}^{+%.1f}, %.1f_{%.1f}^{+%.1f}$ '%(Sv_x_max_list[i],Sv_x_contours_list[i][j][0]-Sv_x_max_list[i],Sv_x_contours_list[i][j][1]-Sv_x_max_list[i], x_max_list[i],x_contours_list[i][j][0]-x_max_list[i],x_contours_list[i][j][1]-x_max_list[i]))
if(i==1):
axes[0].plot([Sv_x_contours[j][0],Sv_x_contours[j][0]],[0,Sv_density_countours[j][0]],':',color=color_list[i],linewidth=2)
#ax.plot([x_contours[i][1],x_contours[i][1]],[0,density_countours[i][1]],'--',color=color_list[i],linewidth=5*figsize_norm,label=label_text_add+'%.3f - %.3f'%(x_contours[i][0],x_contours[i][1])+unit)
lines+=axes[0].plot([Sv_x_contours[j][1],Sv_x_contours[j][1]],[0,Sv_density_countours[j][1]],':',color=color_list[i],linewidth=2,label='$S_v,L=%.1f_{%.1f}^{+%.1f}, %.1f_{%.1f}^{+%.1f}$'%(Sv_x_max,Sv_x_contours[j][0]-Sv_x_max,Sv_x_contours[j][1]-Sv_x_max, L_x_max,L_x_contours[j][0]-L_x_max,L_x_contours[j][1]-L_x_max))
second_legend=axes[0].legend(handles=lines,frameon=False,fontsize=9,labelspacing=0.1,loc='upper right', bbox_to_anchor=(1.02, 1.01))
axes[0].add_artist(second_legend)
fig.tight_layout(pad=1.0)
fig.savefig('./figures/Sv_L_posterior_'+name+'.pdf',bbox_inches = 'tight',format='pdf')
```
[[-26.678478 35.12103433]] [56.58082067]
[[-22.49127297 21.26892118]] [35.9491778]
[[-14.60625154 25.80437772]] [21.66114762]
[[-26.7471772 29.76094364]] [72.36653698]

```python
```
```python
```
```python
#Show Sv-L posterior with PREX and CREX:
for SAT in [SAT_Skyrme_list[1],SAT_RMF_to_Skyrme_list[1],SAT_RMF_list[1]]:
mean=np.mean(SAT[3:5],axis=1)
cov =np.cov(SAT[3],SAT[4])#,SAT_RMF_to_Skyrme_list
print(mean,cov)
```
[34.1403562 40.3024777] [[ 42.19991969 83.52975473]
[ 83.52975473 453.23480257]]
[31.42158453 49.13189107] [[ 26.71305169 92.79344187]
[ 92.79344187 749.8499721 ]]
[ 39.21816128 -10.08244647] [[ 60.26530628 -105.82394433]
[-105.82394433 2354.43845758]]
```python
#Show Sv-L posterior with Basic nuclei constraints:
for SAT in [np.concatenate((SAT_Skyrme_list[0],SAT_RMF_to_Skyrme_list[0][:,::10]),axis=1)]:
mean=np.mean(SAT[3:5],axis=1)
cov =np.cov(SAT[3],SAT[4])#,SAT_RMF_to_Skyrme_list
print(mean,cov)
x,y=cov_ellipse_xy(mean,cov)
plt.plot(x,y,'--b',label='RMF')
```
[32.77900096 66.82662838] [[ 34.96167206 136.75320432]
[136.75320432 976.4023266 ]]

```python
```
```python
```
```python
def normal(FcFw_Pb,FcFw_Ca):
part3=-0.5*((FcFw_Ca-(Fc_exp[0]-Fw_exp[0]))**2/Fw_sig2[0])
part4=-0.5*((FcFw_Pb-(Fc_exp[1]-Fw_exp[1]))**2/Fw_sig2[1])
return np.exp(part3+part4)
def distribution(det_x,det_y,x_grid,y_grid,pdf_grid,pdf_function):
return (pdf_grid*(pdf_function(x_grid+det_x,y_grid+det_y))).sum()
dimension=1
def discrepancy_sig(n_sigma,discrepancy,dimension=1):
return scipy.special.gammainc(dimension/2,n_sigma**2/2) - discrepancy
```
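A minimal sketch (an illustration, not from the original notebook) of how `discrepancy_sig` is used in the next cell: the probability mass accumulated above the central density is converted into an equivalent number of Gaussian sigmas by solving gammainc(dim/2, n²/2) = discrepancy with a root finder.
```python
import scipy.special
import scipy.optimize

def prob_to_nsigma(prob, dimension=1):
    # Solve gammainc(dim/2, n^2/2) = prob for n, i.e. the n-sigma level whose
    # chi^2 containment probability in `dimension` dimensions equals `prob`.
    f = lambda n: scipy.special.gammainc(dimension / 2, n**2 / 2) - prob
    return scipy.optimize.root(f, [1.0]).x[0]

print(prob_to_nsigma(0.6827))  # ~1.0 sigma in one dimension
print(prob_to_nsigma(0.9545))  # ~2.0 sigma
```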
```python
bin_num=[100,200]
#bin_num=[50,100]
percentile_list = [0.9545, 0.9, 0.6827, 0.3]
color_list = ['c','r','k','y','g']
for name,PrexCrex_list in zip(['Skyrme','RMF'],[PrexCrex_Skyrme_list,PrexCrex_RMF_to_Skyrme_list]):
discrepancy=[]
for i,PrexCrex_i,likelihood_name_i,likelihood_filename_i in zip(range(4),PrexCrex_list,likelihood_name,likelihood_filename):
y=PrexCrex_i[0]-PrexCrex_i[2]
x=PrexCrex_i[1]-PrexCrex_i[3]
x_grid,y_grid,pdf_grid=get_kde_2D([x,y],bin_num,weights=None,x_min=0,x_max=0.06)
ax=plot_density_2D(x_grid,y_grid,pdf_grid,percentile_list,color_list,'$F_{ch}^{Pb208}-F_W^{Pb208}$','$F_{ch}^{Ca48}-F_W^{Ca48}$',y_unit='',inline=True)
ax.legend(fontsize=0)
ax.set_xlim(0,0.06)
ax.set_ylim(0.02,0.06)
line=ax.plot([0.024,0.024],[0.026,0.039],'k')
line0=ax.plot([0.011,0.033],[0.034,0.034],'k',label='Ab initio')
#line1=ax.plot(Fc_new[2]-Fw_new[2],Fc_new[0]-Fw_new[0],'om',label='L30 - L130')
#line2=ax.plot(Fc_rex[2]-Fw_rex[2],Fc_rex[0]-Fw_rex[0],'o',color='brown',label='Sv31 - Sv51')
#line_skyrme=ax.plot(FchFw_andrew[logic_plot_andrew,1],FchFw_andrew[logic_plot_andrew,0],'og',label='Skyrme')
line3=ax.plot([0],[0],'--k',lw=5,label='Posterior:'+likelihood_name[i])
#line4=ax.plot(Fc_lit[2,7:]-Fw_lit[2,7:],Fc_lit[0,7:]-Fw_lit[0,7:],'+k')
#line5=ax.plot(Fc_lit[2,:7]-Fw_lit[2,:7],Fc_lit[0,:7]-Fw_lit[0,:7],'Xk')
#for j in range(len(special[0])):
# ax.text((Fc_lit[2]-Fw_lit[2])[j]+0.0004,(Fc_lit[0]-Fw_lit[0])[j]-0.0004,special[0][j],fontsize=20)
theta=np.linspace(0,2*np.pi,100)
mean_208Pb=Fc_exp[1]-Fw_exp[1]
mean_48Ca =Fc_exp[0]-Fw_exp[0]
std_208Pb =np.sqrt(Fw_sig2[1])
std_48Ca =np.sqrt(Fw_sig2[0])
ax.plot(mean_208Pb+scale_chi2[0]*std_208Pb*np.cos(theta),mean_48Ca+scale_chi2[0]*std_48Ca*np.sin(theta),'--b')
ax.plot(mean_208Pb+scale_chi2[1]*std_208Pb*np.cos(theta),mean_48Ca+scale_chi2[1]*std_48Ca*np.sin(theta),'--b')
line5=ax.plot(mean_208Pb+scale_chi2[2]*std_208Pb*np.cos(theta),mean_48Ca+scale_chi2[2]*std_48Ca*np.sin(theta),'--b',label='PREX & CREX 68%,80%,95% CR')
first_legend = ax.legend(handles=line3+line0,fontsize=40,frameon=False, loc='upper left')
ax.add_artist(first_legend)
plt.savefig('./figures/FF_posterior_'+likelihood_filename_i+'_'+name+'.pdf',bbox_inches = 'tight',format='pdf')
det_x_plot=np.linspace(-0.05,0.05,201)
det_y_plot=np.linspace(-0.05,0.05,201)
det_pdf_plot=[]
for x_i in det_x_plot:
det_pdf_plot.append([])
for y_i in det_y_plot:
det_pdf_plot[-1].append(distribution(x_i,y_i,x_grid,y_grid,pdf_grid,normal))
det_pdf_plot=np.array(det_pdf_plot)
discrepancy.append(det_pdf_plot[det_pdf_plot>det_pdf_plot[100,100]].sum()/det_pdf_plot.sum())
discrepancy=np.array(discrepancy)
discrepancy_sigma=[]
for discrepancy_i in discrepancy:
discrepancy_sigma.append(scipy.optimize.root(discrepancy_sig,[1],args=(discrepancy_i,dimension)).x[0])
discrepancy_sigma=np.array(discrepancy_sigma)
print('discrepancy:')
print(discrepancy)
print(discrepancy_sigma)
```
['95.45' '90.0' '68.27' '30.0']
[3.41296430e-05 6.69900905e-05 1.90477190e-04 3.82325601e-04]
['95.45' '90.0' '68.27' '30.0']
[3.58176538e-05 7.26046254e-05 1.99614937e-04 4.07239349e-04]
['95.45' '90.0' '68.27' '30.0']
[3.46224025e-05 7.11831546e-05 1.97704966e-04 4.17419706e-04]
['95.45' '90.0' '68.27' '30.0']
[3.42866694e-05 7.28949447e-05 2.20050077e-04 4.81429380e-04]
discrepancy:
[0.99758352 0.93534285 0.97576861 0.99358055]
[3.03360763 1.8476213 2.25344046 2.72554979]
['95.45' '90.0' '68.27' '30.0']
[3.63940451e-05 7.18120417e-05 2.03007325e-04 4.89894953e-04]
['95.45' '90.0' '68.27' '30.0']
[3.36621251e-05 6.75055237e-05 1.99928137e-04 3.83128663e-04]
['95.45' '90.0' '68.27' '30.0']
[3.44100485e-05 6.45700366e-05 1.72366892e-04 3.20490870e-04]
['95.45' '90.0' '68.27' '30.0']
[3.73099490e-05 7.25232625e-05 2.17844822e-04 5.27913574e-04]
discrepancy:
[0.96797957 0.76028388 0.80019743 0.97115723]
[2.14415553 1.17569666 1.28211425 2.18562891]








```python
```
```python
```
```python
```
```python
N=211
r_fm_max=20
r_grid_fm =np.linspace(0,r_fm_max,N)
r_grid_MeV=r_grid_fm*unitconvert.unitMeVfm**(1/3)
omega_over_m=0.022
M=939
def load_basis(path,dir_name):
kappa_list=np.loadtxt(path+dir_name+'/init.txt')
x=np.loadtxt(path+dir_name+'/x_grid.txt')
E_over_m_list=np.loadtxt(path+dir_name+'/E_over_m.txt')
g_basis_list=[]
f_basis_list=[]
g_times_r_basis_list=[]
f_times_r_basis_list=[]
g_basis_norm_list=[]
f_basis_norm_list=[]
g_times_r_basis_norm_list=[]
f_times_r_basis_norm_list=[]
Matrix_fg=[]
for i,kappa in enumerate(kappa_list):
dir_kappa=dir_name+'/kappa_%d'%kappa
path_dir_kappa=path+dir_kappa
g_basis_list.append(np.loadtxt(path_dir_kappa+'/g_basis_norm0.txt'))
f_basis_list.append(np.loadtxt(path_dir_kappa+'/f_basis_norm0.txt'))
g_times_r_basis_list.append(np.loadtxt(path_dir_kappa+'/g_basis_norm1.txt'))
f_times_r_basis_list.append(np.loadtxt(path_dir_kappa+'/f_basis_norm1.txt'))
g_basis_norm_list.append(np.loadtxt(path_dir_kappa+'/g_basis_norm2.txt'))
f_basis_norm_list.append(np.loadtxt(path_dir_kappa+'/f_basis_norm2.txt'))
g_times_r_basis_norm_list.append(np.loadtxt(path_dir_kappa+'/g_basis_norm3.txt'))
f_times_r_basis_norm_list.append(np.loadtxt(path_dir_kappa+'/f_basis_norm3.txt'))
Matrix_fg.append(np.loadtxt(path_dir_kappa+'/Matrix_fg.txt'))
g_basis_list=np.array(g_basis_list)
f_basis_list=np.array(f_basis_list)
g_times_r_basis_list=np.array(g_times_r_basis_list)
f_times_r_basis_list=np.array(f_times_r_basis_list)
g_basis_norm_list=np.array(g_basis_norm_list)
f_basis_norm_list=np.array(f_basis_norm_list)
g_times_r_basis_norm_list=np.array(g_times_r_basis_norm_list)
f_times_r_basis_norm_list=np.array(f_times_r_basis_norm_list)
Matrix_fg=np.array(Matrix_fg)
return [kappa_list,E_over_m_list,g_basis_list,f_basis_list,g_times_r_basis_list,f_times_r_basis_list, g_basis_norm_list,f_basis_norm_list,g_times_r_basis_norm_list,f_times_r_basis_norm_list,Matrix_fg]
kappa_list,E_over_m_list,g_basis_list,f_basis_list,g_times_r_basis_list,f_times_r_basis_list, g_basis_norm_list,f_basis_norm_list,g_times_r_basis_norm_list,f_times_r_basis_norm_list,Matrix_fg=load_basis('./data/','basis')
kappa_list=list(kappa_list)
level_list=[[22,16],[11,10],[7,6]]
q_list=[0.3977,1,0.8733] #Form factor at momentum q in fm-1, q for 90Zr is dummy.
density_unit=omega_over_m*M**2/(4*np.pi*(r_grid_MeV[1]-r_grid_MeV[0]))
def vector_to_result(kappa_all,vector_all,kappa_list,basis_norm_list):
return np.array([vector_all[:,i].dot(basis_norm_list[kappa_list.index(kappa_i)]) for i,kappa_i in enumerate(kappa_all)])
def density(kappaEgf,level_list,return_levels=False):
N_basis=int(len(kappaEgf)/2-1)
kappa_all=kappaEgf[0]
densities=[]
densities_levels=[]
level_sum=0
for to_level_np in level_list:
densities_levels.append([])
for to_level in to_level_np:
g_vector_all=kappaEgf[2:(2+N_basis),level_sum:(level_sum+to_level)]
f_vector_all=kappaEgf[(2+N_basis):,level_sum:(level_sum+to_level)]
#print(kappa_all[level_sum:(level_sum+to_level)],kappa_all[level_sum:(level_sum+to_level)].shape,g_vector_all.shape)
g_result_all=vector_to_result(kappa_all[level_sum:(level_sum+to_level)],g_vector_all,kappa_list,g_basis_norm_list)
f_result_all=vector_to_result(kappa_all[level_sum:(level_sum+to_level)],f_vector_all,kappa_list,f_basis_norm_list)
g2_result_all=g_result_all**2
f2_result_all=f_result_all**2
svt_result_all=np.array([g2_result_all-f2_result_all,g2_result_all+f2_result_all,2*g_result_all*f_result_all])
densities_levels[-1].append(density_unit*svt_result_all*np.abs(2*kappa_all[level_sum:(level_sum+to_level)])[np.newaxis,:,np.newaxis])
densities.append(densities_levels[-1][-1].sum(axis=1))
level_sum+=to_level
if(return_levels):
return np.array(densities).reshape((3,2,3,N)),densities_levels
else:
return np.array(densities).reshape((3,2,3,N))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[0][:,:,:5000],'kappaEgf_RMF_to_Skyrme_none1'],))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[0][:,:,5000:],'kappaEgf_RMF_to_Skyrme_none2'],))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[1],'kappaEgf_RMF_to_Skyrme_all'],))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[2],'kappaEgf_RMF_to_Skyrme_crex'],))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[3][:,:,:5000],'kappaEgf_RMF_to_Skyrme_prex1'],))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[3][:,:,5000:10000],'kappaEgf_RMF_to_Skyrme_prex2'],))
# toolbox.pickle_dump('./','data',([kappaEgf_RMF_to_Skyrme_list[3][:,:,10000:],'kappaEgf_RMF_to_Skyrme_prex3'],))
#kappaEgf_RMF_to_Skyrme_list=toolbox.pickle_load('./','data',['kappaEgf_RMF_to_Skyrme_list'])[0]
kappaEgf_RMF_to_Skyrme_list=[]
kappaEgf_RMF_to_Skyrme_list+=[np.concatenate(toolbox.pickle_load('./','data',['kappaEgf_RMF_to_Skyrme_none1','kappaEgf_RMF_to_Skyrme_none2']),axis=2)]
kappaEgf_RMF_to_Skyrme_list+=toolbox.pickle_load('./','data',['kappaEgf_RMF_to_Skyrme_all','kappaEgf_RMF_to_Skyrme_crex'])
kappaEgf_RMF_to_Skyrme_list+=[np.concatenate(toolbox.pickle_load('./','data',['kappaEgf_RMF_to_Skyrme_PREX1','kappaEgf_RMF_to_Skyrme_PREX2','kappaEgf_RMF_to_Skyrme_PREX3']),axis=2)]
densities_fm3_RMF_to_Skyrme_list=[unitconvert.toMevfm(np.array([density(kappaEgf_i,level_list) for kappaEgf_i in kappaEgf.transpose((2,0,1))]),'mev4') for kappaEgf in kappaEgf_RMF_to_Skyrme_list]
```
```python
```
```python
percentile_array=np.array([16,50,84])
density_percentile=[]
#for densities_fm3_i in densities_fm3_RMF_list:
for densities_fm3_i in densities_fm3_RMF_to_Skyrme_list:
density_percentile.append(np.percentile(densities_fm3_i,percentile_array,axis=0))
density_percentile=np.array(density_percentile)
```
```python
density_percentile_to_plot=density_percentile
name='RMF'
lw=0.5
ls='--'
alpha=0.4
color_list = ['y','k','tab:red','tab:blue']
for nuclei_index,nuclei_name,file_name,xlim,xlim_small in zip([0,1,2],['$^{208}$Pb','$^{90}$Zr','$^{48}$Ca'],['pb208','zr490','ca48'],[12.5,9,7],[[6.7,7.6],[5.1,5.6],[4,4.6]]):
fig,ax=plt.subplots(1,1,figsize=(5,4),sharex=False,sharey=False)
ax.set_xticks(range(0,(int(xlim/2)+1)*2,2))
ax_twin=ax.twinx()
nucleon_index=0
band_list=[]
line_list=[]
for i in range(4):
band_list.append(ax.fill_between(r_grid_fm,density_percentile_to_plot[i,0,nuclei_index,nucleon_index,1],density_percentile_to_plot[i,2,nuclei_index,nucleon_index,1],alpha=alpha,label=likelihood_name[i],color=color_list[i],linewidth=0))
line_list+=ax_twin.plot(r_grid_fm,density_percentile_to_plot[i,1,nuclei_index,nucleon_index,1],label=likelihood_name[i],color=color_list[i],lw=lw,ls=ls,alpha=0.8)
nucleon_index=1
for i in range(4):
ax.fill_between(r_grid_fm,density_percentile_to_plot[i,0,nuclei_index,nucleon_index,1],density_percentile_to_plot[i,2,nuclei_index,nucleon_index,1],alpha=alpha,label=likelihood_name[i],color=color_list[i],linewidth=0)
ax_twin.plot(r_grid_fm,density_percentile_to_plot[i,1,nuclei_index,nucleon_index,1],label=likelihood_name[i],color=color_list[i],lw=lw,ls=ls,alpha=0.8)
first_legend = ax.legend(handles=band_list,fontsize=12,frameon=False,handletextpad=0.6,ncol=1,columnspacing=-0.8, loc='lower left', bbox_to_anchor=(0, 0))
ax.add_artist(first_legend)
ax.set_xlim(0,xlim)
ax.set_ylim(0,0.11)
ax.set_xlabel('r [fm]',fontsize=15)
ax.set_ylabel('$n$ [fm$^{-3}$]',fontsize=15)
first_legend = ax_twin.legend(handles=line_list,fontsize=12,frameon=False,handletextpad=0.6,ncol=1,columnspacing=-0.8, loc='lower left', bbox_to_anchor=(0, 0))
ax_twin.add_artist(first_legend)
ax_twin.set_xlim(0,xlim)
ax_twin.set_ylim(0,0.11)
ax_twin.set_yticklabels('')
ax.plot(np.concatenate((xlim_small,xlim_small[::-1],[xlim_small[0]])),[0.02,0.02,0.03,0.03,0.02],'k',lw=1)
ax_small=ax.inset_axes([0.72,0.35,0.25,0.4])
for nucleon_index in [0,1]:
for i in range(4):
ax_small.fill_between(r_grid_fm,density_percentile_to_plot[i,0,nuclei_index,nucleon_index,1],density_percentile_to_plot[i,2,nuclei_index,nucleon_index,1],alpha=alpha,color=color_list[i],linewidth=0)
ax_small.plot(r_grid_fm,density_percentile_to_plot[i,1,nuclei_index,nucleon_index,1],color=color_list[i],lw=lw,ls=ls,alpha=0.8)
ax_small.set_xlim(xlim_small[0],xlim_small[1])
ax_small.set_ylim(0.02,0.03)
ax_small.set_yticks([0.02,0.03])
second_legend = ax.legend(title=nuclei_name,handles=[],title_fontsize=20,frameon=False,handletextpad=0.3, loc='lower left', bbox_to_anchor=(0.7, 0.75))
#ax.add_artist(second_legend)
plt.savefig('./figures/profile_'+file_name+'_'+name+'.pdf',bbox_inches = 'tight',format='pdf')
```



```python
```
```python
percentile_array=np.array([16,50,84])
b4_grid=np.array([-np.infty,0.1,0.3,0.5,0.7,np.infty])
b4_grid_name=['0.0','0.1','0.3','0.5','0.7','1']
#b4p=(eos_args_RMF[4]/763**2+eos_args_RMF[2]/980**2)/(8*939**2)*197.3**4
#densities_fm3_RMF=np.concatenate(densities_fm3_RMF_list)
b4p=(eos_args_RMF_to_Skyrme[4]/763**2+eos_args_RMF_to_Skyrme[2]/980**2)/(8*939**2)*197.3**4
densities_fm3_RMF=np.concatenate(densities_fm3_RMF_to_Skyrme_list)
density_percentile_b4p=[]
for i in range(len(b4_grid)-1):
logic_i=np.logical_and(b4p>b4_grid[i],b4p<b4_grid[i+1])
densities_fm3_i=densities_fm3_RMF[logic_i]
density_percentile_b4p.append(np.percentile(densities_fm3_i,percentile_array,axis=0))
print(len(densities_fm3_i),np.diff(PrexCrex_RMF_to_Skyrme[[11,10]][:,logic_i].mean(axis=1)),np.diff(PrexCrex_RMF_to_Skyrme[[3,1]][:,logic_i].mean(axis=1)))
print(np.diff(PrexCrex_RMF_to_Skyrme[[13,12]][:,logic_i].mean(axis=1)),np.diff(PrexCrex_RMF_to_Skyrme[[2,0]][:,logic_i].mean(axis=1)))
density_percentile_b4p=np.array(density_percentile_b4p)
```
8177 [0.21528704] [0.03138166]
[0.20179449] [0.04928604]
10325 [0.20266091] [0.02962093]
[0.18067771] [0.04601113]
7101 [0.19738922] [0.02929961]
[0.14686463] [0.04128215]
5272 [0.21358778] [0.03198978]
[0.12272937] [0.03756205]
556 [0.25659021] [0.03809455]
[0.13373278] [0.03832359]
```python
```
```python
density_percentile_to_plot=density_percentile_b4p
label_nambe_to_plot=[b4_grid_name[i]+'<$b^{\'}_4$/fm$^4$<'+b4_grid_name[i+1] for i in range(len(b4_grid_name)-1)]
lw=0.5
ls='--'
alpha=0.4
color_list=np.array([[0. , 0.64509804, 1. , 1. ],
[0.24984187, 1. , 0.71790006, 1. ],
[0.71790006, 1. , 0.24984187, 1. ],
[1. , 0.72694263, 0. , 1. ],
[1. , 0.18954248, 0. , 1. ]])
#color_list = ['y','k','tab:red','tab:blue']
for nuclei_index,nuclei_name,file_name,xlim,xlim_small in zip([0,1,2],['$^{208}$Pb','$^{90}$Zr','$^{48}$Ca'],['pb208','zr490','ca48'],[12.5,9,7],[[6.7,7.6],[5.1,5.6],[4,4.6]]):
fig,ax=plt.subplots(1,1,figsize=(5,4),sharex=False,sharey=False)
ax.set_xticks(range(0,(int(xlim/2)+1)*2,2))
ax_twin=ax.twinx()
nucleon_index=0
band_list=[]
line_list=[]
for i in range(5):
band_list.append(ax.fill_between(r_grid_fm,density_percentile_to_plot[i,0,nuclei_index,nucleon_index,1],density_percentile_to_plot[i,2,nuclei_index,nucleon_index,1],alpha=alpha,label=label_nambe_to_plot[i],color=color_list[i],linewidth=0))
line_list+=ax_twin.plot(r_grid_fm,density_percentile_to_plot[i,1,nuclei_index,nucleon_index,1],label=label_nambe_to_plot[i],color=color_list[i],lw=lw,ls=ls,alpha=0.8)
nucleon_index=1
for i in range(5):
ax.fill_between(r_grid_fm,density_percentile_to_plot[i,0,nuclei_index,nucleon_index,1],density_percentile_to_plot[i,2,nuclei_index,nucleon_index,1],alpha=alpha,label=label_nambe_to_plot[i],color=color_list[i],linewidth=0)
ax_twin.plot(r_grid_fm,density_percentile_to_plot[i,1,nuclei_index,nucleon_index,1],label=label_nambe_to_plot[i],color=color_list[i],lw=lw,ls=ls,alpha=0.8)
first_legend = ax.legend(handles=band_list,fontsize=11,frameon=False,handletextpad=0.6,ncol=1,columnspacing=-0.8, loc='lower left', bbox_to_anchor=(0, 0))
ax.add_artist(first_legend)
ax.set_xlim(0,xlim)
ax.set_ylim(0,0.11)
ax.set_xlabel('r [fm]',fontsize=15)
ax.set_ylabel('$n$ [fm$^{-3}$]',fontsize=15)
first_legend = ax_twin.legend(handles=line_list,fontsize=11,frameon=False,handletextpad=0.6,ncol=1,columnspacing=-0.8, loc='lower left', bbox_to_anchor=(0, 0))
ax_twin.add_artist(first_legend)
ax_twin.set_xlim(0,xlim)
ax_twin.set_ylim(0,0.11)
ax_twin.set_yticklabels('')
ax.plot(np.concatenate((xlim_small,xlim_small[::-1],[xlim_small[0]])),[0.02,0.02,0.03,0.03,0.02],'k',lw=1)
ax_small=ax.inset_axes([0.72,0.35,0.25,0.4])
for nucleon_index in [0,1]:
for i in range(5):
ax_small.fill_between(r_grid_fm,density_percentile_to_plot[i,0,nuclei_index,nucleon_index,1],density_percentile_to_plot[i,2,nuclei_index,nucleon_index,1],alpha=alpha,label=label_nambe_to_plot[i],color=color_list[i],linewidth=0)
ax_small.plot(r_grid_fm,density_percentile_to_plot[i,1,nuclei_index,nucleon_index,1],label=label_nambe_to_plot[i],color=color_list[i],lw=lw,ls=ls,alpha=0.8)
ax_small.set_xlim(xlim_small[0],xlim_small[1])
ax_small.set_ylim(0.02,0.03)
ax_small.set_yticks([0.02,0.03])
second_legend = ax.legend(title=nuclei_name,handles=[],title_fontsize=20,frameon=False,handletextpad=0.3, loc='lower left', bbox_to_anchor=(0.7, 0.75))
#ax.add_artist(second_legend)
plt.savefig('./figures/profile_b4p_'+file_name+'_RMF.pdf',bbox_inches = 'tight',format='pdf')
```



```python
```
|
sotzeeREPO_NAMECPREXPATH_START.@CPREX_extracted@[email protected]@.PATH_END.py
|
{
"filename": "AmoebaFitter.py",
"repo_name": "dokester/BayesicFitting",
"repo_path": "BayesicFitting_extracted/BayesicFitting-master/BayesicFitting/source/AmoebaFitter.py",
"type": "Python"
}
|
import numpy as numpy
import math
from . import Tools
from .MaxLikelihoodFitter import MaxLikelihoodFitter
from .AnnealingAmoeba import AnnealingAmoeba
__author__ = "Do Kester"
__year__ = 2023
__license__ = "GPL3"
__version__ = "3.1.0"
__url__ = "https://www.bayesicfitting.nl"
__status__ = "Perpetual Beta"
# *
# * This file is part of the BayesicFitting package.
# *
# * BayesicFitting is free software: you can redistribute it and/or modify
# * it under the terms of the GNU Lesser General Public License as
# * published by the Free Software Foundation, either version 3 of
# * the License, or ( at your option ) any later version.
# *
# * BayesicFitting is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU Lesser General Public License for more details.
# *
# * The GPL3 license can be found at <http://www.gnu.org/licenses/>.
# *
# * A JAVA version of this code was part of the Herschel Common
# * Science System (HCSS), also under GPL3.
# *
# * 2003 - 2014 Do Kester, SRON (Java code)
# * 2017 - 2023 Do Kester
class AmoebaFitter( MaxLikelihoodFitter ):
"""
Fitter using the simulated annealing simplex minimum finding algorithm,
See also: @AnnealingAmoeba
Author Do Kester
Examples
--------
# assume x and y are Double1d data arrays.
>>> x = numpy.arange( 100, dtype=float ) / 10
>>> y = 3.5 * numpy.sin( x + 0.4 ) # make sine
>>> numpy.random.seed( 12345 ) # seed the random number generator
>>> y += numpy.random.randn( 100 ) * 0.2 # add noise
>>> sine = SineModel( ) # sinusoidal model
>>> lolim = numpy.asarray( [1,-10,-10], dtype=float )
>>> hilim = numpy.asarray( [100,10,10], dtype=float )
>>> sine.setLimits( lolim, hilim ) # set limits on the model parameters
>>> amfit = AmoebaFitter( x, sine )
>>> param = amfit.fit( y, temp=10 )
>>> stdev = amfit.getStandardDeviation( ) # stdevs on the parameters
>>> chisq = amfit.getChiSquared( )
>>> scale = amfit.getScale( ) # noise scale
>>> yfit = amfit.getResult( ) # fitted values
>>> yfit = sine( x ) # fitted values ( same as previous )
>>> yband = amfit.monteCarloError( ) # 1 sigma confidence region
# for diagnostics ( or just for fun )
>>> amfit = AmoebaFitter( x, sine )
>>> amfit.setTemperature( 10 ) # set a temperature to escape local minima
>>> amfit.setVerbose( 10 ) # report every 10th iteration
>>> plotter = IterationPlotter( ) # from BayesicFitting
>>> amfit.setPlotter( plotter, 20 ) # make a plot every 20th iteration
>>> param = amfit.fit( y )
Notes
-----
1. AmoebaFitter is not guaranteed to find the global minimum.
2. The calculation of the evidence is a Gaussian approximation which is
only exact for linear models with a fixed scale.
Author : Do Kester.
"""
# *************************************************************************
def __init__( self, xdata, model, **kwargs ):
"""
Create a new Amoeba class, providing inputs and model.
Parameters
----------
xdata : array_like
independent input values
model : Model
the model function to be fitted
kwargs : dict
Possibly includes keywords from
MaxLikelihoodFitter : errdis, scale, power
IterativeFitter : maxIter, tolerance, verbose
BaseFitter : map, keep, fixedScale
"""
if model.npchain <= 1 :
raise ValueError( "AmoebaFitter cannot make a simplex of one parameter" )
super( AmoebaFitter, self ).__init__( xdata, model, **kwargs )
# *************************************************************************
def fit( self, data, weights=None, par0=None, keep=None, size=None,
seed=4567, temp=0, limits=None, maxiter=1000,
tolerance=0.0001, cooling=0.95, steps=10,
verbose=0, plot=False, accuracy=None, callback=None ):
### TBC parameter defaults
"""
Return Model fitted to the data array.
When done, it also calculates the hessian matrix and chisq.
Parameters
----------
data : array_like
the data vector to be fitted
weights : array_like
weights pertaining to the data
The weights are relative weights unless `scale` is set.
accuracy : float or array_like
accuracy of (individual) data
par0 : array_like
initial values of the parameters of the model
default: from model
keep : dict of {int:float}
dictionary of indices (int) to be kept at a fixed value (float)
The values of keep are only valid for *this* fit
See also `AmoebaFitter( ..., keep=dict )`
size : float or array_like
step size of the simplex
seed : int
for random number generator
temp : float
temperature of annealing (0 is no annealing)
limits : None or list of 2 floats or list of 2 array_like
None : no limits applied
[lo,hi] : low and high limits for all values
[la,ha] : low array and high array limits for the values
maxiter : int
max number of iterations
tolerance : float
stops when ( |hi-lo| / (|hi|+|lo|) ) < tolerance
cooling : float
cooling factor when annealing
steps : int
number of cycles in each cooling step.
verbose : int
0 : silent
1 : print results to output
2 : print some info every 100 iterations
3 : print some info all iterations
plot : bool
plot the results.
callback : callable
is called each iteration as
`val = callback( val )`
where `val` is the minimizable array
"""
fitIndex, data, weights = self.fitprolog( data, weights=weights,
accuracy=accuracy, keep=keep )
func = self.makeFuncs( data, weights=weights, index=fitIndex, ret=1 )
if par0 is None :
par0 = self.model.parameters
if fitIndex is not None and len( fitIndex ) < len( par0 ) :
par0 = par0[fitIndex]
kwargs = {}
if size is not None :
kwargs["size"] = size
if seed is not None :
kwargs["seed"] = seed
if temp is not None :
kwargs["temp"] = temp
if limits is not None :
kwargs["limits"] = limits
if maxiter is not None :
kwargs["maxiter"] = maxiter
if tolerance is not None :
kwargs["reltol"] = tolerance
kwargs["abstol"] = tolerance
if cooling is not None :
kwargs["cooling"] = cooling
if steps is not None :
kwargs["steps"] = steps
if verbose is not None :
kwargs["verbose"] = verbose
if callback is not None :
kwargs["callback"] = callback
amoeba = AnnealingAmoeba( func, par0, **kwargs )
par = amoeba.minimize()
parameters = self.insertParameters( par, index=fitIndex )
self.model.parameters = parameters
if self.isChisq :
self.chisq = amoeba.fopt
else :
self.logLikelihood = -amoeba.fopt
self.chisq = self.chiSquared( data, weights=weights )
self.iter = amoeba.iter
self.ntrans = amoeba.ncalls
self.simplex = amoeba.simplex
self.values = amoeba.values
# plot = plot or ( verbose == 2 )
self.fitpostscript( data, plot=plot )
return parameters
def __str__( self ):
""" Return name of the fitter. """
return "AmoebaFitter"
|
dokesterREPO_NAMEBayesicFittingPATH_START.@BayesicFitting_extracted@BayesicFitting-master@BayesicFitting@[email protected]@.PATH_END.py
|
{
"filename": "_xref.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/colorbar/_xref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XrefValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="xref", parent_name="surface.colorbar", **kwargs):
super(XrefValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs.pop("values", ["container", "paper"]),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@colorbar@[email protected]_END.py
|
{
"filename": "__init__.py",
"repo_name": "bwinkel/cygrid",
"repo_path": "cygrid_extracted/cygrid-master/cygrid/__init__.py",
"type": "Python"
}
|
from .cygrid import *
from .healpix import *
from .hphashtab import *
from .helpers import *
from .mock import *
from .init_testrunner import *
from .version import version
__version__ = version
|
bwinkelREPO_NAMEcygridPATH_START.@cygrid_extracted@cygrid-master@cygrid@[email protected]_END.py
|
{
"filename": "setup.py",
"repo_name": "Keck-DataReductionPipelines/KPF-Pipeline",
"repo_path": "KPF-Pipeline_extracted/KPF-Pipeline-master/setup.py",
"type": "Python"
}
|
from setuptools import setup, find_packages
import re
def get_property(prop, project):
result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(prop),
open(project + '/__init__.py').read())
return result.group(1)
# reqs = []
# for line in open('requirements.txt', 'r').readlines():
# reqs.append(line)
setup(
name="kpfpipe",
version=get_property('__version__', 'kpfpipe'),
author="BJ Fulton, Arpita Roy, Andrew Howard",
packages=find_packages(),
entry_points={'console_scripts': ['kpf=kpfpipe.cli:main']},
)
|
Keck-DataReductionPipelinesREPO_NAMEKPF-PipelinePATH_START.@KPF-Pipeline_extracted@[email protected]@.PATH_END.py
|
{
"filename": "_lineposition.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymap/legendgrouptitle/font/_lineposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(
self,
plotly_name="lineposition",
parent_name="densitymap.legendgrouptitle.font",
**kwargs,
):
super(LinepositionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
extras=kwargs.pop("extras", ["none"]),
flags=kwargs.pop("flags", ["under", "over", "through"]),
**kwargs,
)
|
[email protected][email protected]@packages@python@plotly@plotly@validators@densitymap@legendgrouptitle@font@[email protected]_END.py
|